[llvm] [RISCV] Omit "@plt" in assembler output "call foo@plt" (PR #72467)

Fangrui Song via llvm-commits llvm-commits@lists.llvm.org
Sun Jan 7 12:05:45 PST 2024


https://github.com/MaskRay updated https://github.com/llvm/llvm-project/pull/72467

From 0e554b4f65b69276ce79a4187d683ba3dccbd295 Mon Sep 17 00:00:00 2001
From: Fangrui Song <i@maskray.me>
Date: Wed, 15 Nov 2023 18:07:57 -0800
Subject: [PATCH] [RISCV] Omit "@plt" in assembly output "call foo@plt"

The R_RISCV_CALL/R_RISCV_CALL_PLT distinction is unnecessary, and
R_RISCV_CALL has been deprecated. Since https://reviews.llvm.org/D132530,
`call foo` assembles to R_RISCV_CALL_PLT. The `@plt` suffix is no longer
useful and can be removed now (matching AArch64 and PowerPC).

The GNU assembler has assembled `call foo` to R_RISCV_CALL_PLT since
2022-09 (commit 70f35d72ef04cd23771875c1661c9975044a749c).

Without this patch, unconditionally changing MO_CALL to MO_PLT could
create `jump .L1@plt, a0`, which is invalid in both the LLVM integrated
assembler and the GNU assembler.
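
To illustrate the user-visible effect (a hand-written sketch, not taken
from the test updates below; `foo` is a placeholder symbol):

  # Input: both spellings are accepted by the assembler and produce
  # the R_RISCV_CALL_PLT relocation.
  call foo
  call foo@plt

  # Printed output: previously emitted as "call foo@plt"; after this
  # patch the suffix is omitted.
  call foo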
---
 .../Target/RISCV/AsmParser/RISCVAsmParser.cpp |    5 +-
 .../Target/RISCV/MCTargetDesc/RISCVMCExpr.cpp |    2 -
 llvm/test/CodeGen/RISCV/GlobalISel/vararg.ll  |   24 +-
 llvm/test/CodeGen/RISCV/addrspacecast.ll      |    4 +-
 llvm/test/CodeGen/RISCV/aext-to-sext.ll       |    2 +-
 llvm/test/CodeGen/RISCV/alloca.ll             |    6 +-
 llvm/test/CodeGen/RISCV/analyze-branch.ll     |    8 +-
 llvm/test/CodeGen/RISCV/atomic-cmpxchg.ll     |  180 +--
 llvm/test/CodeGen/RISCV/atomic-load-store.ll  |  144 +--
 llvm/test/CodeGen/RISCV/atomic-rmw-discard.ll |   18 +-
 llvm/test/CodeGen/RISCV/atomic-rmw-sub.ll     |   20 +-
 llvm/test/CodeGen/RISCV/atomic-rmw.ll         | 1070 ++++++++---------
 llvm/test/CodeGen/RISCV/atomic-signext.ll     |  278 ++---
 .../CodeGen/RISCV/atomicrmw-uinc-udec-wrap.ll |   36 +-
 llvm/test/CodeGen/RISCV/bf16-promote.ll       |   14 +-
 llvm/test/CodeGen/RISCV/bfloat-br-fcmp.ll     |   68 +-
 llvm/test/CodeGen/RISCV/bfloat-convert.ll     |   74 +-
 llvm/test/CodeGen/RISCV/bfloat-frem.ll        |    4 +-
 llvm/test/CodeGen/RISCV/bfloat-mem.ll         |    8 +-
 llvm/test/CodeGen/RISCV/bfloat.ll             |   76 +-
 llvm/test/CodeGen/RISCV/bittest.ll            |  200 +--
 llvm/test/CodeGen/RISCV/byval.ll              |    2 +-
 .../test/CodeGen/RISCV/callee-saved-fpr32s.ll |   12 +-
 .../test/CodeGen/RISCV/callee-saved-fpr64s.ll |    8 +-
 llvm/test/CodeGen/RISCV/callee-saved-gprs.ll  |   32 +-
 llvm/test/CodeGen/RISCV/calling-conv-half.ll  |   96 +-
 .../RISCV/calling-conv-ilp32-ilp32f-common.ll |   16 +-
 ...calling-conv-ilp32-ilp32f-ilp32d-common.ll |   48 +-
 llvm/test/CodeGen/RISCV/calling-conv-ilp32.ll |   16 +-
 .../test/CodeGen/RISCV/calling-conv-ilp32d.ll |   12 +-
 .../calling-conv-ilp32f-ilp32d-common.ll      |   10 +-
 .../RISCV/calling-conv-lp64-lp64f-common.ll   |    6 +-
 .../calling-conv-lp64-lp64f-lp64d-common.ll   |   24 +-
 llvm/test/CodeGen/RISCV/calling-conv-lp64.ll  |   16 +-
 .../CodeGen/RISCV/calling-conv-rv32f-ilp32.ll |    4 +-
 .../CodeGen/RISCV/calling-conv-sext-zext.ll   |   36 +-
 .../RISCV/calling-conv-vector-on-stack.ll     |    2 +-
 llvm/test/CodeGen/RISCV/calls.ll              |   20 +-
 llvm/test/CodeGen/RISCV/cm_mvas_mvsa.ll       |   48 +-
 llvm/test/CodeGen/RISCV/condops.ll            |   36 +-
 llvm/test/CodeGen/RISCV/copysign-casts.ll     |   10 +-
 llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll    |   60 +-
 .../CodeGen/RISCV/ctz_zero_return_test.ll     |   50 +-
 llvm/test/CodeGen/RISCV/div-by-constant.ll    |   10 +-
 llvm/test/CodeGen/RISCV/div.ll                |  112 +-
 .../test/CodeGen/RISCV/double-arith-strict.ll |   96 +-
 llvm/test/CodeGen/RISCV/double-arith.ll       |  160 +--
 llvm/test/CodeGen/RISCV/double-br-fcmp.ll     |  136 +--
 .../test/CodeGen/RISCV/double-calling-conv.ll |   12 +-
 .../CodeGen/RISCV/double-convert-strict.ll    |   92 +-
 llvm/test/CodeGen/RISCV/double-convert.ll     |  264 ++--
 llvm/test/CodeGen/RISCV/double-fcmp-strict.ll |  128 +-
 llvm/test/CodeGen/RISCV/double-fcmp.ll        |   64 +-
 llvm/test/CodeGen/RISCV/double-frem.ll        |    8 +-
 .../CodeGen/RISCV/double-intrinsics-strict.ll |  284 ++---
 llvm/test/CodeGen/RISCV/double-intrinsics.ll  |  230 ++--
 llvm/test/CodeGen/RISCV/double-mem.ll         |   16 +-
 .../CodeGen/RISCV/double-previous-failure.ll  |   12 +-
 .../CodeGen/RISCV/double-round-conv-sat.ll    |   96 +-
 llvm/test/CodeGen/RISCV/double-round-conv.ll  |  100 +-
 .../RISCV/double-stack-spill-restore.ll       |    8 +-
 llvm/test/CodeGen/RISCV/eh-dwarf-cfa.ll       |    4 +-
 llvm/test/CodeGen/RISCV/emutls.ll             |   12 +-
 .../RISCV/exception-pointer-register.ll       |   12 +-
 llvm/test/CodeGen/RISCV/fastcc-float.ll       |    2 +-
 llvm/test/CodeGen/RISCV/fastcc-int.ll         |    4 +-
 .../CodeGen/RISCV/fastcc-without-f-reg.ll     |   24 +-
 llvm/test/CodeGen/RISCV/fli-licm.ll           |    4 +-
 llvm/test/CodeGen/RISCV/float-arith-strict.ll |   96 +-
 llvm/test/CodeGen/RISCV/float-arith.ll        |  164 +--
 .../RISCV/float-bit-preserving-dagcombines.ll |   48 +-
 llvm/test/CodeGen/RISCV/float-br-fcmp.ll      |  160 +--
 .../CodeGen/RISCV/float-convert-strict.ll     |   84 +-
 llvm/test/CodeGen/RISCV/float-convert.ll      |  256 ++--
 llvm/test/CodeGen/RISCV/float-fcmp-strict.ll  |  128 +-
 llvm/test/CodeGen/RISCV/float-fcmp.ll         |   64 +-
 llvm/test/CodeGen/RISCV/float-frem.ll         |   12 +-
 .../CodeGen/RISCV/float-intrinsics-strict.ll  |  284 ++---
 llvm/test/CodeGen/RISCV/float-intrinsics.ll   |  220 ++--
 llvm/test/CodeGen/RISCV/float-mem.ll          |   16 +-
 .../CodeGen/RISCV/float-round-conv-sat.ll     |   48 +-
 llvm/test/CodeGen/RISCV/float-round-conv.ll   |   40 +-
 llvm/test/CodeGen/RISCV/float-zfa.ll          |    2 +-
 llvm/test/CodeGen/RISCV/fmax-fmin.ll          |   36 +-
 .../test/CodeGen/RISCV/fold-addi-loadstore.ll |    8 +-
 llvm/test/CodeGen/RISCV/forced-atomics.ll     |  584 ++++-----
 llvm/test/CodeGen/RISCV/fp128.ll              |    6 +-
 llvm/test/CodeGen/RISCV/fp16-promote.ll       |   20 +-
 llvm/test/CodeGen/RISCV/fpclamptosat.ll       |  212 ++--
 llvm/test/CodeGen/RISCV/frame-info.ll         |   24 +-
 llvm/test/CodeGen/RISCV/frame.ll              |    4 +-
 .../CodeGen/RISCV/frameaddr-returnaddr.ll     |    4 +-
 llvm/test/CodeGen/RISCV/ghccc-rv32.ll         |    2 +-
 llvm/test/CodeGen/RISCV/ghccc-rv64.ll         |    2 +-
 .../test/CodeGen/RISCV/ghccc-without-f-reg.ll |    4 +-
 llvm/test/CodeGen/RISCV/half-arith.ll         |  704 +++++------
 llvm/test/CodeGen/RISCV/half-br-fcmp.ll       |  272 ++---
 .../test/CodeGen/RISCV/half-convert-strict.ll |   88 +-
 llvm/test/CodeGen/RISCV/half-convert.ll       |  874 +++++++-------
 llvm/test/CodeGen/RISCV/half-frem.ll          |   16 +-
 llvm/test/CodeGen/RISCV/half-intrinsics.ll    |  536 ++++-----
 llvm/test/CodeGen/RISCV/half-mem.ll           |   32 +-
 .../test/CodeGen/RISCV/half-round-conv-sat.ll |   96 +-
 llvm/test/CodeGen/RISCV/half-round-conv.ll    |   80 +-
 .../CodeGen/RISCV/hoist-global-addr-base.ll   |    4 +-
 .../CodeGen/RISCV/interrupt-attr-callee.ll    |   18 +-
 .../CodeGen/RISCV/interrupt-attr-nocall.ll    |   12 +-
 llvm/test/CodeGen/RISCV/interrupt-attr.ll     |   24 +-
 .../RISCV/intrinsic-cttz-elts-vscale.ll       |    2 +-
 llvm/test/CodeGen/RISCV/libcall-tail-calls.ll |  136 +--
 llvm/test/CodeGen/RISCV/llvm.exp10.ll         |  212 ++--
 llvm/test/CodeGen/RISCV/llvm.frexp.ll         |  348 +++---
 ...e-outliner-and-machine-copy-propagation.ll |   20 +-
 .../CodeGen/RISCV/machine-outliner-throw.ll   |    8 +-
 .../RISCV/machinelicm-address-pseudos.ll      |    4 +-
 .../CodeGen/RISCV/macro-fusion-lui-addi.ll    |    6 +-
 llvm/test/CodeGen/RISCV/mem.ll                |    2 +-
 llvm/test/CodeGen/RISCV/mem64.ll              |    2 +-
 llvm/test/CodeGen/RISCV/memcpy.ll             |   20 +-
 llvm/test/CodeGen/RISCV/miss-sp-restore-eh.ll |    8 +-
 llvm/test/CodeGen/RISCV/mul.ll                |   54 +-
 llvm/test/CodeGen/RISCV/nest-register.ll      |    6 +-
 llvm/test/CodeGen/RISCV/nomerge.ll            |   10 +-
 .../RISCV/out-of-reach-emergency-slot.mir     |    4 +-
 .../test/CodeGen/RISCV/overflow-intrinsics.ll |    8 +-
 llvm/test/CodeGen/RISCV/pr51206.ll            |    2 +-
 llvm/test/CodeGen/RISCV/pr63816.ll            |   16 +-
 llvm/test/CodeGen/RISCV/push-pop-popret.ll    |  144 +--
 .../RISCV/reduce-unnecessary-extension.ll     |   16 +-
 ...regalloc-last-chance-recoloring-failure.ll |    4 +-
 llvm/test/CodeGen/RISCV/rem.ll                |   72 +-
 llvm/test/CodeGen/RISCV/remat.ll              |    8 +-
 .../CodeGen/RISCV/rv32i-rv64i-float-double.ll |   16 +-
 llvm/test/CodeGen/RISCV/rv32i-rv64i-half.ll   |   28 +-
 llvm/test/CodeGen/RISCV/rv32xtheadbb.ll       |   12 +-
 llvm/test/CodeGen/RISCV/rv32zbb.ll            |   30 +-
 llvm/test/CodeGen/RISCV/rv64-large-stack.ll   |    2 +-
 llvm/test/CodeGen/RISCV/rv64-legal-i32/div.ll |   50 +-
 .../CodeGen/RISCV/rv64-legal-i32/mem64.ll     |    2 +-
 llvm/test/CodeGen/RISCV/rv64-legal-i32/rem.ll |   32 +-
 .../RISCV/rv64-legal-i32/rv64xtheadbb.ll      |   30 +-
 .../CodeGen/RISCV/rv64-legal-i32/rv64zbb.ll   |   28 +-
 .../CodeGen/RISCV/rv64-legal-i32/rv64zbs.ll   |    4 +-
 .../test/CodeGen/RISCV/rv64i-complex-float.ll |    4 +-
 .../CodeGen/RISCV/rv64i-double-softfloat.ll   |    8 +-
 .../CodeGen/RISCV/rv64i-single-softfloat.ll   |    4 +-
 llvm/test/CodeGen/RISCV/rv64xtheadbb.ll       |   22 +-
 llvm/test/CodeGen/RISCV/rv64zbb.ll            |   36 +-
 llvm/test/CodeGen/RISCV/rv64zbs.ll            |    4 +-
 .../CodeGen/RISCV/rvv/calling-conv-fastcc.ll  |   12 +-
 llvm/test/CodeGen/RISCV/rvv/calling-conv.ll   |    4 +-
 .../rvv/fixed-vectors-calling-conv-fastcc.ll  |   16 +-
 .../RISCV/rvv/fixed-vectors-calling-conv.ll   |   40 +-
 .../rvv/fixed-vectors-emergency-slot.mir      |    2 +-
 .../RISCV/rvv/fixed-vectors-extract.ll        |    4 +-
 .../CodeGen/RISCV/rvv/fixed-vectors-llrint.ll |  100 +-
 .../rvv/fixed-vectors-reduction-int-vp.ll     |   28 +-
 .../CodeGen/RISCV/rvv/fpclamptosat_vec.ll     |  480 ++++----
 .../RISCV/rvv/large-rvv-stack-size.mir        |    2 +-
 llvm/test/CodeGen/RISCV/rvv/localvar.ll       |    4 +-
 llvm/test/CodeGen/RISCV/rvv/memory-args.ll    |    2 +-
 .../CodeGen/RISCV/rvv/no-reserved-frame.ll    |    2 +-
 llvm/test/CodeGen/RISCV/rvv/pr63596.ll        |    8 +-
 .../CodeGen/RISCV/rvv/reg-alloc-reserve-bp.ll |    2 +-
 .../RISCV/rvv/rv32-spill-vector-csr.ll        |    4 +-
 .../RISCV/rvv/rv64-spill-vector-csr.ll        |    6 +-
 .../test/CodeGen/RISCV/rvv/rvv-args-by-mem.ll |    2 +-
 .../CodeGen/RISCV/rvv/rvv-stack-align.mir     |   12 +-
 .../CodeGen/RISCV/rvv/scalar-stack-align.ll   |    4 +-
 .../RISCV/rvv/vsetvli-insert-crossbb.ll       |    4 +-
 llvm/test/CodeGen/RISCV/rvv/vxrm-insert.ll    |    4 +-
 llvm/test/CodeGen/RISCV/select-and.ll         |    8 +-
 llvm/test/CodeGen/RISCV/select-cc.ll          |    4 +-
 llvm/test/CodeGen/RISCV/select-or.ll          |    8 +-
 llvm/test/CodeGen/RISCV/setcc-logic.ll        |  112 +-
 llvm/test/CodeGen/RISCV/sextw-removal.ll      |   64 +-
 llvm/test/CodeGen/RISCV/shadowcallstack.ll    |   28 +-
 llvm/test/CodeGen/RISCV/shifts.ll             |    6 +-
 .../CodeGen/RISCV/short-forward-branch-opt.ll |   16 +-
 .../CodeGen/RISCV/shrinkwrap-jump-table.ll    |   12 +-
 llvm/test/CodeGen/RISCV/shrinkwrap.ll         |   16 +-
 llvm/test/CodeGen/RISCV/split-sp-adjust.ll    |    4 +-
 .../CodeGen/RISCV/split-udiv-by-constant.ll   |    8 +-
 .../CodeGen/RISCV/split-urem-by-constant.ll   |    8 +-
 llvm/test/CodeGen/RISCV/srem-lkk.ll           |   30 +-
 .../CodeGen/RISCV/srem-seteq-illegal-types.ll |   32 +-
 llvm/test/CodeGen/RISCV/srem-vector-lkk.ll    |  110 +-
 .../CodeGen/RISCV/stack-protector-target.ll   |    4 +-
 ...realignment-with-variable-sized-objects.ll |    4 +-
 llvm/test/CodeGen/RISCV/stack-realignment.ll  |   64 +-
 llvm/test/CodeGen/RISCV/stack-slot-size.ll    |   12 +-
 llvm/test/CodeGen/RISCV/stack-store-check.ll  |   24 +-
 llvm/test/CodeGen/RISCV/tls-models.ll         |    8 +-
 ...unfold-masked-merge-scalar-variablemask.ll |    8 +-
 llvm/test/CodeGen/RISCV/urem-lkk.ll           |   22 +-
 .../CodeGen/RISCV/urem-seteq-illegal-types.ll |   24 +-
 llvm/test/CodeGen/RISCV/urem-vector-lkk.ll    |  102 +-
 llvm/test/CodeGen/RISCV/vararg.ll             |   60 +-
 llvm/test/CodeGen/RISCV/vlenb.ll              |    4 +-
 llvm/test/CodeGen/RISCV/zbb-cmp-combine.ll    |    8 +-
 llvm/test/CodeGen/RISCV/zcmp-with-float.ll    |   12 +-
 .../RISCV/zfh-half-intrinsics-strict.ll       |   96 +-
 .../RISCV/zfhmin-half-intrinsics-strict.ll    |   96 +-
 llvm/test/MC/RISCV/function-call.s            |    4 +-
 llvm/test/MC/RISCV/tail-call.s                |    2 +-
 205 files changed, 6581 insertions(+), 6584 deletions(-)

diff --git a/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp b/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp
index 23c2b63c8c832f..d616aaeddf4114 100644
--- a/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp
+++ b/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp
@@ -2035,9 +2035,8 @@ ParseStatus RISCVAsmParser::parseCallSymbol(OperandVector &Operands) {
 
   SMLoc E = SMLoc::getFromPointer(S.getPointer() + Identifier.size());
 
-  RISCVMCExpr::VariantKind Kind = RISCVMCExpr::VK_RISCV_CALL;
-  if (Identifier.consume_back("@plt"))
-    Kind = RISCVMCExpr::VK_RISCV_CALL_PLT;
+  RISCVMCExpr::VariantKind Kind = RISCVMCExpr::VK_RISCV_CALL_PLT;
+  (void)Identifier.consume_back("@plt");
 
   MCSymbol *Sym = getContext().getOrCreateSymbol(Identifier);
   Res = MCSymbolRefExpr::create(Sym, MCSymbolRefExpr::VK_None, getContext());
diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMCExpr.cpp b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMCExpr.cpp
index d67351102bc1cd..64ddae61b1bc15 100644
--- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMCExpr.cpp
+++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMCExpr.cpp
@@ -41,8 +41,6 @@ void RISCVMCExpr::printImpl(raw_ostream &OS, const MCAsmInfo *MAI) const {
   if (HasVariant)
     OS << '%' << getVariantKindName(getKind()) << '(';
   Expr->print(OS, MAI);
-  if (Kind == VK_RISCV_CALL_PLT)
-    OS << "@plt";
   if (HasVariant)
     OS << ')';
 }
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/vararg.ll b/llvm/test/CodeGen/RISCV/GlobalISel/vararg.ll
index 501a3c0ce74380..7b110e562e0533 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/vararg.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/vararg.ll
@@ -167,7 +167,7 @@ define i32 @va1_va_arg_alloca(ptr %fmt, ...) nounwind {
 ; RV32-NEXT:    andi a0, a0, -16
 ; RV32-NEXT:    sub a0, sp, a0
 ; RV32-NEXT:    mv sp, a0
-; RV32-NEXT:    call notdead@plt
+; RV32-NEXT:    call notdead
 ; RV32-NEXT:    mv a0, s1
 ; RV32-NEXT:    addi sp, s0, -16
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -204,7 +204,7 @@ define i32 @va1_va_arg_alloca(ptr %fmt, ...) nounwind {
 ; RV64-NEXT:    andi a0, a0, -16
 ; RV64-NEXT:    sub a0, sp, a0
 ; RV64-NEXT:    mv sp, a0
-; RV64-NEXT:    call notdead@plt
+; RV64-NEXT:    call notdead
 ; RV64-NEXT:    mv a0, s1
 ; RV64-NEXT:    addi sp, s0, -32
 ; RV64-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
@@ -229,7 +229,7 @@ define void @va1_caller() nounwind {
 ; RV32-NEXT:    lui a3, 261888
 ; RV32-NEXT:    li a4, 2
 ; RV32-NEXT:    li a2, 0
-; RV32-NEXT:    call va1@plt
+; RV32-NEXT:    call va1
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
@@ -241,7 +241,7 @@ define void @va1_caller() nounwind {
 ; LP64-NEXT:    lui a0, %hi(.LCPI3_0)
 ; LP64-NEXT:    ld a1, %lo(.LCPI3_0)(a0)
 ; LP64-NEXT:    li a2, 2
-; LP64-NEXT:    call va1@plt
+; LP64-NEXT:    call va1
 ; LP64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; LP64-NEXT:    addi sp, sp, 16
 ; LP64-NEXT:    ret
@@ -255,7 +255,7 @@ define void @va1_caller() nounwind {
 ; LP64F-NEXT:    fmv.d.x fa5, a0
 ; LP64F-NEXT:    li a2, 2
 ; LP64F-NEXT:    fmv.x.d a1, fa5
-; LP64F-NEXT:    call va1@plt
+; LP64F-NEXT:    call va1
 ; LP64F-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; LP64F-NEXT:    addi sp, sp, 16
 ; LP64F-NEXT:    ret
@@ -269,7 +269,7 @@ define void @va1_caller() nounwind {
 ; LP64D-NEXT:    fmv.d.x fa5, a0
 ; LP64D-NEXT:    li a2, 2
 ; LP64D-NEXT:    fmv.x.d a1, fa5
-; LP64D-NEXT:    call va1@plt
+; LP64D-NEXT:    call va1
 ; LP64D-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; LP64D-NEXT:    addi sp, sp, 16
 ; LP64D-NEXT:    ret
@@ -473,7 +473,7 @@ define void @va2_caller() nounwind {
 ; RV32-NEXT:    addi sp, sp, -16
 ; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    li a1, 1
-; RV32-NEXT:    call va2@plt
+; RV32-NEXT:    call va2
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
@@ -483,7 +483,7 @@ define void @va2_caller() nounwind {
 ; RV64-NEXT:    addi sp, sp, -16
 ; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-NEXT:    li a1, 1
-; RV64-NEXT:    call va2@plt
+; RV64-NEXT:    call va2
 ; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    addi sp, sp, 16
 ; RV64-NEXT:    ret
@@ -701,7 +701,7 @@ define void @va3_caller() nounwind {
 ; RV32-NEXT:    li a0, 2
 ; RV32-NEXT:    li a1, 1111
 ; RV32-NEXT:    li a2, 0
-; RV32-NEXT:    call va3@plt
+; RV32-NEXT:    call va3
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
@@ -714,7 +714,7 @@ define void @va3_caller() nounwind {
 ; RV64-NEXT:    addiw a2, a0, -480
 ; RV64-NEXT:    li a0, 2
 ; RV64-NEXT:    li a1, 1111
-; RV64-NEXT:    call va3@plt
+; RV64-NEXT:    call va3
 ; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    addi sp, sp, 16
 ; RV64-NEXT:    ret
@@ -749,7 +749,7 @@ define i32 @va4_va_copy(i32 %argno, ...) nounwind {
 ; RV32-NEXT:    lw s0, 0(a0)
 ; RV32-NEXT:    sw a2, 0(a1)
 ; RV32-NEXT:    lw a0, 0(sp)
-; RV32-NEXT:    call notdead@plt
+; RV32-NEXT:    call notdead
 ; RV32-NEXT:    lw a0, 4(sp)
 ; RV32-NEXT:    addi a0, a0, 3
 ; RV32-NEXT:    andi a0, a0, -4
@@ -803,7 +803,7 @@ define i32 @va4_va_copy(i32 %argno, ...) nounwind {
 ; RV64-NEXT:    lwu a1, 0(sp)
 ; RV64-NEXT:    slli a0, a0, 32
 ; RV64-NEXT:    or a0, a0, a1
-; RV64-NEXT:    call notdead@plt
+; RV64-NEXT:    call notdead
 ; RV64-NEXT:    ld a0, 8(sp)
 ; RV64-NEXT:    addi a0, a0, 3
 ; RV64-NEXT:    andi a0, a0, -4
diff --git a/llvm/test/CodeGen/RISCV/addrspacecast.ll b/llvm/test/CodeGen/RISCV/addrspacecast.ll
index 7fe041a8ac6a10..e55a57a5167822 100644
--- a/llvm/test/CodeGen/RISCV/addrspacecast.ll
+++ b/llvm/test/CodeGen/RISCV/addrspacecast.ll
@@ -26,7 +26,7 @@ define void @cast1(ptr %ptr) {
 ; RV32I-NEXT:    .cfi_def_cfa_offset 16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    .cfi_offset ra, -4
-; RV32I-NEXT:    call foo@plt
+; RV32I-NEXT:    call foo
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -37,7 +37,7 @@ define void @cast1(ptr %ptr) {
 ; RV64I-NEXT:    .cfi_def_cfa_offset 16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    .cfi_offset ra, -8
-; RV64I-NEXT:    call foo@plt
+; RV64I-NEXT:    call foo
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
diff --git a/llvm/test/CodeGen/RISCV/aext-to-sext.ll b/llvm/test/CodeGen/RISCV/aext-to-sext.ll
index 09803012001c26..888ea666d71316 100644
--- a/llvm/test/CodeGen/RISCV/aext-to-sext.ll
+++ b/llvm/test/CodeGen/RISCV/aext-to-sext.ll
@@ -19,7 +19,7 @@ define void @quux(i32 signext %arg, i32 signext %arg1) nounwind {
 ; RV64I-NEXT:    subw s0, a1, a0
 ; RV64I-NEXT:  .LBB0_2: # %bb2
 ; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV64I-NEXT:    call hoge@plt
+; RV64I-NEXT:    call hoge
 ; RV64I-NEXT:    addiw s0, s0, -1
 ; RV64I-NEXT:    bnez s0, .LBB0_2
 ; RV64I-NEXT:  # %bb.3:
diff --git a/llvm/test/CodeGen/RISCV/alloca.ll b/llvm/test/CodeGen/RISCV/alloca.ll
index 34cac429f30533..bcb0592c18f59f 100644
--- a/llvm/test/CodeGen/RISCV/alloca.ll
+++ b/llvm/test/CodeGen/RISCV/alloca.ll
@@ -18,7 +18,7 @@ define void @simple_alloca(i32 %n) nounwind {
 ; RV32I-NEXT:    andi a0, a0, -16
 ; RV32I-NEXT:    sub a0, sp, a0
 ; RV32I-NEXT:    mv sp, a0
-; RV32I-NEXT:    call notdead@plt
+; RV32I-NEXT:    call notdead
 ; RV32I-NEXT:    addi sp, s0, -16
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
@@ -45,7 +45,7 @@ define void @scoped_alloca(i32 %n) nounwind {
 ; RV32I-NEXT:    andi a0, a0, -16
 ; RV32I-NEXT:    sub a0, sp, a0
 ; RV32I-NEXT:    mv sp, a0
-; RV32I-NEXT:    call notdead@plt
+; RV32I-NEXT:    call notdead
 ; RV32I-NEXT:    mv sp, s1
 ; RV32I-NEXT:    addi sp, s0, -16
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -91,7 +91,7 @@ define void @alloca_callframe(i32 %n) nounwind {
 ; RV32I-NEXT:    li a6, 7
 ; RV32I-NEXT:    li a7, 8
 ; RV32I-NEXT:    sw t0, 0(sp)
-; RV32I-NEXT:    call func@plt
+; RV32I-NEXT:    call func
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    addi sp, s0, -16
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/analyze-branch.ll b/llvm/test/CodeGen/RISCV/analyze-branch.ll
index e33e6b65c6f14d..768a11a717063d 100644
--- a/llvm/test/CodeGen/RISCV/analyze-branch.ll
+++ b/llvm/test/CodeGen/RISCV/analyze-branch.ll
@@ -20,13 +20,13 @@ define void @test_bcc_fallthrough_taken(i32 %in) nounwind {
 ; RV32I-NEXT:    li a1, 42
 ; RV32I-NEXT:    bne a0, a1, .LBB0_3
 ; RV32I-NEXT:  # %bb.1: # %true
-; RV32I-NEXT:    call test_true@plt
+; RV32I-NEXT:    call test_true
 ; RV32I-NEXT:  .LBB0_2: # %true
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ; RV32I-NEXT:  .LBB0_3: # %false
-; RV32I-NEXT:    call test_false@plt
+; RV32I-NEXT:    call test_false
 ; RV32I-NEXT:    j .LBB0_2
   %tst = icmp eq i32 %in, 42
   br i1 %tst, label %true, label %false, !prof !0
@@ -52,13 +52,13 @@ define void @test_bcc_fallthrough_nottaken(i32 %in) nounwind {
 ; RV32I-NEXT:    li a1, 42
 ; RV32I-NEXT:    beq a0, a1, .LBB1_3
 ; RV32I-NEXT:  # %bb.1: # %false
-; RV32I-NEXT:    call test_false@plt
+; RV32I-NEXT:    call test_false
 ; RV32I-NEXT:  .LBB1_2: # %true
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ; RV32I-NEXT:  .LBB1_3: # %true
-; RV32I-NEXT:    call test_true@plt
+; RV32I-NEXT:    call test_true
 ; RV32I-NEXT:    j .LBB1_2
   %tst = icmp eq i32 %in, 42
   br i1 %tst, label %true, label %false, !prof !1
diff --git a/llvm/test/CodeGen/RISCV/atomic-cmpxchg.ll b/llvm/test/CodeGen/RISCV/atomic-cmpxchg.ll
index eea4cb72938af2..46ed01b11584f9 100644
--- a/llvm/test/CodeGen/RISCV/atomic-cmpxchg.ll
+++ b/llvm/test/CodeGen/RISCV/atomic-cmpxchg.ll
@@ -21,7 +21,7 @@ define void @cmpxchg_i8_monotonic_monotonic(ptr %ptr, i8 %cmp, i8 %val) nounwind
 ; RV32I-NEXT:    addi a1, sp, 11
 ; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:    li a4, 0
-; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -57,7 +57,7 @@ define void @cmpxchg_i8_monotonic_monotonic(ptr %ptr, i8 %cmp, i8 %val) nounwind
 ; RV64I-NEXT:    addi a1, sp, 7
 ; RV64I-NEXT:    li a3, 0
 ; RV64I-NEXT:    li a4, 0
-; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -97,7 +97,7 @@ define void @cmpxchg_i8_acquire_monotonic(ptr %ptr, i8 %cmp, i8 %val) nounwind {
 ; RV32I-NEXT:    addi a1, sp, 11
 ; RV32I-NEXT:    li a3, 2
 ; RV32I-NEXT:    li a4, 0
-; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -156,7 +156,7 @@ define void @cmpxchg_i8_acquire_monotonic(ptr %ptr, i8 %cmp, i8 %val) nounwind {
 ; RV64I-NEXT:    addi a1, sp, 7
 ; RV64I-NEXT:    li a3, 2
 ; RV64I-NEXT:    li a4, 0
-; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -219,7 +219,7 @@ define void @cmpxchg_i8_acquire_acquire(ptr %ptr, i8 %cmp, i8 %val) nounwind {
 ; RV32I-NEXT:    addi a1, sp, 11
 ; RV32I-NEXT:    li a3, 2
 ; RV32I-NEXT:    li a4, 2
-; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -278,7 +278,7 @@ define void @cmpxchg_i8_acquire_acquire(ptr %ptr, i8 %cmp, i8 %val) nounwind {
 ; RV64I-NEXT:    addi a1, sp, 7
 ; RV64I-NEXT:    li a3, 2
 ; RV64I-NEXT:    li a4, 2
-; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -341,7 +341,7 @@ define void @cmpxchg_i8_release_monotonic(ptr %ptr, i8 %cmp, i8 %val) nounwind {
 ; RV32I-NEXT:    addi a1, sp, 11
 ; RV32I-NEXT:    li a3, 3
 ; RV32I-NEXT:    li a4, 0
-; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -400,7 +400,7 @@ define void @cmpxchg_i8_release_monotonic(ptr %ptr, i8 %cmp, i8 %val) nounwind {
 ; RV64I-NEXT:    addi a1, sp, 7
 ; RV64I-NEXT:    li a3, 3
 ; RV64I-NEXT:    li a4, 0
-; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -463,7 +463,7 @@ define void @cmpxchg_i8_release_acquire(ptr %ptr, i8 %cmp, i8 %val) nounwind {
 ; RV32I-NEXT:    addi a1, sp, 11
 ; RV32I-NEXT:    li a3, 3
 ; RV32I-NEXT:    li a4, 2
-; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -522,7 +522,7 @@ define void @cmpxchg_i8_release_acquire(ptr %ptr, i8 %cmp, i8 %val) nounwind {
 ; RV64I-NEXT:    addi a1, sp, 7
 ; RV64I-NEXT:    li a3, 3
 ; RV64I-NEXT:    li a4, 2
-; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -585,7 +585,7 @@ define void @cmpxchg_i8_acq_rel_monotonic(ptr %ptr, i8 %cmp, i8 %val) nounwind {
 ; RV32I-NEXT:    addi a1, sp, 11
 ; RV32I-NEXT:    li a3, 4
 ; RV32I-NEXT:    li a4, 0
-; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -644,7 +644,7 @@ define void @cmpxchg_i8_acq_rel_monotonic(ptr %ptr, i8 %cmp, i8 %val) nounwind {
 ; RV64I-NEXT:    addi a1, sp, 7
 ; RV64I-NEXT:    li a3, 4
 ; RV64I-NEXT:    li a4, 0
-; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -707,7 +707,7 @@ define void @cmpxchg_i8_acq_rel_acquire(ptr %ptr, i8 %cmp, i8 %val) nounwind {
 ; RV32I-NEXT:    addi a1, sp, 11
 ; RV32I-NEXT:    li a3, 4
 ; RV32I-NEXT:    li a4, 2
-; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -766,7 +766,7 @@ define void @cmpxchg_i8_acq_rel_acquire(ptr %ptr, i8 %cmp, i8 %val) nounwind {
 ; RV64I-NEXT:    addi a1, sp, 7
 ; RV64I-NEXT:    li a3, 4
 ; RV64I-NEXT:    li a4, 2
-; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -829,7 +829,7 @@ define void @cmpxchg_i8_seq_cst_monotonic(ptr %ptr, i8 %cmp, i8 %val) nounwind {
 ; RV32I-NEXT:    addi a1, sp, 11
 ; RV32I-NEXT:    li a3, 5
 ; RV32I-NEXT:    li a4, 0
-; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -865,7 +865,7 @@ define void @cmpxchg_i8_seq_cst_monotonic(ptr %ptr, i8 %cmp, i8 %val) nounwind {
 ; RV64I-NEXT:    addi a1, sp, 7
 ; RV64I-NEXT:    li a3, 5
 ; RV64I-NEXT:    li a4, 0
-; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -905,7 +905,7 @@ define void @cmpxchg_i8_seq_cst_acquire(ptr %ptr, i8 %cmp, i8 %val) nounwind {
 ; RV32I-NEXT:    addi a1, sp, 11
 ; RV32I-NEXT:    li a3, 5
 ; RV32I-NEXT:    li a4, 2
-; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -941,7 +941,7 @@ define void @cmpxchg_i8_seq_cst_acquire(ptr %ptr, i8 %cmp, i8 %val) nounwind {
 ; RV64I-NEXT:    addi a1, sp, 7
 ; RV64I-NEXT:    li a3, 5
 ; RV64I-NEXT:    li a4, 2
-; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -981,7 +981,7 @@ define void @cmpxchg_i8_seq_cst_seq_cst(ptr %ptr, i8 %cmp, i8 %val) nounwind {
 ; RV32I-NEXT:    addi a1, sp, 11
 ; RV32I-NEXT:    li a3, 5
 ; RV32I-NEXT:    li a4, 5
-; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1017,7 +1017,7 @@ define void @cmpxchg_i8_seq_cst_seq_cst(ptr %ptr, i8 %cmp, i8 %val) nounwind {
 ; RV64I-NEXT:    addi a1, sp, 7
 ; RV64I-NEXT:    li a3, 5
 ; RV64I-NEXT:    li a4, 5
-; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1057,7 +1057,7 @@ define void @cmpxchg_i16_monotonic_monotonic(ptr %ptr, i16 %cmp, i16 %val) nounw
 ; RV32I-NEXT:    addi a1, sp, 10
 ; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:    li a4, 0
-; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1094,7 +1094,7 @@ define void @cmpxchg_i16_monotonic_monotonic(ptr %ptr, i16 %cmp, i16 %val) nounw
 ; RV64I-NEXT:    addi a1, sp, 6
 ; RV64I-NEXT:    li a3, 0
 ; RV64I-NEXT:    li a4, 0
-; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1135,7 +1135,7 @@ define void @cmpxchg_i16_acquire_monotonic(ptr %ptr, i16 %cmp, i16 %val) nounwin
 ; RV32I-NEXT:    addi a1, sp, 10
 ; RV32I-NEXT:    li a3, 2
 ; RV32I-NEXT:    li a4, 0
-; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1196,7 +1196,7 @@ define void @cmpxchg_i16_acquire_monotonic(ptr %ptr, i16 %cmp, i16 %val) nounwin
 ; RV64I-NEXT:    addi a1, sp, 6
 ; RV64I-NEXT:    li a3, 2
 ; RV64I-NEXT:    li a4, 0
-; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1261,7 +1261,7 @@ define void @cmpxchg_i16_acquire_acquire(ptr %ptr, i16 %cmp, i16 %val) nounwind
 ; RV32I-NEXT:    addi a1, sp, 10
 ; RV32I-NEXT:    li a3, 2
 ; RV32I-NEXT:    li a4, 2
-; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1322,7 +1322,7 @@ define void @cmpxchg_i16_acquire_acquire(ptr %ptr, i16 %cmp, i16 %val) nounwind
 ; RV64I-NEXT:    addi a1, sp, 6
 ; RV64I-NEXT:    li a3, 2
 ; RV64I-NEXT:    li a4, 2
-; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1387,7 +1387,7 @@ define void @cmpxchg_i16_release_monotonic(ptr %ptr, i16 %cmp, i16 %val) nounwin
 ; RV32I-NEXT:    addi a1, sp, 10
 ; RV32I-NEXT:    li a3, 3
 ; RV32I-NEXT:    li a4, 0
-; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1448,7 +1448,7 @@ define void @cmpxchg_i16_release_monotonic(ptr %ptr, i16 %cmp, i16 %val) nounwin
 ; RV64I-NEXT:    addi a1, sp, 6
 ; RV64I-NEXT:    li a3, 3
 ; RV64I-NEXT:    li a4, 0
-; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1513,7 +1513,7 @@ define void @cmpxchg_i16_release_acquire(ptr %ptr, i16 %cmp, i16 %val) nounwind
 ; RV32I-NEXT:    addi a1, sp, 10
 ; RV32I-NEXT:    li a3, 3
 ; RV32I-NEXT:    li a4, 2
-; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1574,7 +1574,7 @@ define void @cmpxchg_i16_release_acquire(ptr %ptr, i16 %cmp, i16 %val) nounwind
 ; RV64I-NEXT:    addi a1, sp, 6
 ; RV64I-NEXT:    li a3, 3
 ; RV64I-NEXT:    li a4, 2
-; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1639,7 +1639,7 @@ define void @cmpxchg_i16_acq_rel_monotonic(ptr %ptr, i16 %cmp, i16 %val) nounwin
 ; RV32I-NEXT:    addi a1, sp, 10
 ; RV32I-NEXT:    li a3, 4
 ; RV32I-NEXT:    li a4, 0
-; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1700,7 +1700,7 @@ define void @cmpxchg_i16_acq_rel_monotonic(ptr %ptr, i16 %cmp, i16 %val) nounwin
 ; RV64I-NEXT:    addi a1, sp, 6
 ; RV64I-NEXT:    li a3, 4
 ; RV64I-NEXT:    li a4, 0
-; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1765,7 +1765,7 @@ define void @cmpxchg_i16_acq_rel_acquire(ptr %ptr, i16 %cmp, i16 %val) nounwind
 ; RV32I-NEXT:    addi a1, sp, 10
 ; RV32I-NEXT:    li a3, 4
 ; RV32I-NEXT:    li a4, 2
-; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1826,7 +1826,7 @@ define void @cmpxchg_i16_acq_rel_acquire(ptr %ptr, i16 %cmp, i16 %val) nounwind
 ; RV64I-NEXT:    addi a1, sp, 6
 ; RV64I-NEXT:    li a3, 4
 ; RV64I-NEXT:    li a4, 2
-; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1891,7 +1891,7 @@ define void @cmpxchg_i16_seq_cst_monotonic(ptr %ptr, i16 %cmp, i16 %val) nounwin
 ; RV32I-NEXT:    addi a1, sp, 10
 ; RV32I-NEXT:    li a3, 5
 ; RV32I-NEXT:    li a4, 0
-; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1928,7 +1928,7 @@ define void @cmpxchg_i16_seq_cst_monotonic(ptr %ptr, i16 %cmp, i16 %val) nounwin
 ; RV64I-NEXT:    addi a1, sp, 6
 ; RV64I-NEXT:    li a3, 5
 ; RV64I-NEXT:    li a4, 0
-; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1969,7 +1969,7 @@ define void @cmpxchg_i16_seq_cst_acquire(ptr %ptr, i16 %cmp, i16 %val) nounwind
 ; RV32I-NEXT:    addi a1, sp, 10
 ; RV32I-NEXT:    li a3, 5
 ; RV32I-NEXT:    li a4, 2
-; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -2006,7 +2006,7 @@ define void @cmpxchg_i16_seq_cst_acquire(ptr %ptr, i16 %cmp, i16 %val) nounwind
 ; RV64I-NEXT:    addi a1, sp, 6
 ; RV64I-NEXT:    li a3, 5
 ; RV64I-NEXT:    li a4, 2
-; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -2047,7 +2047,7 @@ define void @cmpxchg_i16_seq_cst_seq_cst(ptr %ptr, i16 %cmp, i16 %val) nounwind
 ; RV32I-NEXT:    addi a1, sp, 10
 ; RV32I-NEXT:    li a3, 5
 ; RV32I-NEXT:    li a4, 5
-; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -2084,7 +2084,7 @@ define void @cmpxchg_i16_seq_cst_seq_cst(ptr %ptr, i16 %cmp, i16 %val) nounwind
 ; RV64I-NEXT:    addi a1, sp, 6
 ; RV64I-NEXT:    li a3, 5
 ; RV64I-NEXT:    li a4, 5
-; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -2125,7 +2125,7 @@ define void @cmpxchg_i32_monotonic_monotonic(ptr %ptr, i32 %cmp, i32 %val) nounw
 ; RV32I-NEXT:    addi a1, sp, 8
 ; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:    li a4, 0
-; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_4
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -2149,7 +2149,7 @@ define void @cmpxchg_i32_monotonic_monotonic(ptr %ptr, i32 %cmp, i32 %val) nounw
 ; RV64I-NEXT:    addi a1, sp, 4
 ; RV64I-NEXT:    li a3, 0
 ; RV64I-NEXT:    li a4, 0
-; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_4
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -2178,7 +2178,7 @@ define void @cmpxchg_i32_acquire_monotonic(ptr %ptr, i32 %cmp, i32 %val) nounwin
 ; RV32I-NEXT:    addi a1, sp, 8
 ; RV32I-NEXT:    li a3, 2
 ; RV32I-NEXT:    li a4, 0
-; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_4
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -2213,7 +2213,7 @@ define void @cmpxchg_i32_acquire_monotonic(ptr %ptr, i32 %cmp, i32 %val) nounwin
 ; RV64I-NEXT:    addi a1, sp, 4
 ; RV64I-NEXT:    li a3, 2
 ; RV64I-NEXT:    li a4, 0
-; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_4
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -2254,7 +2254,7 @@ define void @cmpxchg_i32_acquire_acquire(ptr %ptr, i32 %cmp, i32 %val) nounwind
 ; RV32I-NEXT:    addi a1, sp, 8
 ; RV32I-NEXT:    li a3, 2
 ; RV32I-NEXT:    li a4, 2
-; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_4
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -2289,7 +2289,7 @@ define void @cmpxchg_i32_acquire_acquire(ptr %ptr, i32 %cmp, i32 %val) nounwind
 ; RV64I-NEXT:    addi a1, sp, 4
 ; RV64I-NEXT:    li a3, 2
 ; RV64I-NEXT:    li a4, 2
-; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_4
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -2330,7 +2330,7 @@ define void @cmpxchg_i32_release_monotonic(ptr %ptr, i32 %cmp, i32 %val) nounwin
 ; RV32I-NEXT:    addi a1, sp, 8
 ; RV32I-NEXT:    li a3, 3
 ; RV32I-NEXT:    li a4, 0
-; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_4
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -2365,7 +2365,7 @@ define void @cmpxchg_i32_release_monotonic(ptr %ptr, i32 %cmp, i32 %val) nounwin
 ; RV64I-NEXT:    addi a1, sp, 4
 ; RV64I-NEXT:    li a3, 3
 ; RV64I-NEXT:    li a4, 0
-; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_4
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -2406,7 +2406,7 @@ define void @cmpxchg_i32_release_acquire(ptr %ptr, i32 %cmp, i32 %val) nounwind
 ; RV32I-NEXT:    addi a1, sp, 8
 ; RV32I-NEXT:    li a3, 3
 ; RV32I-NEXT:    li a4, 2
-; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_4
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -2441,7 +2441,7 @@ define void @cmpxchg_i32_release_acquire(ptr %ptr, i32 %cmp, i32 %val) nounwind
 ; RV64I-NEXT:    addi a1, sp, 4
 ; RV64I-NEXT:    li a3, 3
 ; RV64I-NEXT:    li a4, 2
-; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_4
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -2482,7 +2482,7 @@ define void @cmpxchg_i32_acq_rel_monotonic(ptr %ptr, i32 %cmp, i32 %val) nounwin
 ; RV32I-NEXT:    addi a1, sp, 8
 ; RV32I-NEXT:    li a3, 4
 ; RV32I-NEXT:    li a4, 0
-; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_4
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -2517,7 +2517,7 @@ define void @cmpxchg_i32_acq_rel_monotonic(ptr %ptr, i32 %cmp, i32 %val) nounwin
 ; RV64I-NEXT:    addi a1, sp, 4
 ; RV64I-NEXT:    li a3, 4
 ; RV64I-NEXT:    li a4, 0
-; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_4
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -2558,7 +2558,7 @@ define void @cmpxchg_i32_acq_rel_acquire(ptr %ptr, i32 %cmp, i32 %val) nounwind
 ; RV32I-NEXT:    addi a1, sp, 8
 ; RV32I-NEXT:    li a3, 4
 ; RV32I-NEXT:    li a4, 2
-; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_4
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -2593,7 +2593,7 @@ define void @cmpxchg_i32_acq_rel_acquire(ptr %ptr, i32 %cmp, i32 %val) nounwind
 ; RV64I-NEXT:    addi a1, sp, 4
 ; RV64I-NEXT:    li a3, 4
 ; RV64I-NEXT:    li a4, 2
-; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_4
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -2634,7 +2634,7 @@ define void @cmpxchg_i32_seq_cst_monotonic(ptr %ptr, i32 %cmp, i32 %val) nounwin
 ; RV32I-NEXT:    addi a1, sp, 8
 ; RV32I-NEXT:    li a3, 5
 ; RV32I-NEXT:    li a4, 0
-; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_4
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -2658,7 +2658,7 @@ define void @cmpxchg_i32_seq_cst_monotonic(ptr %ptr, i32 %cmp, i32 %val) nounwin
 ; RV64I-NEXT:    addi a1, sp, 4
 ; RV64I-NEXT:    li a3, 5
 ; RV64I-NEXT:    li a4, 0
-; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_4
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -2687,7 +2687,7 @@ define void @cmpxchg_i32_seq_cst_acquire(ptr %ptr, i32 %cmp, i32 %val) nounwind
 ; RV32I-NEXT:    addi a1, sp, 8
 ; RV32I-NEXT:    li a3, 5
 ; RV32I-NEXT:    li a4, 2
-; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_4
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -2711,7 +2711,7 @@ define void @cmpxchg_i32_seq_cst_acquire(ptr %ptr, i32 %cmp, i32 %val) nounwind
 ; RV64I-NEXT:    addi a1, sp, 4
 ; RV64I-NEXT:    li a3, 5
 ; RV64I-NEXT:    li a4, 2
-; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_4
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -2740,7 +2740,7 @@ define void @cmpxchg_i32_seq_cst_seq_cst(ptr %ptr, i32 %cmp, i32 %val) nounwind
 ; RV32I-NEXT:    addi a1, sp, 8
 ; RV32I-NEXT:    li a3, 5
 ; RV32I-NEXT:    li a4, 5
-; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_4
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -2764,7 +2764,7 @@ define void @cmpxchg_i32_seq_cst_seq_cst(ptr %ptr, i32 %cmp, i32 %val) nounwind
 ; RV64I-NEXT:    addi a1, sp, 4
 ; RV64I-NEXT:    li a3, 5
 ; RV64I-NEXT:    li a4, 5
-; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_4
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -2796,7 +2796,7 @@ define void @cmpxchg_i64_monotonic_monotonic(ptr %ptr, i64 %cmp, i64 %val) nounw
 ; RV32I-NEXT:    mv a3, a4
 ; RV32I-NEXT:    li a4, 0
 ; RV32I-NEXT:    li a5, 0
-; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_8
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -2812,7 +2812,7 @@ define void @cmpxchg_i64_monotonic_monotonic(ptr %ptr, i64 %cmp, i64 %val) nounw
 ; RV32IA-NEXT:    mv a3, a4
 ; RV32IA-NEXT:    li a4, 0
 ; RV32IA-NEXT:    li a5, 0
-; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32IA-NEXT:    call __atomic_compare_exchange_8
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
@@ -2825,7 +2825,7 @@ define void @cmpxchg_i64_monotonic_monotonic(ptr %ptr, i64 %cmp, i64 %val) nounw
 ; RV64I-NEXT:    mv a1, sp
 ; RV64I-NEXT:    li a3, 0
 ; RV64I-NEXT:    li a4, 0
-; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_8
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -2857,7 +2857,7 @@ define void @cmpxchg_i64_acquire_monotonic(ptr %ptr, i64 %cmp, i64 %val) nounwin
 ; RV32I-NEXT:    mv a2, a3
 ; RV32I-NEXT:    mv a3, a5
 ; RV32I-NEXT:    li a5, 0
-; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_8
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -2874,7 +2874,7 @@ define void @cmpxchg_i64_acquire_monotonic(ptr %ptr, i64 %cmp, i64 %val) nounwin
 ; RV32IA-NEXT:    mv a2, a3
 ; RV32IA-NEXT:    mv a3, a5
 ; RV32IA-NEXT:    li a5, 0
-; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32IA-NEXT:    call __atomic_compare_exchange_8
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
@@ -2887,7 +2887,7 @@ define void @cmpxchg_i64_acquire_monotonic(ptr %ptr, i64 %cmp, i64 %val) nounwin
 ; RV64I-NEXT:    mv a1, sp
 ; RV64I-NEXT:    li a3, 2
 ; RV64I-NEXT:    li a4, 0
-; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_8
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -2930,7 +2930,7 @@ define void @cmpxchg_i64_acquire_acquire(ptr %ptr, i64 %cmp, i64 %val) nounwind
 ; RV32I-NEXT:    li a5, 2
 ; RV32I-NEXT:    mv a2, a3
 ; RV32I-NEXT:    mv a3, a6
-; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_8
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -2947,7 +2947,7 @@ define void @cmpxchg_i64_acquire_acquire(ptr %ptr, i64 %cmp, i64 %val) nounwind
 ; RV32IA-NEXT:    li a5, 2
 ; RV32IA-NEXT:    mv a2, a3
 ; RV32IA-NEXT:    mv a3, a6
-; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32IA-NEXT:    call __atomic_compare_exchange_8
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
@@ -2960,7 +2960,7 @@ define void @cmpxchg_i64_acquire_acquire(ptr %ptr, i64 %cmp, i64 %val) nounwind
 ; RV64I-NEXT:    mv a1, sp
 ; RV64I-NEXT:    li a3, 2
 ; RV64I-NEXT:    li a4, 2
-; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_8
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -3003,7 +3003,7 @@ define void @cmpxchg_i64_release_monotonic(ptr %ptr, i64 %cmp, i64 %val) nounwin
 ; RV32I-NEXT:    mv a2, a3
 ; RV32I-NEXT:    mv a3, a5
 ; RV32I-NEXT:    li a5, 0
-; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_8
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -3020,7 +3020,7 @@ define void @cmpxchg_i64_release_monotonic(ptr %ptr, i64 %cmp, i64 %val) nounwin
 ; RV32IA-NEXT:    mv a2, a3
 ; RV32IA-NEXT:    mv a3, a5
 ; RV32IA-NEXT:    li a5, 0
-; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32IA-NEXT:    call __atomic_compare_exchange_8
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
@@ -3033,7 +3033,7 @@ define void @cmpxchg_i64_release_monotonic(ptr %ptr, i64 %cmp, i64 %val) nounwin
 ; RV64I-NEXT:    mv a1, sp
 ; RV64I-NEXT:    li a3, 3
 ; RV64I-NEXT:    li a4, 0
-; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_8
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -3076,7 +3076,7 @@ define void @cmpxchg_i64_release_acquire(ptr %ptr, i64 %cmp, i64 %val) nounwind
 ; RV32I-NEXT:    li a5, 2
 ; RV32I-NEXT:    mv a2, a3
 ; RV32I-NEXT:    mv a3, a6
-; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_8
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -3093,7 +3093,7 @@ define void @cmpxchg_i64_release_acquire(ptr %ptr, i64 %cmp, i64 %val) nounwind
 ; RV32IA-NEXT:    li a5, 2
 ; RV32IA-NEXT:    mv a2, a3
 ; RV32IA-NEXT:    mv a3, a6
-; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32IA-NEXT:    call __atomic_compare_exchange_8
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
@@ -3106,7 +3106,7 @@ define void @cmpxchg_i64_release_acquire(ptr %ptr, i64 %cmp, i64 %val) nounwind
 ; RV64I-NEXT:    mv a1, sp
 ; RV64I-NEXT:    li a3, 3
 ; RV64I-NEXT:    li a4, 2
-; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_8
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -3149,7 +3149,7 @@ define void @cmpxchg_i64_acq_rel_monotonic(ptr %ptr, i64 %cmp, i64 %val) nounwin
 ; RV32I-NEXT:    mv a2, a3
 ; RV32I-NEXT:    mv a3, a5
 ; RV32I-NEXT:    li a5, 0
-; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_8
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -3166,7 +3166,7 @@ define void @cmpxchg_i64_acq_rel_monotonic(ptr %ptr, i64 %cmp, i64 %val) nounwin
 ; RV32IA-NEXT:    mv a2, a3
 ; RV32IA-NEXT:    mv a3, a5
 ; RV32IA-NEXT:    li a5, 0
-; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32IA-NEXT:    call __atomic_compare_exchange_8
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
@@ -3179,7 +3179,7 @@ define void @cmpxchg_i64_acq_rel_monotonic(ptr %ptr, i64 %cmp, i64 %val) nounwin
 ; RV64I-NEXT:    mv a1, sp
 ; RV64I-NEXT:    li a3, 4
 ; RV64I-NEXT:    li a4, 0
-; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_8
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -3222,7 +3222,7 @@ define void @cmpxchg_i64_acq_rel_acquire(ptr %ptr, i64 %cmp, i64 %val) nounwind
 ; RV32I-NEXT:    li a5, 2
 ; RV32I-NEXT:    mv a2, a3
 ; RV32I-NEXT:    mv a3, a6
-; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_8
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -3239,7 +3239,7 @@ define void @cmpxchg_i64_acq_rel_acquire(ptr %ptr, i64 %cmp, i64 %val) nounwind
 ; RV32IA-NEXT:    li a5, 2
 ; RV32IA-NEXT:    mv a2, a3
 ; RV32IA-NEXT:    mv a3, a6
-; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32IA-NEXT:    call __atomic_compare_exchange_8
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
@@ -3252,7 +3252,7 @@ define void @cmpxchg_i64_acq_rel_acquire(ptr %ptr, i64 %cmp, i64 %val) nounwind
 ; RV64I-NEXT:    mv a1, sp
 ; RV64I-NEXT:    li a3, 4
 ; RV64I-NEXT:    li a4, 2
-; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_8
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -3295,7 +3295,7 @@ define void @cmpxchg_i64_seq_cst_monotonic(ptr %ptr, i64 %cmp, i64 %val) nounwin
 ; RV32I-NEXT:    mv a2, a3
 ; RV32I-NEXT:    mv a3, a5
 ; RV32I-NEXT:    li a5, 0
-; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_8
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -3312,7 +3312,7 @@ define void @cmpxchg_i64_seq_cst_monotonic(ptr %ptr, i64 %cmp, i64 %val) nounwin
 ; RV32IA-NEXT:    mv a2, a3
 ; RV32IA-NEXT:    mv a3, a5
 ; RV32IA-NEXT:    li a5, 0
-; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32IA-NEXT:    call __atomic_compare_exchange_8
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
@@ -3325,7 +3325,7 @@ define void @cmpxchg_i64_seq_cst_monotonic(ptr %ptr, i64 %cmp, i64 %val) nounwin
 ; RV64I-NEXT:    mv a1, sp
 ; RV64I-NEXT:    li a3, 5
 ; RV64I-NEXT:    li a4, 0
-; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_8
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -3357,7 +3357,7 @@ define void @cmpxchg_i64_seq_cst_acquire(ptr %ptr, i64 %cmp, i64 %val) nounwind
 ; RV32I-NEXT:    li a5, 2
 ; RV32I-NEXT:    mv a2, a3
 ; RV32I-NEXT:    mv a3, a6
-; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_8
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -3374,7 +3374,7 @@ define void @cmpxchg_i64_seq_cst_acquire(ptr %ptr, i64 %cmp, i64 %val) nounwind
 ; RV32IA-NEXT:    li a5, 2
 ; RV32IA-NEXT:    mv a2, a3
 ; RV32IA-NEXT:    mv a3, a6
-; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32IA-NEXT:    call __atomic_compare_exchange_8
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
@@ -3387,7 +3387,7 @@ define void @cmpxchg_i64_seq_cst_acquire(ptr %ptr, i64 %cmp, i64 %val) nounwind
 ; RV64I-NEXT:    mv a1, sp
 ; RV64I-NEXT:    li a3, 5
 ; RV64I-NEXT:    li a4, 2
-; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_8
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -3419,7 +3419,7 @@ define void @cmpxchg_i64_seq_cst_seq_cst(ptr %ptr, i64 %cmp, i64 %val) nounwind
 ; RV32I-NEXT:    li a5, 5
 ; RV32I-NEXT:    mv a2, a3
 ; RV32I-NEXT:    mv a3, a6
-; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_8
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -3436,7 +3436,7 @@ define void @cmpxchg_i64_seq_cst_seq_cst(ptr %ptr, i64 %cmp, i64 %val) nounwind
 ; RV32IA-NEXT:    li a5, 5
 ; RV32IA-NEXT:    mv a2, a3
 ; RV32IA-NEXT:    mv a3, a6
-; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32IA-NEXT:    call __atomic_compare_exchange_8
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
@@ -3449,7 +3449,7 @@ define void @cmpxchg_i64_seq_cst_seq_cst(ptr %ptr, i64 %cmp, i64 %val) nounwind
 ; RV64I-NEXT:    mv a1, sp
 ; RV64I-NEXT:    li a3, 5
 ; RV64I-NEXT:    li a4, 5
-; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_8
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
diff --git a/llvm/test/CodeGen/RISCV/atomic-load-store.ll b/llvm/test/CodeGen/RISCV/atomic-load-store.ll
index d3488ebed89f60..2d1fc21cda89b0 100644
--- a/llvm/test/CodeGen/RISCV/atomic-load-store.ll
+++ b/llvm/test/CodeGen/RISCV/atomic-load-store.ll
@@ -30,7 +30,7 @@ define i8 @atomic_load_i8_unordered(ptr %a) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a1, 0
-; RV32I-NEXT:    call __atomic_load_1@plt
+; RV32I-NEXT:    call __atomic_load_1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -45,7 +45,7 @@ define i8 @atomic_load_i8_unordered(ptr %a) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a1, 0
-; RV64I-NEXT:    call __atomic_load_1@plt
+; RV64I-NEXT:    call __atomic_load_1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -64,7 +64,7 @@ define i8 @atomic_load_i8_monotonic(ptr %a) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a1, 0
-; RV32I-NEXT:    call __atomic_load_1@plt
+; RV32I-NEXT:    call __atomic_load_1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -79,7 +79,7 @@ define i8 @atomic_load_i8_monotonic(ptr %a) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a1, 0
-; RV64I-NEXT:    call __atomic_load_1@plt
+; RV64I-NEXT:    call __atomic_load_1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -98,7 +98,7 @@ define i8 @atomic_load_i8_acquire(ptr %a) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a1, 2
-; RV32I-NEXT:    call __atomic_load_1@plt
+; RV32I-NEXT:    call __atomic_load_1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -119,7 +119,7 @@ define i8 @atomic_load_i8_acquire(ptr %a) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a1, 2
-; RV64I-NEXT:    call __atomic_load_1@plt
+; RV64I-NEXT:    call __atomic_load_1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -166,7 +166,7 @@ define i8 @atomic_load_i8_seq_cst(ptr %a) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a1, 5
-; RV32I-NEXT:    call __atomic_load_1@plt
+; RV32I-NEXT:    call __atomic_load_1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -189,7 +189,7 @@ define i8 @atomic_load_i8_seq_cst(ptr %a) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a1, 5
-; RV64I-NEXT:    call __atomic_load_1@plt
+; RV64I-NEXT:    call __atomic_load_1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -242,7 +242,7 @@ define i16 @atomic_load_i16_unordered(ptr %a) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a1, 0
-; RV32I-NEXT:    call __atomic_load_2@plt
+; RV32I-NEXT:    call __atomic_load_2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -257,7 +257,7 @@ define i16 @atomic_load_i16_unordered(ptr %a) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a1, 0
-; RV64I-NEXT:    call __atomic_load_2@plt
+; RV64I-NEXT:    call __atomic_load_2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -276,7 +276,7 @@ define i16 @atomic_load_i16_monotonic(ptr %a) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a1, 0
-; RV32I-NEXT:    call __atomic_load_2@plt
+; RV32I-NEXT:    call __atomic_load_2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -291,7 +291,7 @@ define i16 @atomic_load_i16_monotonic(ptr %a) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a1, 0
-; RV64I-NEXT:    call __atomic_load_2@plt
+; RV64I-NEXT:    call __atomic_load_2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -310,7 +310,7 @@ define i16 @atomic_load_i16_acquire(ptr %a) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a1, 2
-; RV32I-NEXT:    call __atomic_load_2@plt
+; RV32I-NEXT:    call __atomic_load_2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -331,7 +331,7 @@ define i16 @atomic_load_i16_acquire(ptr %a) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a1, 2
-; RV64I-NEXT:    call __atomic_load_2@plt
+; RV64I-NEXT:    call __atomic_load_2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -378,7 +378,7 @@ define i16 @atomic_load_i16_seq_cst(ptr %a) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a1, 5
-; RV32I-NEXT:    call __atomic_load_2@plt
+; RV32I-NEXT:    call __atomic_load_2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -401,7 +401,7 @@ define i16 @atomic_load_i16_seq_cst(ptr %a) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a1, 5
-; RV64I-NEXT:    call __atomic_load_2@plt
+; RV64I-NEXT:    call __atomic_load_2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -454,7 +454,7 @@ define i32 @atomic_load_i32_unordered(ptr %a) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a1, 0
-; RV32I-NEXT:    call __atomic_load_4@plt
+; RV32I-NEXT:    call __atomic_load_4
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -469,7 +469,7 @@ define i32 @atomic_load_i32_unordered(ptr %a) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a1, 0
-; RV64I-NEXT:    call __atomic_load_4@plt
+; RV64I-NEXT:    call __atomic_load_4
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -488,7 +488,7 @@ define i32 @atomic_load_i32_monotonic(ptr %a) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a1, 0
-; RV32I-NEXT:    call __atomic_load_4@plt
+; RV32I-NEXT:    call __atomic_load_4
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -503,7 +503,7 @@ define i32 @atomic_load_i32_monotonic(ptr %a) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a1, 0
-; RV64I-NEXT:    call __atomic_load_4@plt
+; RV64I-NEXT:    call __atomic_load_4
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -522,7 +522,7 @@ define i32 @atomic_load_i32_acquire(ptr %a) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a1, 2
-; RV32I-NEXT:    call __atomic_load_4@plt
+; RV32I-NEXT:    call __atomic_load_4
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -543,7 +543,7 @@ define i32 @atomic_load_i32_acquire(ptr %a) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a1, 2
-; RV64I-NEXT:    call __atomic_load_4@plt
+; RV64I-NEXT:    call __atomic_load_4
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -590,7 +590,7 @@ define i32 @atomic_load_i32_seq_cst(ptr %a) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a1, 5
-; RV32I-NEXT:    call __atomic_load_4@plt
+; RV32I-NEXT:    call __atomic_load_4
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -613,7 +613,7 @@ define i32 @atomic_load_i32_seq_cst(ptr %a) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a1, 5
-; RV64I-NEXT:    call __atomic_load_4@plt
+; RV64I-NEXT:    call __atomic_load_4
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -666,7 +666,7 @@ define i64 @atomic_load_i64_unordered(ptr %a) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a1, 0
-; RV32I-NEXT:    call __atomic_load_8@plt
+; RV32I-NEXT:    call __atomic_load_8
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -676,7 +676,7 @@ define i64 @atomic_load_i64_unordered(ptr %a) nounwind {
 ; RV32IA-NEXT:    addi sp, sp, -16
 ; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    li a1, 0
-; RV32IA-NEXT:    call __atomic_load_8@plt
+; RV32IA-NEXT:    call __atomic_load_8
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
@@ -686,7 +686,7 @@ define i64 @atomic_load_i64_unordered(ptr %a) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a1, 0
-; RV64I-NEXT:    call __atomic_load_8@plt
+; RV64I-NEXT:    call __atomic_load_8
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -705,7 +705,7 @@ define i64 @atomic_load_i64_monotonic(ptr %a) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a1, 0
-; RV32I-NEXT:    call __atomic_load_8@plt
+; RV32I-NEXT:    call __atomic_load_8
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -715,7 +715,7 @@ define i64 @atomic_load_i64_monotonic(ptr %a) nounwind {
 ; RV32IA-NEXT:    addi sp, sp, -16
 ; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    li a1, 0
-; RV32IA-NEXT:    call __atomic_load_8@plt
+; RV32IA-NEXT:    call __atomic_load_8
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
@@ -725,7 +725,7 @@ define i64 @atomic_load_i64_monotonic(ptr %a) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a1, 0
-; RV64I-NEXT:    call __atomic_load_8@plt
+; RV64I-NEXT:    call __atomic_load_8
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -744,7 +744,7 @@ define i64 @atomic_load_i64_acquire(ptr %a) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a1, 2
-; RV32I-NEXT:    call __atomic_load_8@plt
+; RV32I-NEXT:    call __atomic_load_8
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -754,7 +754,7 @@ define i64 @atomic_load_i64_acquire(ptr %a) nounwind {
 ; RV32IA-NEXT:    addi sp, sp, -16
 ; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    li a1, 2
-; RV32IA-NEXT:    call __atomic_load_8@plt
+; RV32IA-NEXT:    call __atomic_load_8
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
@@ -764,7 +764,7 @@ define i64 @atomic_load_i64_acquire(ptr %a) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a1, 2
-; RV64I-NEXT:    call __atomic_load_8@plt
+; RV64I-NEXT:    call __atomic_load_8
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -800,7 +800,7 @@ define i64 @atomic_load_i64_seq_cst(ptr %a) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a1, 5
-; RV32I-NEXT:    call __atomic_load_8@plt
+; RV32I-NEXT:    call __atomic_load_8
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -810,7 +810,7 @@ define i64 @atomic_load_i64_seq_cst(ptr %a) nounwind {
 ; RV32IA-NEXT:    addi sp, sp, -16
 ; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    li a1, 5
-; RV32IA-NEXT:    call __atomic_load_8@plt
+; RV32IA-NEXT:    call __atomic_load_8
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
@@ -820,7 +820,7 @@ define i64 @atomic_load_i64_seq_cst(ptr %a) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a1, 5
-; RV64I-NEXT:    call __atomic_load_8@plt
+; RV64I-NEXT:    call __atomic_load_8
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -860,7 +860,7 @@ define void @atomic_store_i8_unordered(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 0
-; RV32I-NEXT:    call __atomic_store_1@plt
+; RV32I-NEXT:    call __atomic_store_1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -875,7 +875,7 @@ define void @atomic_store_i8_unordered(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 0
-; RV64I-NEXT:    call __atomic_store_1@plt
+; RV64I-NEXT:    call __atomic_store_1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -894,7 +894,7 @@ define void @atomic_store_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 0
-; RV32I-NEXT:    call __atomic_store_1@plt
+; RV32I-NEXT:    call __atomic_store_1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -909,7 +909,7 @@ define void @atomic_store_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 0
-; RV64I-NEXT:    call __atomic_store_1@plt
+; RV64I-NEXT:    call __atomic_store_1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -928,7 +928,7 @@ define void @atomic_store_i8_release(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 3
-; RV32I-NEXT:    call __atomic_store_1@plt
+; RV32I-NEXT:    call __atomic_store_1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -949,7 +949,7 @@ define void @atomic_store_i8_release(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 3
-; RV64I-NEXT:    call __atomic_store_1@plt
+; RV64I-NEXT:    call __atomic_store_1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -996,7 +996,7 @@ define void @atomic_store_i8_seq_cst(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 5
-; RV32I-NEXT:    call __atomic_store_1@plt
+; RV32I-NEXT:    call __atomic_store_1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1018,7 +1018,7 @@ define void @atomic_store_i8_seq_cst(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 5
-; RV64I-NEXT:    call __atomic_store_1@plt
+; RV64I-NEXT:    call __atomic_store_1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1070,7 +1070,7 @@ define void @atomic_store_i16_unordered(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 0
-; RV32I-NEXT:    call __atomic_store_2@plt
+; RV32I-NEXT:    call __atomic_store_2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1085,7 +1085,7 @@ define void @atomic_store_i16_unordered(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 0
-; RV64I-NEXT:    call __atomic_store_2@plt
+; RV64I-NEXT:    call __atomic_store_2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1104,7 +1104,7 @@ define void @atomic_store_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 0
-; RV32I-NEXT:    call __atomic_store_2@plt
+; RV32I-NEXT:    call __atomic_store_2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1119,7 +1119,7 @@ define void @atomic_store_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 0
-; RV64I-NEXT:    call __atomic_store_2@plt
+; RV64I-NEXT:    call __atomic_store_2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1138,7 +1138,7 @@ define void @atomic_store_i16_release(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 3
-; RV32I-NEXT:    call __atomic_store_2@plt
+; RV32I-NEXT:    call __atomic_store_2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1159,7 +1159,7 @@ define void @atomic_store_i16_release(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 3
-; RV64I-NEXT:    call __atomic_store_2@plt
+; RV64I-NEXT:    call __atomic_store_2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1206,7 +1206,7 @@ define void @atomic_store_i16_seq_cst(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 5
-; RV32I-NEXT:    call __atomic_store_2@plt
+; RV32I-NEXT:    call __atomic_store_2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1228,7 +1228,7 @@ define void @atomic_store_i16_seq_cst(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 5
-; RV64I-NEXT:    call __atomic_store_2@plt
+; RV64I-NEXT:    call __atomic_store_2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1280,7 +1280,7 @@ define void @atomic_store_i32_unordered(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 0
-; RV32I-NEXT:    call __atomic_store_4@plt
+; RV32I-NEXT:    call __atomic_store_4
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1295,7 +1295,7 @@ define void @atomic_store_i32_unordered(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 0
-; RV64I-NEXT:    call __atomic_store_4@plt
+; RV64I-NEXT:    call __atomic_store_4
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1314,7 +1314,7 @@ define void @atomic_store_i32_monotonic(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 0
-; RV32I-NEXT:    call __atomic_store_4@plt
+; RV32I-NEXT:    call __atomic_store_4
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1329,7 +1329,7 @@ define void @atomic_store_i32_monotonic(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 0
-; RV64I-NEXT:    call __atomic_store_4@plt
+; RV64I-NEXT:    call __atomic_store_4
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1348,7 +1348,7 @@ define void @atomic_store_i32_release(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 3
-; RV32I-NEXT:    call __atomic_store_4@plt
+; RV32I-NEXT:    call __atomic_store_4
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1369,7 +1369,7 @@ define void @atomic_store_i32_release(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 3
-; RV64I-NEXT:    call __atomic_store_4@plt
+; RV64I-NEXT:    call __atomic_store_4
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1416,7 +1416,7 @@ define void @atomic_store_i32_seq_cst(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 5
-; RV32I-NEXT:    call __atomic_store_4@plt
+; RV32I-NEXT:    call __atomic_store_4
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1438,7 +1438,7 @@ define void @atomic_store_i32_seq_cst(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 5
-; RV64I-NEXT:    call __atomic_store_4@plt
+; RV64I-NEXT:    call __atomic_store_4
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1490,7 +1490,7 @@ define void @atomic_store_i64_unordered(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a3, 0
-; RV32I-NEXT:    call __atomic_store_8@plt
+; RV32I-NEXT:    call __atomic_store_8
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1500,7 +1500,7 @@ define void @atomic_store_i64_unordered(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    addi sp, sp, -16
 ; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    li a3, 0
-; RV32IA-NEXT:    call __atomic_store_8@plt
+; RV32IA-NEXT:    call __atomic_store_8
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
@@ -1510,7 +1510,7 @@ define void @atomic_store_i64_unordered(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 0
-; RV64I-NEXT:    call __atomic_store_8@plt
+; RV64I-NEXT:    call __atomic_store_8
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1529,7 +1529,7 @@ define void @atomic_store_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a3, 0
-; RV32I-NEXT:    call __atomic_store_8@plt
+; RV32I-NEXT:    call __atomic_store_8
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1539,7 +1539,7 @@ define void @atomic_store_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    addi sp, sp, -16
 ; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    li a3, 0
-; RV32IA-NEXT:    call __atomic_store_8@plt
+; RV32IA-NEXT:    call __atomic_store_8
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
@@ -1549,7 +1549,7 @@ define void @atomic_store_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 0
-; RV64I-NEXT:    call __atomic_store_8@plt
+; RV64I-NEXT:    call __atomic_store_8
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1568,7 +1568,7 @@ define void @atomic_store_i64_release(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a3, 3
-; RV32I-NEXT:    call __atomic_store_8@plt
+; RV32I-NEXT:    call __atomic_store_8
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1578,7 +1578,7 @@ define void @atomic_store_i64_release(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    addi sp, sp, -16
 ; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    li a3, 3
-; RV32IA-NEXT:    call __atomic_store_8@plt
+; RV32IA-NEXT:    call __atomic_store_8
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
@@ -1588,7 +1588,7 @@ define void @atomic_store_i64_release(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 3
-; RV64I-NEXT:    call __atomic_store_8@plt
+; RV64I-NEXT:    call __atomic_store_8
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1624,7 +1624,7 @@ define void @atomic_store_i64_seq_cst(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a3, 5
-; RV32I-NEXT:    call __atomic_store_8@plt
+; RV32I-NEXT:    call __atomic_store_8
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1634,7 +1634,7 @@ define void @atomic_store_i64_seq_cst(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    addi sp, sp, -16
 ; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    li a3, 5
-; RV32IA-NEXT:    call __atomic_store_8@plt
+; RV32IA-NEXT:    call __atomic_store_8
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
@@ -1644,7 +1644,7 @@ define void @atomic_store_i64_seq_cst(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 5
-; RV64I-NEXT:    call __atomic_store_8@plt
+; RV64I-NEXT:    call __atomic_store_8
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
diff --git a/llvm/test/CodeGen/RISCV/atomic-rmw-discard.ll b/llvm/test/CodeGen/RISCV/atomic-rmw-discard.ll
index 895852b84e0041..8d3fc96109262e 100644
--- a/llvm/test/CodeGen/RISCV/atomic-rmw-discard.ll
+++ b/llvm/test/CodeGen/RISCV/atomic-rmw-discard.ll
@@ -24,7 +24,7 @@ define void @amoswap_d_discard(ptr %a, i64 %b) nounwind {
 ; RV32-NEXT:    addi sp, sp, -16
 ; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    li a3, 5
-; RV32-NEXT:    call __atomic_exchange_8@plt
+; RV32-NEXT:    call __atomic_exchange_8
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
@@ -57,7 +57,7 @@ define void @amoadd_d_discard(ptr %a, i64 %b) nounwind {
 ; RV32-NEXT:    addi sp, sp, -16
 ; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    li a3, 5
-; RV32-NEXT:    call __atomic_fetch_add_8@plt
+; RV32-NEXT:    call __atomic_fetch_add_8
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
@@ -90,7 +90,7 @@ define void @amoand_d_discard(ptr %a, i64 %b) nounwind {
 ; RV32-NEXT:    addi sp, sp, -16
 ; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    li a3, 5
-; RV32-NEXT:    call __atomic_fetch_and_8@plt
+; RV32-NEXT:    call __atomic_fetch_and_8
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
@@ -123,7 +123,7 @@ define void @amoor_d_discard(ptr %a, i64 %b) nounwind {
 ; RV32-NEXT:    addi sp, sp, -16
 ; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    li a3, 5
-; RV32-NEXT:    call __atomic_fetch_or_8@plt
+; RV32-NEXT:    call __atomic_fetch_or_8
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
@@ -156,7 +156,7 @@ define void @amoxor_d_discard(ptr %a, i64 %b) nounwind {
 ; RV32-NEXT:    addi sp, sp, -16
 ; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    li a3, 5
-; RV32-NEXT:    call __atomic_fetch_or_8@plt
+; RV32-NEXT:    call __atomic_fetch_or_8
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
@@ -205,7 +205,7 @@ define void @amomax_d_discard(ptr %a, i64 %b) nounwind {
 ; RV32-NEXT:    li a4, 5
 ; RV32-NEXT:    li a5, 5
 ; RV32-NEXT:    mv a0, s0
-; RV32-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32-NEXT:    call __atomic_compare_exchange_8
 ; RV32-NEXT:    lw a4, 12(sp)
 ; RV32-NEXT:    lw a5, 8(sp)
 ; RV32-NEXT:    bnez a0, .LBB11_6
@@ -281,7 +281,7 @@ define void @amomaxu_d_discard(ptr %a, i64 %b) nounwind {
 ; RV32-NEXT:    li a4, 5
 ; RV32-NEXT:    li a5, 5
 ; RV32-NEXT:    mv a0, s0
-; RV32-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32-NEXT:    call __atomic_compare_exchange_8
 ; RV32-NEXT:    lw a4, 12(sp)
 ; RV32-NEXT:    lw a5, 8(sp)
 ; RV32-NEXT:    bnez a0, .LBB13_6
@@ -357,7 +357,7 @@ define void @amomin_d_discard(ptr %a, i64 %b) nounwind {
 ; RV32-NEXT:    li a4, 5
 ; RV32-NEXT:    li a5, 5
 ; RV32-NEXT:    mv a0, s0
-; RV32-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32-NEXT:    call __atomic_compare_exchange_8
 ; RV32-NEXT:    lw a4, 12(sp)
 ; RV32-NEXT:    lw a5, 8(sp)
 ; RV32-NEXT:    bnez a0, .LBB15_6
@@ -433,7 +433,7 @@ define void @amominu_d_discard(ptr %a, i64 %b) nounwind {
 ; RV32-NEXT:    li a4, 5
 ; RV32-NEXT:    li a5, 5
 ; RV32-NEXT:    mv a0, s0
-; RV32-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32-NEXT:    call __atomic_compare_exchange_8
 ; RV32-NEXT:    lw a4, 12(sp)
 ; RV32-NEXT:    lw a5, 8(sp)
 ; RV32-NEXT:    bnez a0, .LBB17_6
diff --git a/llvm/test/CodeGen/RISCV/atomic-rmw-sub.ll b/llvm/test/CodeGen/RISCV/atomic-rmw-sub.ll
index 9fcf4c1b0541bd..4dafd6a08d973b 100644
--- a/llvm/test/CodeGen/RISCV/atomic-rmw-sub.ll
+++ b/llvm/test/CodeGen/RISCV/atomic-rmw-sub.ll
@@ -15,7 +15,7 @@ define i32 @atomicrmw_sub_i32_constant(ptr %a) nounwind {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a1, 1
 ; RV32I-NEXT:    li a2, 5
-; RV32I-NEXT:    call __atomic_fetch_sub_4@plt
+; RV32I-NEXT:    call __atomic_fetch_sub_4
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -32,7 +32,7 @@ define i32 @atomicrmw_sub_i32_constant(ptr %a) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a1, 1
 ; RV64I-NEXT:    li a2, 5
-; RV64I-NEXT:    call __atomic_fetch_sub_4@plt
+; RV64I-NEXT:    call __atomic_fetch_sub_4
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -54,7 +54,7 @@ define i64 @atomicrmw_sub_i64_constant(ptr %a) nounwind {
 ; RV32I-NEXT:    li a1, 1
 ; RV32I-NEXT:    li a3, 5
 ; RV32I-NEXT:    li a2, 0
-; RV32I-NEXT:    call __atomic_fetch_sub_8@plt
+; RV32I-NEXT:    call __atomic_fetch_sub_8
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -66,7 +66,7 @@ define i64 @atomicrmw_sub_i64_constant(ptr %a) nounwind {
 ; RV32IA-NEXT:    li a1, 1
 ; RV32IA-NEXT:    li a3, 5
 ; RV32IA-NEXT:    li a2, 0
-; RV32IA-NEXT:    call __atomic_fetch_sub_8@plt
+; RV32IA-NEXT:    call __atomic_fetch_sub_8
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
@@ -77,7 +77,7 @@ define i64 @atomicrmw_sub_i64_constant(ptr %a) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a1, 1
 ; RV64I-NEXT:    li a2, 5
-; RV64I-NEXT:    call __atomic_fetch_sub_8@plt
+; RV64I-NEXT:    call __atomic_fetch_sub_8
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -98,7 +98,7 @@ define i32 @atomicrmw_sub_i32_neg(ptr %a, i32 %x, i32 %y) nounwind {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sub a1, a1, a2
 ; RV32I-NEXT:    li a2, 5
-; RV32I-NEXT:    call __atomic_fetch_sub_4@plt
+; RV32I-NEXT:    call __atomic_fetch_sub_4
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -115,7 +115,7 @@ define i32 @atomicrmw_sub_i32_neg(ptr %a, i32 %x, i32 %y) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    subw a1, a1, a2
 ; RV64I-NEXT:    li a2, 5
-; RV64I-NEXT:    call __atomic_fetch_sub_4@plt
+; RV64I-NEXT:    call __atomic_fetch_sub_4
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -140,7 +140,7 @@ define i64 @atomicrmw_sub_i64_neg(ptr %a, i64 %x, i64 %y) nounwind {
 ; RV32I-NEXT:    sub a2, a2, a5
 ; RV32I-NEXT:    sub a1, a1, a3
 ; RV32I-NEXT:    li a3, 5
-; RV32I-NEXT:    call __atomic_fetch_sub_8@plt
+; RV32I-NEXT:    call __atomic_fetch_sub_8
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -154,7 +154,7 @@ define i64 @atomicrmw_sub_i64_neg(ptr %a, i64 %x, i64 %y) nounwind {
 ; RV32IA-NEXT:    sub a2, a2, a5
 ; RV32IA-NEXT:    sub a1, a1, a3
 ; RV32IA-NEXT:    li a3, 5
-; RV32IA-NEXT:    call __atomic_fetch_sub_8@plt
+; RV32IA-NEXT:    call __atomic_fetch_sub_8
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
@@ -165,7 +165,7 @@ define i64 @atomicrmw_sub_i64_neg(ptr %a, i64 %x, i64 %y) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sub a1, a1, a2
 ; RV64I-NEXT:    li a2, 5
-; RV64I-NEXT:    call __atomic_fetch_sub_8@plt
+; RV64I-NEXT:    call __atomic_fetch_sub_8
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
diff --git a/llvm/test/CodeGen/RISCV/atomic-rmw.ll b/llvm/test/CodeGen/RISCV/atomic-rmw.ll
index e97a1ea5dfca00..d4c067b7b8a40c 100644
--- a/llvm/test/CodeGen/RISCV/atomic-rmw.ll
+++ b/llvm/test/CodeGen/RISCV/atomic-rmw.ll
@@ -18,7 +18,7 @@ define i8 @atomicrmw_xchg_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 0
-; RV32I-NEXT:    call __atomic_exchange_1@plt
+; RV32I-NEXT:    call __atomic_exchange_1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -48,7 +48,7 @@ define i8 @atomicrmw_xchg_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 0
-; RV64I-NEXT:    call __atomic_exchange_1@plt
+; RV64I-NEXT:    call __atomic_exchange_1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -82,7 +82,7 @@ define i8 @atomicrmw_xchg_i8_acquire(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 2
-; RV32I-NEXT:    call __atomic_exchange_1@plt
+; RV32I-NEXT:    call __atomic_exchange_1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -132,7 +132,7 @@ define i8 @atomicrmw_xchg_i8_acquire(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 2
-; RV64I-NEXT:    call __atomic_exchange_1@plt
+; RV64I-NEXT:    call __atomic_exchange_1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -186,7 +186,7 @@ define i8 @atomicrmw_xchg_i8_release(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 3
-; RV32I-NEXT:    call __atomic_exchange_1@plt
+; RV32I-NEXT:    call __atomic_exchange_1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -236,7 +236,7 @@ define i8 @atomicrmw_xchg_i8_release(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 3
-; RV64I-NEXT:    call __atomic_exchange_1@plt
+; RV64I-NEXT:    call __atomic_exchange_1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -290,7 +290,7 @@ define i8 @atomicrmw_xchg_i8_acq_rel(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 4
-; RV32I-NEXT:    call __atomic_exchange_1@plt
+; RV32I-NEXT:    call __atomic_exchange_1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -340,7 +340,7 @@ define i8 @atomicrmw_xchg_i8_acq_rel(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 4
-; RV64I-NEXT:    call __atomic_exchange_1@plt
+; RV64I-NEXT:    call __atomic_exchange_1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -394,7 +394,7 @@ define i8 @atomicrmw_xchg_i8_seq_cst(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 5
-; RV32I-NEXT:    call __atomic_exchange_1@plt
+; RV32I-NEXT:    call __atomic_exchange_1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -424,7 +424,7 @@ define i8 @atomicrmw_xchg_i8_seq_cst(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 5
-; RV64I-NEXT:    call __atomic_exchange_1@plt
+; RV64I-NEXT:    call __atomic_exchange_1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -462,7 +462,7 @@ define i8 @atomicrmw_xchg_0_i8_monotonic(ptr %a) nounwind {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    li a2, 0
-; RV32I-NEXT:    call __atomic_exchange_1@plt
+; RV32I-NEXT:    call __atomic_exchange_1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -484,7 +484,7 @@ define i8 @atomicrmw_xchg_0_i8_monotonic(ptr %a) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a1, 0
 ; RV64I-NEXT:    li a2, 0
-; RV64I-NEXT:    call __atomic_exchange_1@plt
+; RV64I-NEXT:    call __atomic_exchange_1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -510,7 +510,7 @@ define i8 @atomicrmw_xchg_0_i8_acquire(ptr %a) nounwind {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 2
 ; RV32I-NEXT:    li a1, 0
-; RV32I-NEXT:    call __atomic_exchange_1@plt
+; RV32I-NEXT:    call __atomic_exchange_1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -543,7 +543,7 @@ define i8 @atomicrmw_xchg_0_i8_acquire(ptr %a) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 2
 ; RV64I-NEXT:    li a1, 0
-; RV64I-NEXT:    call __atomic_exchange_1@plt
+; RV64I-NEXT:    call __atomic_exchange_1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -580,7 +580,7 @@ define i8 @atomicrmw_xchg_0_i8_release(ptr %a) nounwind {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 3
 ; RV32I-NEXT:    li a1, 0
-; RV32I-NEXT:    call __atomic_exchange_1@plt
+; RV32I-NEXT:    call __atomic_exchange_1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -613,7 +613,7 @@ define i8 @atomicrmw_xchg_0_i8_release(ptr %a) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 3
 ; RV64I-NEXT:    li a1, 0
-; RV64I-NEXT:    call __atomic_exchange_1@plt
+; RV64I-NEXT:    call __atomic_exchange_1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -650,7 +650,7 @@ define i8 @atomicrmw_xchg_0_i8_acq_rel(ptr %a) nounwind {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 4
 ; RV32I-NEXT:    li a1, 0
-; RV32I-NEXT:    call __atomic_exchange_1@plt
+; RV32I-NEXT:    call __atomic_exchange_1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -683,7 +683,7 @@ define i8 @atomicrmw_xchg_0_i8_acq_rel(ptr %a) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 4
 ; RV64I-NEXT:    li a1, 0
-; RV64I-NEXT:    call __atomic_exchange_1@plt
+; RV64I-NEXT:    call __atomic_exchange_1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -720,7 +720,7 @@ define i8 @atomicrmw_xchg_0_i8_seq_cst(ptr %a) nounwind {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 5
 ; RV32I-NEXT:    li a1, 0
-; RV32I-NEXT:    call __atomic_exchange_1@plt
+; RV32I-NEXT:    call __atomic_exchange_1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -753,7 +753,7 @@ define i8 @atomicrmw_xchg_0_i8_seq_cst(ptr %a) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 5
 ; RV64I-NEXT:    li a1, 0
-; RV64I-NEXT:    call __atomic_exchange_1@plt
+; RV64I-NEXT:    call __atomic_exchange_1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -790,7 +790,7 @@ define i8 @atomicrmw_xchg_minus_1_i8_monotonic(ptr %a) nounwind {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a1, 255
 ; RV32I-NEXT:    li a2, 0
-; RV32I-NEXT:    call __atomic_exchange_1@plt
+; RV32I-NEXT:    call __atomic_exchange_1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -811,7 +811,7 @@ define i8 @atomicrmw_xchg_minus_1_i8_monotonic(ptr %a) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a1, 255
 ; RV64I-NEXT:    li a2, 0
-; RV64I-NEXT:    call __atomic_exchange_1@plt
+; RV64I-NEXT:    call __atomic_exchange_1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -836,7 +836,7 @@ define i8 @atomicrmw_xchg_minus_1_i8_acquire(ptr %a) nounwind {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a1, 255
 ; RV32I-NEXT:    li a2, 2
-; RV32I-NEXT:    call __atomic_exchange_1@plt
+; RV32I-NEXT:    call __atomic_exchange_1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -867,7 +867,7 @@ define i8 @atomicrmw_xchg_minus_1_i8_acquire(ptr %a) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a1, 255
 ; RV64I-NEXT:    li a2, 2
-; RV64I-NEXT:    call __atomic_exchange_1@plt
+; RV64I-NEXT:    call __atomic_exchange_1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -902,7 +902,7 @@ define i8 @atomicrmw_xchg_minus_1_i8_release(ptr %a) nounwind {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a1, 255
 ; RV32I-NEXT:    li a2, 3
-; RV32I-NEXT:    call __atomic_exchange_1@plt
+; RV32I-NEXT:    call __atomic_exchange_1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -933,7 +933,7 @@ define i8 @atomicrmw_xchg_minus_1_i8_release(ptr %a) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a1, 255
 ; RV64I-NEXT:    li a2, 3
-; RV64I-NEXT:    call __atomic_exchange_1@plt
+; RV64I-NEXT:    call __atomic_exchange_1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -968,7 +968,7 @@ define i8 @atomicrmw_xchg_minus_1_i8_acq_rel(ptr %a) nounwind {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a1, 255
 ; RV32I-NEXT:    li a2, 4
-; RV32I-NEXT:    call __atomic_exchange_1@plt
+; RV32I-NEXT:    call __atomic_exchange_1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -999,7 +999,7 @@ define i8 @atomicrmw_xchg_minus_1_i8_acq_rel(ptr %a) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a1, 255
 ; RV64I-NEXT:    li a2, 4
-; RV64I-NEXT:    call __atomic_exchange_1@plt
+; RV64I-NEXT:    call __atomic_exchange_1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1034,7 +1034,7 @@ define i8 @atomicrmw_xchg_minus_1_i8_seq_cst(ptr %a) nounwind {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a1, 255
 ; RV32I-NEXT:    li a2, 5
-; RV32I-NEXT:    call __atomic_exchange_1@plt
+; RV32I-NEXT:    call __atomic_exchange_1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1065,7 +1065,7 @@ define i8 @atomicrmw_xchg_minus_1_i8_seq_cst(ptr %a) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a1, 255
 ; RV64I-NEXT:    li a2, 5
-; RV64I-NEXT:    call __atomic_exchange_1@plt
+; RV64I-NEXT:    call __atomic_exchange_1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1099,7 +1099,7 @@ define i8 @atomicrmw_add_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 0
-; RV32I-NEXT:    call __atomic_fetch_add_1@plt
+; RV32I-NEXT:    call __atomic_fetch_add_1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1129,7 +1129,7 @@ define i8 @atomicrmw_add_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 0
-; RV64I-NEXT:    call __atomic_fetch_add_1@plt
+; RV64I-NEXT:    call __atomic_fetch_add_1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1163,7 +1163,7 @@ define i8 @atomicrmw_add_i8_acquire(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 2
-; RV32I-NEXT:    call __atomic_fetch_add_1@plt
+; RV32I-NEXT:    call __atomic_fetch_add_1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1213,7 +1213,7 @@ define i8 @atomicrmw_add_i8_acquire(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 2
-; RV64I-NEXT:    call __atomic_fetch_add_1@plt
+; RV64I-NEXT:    call __atomic_fetch_add_1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1267,7 +1267,7 @@ define i8 @atomicrmw_add_i8_release(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 3
-; RV32I-NEXT:    call __atomic_fetch_add_1@plt
+; RV32I-NEXT:    call __atomic_fetch_add_1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1317,7 +1317,7 @@ define i8 @atomicrmw_add_i8_release(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 3
-; RV64I-NEXT:    call __atomic_fetch_add_1@plt
+; RV64I-NEXT:    call __atomic_fetch_add_1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1371,7 +1371,7 @@ define i8 @atomicrmw_add_i8_acq_rel(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 4
-; RV32I-NEXT:    call __atomic_fetch_add_1@plt
+; RV32I-NEXT:    call __atomic_fetch_add_1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1421,7 +1421,7 @@ define i8 @atomicrmw_add_i8_acq_rel(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 4
-; RV64I-NEXT:    call __atomic_fetch_add_1@plt
+; RV64I-NEXT:    call __atomic_fetch_add_1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1475,7 +1475,7 @@ define i8 @atomicrmw_add_i8_seq_cst(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 5
-; RV32I-NEXT:    call __atomic_fetch_add_1@plt
+; RV32I-NEXT:    call __atomic_fetch_add_1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1505,7 +1505,7 @@ define i8 @atomicrmw_add_i8_seq_cst(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 5
-; RV64I-NEXT:    call __atomic_fetch_add_1@plt
+; RV64I-NEXT:    call __atomic_fetch_add_1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1539,7 +1539,7 @@ define i8 @atomicrmw_sub_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 0
-; RV32I-NEXT:    call __atomic_fetch_sub_1@plt
+; RV32I-NEXT:    call __atomic_fetch_sub_1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1569,7 +1569,7 @@ define i8 @atomicrmw_sub_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 0
-; RV64I-NEXT:    call __atomic_fetch_sub_1@plt
+; RV64I-NEXT:    call __atomic_fetch_sub_1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1603,7 +1603,7 @@ define i8 @atomicrmw_sub_i8_acquire(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 2
-; RV32I-NEXT:    call __atomic_fetch_sub_1@plt
+; RV32I-NEXT:    call __atomic_fetch_sub_1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1653,7 +1653,7 @@ define i8 @atomicrmw_sub_i8_acquire(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 2
-; RV64I-NEXT:    call __atomic_fetch_sub_1@plt
+; RV64I-NEXT:    call __atomic_fetch_sub_1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1707,7 +1707,7 @@ define i8 @atomicrmw_sub_i8_release(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 3
-; RV32I-NEXT:    call __atomic_fetch_sub_1@plt
+; RV32I-NEXT:    call __atomic_fetch_sub_1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1757,7 +1757,7 @@ define i8 @atomicrmw_sub_i8_release(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 3
-; RV64I-NEXT:    call __atomic_fetch_sub_1@plt
+; RV64I-NEXT:    call __atomic_fetch_sub_1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1811,7 +1811,7 @@ define i8 @atomicrmw_sub_i8_acq_rel(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 4
-; RV32I-NEXT:    call __atomic_fetch_sub_1@plt
+; RV32I-NEXT:    call __atomic_fetch_sub_1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1861,7 +1861,7 @@ define i8 @atomicrmw_sub_i8_acq_rel(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 4
-; RV64I-NEXT:    call __atomic_fetch_sub_1@plt
+; RV64I-NEXT:    call __atomic_fetch_sub_1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1915,7 +1915,7 @@ define i8 @atomicrmw_sub_i8_seq_cst(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 5
-; RV32I-NEXT:    call __atomic_fetch_sub_1@plt
+; RV32I-NEXT:    call __atomic_fetch_sub_1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1945,7 +1945,7 @@ define i8 @atomicrmw_sub_i8_seq_cst(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 5
-; RV64I-NEXT:    call __atomic_fetch_sub_1@plt
+; RV64I-NEXT:    call __atomic_fetch_sub_1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1979,7 +1979,7 @@ define i8 @atomicrmw_and_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 0
-; RV32I-NEXT:    call __atomic_fetch_and_1@plt
+; RV32I-NEXT:    call __atomic_fetch_and_1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -2003,7 +2003,7 @@ define i8 @atomicrmw_and_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 0
-; RV64I-NEXT:    call __atomic_fetch_and_1@plt
+; RV64I-NEXT:    call __atomic_fetch_and_1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -2031,7 +2031,7 @@ define i8 @atomicrmw_and_i8_acquire(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 2
-; RV32I-NEXT:    call __atomic_fetch_and_1@plt
+; RV32I-NEXT:    call __atomic_fetch_and_1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -2069,7 +2069,7 @@ define i8 @atomicrmw_and_i8_acquire(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 2
-; RV64I-NEXT:    call __atomic_fetch_and_1@plt
+; RV64I-NEXT:    call __atomic_fetch_and_1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -2111,7 +2111,7 @@ define i8 @atomicrmw_and_i8_release(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 3
-; RV32I-NEXT:    call __atomic_fetch_and_1@plt
+; RV32I-NEXT:    call __atomic_fetch_and_1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -2149,7 +2149,7 @@ define i8 @atomicrmw_and_i8_release(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 3
-; RV64I-NEXT:    call __atomic_fetch_and_1@plt
+; RV64I-NEXT:    call __atomic_fetch_and_1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -2191,7 +2191,7 @@ define i8 @atomicrmw_and_i8_acq_rel(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 4
-; RV32I-NEXT:    call __atomic_fetch_and_1@plt
+; RV32I-NEXT:    call __atomic_fetch_and_1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -2229,7 +2229,7 @@ define i8 @atomicrmw_and_i8_acq_rel(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 4
-; RV64I-NEXT:    call __atomic_fetch_and_1@plt
+; RV64I-NEXT:    call __atomic_fetch_and_1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -2271,7 +2271,7 @@ define i8 @atomicrmw_and_i8_seq_cst(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 5
-; RV32I-NEXT:    call __atomic_fetch_and_1@plt
+; RV32I-NEXT:    call __atomic_fetch_and_1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -2309,7 +2309,7 @@ define i8 @atomicrmw_and_i8_seq_cst(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 5
-; RV64I-NEXT:    call __atomic_fetch_and_1@plt
+; RV64I-NEXT:    call __atomic_fetch_and_1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -2351,7 +2351,7 @@ define i8 @atomicrmw_nand_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 0
-; RV32I-NEXT:    call __atomic_fetch_nand_1@plt
+; RV32I-NEXT:    call __atomic_fetch_nand_1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -2382,7 +2382,7 @@ define i8 @atomicrmw_nand_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 0
-; RV64I-NEXT:    call __atomic_fetch_nand_1@plt
+; RV64I-NEXT:    call __atomic_fetch_nand_1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -2417,7 +2417,7 @@ define i8 @atomicrmw_nand_i8_acquire(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 2
-; RV32I-NEXT:    call __atomic_fetch_nand_1@plt
+; RV32I-NEXT:    call __atomic_fetch_nand_1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -2469,7 +2469,7 @@ define i8 @atomicrmw_nand_i8_acquire(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 2
-; RV64I-NEXT:    call __atomic_fetch_nand_1@plt
+; RV64I-NEXT:    call __atomic_fetch_nand_1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -2525,7 +2525,7 @@ define i8 @atomicrmw_nand_i8_release(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 3
-; RV32I-NEXT:    call __atomic_fetch_nand_1@plt
+; RV32I-NEXT:    call __atomic_fetch_nand_1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -2577,7 +2577,7 @@ define i8 @atomicrmw_nand_i8_release(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 3
-; RV64I-NEXT:    call __atomic_fetch_nand_1@plt
+; RV64I-NEXT:    call __atomic_fetch_nand_1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -2633,7 +2633,7 @@ define i8 @atomicrmw_nand_i8_acq_rel(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 4
-; RV32I-NEXT:    call __atomic_fetch_nand_1@plt
+; RV32I-NEXT:    call __atomic_fetch_nand_1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -2685,7 +2685,7 @@ define i8 @atomicrmw_nand_i8_acq_rel(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 4
-; RV64I-NEXT:    call __atomic_fetch_nand_1@plt
+; RV64I-NEXT:    call __atomic_fetch_nand_1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -2741,7 +2741,7 @@ define i8 @atomicrmw_nand_i8_seq_cst(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 5
-; RV32I-NEXT:    call __atomic_fetch_nand_1@plt
+; RV32I-NEXT:    call __atomic_fetch_nand_1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -2772,7 +2772,7 @@ define i8 @atomicrmw_nand_i8_seq_cst(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 5
-; RV64I-NEXT:    call __atomic_fetch_nand_1@plt
+; RV64I-NEXT:    call __atomic_fetch_nand_1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -2807,7 +2807,7 @@ define i8 @atomicrmw_or_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 0
-; RV32I-NEXT:    call __atomic_fetch_or_1@plt
+; RV32I-NEXT:    call __atomic_fetch_or_1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -2827,7 +2827,7 @@ define i8 @atomicrmw_or_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 0
-; RV64I-NEXT:    call __atomic_fetch_or_1@plt
+; RV64I-NEXT:    call __atomic_fetch_or_1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -2851,7 +2851,7 @@ define i8 @atomicrmw_or_i8_acquire(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 2
-; RV32I-NEXT:    call __atomic_fetch_or_1@plt
+; RV32I-NEXT:    call __atomic_fetch_or_1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -2881,7 +2881,7 @@ define i8 @atomicrmw_or_i8_acquire(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 2
-; RV64I-NEXT:    call __atomic_fetch_or_1@plt
+; RV64I-NEXT:    call __atomic_fetch_or_1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -2915,7 +2915,7 @@ define i8 @atomicrmw_or_i8_release(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 3
-; RV32I-NEXT:    call __atomic_fetch_or_1@plt
+; RV32I-NEXT:    call __atomic_fetch_or_1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -2945,7 +2945,7 @@ define i8 @atomicrmw_or_i8_release(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 3
-; RV64I-NEXT:    call __atomic_fetch_or_1@plt
+; RV64I-NEXT:    call __atomic_fetch_or_1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -2979,7 +2979,7 @@ define i8 @atomicrmw_or_i8_acq_rel(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 4
-; RV32I-NEXT:    call __atomic_fetch_or_1@plt
+; RV32I-NEXT:    call __atomic_fetch_or_1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -3009,7 +3009,7 @@ define i8 @atomicrmw_or_i8_acq_rel(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 4
-; RV64I-NEXT:    call __atomic_fetch_or_1@plt
+; RV64I-NEXT:    call __atomic_fetch_or_1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -3043,7 +3043,7 @@ define i8 @atomicrmw_or_i8_seq_cst(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 5
-; RV32I-NEXT:    call __atomic_fetch_or_1@plt
+; RV32I-NEXT:    call __atomic_fetch_or_1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -3073,7 +3073,7 @@ define i8 @atomicrmw_or_i8_seq_cst(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 5
-; RV64I-NEXT:    call __atomic_fetch_or_1@plt
+; RV64I-NEXT:    call __atomic_fetch_or_1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -3107,7 +3107,7 @@ define i8 @atomicrmw_xor_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 0
-; RV32I-NEXT:    call __atomic_fetch_xor_1@plt
+; RV32I-NEXT:    call __atomic_fetch_xor_1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -3127,7 +3127,7 @@ define i8 @atomicrmw_xor_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 0
-; RV64I-NEXT:    call __atomic_fetch_xor_1@plt
+; RV64I-NEXT:    call __atomic_fetch_xor_1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -3151,7 +3151,7 @@ define i8 @atomicrmw_xor_i8_acquire(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 2
-; RV32I-NEXT:    call __atomic_fetch_xor_1@plt
+; RV32I-NEXT:    call __atomic_fetch_xor_1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -3181,7 +3181,7 @@ define i8 @atomicrmw_xor_i8_acquire(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 2
-; RV64I-NEXT:    call __atomic_fetch_xor_1@plt
+; RV64I-NEXT:    call __atomic_fetch_xor_1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -3215,7 +3215,7 @@ define i8 @atomicrmw_xor_i8_release(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 3
-; RV32I-NEXT:    call __atomic_fetch_xor_1@plt
+; RV32I-NEXT:    call __atomic_fetch_xor_1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -3245,7 +3245,7 @@ define i8 @atomicrmw_xor_i8_release(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 3
-; RV64I-NEXT:    call __atomic_fetch_xor_1@plt
+; RV64I-NEXT:    call __atomic_fetch_xor_1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -3279,7 +3279,7 @@ define i8 @atomicrmw_xor_i8_acq_rel(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 4
-; RV32I-NEXT:    call __atomic_fetch_xor_1@plt
+; RV32I-NEXT:    call __atomic_fetch_xor_1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -3309,7 +3309,7 @@ define i8 @atomicrmw_xor_i8_acq_rel(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 4
-; RV64I-NEXT:    call __atomic_fetch_xor_1@plt
+; RV64I-NEXT:    call __atomic_fetch_xor_1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -3343,7 +3343,7 @@ define i8 @atomicrmw_xor_i8_seq_cst(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 5
-; RV32I-NEXT:    call __atomic_fetch_xor_1@plt
+; RV32I-NEXT:    call __atomic_fetch_xor_1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -3373,7 +3373,7 @@ define i8 @atomicrmw_xor_i8_seq_cst(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 5
-; RV64I-NEXT:    call __atomic_fetch_xor_1@plt
+; RV64I-NEXT:    call __atomic_fetch_xor_1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -3422,7 +3422,7 @@ define i8 @atomicrmw_max_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:    li a4, 0
-; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_1
 ; RV32I-NEXT:    lbu a3, 15(sp)
 ; RV32I-NEXT:    bnez a0, .LBB45_4
 ; RV32I-NEXT:  .LBB45_2: # %atomicrmw.start
@@ -3493,7 +3493,7 @@ define i8 @atomicrmw_max_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    li a3, 0
 ; RV64I-NEXT:    li a4, 0
-; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_1
 ; RV64I-NEXT:    lbu a3, 15(sp)
 ; RV64I-NEXT:    bnez a0, .LBB45_4
 ; RV64I-NEXT:  .LBB45_2: # %atomicrmw.start
@@ -3568,7 +3568,7 @@ define i8 @atomicrmw_max_i8_acquire(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    li a3, 2
 ; RV32I-NEXT:    li a4, 2
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_1
 ; RV32I-NEXT:    lbu a3, 15(sp)
 ; RV32I-NEXT:    bnez a0, .LBB46_4
 ; RV32I-NEXT:  .LBB46_2: # %atomicrmw.start
@@ -3668,7 +3668,7 @@ define i8 @atomicrmw_max_i8_acquire(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    li a3, 2
 ; RV64I-NEXT:    li a4, 2
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_1
 ; RV64I-NEXT:    lbu a3, 15(sp)
 ; RV64I-NEXT:    bnez a0, .LBB46_4
 ; RV64I-NEXT:  .LBB46_2: # %atomicrmw.start
@@ -3772,7 +3772,7 @@ define i8 @atomicrmw_max_i8_release(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    li a3, 3
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    li a4, 0
-; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_1
 ; RV32I-NEXT:    lbu a3, 15(sp)
 ; RV32I-NEXT:    bnez a0, .LBB47_4
 ; RV32I-NEXT:  .LBB47_2: # %atomicrmw.start
@@ -3872,7 +3872,7 @@ define i8 @atomicrmw_max_i8_release(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    li a3, 3
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    li a4, 0
-; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_1
 ; RV64I-NEXT:    lbu a3, 15(sp)
 ; RV64I-NEXT:    bnez a0, .LBB47_4
 ; RV64I-NEXT:  .LBB47_2: # %atomicrmw.start
@@ -3976,7 +3976,7 @@ define i8 @atomicrmw_max_i8_acq_rel(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    li a3, 4
 ; RV32I-NEXT:    li a4, 2
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_1
 ; RV32I-NEXT:    lbu a3, 15(sp)
 ; RV32I-NEXT:    bnez a0, .LBB48_4
 ; RV32I-NEXT:  .LBB48_2: # %atomicrmw.start
@@ -4076,7 +4076,7 @@ define i8 @atomicrmw_max_i8_acq_rel(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    li a3, 4
 ; RV64I-NEXT:    li a4, 2
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_1
 ; RV64I-NEXT:    lbu a3, 15(sp)
 ; RV64I-NEXT:    bnez a0, .LBB48_4
 ; RV64I-NEXT:  .LBB48_2: # %atomicrmw.start
@@ -4180,7 +4180,7 @@ define i8 @atomicrmw_max_i8_seq_cst(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    li a3, 5
 ; RV32I-NEXT:    li a4, 5
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_1
 ; RV32I-NEXT:    lbu a3, 15(sp)
 ; RV32I-NEXT:    bnez a0, .LBB49_4
 ; RV32I-NEXT:  .LBB49_2: # %atomicrmw.start
@@ -4251,7 +4251,7 @@ define i8 @atomicrmw_max_i8_seq_cst(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    li a3, 5
 ; RV64I-NEXT:    li a4, 5
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_1
 ; RV64I-NEXT:    lbu a3, 15(sp)
 ; RV64I-NEXT:    bnez a0, .LBB49_4
 ; RV64I-NEXT:  .LBB49_2: # %atomicrmw.start
@@ -4326,7 +4326,7 @@ define i8 @atomicrmw_min_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:    li a4, 0
-; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_1
 ; RV32I-NEXT:    lbu a3, 15(sp)
 ; RV32I-NEXT:    bnez a0, .LBB50_4
 ; RV32I-NEXT:  .LBB50_2: # %atomicrmw.start
@@ -4397,7 +4397,7 @@ define i8 @atomicrmw_min_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    li a3, 0
 ; RV64I-NEXT:    li a4, 0
-; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_1
 ; RV64I-NEXT:    lbu a3, 15(sp)
 ; RV64I-NEXT:    bnez a0, .LBB50_4
 ; RV64I-NEXT:  .LBB50_2: # %atomicrmw.start
@@ -4472,7 +4472,7 @@ define i8 @atomicrmw_min_i8_acquire(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    li a3, 2
 ; RV32I-NEXT:    li a4, 2
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_1
 ; RV32I-NEXT:    lbu a3, 15(sp)
 ; RV32I-NEXT:    bnez a0, .LBB51_4
 ; RV32I-NEXT:  .LBB51_2: # %atomicrmw.start
@@ -4572,7 +4572,7 @@ define i8 @atomicrmw_min_i8_acquire(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    li a3, 2
 ; RV64I-NEXT:    li a4, 2
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_1
 ; RV64I-NEXT:    lbu a3, 15(sp)
 ; RV64I-NEXT:    bnez a0, .LBB51_4
 ; RV64I-NEXT:  .LBB51_2: # %atomicrmw.start
@@ -4676,7 +4676,7 @@ define i8 @atomicrmw_min_i8_release(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    li a3, 3
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    li a4, 0
-; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_1
 ; RV32I-NEXT:    lbu a3, 15(sp)
 ; RV32I-NEXT:    bnez a0, .LBB52_4
 ; RV32I-NEXT:  .LBB52_2: # %atomicrmw.start
@@ -4776,7 +4776,7 @@ define i8 @atomicrmw_min_i8_release(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    li a3, 3
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    li a4, 0
-; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_1
 ; RV64I-NEXT:    lbu a3, 15(sp)
 ; RV64I-NEXT:    bnez a0, .LBB52_4
 ; RV64I-NEXT:  .LBB52_2: # %atomicrmw.start
@@ -4880,7 +4880,7 @@ define i8 @atomicrmw_min_i8_acq_rel(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    li a3, 4
 ; RV32I-NEXT:    li a4, 2
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_1
 ; RV32I-NEXT:    lbu a3, 15(sp)
 ; RV32I-NEXT:    bnez a0, .LBB53_4
 ; RV32I-NEXT:  .LBB53_2: # %atomicrmw.start
@@ -4980,7 +4980,7 @@ define i8 @atomicrmw_min_i8_acq_rel(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    li a3, 4
 ; RV64I-NEXT:    li a4, 2
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_1
 ; RV64I-NEXT:    lbu a3, 15(sp)
 ; RV64I-NEXT:    bnez a0, .LBB53_4
 ; RV64I-NEXT:  .LBB53_2: # %atomicrmw.start
@@ -5084,7 +5084,7 @@ define i8 @atomicrmw_min_i8_seq_cst(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    li a3, 5
 ; RV32I-NEXT:    li a4, 5
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_1
 ; RV32I-NEXT:    lbu a3, 15(sp)
 ; RV32I-NEXT:    bnez a0, .LBB54_4
 ; RV32I-NEXT:  .LBB54_2: # %atomicrmw.start
@@ -5155,7 +5155,7 @@ define i8 @atomicrmw_min_i8_seq_cst(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    li a3, 5
 ; RV64I-NEXT:    li a4, 5
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_1
 ; RV64I-NEXT:    lbu a3, 15(sp)
 ; RV64I-NEXT:    bnez a0, .LBB54_4
 ; RV64I-NEXT:  .LBB54_2: # %atomicrmw.start
@@ -5229,7 +5229,7 @@ define i8 @atomicrmw_umax_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:    li a4, 0
-; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_1
 ; RV32I-NEXT:    lbu a3, 15(sp)
 ; RV32I-NEXT:    bnez a0, .LBB55_4
 ; RV32I-NEXT:  .LBB55_2: # %atomicrmw.start
@@ -5293,7 +5293,7 @@ define i8 @atomicrmw_umax_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    li a3, 0
 ; RV64I-NEXT:    li a4, 0
-; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_1
 ; RV64I-NEXT:    lbu a3, 15(sp)
 ; RV64I-NEXT:    bnez a0, .LBB55_4
 ; RV64I-NEXT:  .LBB55_2: # %atomicrmw.start
@@ -5361,7 +5361,7 @@ define i8 @atomicrmw_umax_i8_acquire(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    li a3, 2
 ; RV32I-NEXT:    li a4, 2
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_1
 ; RV32I-NEXT:    lbu a3, 15(sp)
 ; RV32I-NEXT:    bnez a0, .LBB56_4
 ; RV32I-NEXT:  .LBB56_2: # %atomicrmw.start
@@ -5449,7 +5449,7 @@ define i8 @atomicrmw_umax_i8_acquire(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    li a3, 2
 ; RV64I-NEXT:    li a4, 2
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_1
 ; RV64I-NEXT:    lbu a3, 15(sp)
 ; RV64I-NEXT:    bnez a0, .LBB56_4
 ; RV64I-NEXT:  .LBB56_2: # %atomicrmw.start
@@ -5541,7 +5541,7 @@ define i8 @atomicrmw_umax_i8_release(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    li a3, 3
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    li a4, 0
-; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_1
 ; RV32I-NEXT:    lbu a3, 15(sp)
 ; RV32I-NEXT:    bnez a0, .LBB57_4
 ; RV32I-NEXT:  .LBB57_2: # %atomicrmw.start
@@ -5629,7 +5629,7 @@ define i8 @atomicrmw_umax_i8_release(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    li a3, 3
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    li a4, 0
-; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_1
 ; RV64I-NEXT:    lbu a3, 15(sp)
 ; RV64I-NEXT:    bnez a0, .LBB57_4
 ; RV64I-NEXT:  .LBB57_2: # %atomicrmw.start
@@ -5721,7 +5721,7 @@ define i8 @atomicrmw_umax_i8_acq_rel(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    li a3, 4
 ; RV32I-NEXT:    li a4, 2
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_1
 ; RV32I-NEXT:    lbu a3, 15(sp)
 ; RV32I-NEXT:    bnez a0, .LBB58_4
 ; RV32I-NEXT:  .LBB58_2: # %atomicrmw.start
@@ -5809,7 +5809,7 @@ define i8 @atomicrmw_umax_i8_acq_rel(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    li a3, 4
 ; RV64I-NEXT:    li a4, 2
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_1
 ; RV64I-NEXT:    lbu a3, 15(sp)
 ; RV64I-NEXT:    bnez a0, .LBB58_4
 ; RV64I-NEXT:  .LBB58_2: # %atomicrmw.start
@@ -5901,7 +5901,7 @@ define i8 @atomicrmw_umax_i8_seq_cst(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    li a3, 5
 ; RV32I-NEXT:    li a4, 5
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_1
 ; RV32I-NEXT:    lbu a3, 15(sp)
 ; RV32I-NEXT:    bnez a0, .LBB59_4
 ; RV32I-NEXT:  .LBB59_2: # %atomicrmw.start
@@ -5965,7 +5965,7 @@ define i8 @atomicrmw_umax_i8_seq_cst(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    li a3, 5
 ; RV64I-NEXT:    li a4, 5
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_1
 ; RV64I-NEXT:    lbu a3, 15(sp)
 ; RV64I-NEXT:    bnez a0, .LBB59_4
 ; RV64I-NEXT:  .LBB59_2: # %atomicrmw.start
@@ -6033,7 +6033,7 @@ define i8 @atomicrmw_umin_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:    li a4, 0
-; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_1
 ; RV32I-NEXT:    lbu a3, 15(sp)
 ; RV32I-NEXT:    bnez a0, .LBB60_4
 ; RV32I-NEXT:  .LBB60_2: # %atomicrmw.start
@@ -6097,7 +6097,7 @@ define i8 @atomicrmw_umin_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    li a3, 0
 ; RV64I-NEXT:    li a4, 0
-; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_1
 ; RV64I-NEXT:    lbu a3, 15(sp)
 ; RV64I-NEXT:    bnez a0, .LBB60_4
 ; RV64I-NEXT:  .LBB60_2: # %atomicrmw.start
@@ -6165,7 +6165,7 @@ define i8 @atomicrmw_umin_i8_acquire(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    li a3, 2
 ; RV32I-NEXT:    li a4, 2
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_1
 ; RV32I-NEXT:    lbu a3, 15(sp)
 ; RV32I-NEXT:    bnez a0, .LBB61_4
 ; RV32I-NEXT:  .LBB61_2: # %atomicrmw.start
@@ -6253,7 +6253,7 @@ define i8 @atomicrmw_umin_i8_acquire(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    li a3, 2
 ; RV64I-NEXT:    li a4, 2
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_1
 ; RV64I-NEXT:    lbu a3, 15(sp)
 ; RV64I-NEXT:    bnez a0, .LBB61_4
 ; RV64I-NEXT:  .LBB61_2: # %atomicrmw.start
@@ -6345,7 +6345,7 @@ define i8 @atomicrmw_umin_i8_release(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    li a3, 3
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    li a4, 0
-; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_1
 ; RV32I-NEXT:    lbu a3, 15(sp)
 ; RV32I-NEXT:    bnez a0, .LBB62_4
 ; RV32I-NEXT:  .LBB62_2: # %atomicrmw.start
@@ -6433,7 +6433,7 @@ define i8 @atomicrmw_umin_i8_release(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    li a3, 3
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    li a4, 0
-; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_1
 ; RV64I-NEXT:    lbu a3, 15(sp)
 ; RV64I-NEXT:    bnez a0, .LBB62_4
 ; RV64I-NEXT:  .LBB62_2: # %atomicrmw.start
@@ -6525,7 +6525,7 @@ define i8 @atomicrmw_umin_i8_acq_rel(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    li a3, 4
 ; RV32I-NEXT:    li a4, 2
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_1
 ; RV32I-NEXT:    lbu a3, 15(sp)
 ; RV32I-NEXT:    bnez a0, .LBB63_4
 ; RV32I-NEXT:  .LBB63_2: # %atomicrmw.start
@@ -6613,7 +6613,7 @@ define i8 @atomicrmw_umin_i8_acq_rel(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    li a3, 4
 ; RV64I-NEXT:    li a4, 2
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_1
 ; RV64I-NEXT:    lbu a3, 15(sp)
 ; RV64I-NEXT:    bnez a0, .LBB63_4
 ; RV64I-NEXT:  .LBB63_2: # %atomicrmw.start
@@ -6705,7 +6705,7 @@ define i8 @atomicrmw_umin_i8_seq_cst(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    li a3, 5
 ; RV32I-NEXT:    li a4, 5
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_1
 ; RV32I-NEXT:    lbu a3, 15(sp)
 ; RV32I-NEXT:    bnez a0, .LBB64_4
 ; RV32I-NEXT:  .LBB64_2: # %atomicrmw.start
@@ -6769,7 +6769,7 @@ define i8 @atomicrmw_umin_i8_seq_cst(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    li a3, 5
 ; RV64I-NEXT:    li a4, 5
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_1
 ; RV64I-NEXT:    lbu a3, 15(sp)
 ; RV64I-NEXT:    bnez a0, .LBB64_4
 ; RV64I-NEXT:  .LBB64_2: # %atomicrmw.start
@@ -6823,7 +6823,7 @@ define i16 @atomicrmw_xchg_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 0
-; RV32I-NEXT:    call __atomic_exchange_2@plt
+; RV32I-NEXT:    call __atomic_exchange_2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -6854,7 +6854,7 @@ define i16 @atomicrmw_xchg_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 0
-; RV64I-NEXT:    call __atomic_exchange_2@plt
+; RV64I-NEXT:    call __atomic_exchange_2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -6889,7 +6889,7 @@ define i16 @atomicrmw_xchg_i16_acquire(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 2
-; RV32I-NEXT:    call __atomic_exchange_2@plt
+; RV32I-NEXT:    call __atomic_exchange_2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -6941,7 +6941,7 @@ define i16 @atomicrmw_xchg_i16_acquire(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 2
-; RV64I-NEXT:    call __atomic_exchange_2@plt
+; RV64I-NEXT:    call __atomic_exchange_2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -6997,7 +6997,7 @@ define i16 @atomicrmw_xchg_i16_release(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 3
-; RV32I-NEXT:    call __atomic_exchange_2@plt
+; RV32I-NEXT:    call __atomic_exchange_2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -7049,7 +7049,7 @@ define i16 @atomicrmw_xchg_i16_release(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 3
-; RV64I-NEXT:    call __atomic_exchange_2@plt
+; RV64I-NEXT:    call __atomic_exchange_2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -7105,7 +7105,7 @@ define i16 @atomicrmw_xchg_i16_acq_rel(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 4
-; RV32I-NEXT:    call __atomic_exchange_2@plt
+; RV32I-NEXT:    call __atomic_exchange_2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -7157,7 +7157,7 @@ define i16 @atomicrmw_xchg_i16_acq_rel(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 4
-; RV64I-NEXT:    call __atomic_exchange_2@plt
+; RV64I-NEXT:    call __atomic_exchange_2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -7213,7 +7213,7 @@ define i16 @atomicrmw_xchg_i16_seq_cst(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 5
-; RV32I-NEXT:    call __atomic_exchange_2@plt
+; RV32I-NEXT:    call __atomic_exchange_2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -7244,7 +7244,7 @@ define i16 @atomicrmw_xchg_i16_seq_cst(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 5
-; RV64I-NEXT:    call __atomic_exchange_2@plt
+; RV64I-NEXT:    call __atomic_exchange_2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -7283,7 +7283,7 @@ define i16 @atomicrmw_xchg_0_i16_monotonic(ptr %a) nounwind {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    li a2, 0
-; RV32I-NEXT:    call __atomic_exchange_2@plt
+; RV32I-NEXT:    call __atomic_exchange_2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -7306,7 +7306,7 @@ define i16 @atomicrmw_xchg_0_i16_monotonic(ptr %a) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a1, 0
 ; RV64I-NEXT:    li a2, 0
-; RV64I-NEXT:    call __atomic_exchange_2@plt
+; RV64I-NEXT:    call __atomic_exchange_2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -7333,7 +7333,7 @@ define i16 @atomicrmw_xchg_0_i16_acquire(ptr %a) nounwind {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 2
 ; RV32I-NEXT:    li a1, 0
-; RV32I-NEXT:    call __atomic_exchange_2@plt
+; RV32I-NEXT:    call __atomic_exchange_2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -7368,7 +7368,7 @@ define i16 @atomicrmw_xchg_0_i16_acquire(ptr %a) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 2
 ; RV64I-NEXT:    li a1, 0
-; RV64I-NEXT:    call __atomic_exchange_2@plt
+; RV64I-NEXT:    call __atomic_exchange_2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -7407,7 +7407,7 @@ define i16 @atomicrmw_xchg_0_i16_release(ptr %a) nounwind {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 3
 ; RV32I-NEXT:    li a1, 0
-; RV32I-NEXT:    call __atomic_exchange_2@plt
+; RV32I-NEXT:    call __atomic_exchange_2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -7442,7 +7442,7 @@ define i16 @atomicrmw_xchg_0_i16_release(ptr %a) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 3
 ; RV64I-NEXT:    li a1, 0
-; RV64I-NEXT:    call __atomic_exchange_2@plt
+; RV64I-NEXT:    call __atomic_exchange_2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -7481,7 +7481,7 @@ define i16 @atomicrmw_xchg_0_i16_acq_rel(ptr %a) nounwind {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 4
 ; RV32I-NEXT:    li a1, 0
-; RV32I-NEXT:    call __atomic_exchange_2@plt
+; RV32I-NEXT:    call __atomic_exchange_2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -7516,7 +7516,7 @@ define i16 @atomicrmw_xchg_0_i16_acq_rel(ptr %a) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 4
 ; RV64I-NEXT:    li a1, 0
-; RV64I-NEXT:    call __atomic_exchange_2@plt
+; RV64I-NEXT:    call __atomic_exchange_2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -7555,7 +7555,7 @@ define i16 @atomicrmw_xchg_0_i16_seq_cst(ptr %a) nounwind {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 5
 ; RV32I-NEXT:    li a1, 0
-; RV32I-NEXT:    call __atomic_exchange_2@plt
+; RV32I-NEXT:    call __atomic_exchange_2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -7590,7 +7590,7 @@ define i16 @atomicrmw_xchg_0_i16_seq_cst(ptr %a) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 5
 ; RV64I-NEXT:    li a1, 0
-; RV64I-NEXT:    call __atomic_exchange_2@plt
+; RV64I-NEXT:    call __atomic_exchange_2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -7630,7 +7630,7 @@ define i16 @atomicrmw_xchg_minus_1_i16_monotonic(ptr %a) nounwind {
 ; RV32I-NEXT:    lui a1, 16
 ; RV32I-NEXT:    addi a1, a1, -1
 ; RV32I-NEXT:    li a2, 0
-; RV32I-NEXT:    call __atomic_exchange_2@plt
+; RV32I-NEXT:    call __atomic_exchange_2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -7653,7 +7653,7 @@ define i16 @atomicrmw_xchg_minus_1_i16_monotonic(ptr %a) nounwind {
 ; RV64I-NEXT:    lui a1, 16
 ; RV64I-NEXT:    addiw a1, a1, -1
 ; RV64I-NEXT:    li a2, 0
-; RV64I-NEXT:    call __atomic_exchange_2@plt
+; RV64I-NEXT:    call __atomic_exchange_2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -7680,7 +7680,7 @@ define i16 @atomicrmw_xchg_minus_1_i16_acquire(ptr %a) nounwind {
 ; RV32I-NEXT:    lui a1, 16
 ; RV32I-NEXT:    addi a1, a1, -1
 ; RV32I-NEXT:    li a2, 2
-; RV32I-NEXT:    call __atomic_exchange_2@plt
+; RV32I-NEXT:    call __atomic_exchange_2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -7714,7 +7714,7 @@ define i16 @atomicrmw_xchg_minus_1_i16_acquire(ptr %a) nounwind {
 ; RV64I-NEXT:    lui a1, 16
 ; RV64I-NEXT:    addiw a1, a1, -1
 ; RV64I-NEXT:    li a2, 2
-; RV64I-NEXT:    call __atomic_exchange_2@plt
+; RV64I-NEXT:    call __atomic_exchange_2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -7752,7 +7752,7 @@ define i16 @atomicrmw_xchg_minus_1_i16_release(ptr %a) nounwind {
 ; RV32I-NEXT:    lui a1, 16
 ; RV32I-NEXT:    addi a1, a1, -1
 ; RV32I-NEXT:    li a2, 3
-; RV32I-NEXT:    call __atomic_exchange_2@plt
+; RV32I-NEXT:    call __atomic_exchange_2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -7786,7 +7786,7 @@ define i16 @atomicrmw_xchg_minus_1_i16_release(ptr %a) nounwind {
 ; RV64I-NEXT:    lui a1, 16
 ; RV64I-NEXT:    addiw a1, a1, -1
 ; RV64I-NEXT:    li a2, 3
-; RV64I-NEXT:    call __atomic_exchange_2@plt
+; RV64I-NEXT:    call __atomic_exchange_2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -7824,7 +7824,7 @@ define i16 @atomicrmw_xchg_minus_1_i16_acq_rel(ptr %a) nounwind {
 ; RV32I-NEXT:    lui a1, 16
 ; RV32I-NEXT:    addi a1, a1, -1
 ; RV32I-NEXT:    li a2, 4
-; RV32I-NEXT:    call __atomic_exchange_2@plt
+; RV32I-NEXT:    call __atomic_exchange_2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -7858,7 +7858,7 @@ define i16 @atomicrmw_xchg_minus_1_i16_acq_rel(ptr %a) nounwind {
 ; RV64I-NEXT:    lui a1, 16
 ; RV64I-NEXT:    addiw a1, a1, -1
 ; RV64I-NEXT:    li a2, 4
-; RV64I-NEXT:    call __atomic_exchange_2@plt
+; RV64I-NEXT:    call __atomic_exchange_2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -7896,7 +7896,7 @@ define i16 @atomicrmw_xchg_minus_1_i16_seq_cst(ptr %a) nounwind {
 ; RV32I-NEXT:    lui a1, 16
 ; RV32I-NEXT:    addi a1, a1, -1
 ; RV32I-NEXT:    li a2, 5
-; RV32I-NEXT:    call __atomic_exchange_2@plt
+; RV32I-NEXT:    call __atomic_exchange_2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -7930,7 +7930,7 @@ define i16 @atomicrmw_xchg_minus_1_i16_seq_cst(ptr %a) nounwind {
 ; RV64I-NEXT:    lui a1, 16
 ; RV64I-NEXT:    addiw a1, a1, -1
 ; RV64I-NEXT:    li a2, 5
-; RV64I-NEXT:    call __atomic_exchange_2@plt
+; RV64I-NEXT:    call __atomic_exchange_2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -7966,7 +7966,7 @@ define i16 @atomicrmw_add_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 0
-; RV32I-NEXT:    call __atomic_fetch_add_2@plt
+; RV32I-NEXT:    call __atomic_fetch_add_2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -7997,7 +7997,7 @@ define i16 @atomicrmw_add_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 0
-; RV64I-NEXT:    call __atomic_fetch_add_2@plt
+; RV64I-NEXT:    call __atomic_fetch_add_2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -8032,7 +8032,7 @@ define i16 @atomicrmw_add_i16_acquire(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 2
-; RV32I-NEXT:    call __atomic_fetch_add_2@plt
+; RV32I-NEXT:    call __atomic_fetch_add_2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -8084,7 +8084,7 @@ define i16 @atomicrmw_add_i16_acquire(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 2
-; RV64I-NEXT:    call __atomic_fetch_add_2@plt
+; RV64I-NEXT:    call __atomic_fetch_add_2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -8140,7 +8140,7 @@ define i16 @atomicrmw_add_i16_release(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 3
-; RV32I-NEXT:    call __atomic_fetch_add_2@plt
+; RV32I-NEXT:    call __atomic_fetch_add_2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -8192,7 +8192,7 @@ define i16 @atomicrmw_add_i16_release(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 3
-; RV64I-NEXT:    call __atomic_fetch_add_2@plt
+; RV64I-NEXT:    call __atomic_fetch_add_2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -8248,7 +8248,7 @@ define i16 @atomicrmw_add_i16_acq_rel(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 4
-; RV32I-NEXT:    call __atomic_fetch_add_2@plt
+; RV32I-NEXT:    call __atomic_fetch_add_2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -8300,7 +8300,7 @@ define i16 @atomicrmw_add_i16_acq_rel(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 4
-; RV64I-NEXT:    call __atomic_fetch_add_2@plt
+; RV64I-NEXT:    call __atomic_fetch_add_2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -8356,7 +8356,7 @@ define i16 @atomicrmw_add_i16_seq_cst(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 5
-; RV32I-NEXT:    call __atomic_fetch_add_2@plt
+; RV32I-NEXT:    call __atomic_fetch_add_2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -8387,7 +8387,7 @@ define i16 @atomicrmw_add_i16_seq_cst(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 5
-; RV64I-NEXT:    call __atomic_fetch_add_2@plt
+; RV64I-NEXT:    call __atomic_fetch_add_2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -8422,7 +8422,7 @@ define i16 @atomicrmw_sub_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 0
-; RV32I-NEXT:    call __atomic_fetch_sub_2@plt
+; RV32I-NEXT:    call __atomic_fetch_sub_2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -8453,7 +8453,7 @@ define i16 @atomicrmw_sub_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 0
-; RV64I-NEXT:    call __atomic_fetch_sub_2@plt
+; RV64I-NEXT:    call __atomic_fetch_sub_2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -8488,7 +8488,7 @@ define i16 @atomicrmw_sub_i16_acquire(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 2
-; RV32I-NEXT:    call __atomic_fetch_sub_2@plt
+; RV32I-NEXT:    call __atomic_fetch_sub_2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -8540,7 +8540,7 @@ define i16 @atomicrmw_sub_i16_acquire(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 2
-; RV64I-NEXT:    call __atomic_fetch_sub_2@plt
+; RV64I-NEXT:    call __atomic_fetch_sub_2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -8596,7 +8596,7 @@ define i16 @atomicrmw_sub_i16_release(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 3
-; RV32I-NEXT:    call __atomic_fetch_sub_2@plt
+; RV32I-NEXT:    call __atomic_fetch_sub_2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -8648,7 +8648,7 @@ define i16 @atomicrmw_sub_i16_release(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 3
-; RV64I-NEXT:    call __atomic_fetch_sub_2@plt
+; RV64I-NEXT:    call __atomic_fetch_sub_2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -8704,7 +8704,7 @@ define i16 @atomicrmw_sub_i16_acq_rel(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 4
-; RV32I-NEXT:    call __atomic_fetch_sub_2@plt
+; RV32I-NEXT:    call __atomic_fetch_sub_2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -8756,7 +8756,7 @@ define i16 @atomicrmw_sub_i16_acq_rel(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 4
-; RV64I-NEXT:    call __atomic_fetch_sub_2@plt
+; RV64I-NEXT:    call __atomic_fetch_sub_2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -8812,7 +8812,7 @@ define i16 @atomicrmw_sub_i16_seq_cst(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 5
-; RV32I-NEXT:    call __atomic_fetch_sub_2@plt
+; RV32I-NEXT:    call __atomic_fetch_sub_2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -8843,7 +8843,7 @@ define i16 @atomicrmw_sub_i16_seq_cst(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 5
-; RV64I-NEXT:    call __atomic_fetch_sub_2@plt
+; RV64I-NEXT:    call __atomic_fetch_sub_2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -8878,7 +8878,7 @@ define i16 @atomicrmw_and_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 0
-; RV32I-NEXT:    call __atomic_fetch_and_2@plt
+; RV32I-NEXT:    call __atomic_fetch_and_2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -8903,7 +8903,7 @@ define i16 @atomicrmw_and_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 0
-; RV64I-NEXT:    call __atomic_fetch_and_2@plt
+; RV64I-NEXT:    call __atomic_fetch_and_2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -8932,7 +8932,7 @@ define i16 @atomicrmw_and_i16_acquire(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 2
-; RV32I-NEXT:    call __atomic_fetch_and_2@plt
+; RV32I-NEXT:    call __atomic_fetch_and_2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -8972,7 +8972,7 @@ define i16 @atomicrmw_and_i16_acquire(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 2
-; RV64I-NEXT:    call __atomic_fetch_and_2@plt
+; RV64I-NEXT:    call __atomic_fetch_and_2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -9016,7 +9016,7 @@ define i16 @atomicrmw_and_i16_release(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 3
-; RV32I-NEXT:    call __atomic_fetch_and_2@plt
+; RV32I-NEXT:    call __atomic_fetch_and_2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -9056,7 +9056,7 @@ define i16 @atomicrmw_and_i16_release(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 3
-; RV64I-NEXT:    call __atomic_fetch_and_2@plt
+; RV64I-NEXT:    call __atomic_fetch_and_2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -9100,7 +9100,7 @@ define i16 @atomicrmw_and_i16_acq_rel(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 4
-; RV32I-NEXT:    call __atomic_fetch_and_2@plt
+; RV32I-NEXT:    call __atomic_fetch_and_2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -9140,7 +9140,7 @@ define i16 @atomicrmw_and_i16_acq_rel(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 4
-; RV64I-NEXT:    call __atomic_fetch_and_2@plt
+; RV64I-NEXT:    call __atomic_fetch_and_2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -9184,7 +9184,7 @@ define i16 @atomicrmw_and_i16_seq_cst(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 5
-; RV32I-NEXT:    call __atomic_fetch_and_2@plt
+; RV32I-NEXT:    call __atomic_fetch_and_2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -9224,7 +9224,7 @@ define i16 @atomicrmw_and_i16_seq_cst(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 5
-; RV64I-NEXT:    call __atomic_fetch_and_2@plt
+; RV64I-NEXT:    call __atomic_fetch_and_2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -9268,7 +9268,7 @@ define i16 @atomicrmw_nand_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 0
-; RV32I-NEXT:    call __atomic_fetch_nand_2@plt
+; RV32I-NEXT:    call __atomic_fetch_nand_2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -9300,7 +9300,7 @@ define i16 @atomicrmw_nand_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 0
-; RV64I-NEXT:    call __atomic_fetch_nand_2@plt
+; RV64I-NEXT:    call __atomic_fetch_nand_2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -9336,7 +9336,7 @@ define i16 @atomicrmw_nand_i16_acquire(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 2
-; RV32I-NEXT:    call __atomic_fetch_nand_2@plt
+; RV32I-NEXT:    call __atomic_fetch_nand_2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -9390,7 +9390,7 @@ define i16 @atomicrmw_nand_i16_acquire(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 2
-; RV64I-NEXT:    call __atomic_fetch_nand_2@plt
+; RV64I-NEXT:    call __atomic_fetch_nand_2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -9448,7 +9448,7 @@ define i16 @atomicrmw_nand_i16_release(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 3
-; RV32I-NEXT:    call __atomic_fetch_nand_2@plt
+; RV32I-NEXT:    call __atomic_fetch_nand_2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -9502,7 +9502,7 @@ define i16 @atomicrmw_nand_i16_release(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 3
-; RV64I-NEXT:    call __atomic_fetch_nand_2@plt
+; RV64I-NEXT:    call __atomic_fetch_nand_2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -9560,7 +9560,7 @@ define i16 @atomicrmw_nand_i16_acq_rel(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 4
-; RV32I-NEXT:    call __atomic_fetch_nand_2@plt
+; RV32I-NEXT:    call __atomic_fetch_nand_2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -9614,7 +9614,7 @@ define i16 @atomicrmw_nand_i16_acq_rel(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 4
-; RV64I-NEXT:    call __atomic_fetch_nand_2@plt
+; RV64I-NEXT:    call __atomic_fetch_nand_2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -9672,7 +9672,7 @@ define i16 @atomicrmw_nand_i16_seq_cst(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 5
-; RV32I-NEXT:    call __atomic_fetch_nand_2@plt
+; RV32I-NEXT:    call __atomic_fetch_nand_2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -9704,7 +9704,7 @@ define i16 @atomicrmw_nand_i16_seq_cst(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 5
-; RV64I-NEXT:    call __atomic_fetch_nand_2@plt
+; RV64I-NEXT:    call __atomic_fetch_nand_2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -9740,7 +9740,7 @@ define i16 @atomicrmw_or_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 0
-; RV32I-NEXT:    call __atomic_fetch_or_2@plt
+; RV32I-NEXT:    call __atomic_fetch_or_2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -9761,7 +9761,7 @@ define i16 @atomicrmw_or_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 0
-; RV64I-NEXT:    call __atomic_fetch_or_2@plt
+; RV64I-NEXT:    call __atomic_fetch_or_2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -9786,7 +9786,7 @@ define i16 @atomicrmw_or_i16_acquire(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 2
-; RV32I-NEXT:    call __atomic_fetch_or_2@plt
+; RV32I-NEXT:    call __atomic_fetch_or_2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -9818,7 +9818,7 @@ define i16 @atomicrmw_or_i16_acquire(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 2
-; RV64I-NEXT:    call __atomic_fetch_or_2@plt
+; RV64I-NEXT:    call __atomic_fetch_or_2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -9854,7 +9854,7 @@ define i16 @atomicrmw_or_i16_release(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 3
-; RV32I-NEXT:    call __atomic_fetch_or_2@plt
+; RV32I-NEXT:    call __atomic_fetch_or_2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -9886,7 +9886,7 @@ define i16 @atomicrmw_or_i16_release(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 3
-; RV64I-NEXT:    call __atomic_fetch_or_2@plt
+; RV64I-NEXT:    call __atomic_fetch_or_2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -9922,7 +9922,7 @@ define i16 @atomicrmw_or_i16_acq_rel(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 4
-; RV32I-NEXT:    call __atomic_fetch_or_2@plt
+; RV32I-NEXT:    call __atomic_fetch_or_2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -9954,7 +9954,7 @@ define i16 @atomicrmw_or_i16_acq_rel(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 4
-; RV64I-NEXT:    call __atomic_fetch_or_2@plt
+; RV64I-NEXT:    call __atomic_fetch_or_2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -9990,7 +9990,7 @@ define i16 @atomicrmw_or_i16_seq_cst(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 5
-; RV32I-NEXT:    call __atomic_fetch_or_2@plt
+; RV32I-NEXT:    call __atomic_fetch_or_2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -10022,7 +10022,7 @@ define i16 @atomicrmw_or_i16_seq_cst(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 5
-; RV64I-NEXT:    call __atomic_fetch_or_2@plt
+; RV64I-NEXT:    call __atomic_fetch_or_2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -10058,7 +10058,7 @@ define i16 @atomicrmw_xor_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 0
-; RV32I-NEXT:    call __atomic_fetch_xor_2@plt
+; RV32I-NEXT:    call __atomic_fetch_xor_2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -10079,7 +10079,7 @@ define i16 @atomicrmw_xor_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 0
-; RV64I-NEXT:    call __atomic_fetch_xor_2@plt
+; RV64I-NEXT:    call __atomic_fetch_xor_2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -10104,7 +10104,7 @@ define i16 @atomicrmw_xor_i16_acquire(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 2
-; RV32I-NEXT:    call __atomic_fetch_xor_2@plt
+; RV32I-NEXT:    call __atomic_fetch_xor_2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -10136,7 +10136,7 @@ define i16 @atomicrmw_xor_i16_acquire(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 2
-; RV64I-NEXT:    call __atomic_fetch_xor_2@plt
+; RV64I-NEXT:    call __atomic_fetch_xor_2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -10172,7 +10172,7 @@ define i16 @atomicrmw_xor_i16_release(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 3
-; RV32I-NEXT:    call __atomic_fetch_xor_2@plt
+; RV32I-NEXT:    call __atomic_fetch_xor_2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -10204,7 +10204,7 @@ define i16 @atomicrmw_xor_i16_release(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 3
-; RV64I-NEXT:    call __atomic_fetch_xor_2@plt
+; RV64I-NEXT:    call __atomic_fetch_xor_2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -10240,7 +10240,7 @@ define i16 @atomicrmw_xor_i16_acq_rel(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 4
-; RV32I-NEXT:    call __atomic_fetch_xor_2@plt
+; RV32I-NEXT:    call __atomic_fetch_xor_2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -10272,7 +10272,7 @@ define i16 @atomicrmw_xor_i16_acq_rel(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 4
-; RV64I-NEXT:    call __atomic_fetch_xor_2@plt
+; RV64I-NEXT:    call __atomic_fetch_xor_2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -10308,7 +10308,7 @@ define i16 @atomicrmw_xor_i16_seq_cst(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 5
-; RV32I-NEXT:    call __atomic_fetch_xor_2@plt
+; RV32I-NEXT:    call __atomic_fetch_xor_2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -10340,7 +10340,7 @@ define i16 @atomicrmw_xor_i16_seq_cst(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 5
-; RV64I-NEXT:    call __atomic_fetch_xor_2@plt
+; RV64I-NEXT:    call __atomic_fetch_xor_2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -10391,7 +10391,7 @@ define i16 @atomicrmw_max_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:    li a4, 0
-; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_2
 ; RV32I-NEXT:    lh a3, 14(sp)
 ; RV32I-NEXT:    bnez a0, .LBB110_4
 ; RV32I-NEXT:  .LBB110_2: # %atomicrmw.start
@@ -10464,7 +10464,7 @@ define i16 @atomicrmw_max_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    li a3, 0
 ; RV64I-NEXT:    li a4, 0
-; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_2
 ; RV64I-NEXT:    lh a3, 14(sp)
 ; RV64I-NEXT:    bnez a0, .LBB110_4
 ; RV64I-NEXT:  .LBB110_2: # %atomicrmw.start
@@ -10541,7 +10541,7 @@ define i16 @atomicrmw_max_i16_acquire(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    li a3, 2
 ; RV32I-NEXT:    li a4, 2
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_2
 ; RV32I-NEXT:    lh a3, 14(sp)
 ; RV32I-NEXT:    bnez a0, .LBB111_4
 ; RV32I-NEXT:  .LBB111_2: # %atomicrmw.start
@@ -10645,7 +10645,7 @@ define i16 @atomicrmw_max_i16_acquire(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    li a3, 2
 ; RV64I-NEXT:    li a4, 2
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_2
 ; RV64I-NEXT:    lh a3, 14(sp)
 ; RV64I-NEXT:    bnez a0, .LBB111_4
 ; RV64I-NEXT:  .LBB111_2: # %atomicrmw.start
@@ -10753,7 +10753,7 @@ define i16 @atomicrmw_max_i16_release(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    li a3, 3
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    li a4, 0
-; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_2
 ; RV32I-NEXT:    lh a3, 14(sp)
 ; RV32I-NEXT:    bnez a0, .LBB112_4
 ; RV32I-NEXT:  .LBB112_2: # %atomicrmw.start
@@ -10857,7 +10857,7 @@ define i16 @atomicrmw_max_i16_release(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    li a3, 3
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    li a4, 0
-; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_2
 ; RV64I-NEXT:    lh a3, 14(sp)
 ; RV64I-NEXT:    bnez a0, .LBB112_4
 ; RV64I-NEXT:  .LBB112_2: # %atomicrmw.start
@@ -10965,7 +10965,7 @@ define i16 @atomicrmw_max_i16_acq_rel(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    li a3, 4
 ; RV32I-NEXT:    li a4, 2
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_2
 ; RV32I-NEXT:    lh a3, 14(sp)
 ; RV32I-NEXT:    bnez a0, .LBB113_4
 ; RV32I-NEXT:  .LBB113_2: # %atomicrmw.start
@@ -11069,7 +11069,7 @@ define i16 @atomicrmw_max_i16_acq_rel(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    li a3, 4
 ; RV64I-NEXT:    li a4, 2
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_2
 ; RV64I-NEXT:    lh a3, 14(sp)
 ; RV64I-NEXT:    bnez a0, .LBB113_4
 ; RV64I-NEXT:  .LBB113_2: # %atomicrmw.start
@@ -11177,7 +11177,7 @@ define i16 @atomicrmw_max_i16_seq_cst(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    li a3, 5
 ; RV32I-NEXT:    li a4, 5
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_2
 ; RV32I-NEXT:    lh a3, 14(sp)
 ; RV32I-NEXT:    bnez a0, .LBB114_4
 ; RV32I-NEXT:  .LBB114_2: # %atomicrmw.start
@@ -11250,7 +11250,7 @@ define i16 @atomicrmw_max_i16_seq_cst(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    li a3, 5
 ; RV64I-NEXT:    li a4, 5
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_2
 ; RV64I-NEXT:    lh a3, 14(sp)
 ; RV64I-NEXT:    bnez a0, .LBB114_4
 ; RV64I-NEXT:  .LBB114_2: # %atomicrmw.start
@@ -11327,7 +11327,7 @@ define i16 @atomicrmw_min_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:    li a4, 0
-; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_2
 ; RV32I-NEXT:    lh a3, 14(sp)
 ; RV32I-NEXT:    bnez a0, .LBB115_4
 ; RV32I-NEXT:  .LBB115_2: # %atomicrmw.start
@@ -11400,7 +11400,7 @@ define i16 @atomicrmw_min_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    li a3, 0
 ; RV64I-NEXT:    li a4, 0
-; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_2
 ; RV64I-NEXT:    lh a3, 14(sp)
 ; RV64I-NEXT:    bnez a0, .LBB115_4
 ; RV64I-NEXT:  .LBB115_2: # %atomicrmw.start
@@ -11477,7 +11477,7 @@ define i16 @atomicrmw_min_i16_acquire(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    li a3, 2
 ; RV32I-NEXT:    li a4, 2
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_2
 ; RV32I-NEXT:    lh a3, 14(sp)
 ; RV32I-NEXT:    bnez a0, .LBB116_4
 ; RV32I-NEXT:  .LBB116_2: # %atomicrmw.start
@@ -11581,7 +11581,7 @@ define i16 @atomicrmw_min_i16_acquire(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    li a3, 2
 ; RV64I-NEXT:    li a4, 2
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_2
 ; RV64I-NEXT:    lh a3, 14(sp)
 ; RV64I-NEXT:    bnez a0, .LBB116_4
 ; RV64I-NEXT:  .LBB116_2: # %atomicrmw.start
@@ -11689,7 +11689,7 @@ define i16 @atomicrmw_min_i16_release(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    li a3, 3
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    li a4, 0
-; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_2
 ; RV32I-NEXT:    lh a3, 14(sp)
 ; RV32I-NEXT:    bnez a0, .LBB117_4
 ; RV32I-NEXT:  .LBB117_2: # %atomicrmw.start
@@ -11793,7 +11793,7 @@ define i16 @atomicrmw_min_i16_release(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    li a3, 3
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    li a4, 0
-; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_2
 ; RV64I-NEXT:    lh a3, 14(sp)
 ; RV64I-NEXT:    bnez a0, .LBB117_4
 ; RV64I-NEXT:  .LBB117_2: # %atomicrmw.start
@@ -11901,7 +11901,7 @@ define i16 @atomicrmw_min_i16_acq_rel(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    li a3, 4
 ; RV32I-NEXT:    li a4, 2
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_2
 ; RV32I-NEXT:    lh a3, 14(sp)
 ; RV32I-NEXT:    bnez a0, .LBB118_4
 ; RV32I-NEXT:  .LBB118_2: # %atomicrmw.start
@@ -12005,7 +12005,7 @@ define i16 @atomicrmw_min_i16_acq_rel(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    li a3, 4
 ; RV64I-NEXT:    li a4, 2
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_2
 ; RV64I-NEXT:    lh a3, 14(sp)
 ; RV64I-NEXT:    bnez a0, .LBB118_4
 ; RV64I-NEXT:  .LBB118_2: # %atomicrmw.start
@@ -12113,7 +12113,7 @@ define i16 @atomicrmw_min_i16_seq_cst(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    li a3, 5
 ; RV32I-NEXT:    li a4, 5
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_2
 ; RV32I-NEXT:    lh a3, 14(sp)
 ; RV32I-NEXT:    bnez a0, .LBB119_4
 ; RV32I-NEXT:  .LBB119_2: # %atomicrmw.start
@@ -12186,7 +12186,7 @@ define i16 @atomicrmw_min_i16_seq_cst(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    li a3, 5
 ; RV64I-NEXT:    li a4, 5
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_2
 ; RV64I-NEXT:    lh a3, 14(sp)
 ; RV64I-NEXT:    bnez a0, .LBB119_4
 ; RV64I-NEXT:  .LBB119_2: # %atomicrmw.start
@@ -12265,7 +12265,7 @@ define i16 @atomicrmw_umax_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    mv a0, s1
 ; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:    li a4, 0
-; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_2
 ; RV32I-NEXT:    lh a1, 10(sp)
 ; RV32I-NEXT:    bnez a0, .LBB120_4
 ; RV32I-NEXT:  .LBB120_2: # %atomicrmw.start
@@ -12334,7 +12334,7 @@ define i16 @atomicrmw_umax_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    mv a0, s1
 ; RV64I-NEXT:    li a3, 0
 ; RV64I-NEXT:    li a4, 0
-; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_2
 ; RV64I-NEXT:    lh a1, 6(sp)
 ; RV64I-NEXT:    bnez a0, .LBB120_4
 ; RV64I-NEXT:  .LBB120_2: # %atomicrmw.start
@@ -12407,7 +12407,7 @@ define i16 @atomicrmw_umax_i16_acquire(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    li a3, 2
 ; RV32I-NEXT:    li a4, 2
 ; RV32I-NEXT:    mv a0, s1
-; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_2
 ; RV32I-NEXT:    lh a1, 10(sp)
 ; RV32I-NEXT:    bnez a0, .LBB121_4
 ; RV32I-NEXT:  .LBB121_2: # %atomicrmw.start
@@ -12501,7 +12501,7 @@ define i16 @atomicrmw_umax_i16_acquire(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    li a3, 2
 ; RV64I-NEXT:    li a4, 2
 ; RV64I-NEXT:    mv a0, s1
-; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_2
 ; RV64I-NEXT:    lh a1, 6(sp)
 ; RV64I-NEXT:    bnez a0, .LBB121_4
 ; RV64I-NEXT:  .LBB121_2: # %atomicrmw.start
@@ -12599,7 +12599,7 @@ define i16 @atomicrmw_umax_i16_release(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    li a3, 3
 ; RV32I-NEXT:    mv a0, s1
 ; RV32I-NEXT:    li a4, 0
-; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_2
 ; RV32I-NEXT:    lh a1, 10(sp)
 ; RV32I-NEXT:    bnez a0, .LBB122_4
 ; RV32I-NEXT:  .LBB122_2: # %atomicrmw.start
@@ -12693,7 +12693,7 @@ define i16 @atomicrmw_umax_i16_release(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    li a3, 3
 ; RV64I-NEXT:    mv a0, s1
 ; RV64I-NEXT:    li a4, 0
-; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_2
 ; RV64I-NEXT:    lh a1, 6(sp)
 ; RV64I-NEXT:    bnez a0, .LBB122_4
 ; RV64I-NEXT:  .LBB122_2: # %atomicrmw.start
@@ -12791,7 +12791,7 @@ define i16 @atomicrmw_umax_i16_acq_rel(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    li a3, 4
 ; RV32I-NEXT:    li a4, 2
 ; RV32I-NEXT:    mv a0, s1
-; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_2
 ; RV32I-NEXT:    lh a1, 10(sp)
 ; RV32I-NEXT:    bnez a0, .LBB123_4
 ; RV32I-NEXT:  .LBB123_2: # %atomicrmw.start
@@ -12885,7 +12885,7 @@ define i16 @atomicrmw_umax_i16_acq_rel(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    li a3, 4
 ; RV64I-NEXT:    li a4, 2
 ; RV64I-NEXT:    mv a0, s1
-; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_2
 ; RV64I-NEXT:    lh a1, 6(sp)
 ; RV64I-NEXT:    bnez a0, .LBB123_4
 ; RV64I-NEXT:  .LBB123_2: # %atomicrmw.start
@@ -12983,7 +12983,7 @@ define i16 @atomicrmw_umax_i16_seq_cst(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    li a3, 5
 ; RV32I-NEXT:    li a4, 5
 ; RV32I-NEXT:    mv a0, s1
-; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_2
 ; RV32I-NEXT:    lh a1, 10(sp)
 ; RV32I-NEXT:    bnez a0, .LBB124_4
 ; RV32I-NEXT:  .LBB124_2: # %atomicrmw.start
@@ -13052,7 +13052,7 @@ define i16 @atomicrmw_umax_i16_seq_cst(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    li a3, 5
 ; RV64I-NEXT:    li a4, 5
 ; RV64I-NEXT:    mv a0, s1
-; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_2
 ; RV64I-NEXT:    lh a1, 6(sp)
 ; RV64I-NEXT:    bnez a0, .LBB124_4
 ; RV64I-NEXT:  .LBB124_2: # %atomicrmw.start
@@ -13125,7 +13125,7 @@ define i16 @atomicrmw_umin_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    mv a0, s1
 ; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:    li a4, 0
-; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_2
 ; RV32I-NEXT:    lh a1, 10(sp)
 ; RV32I-NEXT:    bnez a0, .LBB125_4
 ; RV32I-NEXT:  .LBB125_2: # %atomicrmw.start
@@ -13194,7 +13194,7 @@ define i16 @atomicrmw_umin_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    mv a0, s1
 ; RV64I-NEXT:    li a3, 0
 ; RV64I-NEXT:    li a4, 0
-; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_2
 ; RV64I-NEXT:    lh a1, 6(sp)
 ; RV64I-NEXT:    bnez a0, .LBB125_4
 ; RV64I-NEXT:  .LBB125_2: # %atomicrmw.start
@@ -13267,7 +13267,7 @@ define i16 @atomicrmw_umin_i16_acquire(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    li a3, 2
 ; RV32I-NEXT:    li a4, 2
 ; RV32I-NEXT:    mv a0, s1
-; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_2
 ; RV32I-NEXT:    lh a1, 10(sp)
 ; RV32I-NEXT:    bnez a0, .LBB126_4
 ; RV32I-NEXT:  .LBB126_2: # %atomicrmw.start
@@ -13361,7 +13361,7 @@ define i16 @atomicrmw_umin_i16_acquire(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    li a3, 2
 ; RV64I-NEXT:    li a4, 2
 ; RV64I-NEXT:    mv a0, s1
-; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_2
 ; RV64I-NEXT:    lh a1, 6(sp)
 ; RV64I-NEXT:    bnez a0, .LBB126_4
 ; RV64I-NEXT:  .LBB126_2: # %atomicrmw.start
@@ -13459,7 +13459,7 @@ define i16 @atomicrmw_umin_i16_release(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    li a3, 3
 ; RV32I-NEXT:    mv a0, s1
 ; RV32I-NEXT:    li a4, 0
-; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_2
 ; RV32I-NEXT:    lh a1, 10(sp)
 ; RV32I-NEXT:    bnez a0, .LBB127_4
 ; RV32I-NEXT:  .LBB127_2: # %atomicrmw.start
@@ -13553,7 +13553,7 @@ define i16 @atomicrmw_umin_i16_release(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    li a3, 3
 ; RV64I-NEXT:    mv a0, s1
 ; RV64I-NEXT:    li a4, 0
-; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_2
 ; RV64I-NEXT:    lh a1, 6(sp)
 ; RV64I-NEXT:    bnez a0, .LBB127_4
 ; RV64I-NEXT:  .LBB127_2: # %atomicrmw.start
@@ -13651,7 +13651,7 @@ define i16 @atomicrmw_umin_i16_acq_rel(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    li a3, 4
 ; RV32I-NEXT:    li a4, 2
 ; RV32I-NEXT:    mv a0, s1
-; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_2
 ; RV32I-NEXT:    lh a1, 10(sp)
 ; RV32I-NEXT:    bnez a0, .LBB128_4
 ; RV32I-NEXT:  .LBB128_2: # %atomicrmw.start
@@ -13745,7 +13745,7 @@ define i16 @atomicrmw_umin_i16_acq_rel(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    li a3, 4
 ; RV64I-NEXT:    li a4, 2
 ; RV64I-NEXT:    mv a0, s1
-; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_2
 ; RV64I-NEXT:    lh a1, 6(sp)
 ; RV64I-NEXT:    bnez a0, .LBB128_4
 ; RV64I-NEXT:  .LBB128_2: # %atomicrmw.start
@@ -13843,7 +13843,7 @@ define i16 @atomicrmw_umin_i16_seq_cst(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    li a3, 5
 ; RV32I-NEXT:    li a4, 5
 ; RV32I-NEXT:    mv a0, s1
-; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_2
 ; RV32I-NEXT:    lh a1, 10(sp)
 ; RV32I-NEXT:    bnez a0, .LBB129_4
 ; RV32I-NEXT:  .LBB129_2: # %atomicrmw.start
@@ -13912,7 +13912,7 @@ define i16 @atomicrmw_umin_i16_seq_cst(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    li a3, 5
 ; RV64I-NEXT:    li a4, 5
 ; RV64I-NEXT:    mv a0, s1
-; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_2
 ; RV64I-NEXT:    lh a1, 6(sp)
 ; RV64I-NEXT:    bnez a0, .LBB129_4
 ; RV64I-NEXT:  .LBB129_2: # %atomicrmw.start
@@ -13968,7 +13968,7 @@ define i32 @atomicrmw_xchg_i32_monotonic(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 0
-; RV32I-NEXT:    call __atomic_exchange_4@plt
+; RV32I-NEXT:    call __atomic_exchange_4
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -13983,7 +13983,7 @@ define i32 @atomicrmw_xchg_i32_monotonic(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 0
-; RV64I-NEXT:    call __atomic_exchange_4@plt
+; RV64I-NEXT:    call __atomic_exchange_4
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -14002,7 +14002,7 @@ define i32 @atomicrmw_xchg_i32_acquire(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 2
-; RV32I-NEXT:    call __atomic_exchange_4@plt
+; RV32I-NEXT:    call __atomic_exchange_4
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -14022,7 +14022,7 @@ define i32 @atomicrmw_xchg_i32_acquire(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 2
-; RV64I-NEXT:    call __atomic_exchange_4@plt
+; RV64I-NEXT:    call __atomic_exchange_4
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -14046,7 +14046,7 @@ define i32 @atomicrmw_xchg_i32_release(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 3
-; RV32I-NEXT:    call __atomic_exchange_4@plt
+; RV32I-NEXT:    call __atomic_exchange_4
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -14066,7 +14066,7 @@ define i32 @atomicrmw_xchg_i32_release(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 3
-; RV64I-NEXT:    call __atomic_exchange_4@plt
+; RV64I-NEXT:    call __atomic_exchange_4
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -14090,7 +14090,7 @@ define i32 @atomicrmw_xchg_i32_acq_rel(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 4
-; RV32I-NEXT:    call __atomic_exchange_4@plt
+; RV32I-NEXT:    call __atomic_exchange_4
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -14110,7 +14110,7 @@ define i32 @atomicrmw_xchg_i32_acq_rel(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 4
-; RV64I-NEXT:    call __atomic_exchange_4@plt
+; RV64I-NEXT:    call __atomic_exchange_4
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -14134,7 +14134,7 @@ define i32 @atomicrmw_xchg_i32_seq_cst(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 5
-; RV32I-NEXT:    call __atomic_exchange_4@plt
+; RV32I-NEXT:    call __atomic_exchange_4
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -14154,7 +14154,7 @@ define i32 @atomicrmw_xchg_i32_seq_cst(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 5
-; RV64I-NEXT:    call __atomic_exchange_4@plt
+; RV64I-NEXT:    call __atomic_exchange_4
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -14178,7 +14178,7 @@ define i32 @atomicrmw_add_i32_monotonic(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 0
-; RV32I-NEXT:    call __atomic_fetch_add_4@plt
+; RV32I-NEXT:    call __atomic_fetch_add_4
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -14193,7 +14193,7 @@ define i32 @atomicrmw_add_i32_monotonic(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 0
-; RV64I-NEXT:    call __atomic_fetch_add_4@plt
+; RV64I-NEXT:    call __atomic_fetch_add_4
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -14212,7 +14212,7 @@ define i32 @atomicrmw_add_i32_acquire(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 2
-; RV32I-NEXT:    call __atomic_fetch_add_4@plt
+; RV32I-NEXT:    call __atomic_fetch_add_4
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -14232,7 +14232,7 @@ define i32 @atomicrmw_add_i32_acquire(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 2
-; RV64I-NEXT:    call __atomic_fetch_add_4@plt
+; RV64I-NEXT:    call __atomic_fetch_add_4
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -14256,7 +14256,7 @@ define i32 @atomicrmw_add_i32_release(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 3
-; RV32I-NEXT:    call __atomic_fetch_add_4@plt
+; RV32I-NEXT:    call __atomic_fetch_add_4
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -14276,7 +14276,7 @@ define i32 @atomicrmw_add_i32_release(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 3
-; RV64I-NEXT:    call __atomic_fetch_add_4@plt
+; RV64I-NEXT:    call __atomic_fetch_add_4
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -14300,7 +14300,7 @@ define i32 @atomicrmw_add_i32_acq_rel(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 4
-; RV32I-NEXT:    call __atomic_fetch_add_4@plt
+; RV32I-NEXT:    call __atomic_fetch_add_4
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -14320,7 +14320,7 @@ define i32 @atomicrmw_add_i32_acq_rel(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 4
-; RV64I-NEXT:    call __atomic_fetch_add_4@plt
+; RV64I-NEXT:    call __atomic_fetch_add_4
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -14344,7 +14344,7 @@ define i32 @atomicrmw_add_i32_seq_cst(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 5
-; RV32I-NEXT:    call __atomic_fetch_add_4@plt
+; RV32I-NEXT:    call __atomic_fetch_add_4
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -14364,7 +14364,7 @@ define i32 @atomicrmw_add_i32_seq_cst(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 5
-; RV64I-NEXT:    call __atomic_fetch_add_4@plt
+; RV64I-NEXT:    call __atomic_fetch_add_4
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -14388,7 +14388,7 @@ define i32 @atomicrmw_sub_i32_monotonic(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 0
-; RV32I-NEXT:    call __atomic_fetch_sub_4@plt
+; RV32I-NEXT:    call __atomic_fetch_sub_4
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -14404,7 +14404,7 @@ define i32 @atomicrmw_sub_i32_monotonic(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 0
-; RV64I-NEXT:    call __atomic_fetch_sub_4@plt
+; RV64I-NEXT:    call __atomic_fetch_sub_4
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -14424,7 +14424,7 @@ define i32 @atomicrmw_sub_i32_acquire(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 2
-; RV32I-NEXT:    call __atomic_fetch_sub_4@plt
+; RV32I-NEXT:    call __atomic_fetch_sub_4
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -14446,7 +14446,7 @@ define i32 @atomicrmw_sub_i32_acquire(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 2
-; RV64I-NEXT:    call __atomic_fetch_sub_4@plt
+; RV64I-NEXT:    call __atomic_fetch_sub_4
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -14472,7 +14472,7 @@ define i32 @atomicrmw_sub_i32_release(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 3
-; RV32I-NEXT:    call __atomic_fetch_sub_4@plt
+; RV32I-NEXT:    call __atomic_fetch_sub_4
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -14494,7 +14494,7 @@ define i32 @atomicrmw_sub_i32_release(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 3
-; RV64I-NEXT:    call __atomic_fetch_sub_4@plt
+; RV64I-NEXT:    call __atomic_fetch_sub_4
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -14520,7 +14520,7 @@ define i32 @atomicrmw_sub_i32_acq_rel(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 4
-; RV32I-NEXT:    call __atomic_fetch_sub_4@plt
+; RV32I-NEXT:    call __atomic_fetch_sub_4
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -14542,7 +14542,7 @@ define i32 @atomicrmw_sub_i32_acq_rel(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 4
-; RV64I-NEXT:    call __atomic_fetch_sub_4@plt
+; RV64I-NEXT:    call __atomic_fetch_sub_4
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -14568,7 +14568,7 @@ define i32 @atomicrmw_sub_i32_seq_cst(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 5
-; RV32I-NEXT:    call __atomic_fetch_sub_4@plt
+; RV32I-NEXT:    call __atomic_fetch_sub_4
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -14590,7 +14590,7 @@ define i32 @atomicrmw_sub_i32_seq_cst(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 5
-; RV64I-NEXT:    call __atomic_fetch_sub_4@plt
+; RV64I-NEXT:    call __atomic_fetch_sub_4
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -14616,7 +14616,7 @@ define i32 @atomicrmw_and_i32_monotonic(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 0
-; RV32I-NEXT:    call __atomic_fetch_and_4@plt
+; RV32I-NEXT:    call __atomic_fetch_and_4
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -14631,7 +14631,7 @@ define i32 @atomicrmw_and_i32_monotonic(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 0
-; RV64I-NEXT:    call __atomic_fetch_and_4@plt
+; RV64I-NEXT:    call __atomic_fetch_and_4
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -14650,7 +14650,7 @@ define i32 @atomicrmw_and_i32_acquire(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 2
-; RV32I-NEXT:    call __atomic_fetch_and_4@plt
+; RV32I-NEXT:    call __atomic_fetch_and_4
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -14670,7 +14670,7 @@ define i32 @atomicrmw_and_i32_acquire(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 2
-; RV64I-NEXT:    call __atomic_fetch_and_4@plt
+; RV64I-NEXT:    call __atomic_fetch_and_4
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -14694,7 +14694,7 @@ define i32 @atomicrmw_and_i32_release(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 3
-; RV32I-NEXT:    call __atomic_fetch_and_4@plt
+; RV32I-NEXT:    call __atomic_fetch_and_4
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -14714,7 +14714,7 @@ define i32 @atomicrmw_and_i32_release(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 3
-; RV64I-NEXT:    call __atomic_fetch_and_4@plt
+; RV64I-NEXT:    call __atomic_fetch_and_4
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -14738,7 +14738,7 @@ define i32 @atomicrmw_and_i32_acq_rel(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 4
-; RV32I-NEXT:    call __atomic_fetch_and_4@plt
+; RV32I-NEXT:    call __atomic_fetch_and_4
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -14758,7 +14758,7 @@ define i32 @atomicrmw_and_i32_acq_rel(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 4
-; RV64I-NEXT:    call __atomic_fetch_and_4@plt
+; RV64I-NEXT:    call __atomic_fetch_and_4
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -14782,7 +14782,7 @@ define i32 @atomicrmw_and_i32_seq_cst(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 5
-; RV32I-NEXT:    call __atomic_fetch_and_4@plt
+; RV32I-NEXT:    call __atomic_fetch_and_4
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -14802,7 +14802,7 @@ define i32 @atomicrmw_and_i32_seq_cst(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 5
-; RV64I-NEXT:    call __atomic_fetch_and_4@plt
+; RV64I-NEXT:    call __atomic_fetch_and_4
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -14826,7 +14826,7 @@ define i32 @atomicrmw_nand_i32_monotonic(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 0
-; RV32I-NEXT:    call __atomic_fetch_nand_4@plt
+; RV32I-NEXT:    call __atomic_fetch_nand_4
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -14848,7 +14848,7 @@ define i32 @atomicrmw_nand_i32_monotonic(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 0
-; RV64I-NEXT:    call __atomic_fetch_nand_4@plt
+; RV64I-NEXT:    call __atomic_fetch_nand_4
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -14874,7 +14874,7 @@ define i32 @atomicrmw_nand_i32_acquire(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 2
-; RV32I-NEXT:    call __atomic_fetch_nand_4@plt
+; RV32I-NEXT:    call __atomic_fetch_nand_4
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -14908,7 +14908,7 @@ define i32 @atomicrmw_nand_i32_acquire(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 2
-; RV64I-NEXT:    call __atomic_fetch_nand_4@plt
+; RV64I-NEXT:    call __atomic_fetch_nand_4
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -14946,7 +14946,7 @@ define i32 @atomicrmw_nand_i32_release(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 3
-; RV32I-NEXT:    call __atomic_fetch_nand_4@plt
+; RV32I-NEXT:    call __atomic_fetch_nand_4
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -14980,7 +14980,7 @@ define i32 @atomicrmw_nand_i32_release(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 3
-; RV64I-NEXT:    call __atomic_fetch_nand_4@plt
+; RV64I-NEXT:    call __atomic_fetch_nand_4
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -15018,7 +15018,7 @@ define i32 @atomicrmw_nand_i32_acq_rel(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 4
-; RV32I-NEXT:    call __atomic_fetch_nand_4@plt
+; RV32I-NEXT:    call __atomic_fetch_nand_4
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -15052,7 +15052,7 @@ define i32 @atomicrmw_nand_i32_acq_rel(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 4
-; RV64I-NEXT:    call __atomic_fetch_nand_4@plt
+; RV64I-NEXT:    call __atomic_fetch_nand_4
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -15090,7 +15090,7 @@ define i32 @atomicrmw_nand_i32_seq_cst(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 5
-; RV32I-NEXT:    call __atomic_fetch_nand_4@plt
+; RV32I-NEXT:    call __atomic_fetch_nand_4
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -15112,7 +15112,7 @@ define i32 @atomicrmw_nand_i32_seq_cst(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 5
-; RV64I-NEXT:    call __atomic_fetch_nand_4@plt
+; RV64I-NEXT:    call __atomic_fetch_nand_4
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -15138,7 +15138,7 @@ define i32 @atomicrmw_or_i32_monotonic(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 0
-; RV32I-NEXT:    call __atomic_fetch_or_4@plt
+; RV32I-NEXT:    call __atomic_fetch_or_4
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -15153,7 +15153,7 @@ define i32 @atomicrmw_or_i32_monotonic(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 0
-; RV64I-NEXT:    call __atomic_fetch_or_4@plt
+; RV64I-NEXT:    call __atomic_fetch_or_4
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -15172,7 +15172,7 @@ define i32 @atomicrmw_or_i32_acquire(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 2
-; RV32I-NEXT:    call __atomic_fetch_or_4@plt
+; RV32I-NEXT:    call __atomic_fetch_or_4
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -15192,7 +15192,7 @@ define i32 @atomicrmw_or_i32_acquire(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 2
-; RV64I-NEXT:    call __atomic_fetch_or_4@plt
+; RV64I-NEXT:    call __atomic_fetch_or_4
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -15216,7 +15216,7 @@ define i32 @atomicrmw_or_i32_release(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 3
-; RV32I-NEXT:    call __atomic_fetch_or_4@plt
+; RV32I-NEXT:    call __atomic_fetch_or_4
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -15236,7 +15236,7 @@ define i32 @atomicrmw_or_i32_release(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 3
-; RV64I-NEXT:    call __atomic_fetch_or_4@plt
+; RV64I-NEXT:    call __atomic_fetch_or_4
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -15260,7 +15260,7 @@ define i32 @atomicrmw_or_i32_acq_rel(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 4
-; RV32I-NEXT:    call __atomic_fetch_or_4@plt
+; RV32I-NEXT:    call __atomic_fetch_or_4
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -15280,7 +15280,7 @@ define i32 @atomicrmw_or_i32_acq_rel(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 4
-; RV64I-NEXT:    call __atomic_fetch_or_4@plt
+; RV64I-NEXT:    call __atomic_fetch_or_4
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -15304,7 +15304,7 @@ define i32 @atomicrmw_or_i32_seq_cst(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 5
-; RV32I-NEXT:    call __atomic_fetch_or_4@plt
+; RV32I-NEXT:    call __atomic_fetch_or_4
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -15324,7 +15324,7 @@ define i32 @atomicrmw_or_i32_seq_cst(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 5
-; RV64I-NEXT:    call __atomic_fetch_or_4@plt
+; RV64I-NEXT:    call __atomic_fetch_or_4
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -15348,7 +15348,7 @@ define i32 @atomicrmw_xor_i32_monotonic(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 0
-; RV32I-NEXT:    call __atomic_fetch_xor_4@plt
+; RV32I-NEXT:    call __atomic_fetch_xor_4
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -15363,7 +15363,7 @@ define i32 @atomicrmw_xor_i32_monotonic(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 0
-; RV64I-NEXT:    call __atomic_fetch_xor_4@plt
+; RV64I-NEXT:    call __atomic_fetch_xor_4
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -15382,7 +15382,7 @@ define i32 @atomicrmw_xor_i32_acquire(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 2
-; RV32I-NEXT:    call __atomic_fetch_xor_4@plt
+; RV32I-NEXT:    call __atomic_fetch_xor_4
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -15402,7 +15402,7 @@ define i32 @atomicrmw_xor_i32_acquire(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 2
-; RV64I-NEXT:    call __atomic_fetch_xor_4@plt
+; RV64I-NEXT:    call __atomic_fetch_xor_4
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -15426,7 +15426,7 @@ define i32 @atomicrmw_xor_i32_release(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 3
-; RV32I-NEXT:    call __atomic_fetch_xor_4@plt
+; RV32I-NEXT:    call __atomic_fetch_xor_4
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -15446,7 +15446,7 @@ define i32 @atomicrmw_xor_i32_release(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 3
-; RV64I-NEXT:    call __atomic_fetch_xor_4@plt
+; RV64I-NEXT:    call __atomic_fetch_xor_4
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -15470,7 +15470,7 @@ define i32 @atomicrmw_xor_i32_acq_rel(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 4
-; RV32I-NEXT:    call __atomic_fetch_xor_4@plt
+; RV32I-NEXT:    call __atomic_fetch_xor_4
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -15490,7 +15490,7 @@ define i32 @atomicrmw_xor_i32_acq_rel(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 4
-; RV64I-NEXT:    call __atomic_fetch_xor_4@plt
+; RV64I-NEXT:    call __atomic_fetch_xor_4
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -15514,7 +15514,7 @@ define i32 @atomicrmw_xor_i32_seq_cst(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 5
-; RV32I-NEXT:    call __atomic_fetch_xor_4@plt
+; RV32I-NEXT:    call __atomic_fetch_xor_4
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -15534,7 +15534,7 @@ define i32 @atomicrmw_xor_i32_seq_cst(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 5
-; RV64I-NEXT:    call __atomic_fetch_xor_4@plt
+; RV64I-NEXT:    call __atomic_fetch_xor_4
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -15570,7 +15570,7 @@ define i32 @atomicrmw_max_i32_monotonic(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:    li a4, 0
-; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_4
 ; RV32I-NEXT:    lw a3, 0(sp)
 ; RV32I-NEXT:    bnez a0, .LBB165_4
 ; RV32I-NEXT:  .LBB165_2: # %atomicrmw.start
@@ -15613,7 +15613,7 @@ define i32 @atomicrmw_max_i32_monotonic(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    li a3, 0
 ; RV64I-NEXT:    li a4, 0
-; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_4
 ; RV64I-NEXT:    lw a3, 12(sp)
 ; RV64I-NEXT:    bnez a0, .LBB165_4
 ; RV64I-NEXT:  .LBB165_2: # %atomicrmw.start
@@ -15659,7 +15659,7 @@ define i32 @atomicrmw_max_i32_acquire(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    li a3, 2
 ; RV32I-NEXT:    li a4, 2
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_4
 ; RV32I-NEXT:    lw a3, 0(sp)
 ; RV32I-NEXT:    bnez a0, .LBB166_4
 ; RV32I-NEXT:  .LBB166_2: # %atomicrmw.start
@@ -15707,7 +15707,7 @@ define i32 @atomicrmw_max_i32_acquire(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    li a3, 2
 ; RV64I-NEXT:    li a4, 2
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_4
 ; RV64I-NEXT:    lw a3, 12(sp)
 ; RV64I-NEXT:    bnez a0, .LBB166_4
 ; RV64I-NEXT:  .LBB166_2: # %atomicrmw.start
@@ -15758,7 +15758,7 @@ define i32 @atomicrmw_max_i32_release(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    li a3, 3
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    li a4, 0
-; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_4
 ; RV32I-NEXT:    lw a3, 0(sp)
 ; RV32I-NEXT:    bnez a0, .LBB167_4
 ; RV32I-NEXT:  .LBB167_2: # %atomicrmw.start
@@ -15806,7 +15806,7 @@ define i32 @atomicrmw_max_i32_release(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    li a3, 3
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    li a4, 0
-; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_4
 ; RV64I-NEXT:    lw a3, 12(sp)
 ; RV64I-NEXT:    bnez a0, .LBB167_4
 ; RV64I-NEXT:  .LBB167_2: # %atomicrmw.start
@@ -15857,7 +15857,7 @@ define i32 @atomicrmw_max_i32_acq_rel(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    li a3, 4
 ; RV32I-NEXT:    li a4, 2
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_4
 ; RV32I-NEXT:    lw a3, 0(sp)
 ; RV32I-NEXT:    bnez a0, .LBB168_4
 ; RV32I-NEXT:  .LBB168_2: # %atomicrmw.start
@@ -15905,7 +15905,7 @@ define i32 @atomicrmw_max_i32_acq_rel(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    li a3, 4
 ; RV64I-NEXT:    li a4, 2
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_4
 ; RV64I-NEXT:    lw a3, 12(sp)
 ; RV64I-NEXT:    bnez a0, .LBB168_4
 ; RV64I-NEXT:  .LBB168_2: # %atomicrmw.start
@@ -15956,7 +15956,7 @@ define i32 @atomicrmw_max_i32_seq_cst(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    li a3, 5
 ; RV32I-NEXT:    li a4, 5
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_4
 ; RV32I-NEXT:    lw a3, 0(sp)
 ; RV32I-NEXT:    bnez a0, .LBB169_4
 ; RV32I-NEXT:  .LBB169_2: # %atomicrmw.start
@@ -16004,7 +16004,7 @@ define i32 @atomicrmw_max_i32_seq_cst(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    li a3, 5
 ; RV64I-NEXT:    li a4, 5
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_4
 ; RV64I-NEXT:    lw a3, 12(sp)
 ; RV64I-NEXT:    bnez a0, .LBB169_4
 ; RV64I-NEXT:  .LBB169_2: # %atomicrmw.start
@@ -16055,7 +16055,7 @@ define i32 @atomicrmw_min_i32_monotonic(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:    li a4, 0
-; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_4
 ; RV32I-NEXT:    lw a3, 0(sp)
 ; RV32I-NEXT:    bnez a0, .LBB170_4
 ; RV32I-NEXT:  .LBB170_2: # %atomicrmw.start
@@ -16098,7 +16098,7 @@ define i32 @atomicrmw_min_i32_monotonic(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    li a3, 0
 ; RV64I-NEXT:    li a4, 0
-; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_4
 ; RV64I-NEXT:    lw a3, 12(sp)
 ; RV64I-NEXT:    bnez a0, .LBB170_4
 ; RV64I-NEXT:  .LBB170_2: # %atomicrmw.start
@@ -16144,7 +16144,7 @@ define i32 @atomicrmw_min_i32_acquire(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    li a3, 2
 ; RV32I-NEXT:    li a4, 2
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_4
 ; RV32I-NEXT:    lw a3, 0(sp)
 ; RV32I-NEXT:    bnez a0, .LBB171_4
 ; RV32I-NEXT:  .LBB171_2: # %atomicrmw.start
@@ -16192,7 +16192,7 @@ define i32 @atomicrmw_min_i32_acquire(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    li a3, 2
 ; RV64I-NEXT:    li a4, 2
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_4
 ; RV64I-NEXT:    lw a3, 12(sp)
 ; RV64I-NEXT:    bnez a0, .LBB171_4
 ; RV64I-NEXT:  .LBB171_2: # %atomicrmw.start
@@ -16243,7 +16243,7 @@ define i32 @atomicrmw_min_i32_release(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    li a3, 3
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    li a4, 0
-; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_4
 ; RV32I-NEXT:    lw a3, 0(sp)
 ; RV32I-NEXT:    bnez a0, .LBB172_4
 ; RV32I-NEXT:  .LBB172_2: # %atomicrmw.start
@@ -16291,7 +16291,7 @@ define i32 @atomicrmw_min_i32_release(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    li a3, 3
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    li a4, 0
-; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_4
 ; RV64I-NEXT:    lw a3, 12(sp)
 ; RV64I-NEXT:    bnez a0, .LBB172_4
 ; RV64I-NEXT:  .LBB172_2: # %atomicrmw.start
@@ -16342,7 +16342,7 @@ define i32 @atomicrmw_min_i32_acq_rel(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    li a3, 4
 ; RV32I-NEXT:    li a4, 2
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_4
 ; RV32I-NEXT:    lw a3, 0(sp)
 ; RV32I-NEXT:    bnez a0, .LBB173_4
 ; RV32I-NEXT:  .LBB173_2: # %atomicrmw.start
@@ -16390,7 +16390,7 @@ define i32 @atomicrmw_min_i32_acq_rel(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    li a3, 4
 ; RV64I-NEXT:    li a4, 2
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_4
 ; RV64I-NEXT:    lw a3, 12(sp)
 ; RV64I-NEXT:    bnez a0, .LBB173_4
 ; RV64I-NEXT:  .LBB173_2: # %atomicrmw.start
@@ -16441,7 +16441,7 @@ define i32 @atomicrmw_min_i32_seq_cst(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    li a3, 5
 ; RV32I-NEXT:    li a4, 5
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_4
 ; RV32I-NEXT:    lw a3, 0(sp)
 ; RV32I-NEXT:    bnez a0, .LBB174_4
 ; RV32I-NEXT:  .LBB174_2: # %atomicrmw.start
@@ -16489,7 +16489,7 @@ define i32 @atomicrmw_min_i32_seq_cst(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    li a3, 5
 ; RV64I-NEXT:    li a4, 5
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_4
 ; RV64I-NEXT:    lw a3, 12(sp)
 ; RV64I-NEXT:    bnez a0, .LBB174_4
 ; RV64I-NEXT:  .LBB174_2: # %atomicrmw.start
@@ -16540,7 +16540,7 @@ define i32 @atomicrmw_umax_i32_monotonic(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:    li a4, 0
-; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_4
 ; RV32I-NEXT:    lw a3, 0(sp)
 ; RV32I-NEXT:    bnez a0, .LBB175_4
 ; RV32I-NEXT:  .LBB175_2: # %atomicrmw.start
@@ -16583,7 +16583,7 @@ define i32 @atomicrmw_umax_i32_monotonic(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    li a3, 0
 ; RV64I-NEXT:    li a4, 0
-; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_4
 ; RV64I-NEXT:    lw a3, 12(sp)
 ; RV64I-NEXT:    bnez a0, .LBB175_4
 ; RV64I-NEXT:  .LBB175_2: # %atomicrmw.start
@@ -16629,7 +16629,7 @@ define i32 @atomicrmw_umax_i32_acquire(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    li a3, 2
 ; RV32I-NEXT:    li a4, 2
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_4
 ; RV32I-NEXT:    lw a3, 0(sp)
 ; RV32I-NEXT:    bnez a0, .LBB176_4
 ; RV32I-NEXT:  .LBB176_2: # %atomicrmw.start
@@ -16677,7 +16677,7 @@ define i32 @atomicrmw_umax_i32_acquire(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    li a3, 2
 ; RV64I-NEXT:    li a4, 2
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_4
 ; RV64I-NEXT:    lw a3, 12(sp)
 ; RV64I-NEXT:    bnez a0, .LBB176_4
 ; RV64I-NEXT:  .LBB176_2: # %atomicrmw.start
@@ -16728,7 +16728,7 @@ define i32 @atomicrmw_umax_i32_release(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    li a3, 3
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    li a4, 0
-; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_4
 ; RV32I-NEXT:    lw a3, 0(sp)
 ; RV32I-NEXT:    bnez a0, .LBB177_4
 ; RV32I-NEXT:  .LBB177_2: # %atomicrmw.start
@@ -16776,7 +16776,7 @@ define i32 @atomicrmw_umax_i32_release(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    li a3, 3
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    li a4, 0
-; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_4
 ; RV64I-NEXT:    lw a3, 12(sp)
 ; RV64I-NEXT:    bnez a0, .LBB177_4
 ; RV64I-NEXT:  .LBB177_2: # %atomicrmw.start
@@ -16827,7 +16827,7 @@ define i32 @atomicrmw_umax_i32_acq_rel(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    li a3, 4
 ; RV32I-NEXT:    li a4, 2
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_4
 ; RV32I-NEXT:    lw a3, 0(sp)
 ; RV32I-NEXT:    bnez a0, .LBB178_4
 ; RV32I-NEXT:  .LBB178_2: # %atomicrmw.start
@@ -16875,7 +16875,7 @@ define i32 @atomicrmw_umax_i32_acq_rel(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    li a3, 4
 ; RV64I-NEXT:    li a4, 2
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_4
 ; RV64I-NEXT:    lw a3, 12(sp)
 ; RV64I-NEXT:    bnez a0, .LBB178_4
 ; RV64I-NEXT:  .LBB178_2: # %atomicrmw.start
@@ -16926,7 +16926,7 @@ define i32 @atomicrmw_umax_i32_seq_cst(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    li a3, 5
 ; RV32I-NEXT:    li a4, 5
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_4
 ; RV32I-NEXT:    lw a3, 0(sp)
 ; RV32I-NEXT:    bnez a0, .LBB179_4
 ; RV32I-NEXT:  .LBB179_2: # %atomicrmw.start
@@ -16974,7 +16974,7 @@ define i32 @atomicrmw_umax_i32_seq_cst(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    li a3, 5
 ; RV64I-NEXT:    li a4, 5
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_4
 ; RV64I-NEXT:    lw a3, 12(sp)
 ; RV64I-NEXT:    bnez a0, .LBB179_4
 ; RV64I-NEXT:  .LBB179_2: # %atomicrmw.start
@@ -17025,7 +17025,7 @@ define i32 @atomicrmw_umin_i32_monotonic(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:    li a4, 0
-; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_4
 ; RV32I-NEXT:    lw a3, 0(sp)
 ; RV32I-NEXT:    bnez a0, .LBB180_4
 ; RV32I-NEXT:  .LBB180_2: # %atomicrmw.start
@@ -17068,7 +17068,7 @@ define i32 @atomicrmw_umin_i32_monotonic(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    li a3, 0
 ; RV64I-NEXT:    li a4, 0
-; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_4
 ; RV64I-NEXT:    lw a3, 12(sp)
 ; RV64I-NEXT:    bnez a0, .LBB180_4
 ; RV64I-NEXT:  .LBB180_2: # %atomicrmw.start
@@ -17114,7 +17114,7 @@ define i32 @atomicrmw_umin_i32_acquire(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    li a3, 2
 ; RV32I-NEXT:    li a4, 2
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_4
 ; RV32I-NEXT:    lw a3, 0(sp)
 ; RV32I-NEXT:    bnez a0, .LBB181_4
 ; RV32I-NEXT:  .LBB181_2: # %atomicrmw.start
@@ -17162,7 +17162,7 @@ define i32 @atomicrmw_umin_i32_acquire(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    li a3, 2
 ; RV64I-NEXT:    li a4, 2
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_4
 ; RV64I-NEXT:    lw a3, 12(sp)
 ; RV64I-NEXT:    bnez a0, .LBB181_4
 ; RV64I-NEXT:  .LBB181_2: # %atomicrmw.start
@@ -17213,7 +17213,7 @@ define i32 @atomicrmw_umin_i32_release(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    li a3, 3
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    li a4, 0
-; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_4
 ; RV32I-NEXT:    lw a3, 0(sp)
 ; RV32I-NEXT:    bnez a0, .LBB182_4
 ; RV32I-NEXT:  .LBB182_2: # %atomicrmw.start
@@ -17261,7 +17261,7 @@ define i32 @atomicrmw_umin_i32_release(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    li a3, 3
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    li a4, 0
-; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_4
 ; RV64I-NEXT:    lw a3, 12(sp)
 ; RV64I-NEXT:    bnez a0, .LBB182_4
 ; RV64I-NEXT:  .LBB182_2: # %atomicrmw.start
@@ -17312,7 +17312,7 @@ define i32 @atomicrmw_umin_i32_acq_rel(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    li a3, 4
 ; RV32I-NEXT:    li a4, 2
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_4
 ; RV32I-NEXT:    lw a3, 0(sp)
 ; RV32I-NEXT:    bnez a0, .LBB183_4
 ; RV32I-NEXT:  .LBB183_2: # %atomicrmw.start
@@ -17360,7 +17360,7 @@ define i32 @atomicrmw_umin_i32_acq_rel(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    li a3, 4
 ; RV64I-NEXT:    li a4, 2
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_4
 ; RV64I-NEXT:    lw a3, 12(sp)
 ; RV64I-NEXT:    bnez a0, .LBB183_4
 ; RV64I-NEXT:  .LBB183_2: # %atomicrmw.start
@@ -17411,7 +17411,7 @@ define i32 @atomicrmw_umin_i32_seq_cst(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    li a3, 5
 ; RV32I-NEXT:    li a4, 5
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_4
 ; RV32I-NEXT:    lw a3, 0(sp)
 ; RV32I-NEXT:    bnez a0, .LBB184_4
 ; RV32I-NEXT:  .LBB184_2: # %atomicrmw.start
@@ -17459,7 +17459,7 @@ define i32 @atomicrmw_umin_i32_seq_cst(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    li a3, 5
 ; RV64I-NEXT:    li a4, 5
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_4
 ; RV64I-NEXT:    lw a3, 12(sp)
 ; RV64I-NEXT:    bnez a0, .LBB184_4
 ; RV64I-NEXT:  .LBB184_2: # %atomicrmw.start
@@ -17498,7 +17498,7 @@ define i64 @atomicrmw_xchg_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a3, 0
-; RV32I-NEXT:    call __atomic_exchange_8@plt
+; RV32I-NEXT:    call __atomic_exchange_8
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -17508,7 +17508,7 @@ define i64 @atomicrmw_xchg_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    addi sp, sp, -16
 ; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    li a3, 0
-; RV32IA-NEXT:    call __atomic_exchange_8@plt
+; RV32IA-NEXT:    call __atomic_exchange_8
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
@@ -17518,7 +17518,7 @@ define i64 @atomicrmw_xchg_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 0
-; RV64I-NEXT:    call __atomic_exchange_8@plt
+; RV64I-NEXT:    call __atomic_exchange_8
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -17537,7 +17537,7 @@ define i64 @atomicrmw_xchg_i64_acquire(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a3, 2
-; RV32I-NEXT:    call __atomic_exchange_8@plt
+; RV32I-NEXT:    call __atomic_exchange_8
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -17547,7 +17547,7 @@ define i64 @atomicrmw_xchg_i64_acquire(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    addi sp, sp, -16
 ; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    li a3, 2
-; RV32IA-NEXT:    call __atomic_exchange_8@plt
+; RV32IA-NEXT:    call __atomic_exchange_8
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
@@ -17557,7 +17557,7 @@ define i64 @atomicrmw_xchg_i64_acquire(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 2
-; RV64I-NEXT:    call __atomic_exchange_8@plt
+; RV64I-NEXT:    call __atomic_exchange_8
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -17581,7 +17581,7 @@ define i64 @atomicrmw_xchg_i64_release(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a3, 3
-; RV32I-NEXT:    call __atomic_exchange_8@plt
+; RV32I-NEXT:    call __atomic_exchange_8
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -17591,7 +17591,7 @@ define i64 @atomicrmw_xchg_i64_release(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    addi sp, sp, -16
 ; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    li a3, 3
-; RV32IA-NEXT:    call __atomic_exchange_8@plt
+; RV32IA-NEXT:    call __atomic_exchange_8
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
@@ -17601,7 +17601,7 @@ define i64 @atomicrmw_xchg_i64_release(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 3
-; RV64I-NEXT:    call __atomic_exchange_8@plt
+; RV64I-NEXT:    call __atomic_exchange_8
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -17625,7 +17625,7 @@ define i64 @atomicrmw_xchg_i64_acq_rel(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a3, 4
-; RV32I-NEXT:    call __atomic_exchange_8@plt
+; RV32I-NEXT:    call __atomic_exchange_8
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -17635,7 +17635,7 @@ define i64 @atomicrmw_xchg_i64_acq_rel(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    addi sp, sp, -16
 ; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    li a3, 4
-; RV32IA-NEXT:    call __atomic_exchange_8@plt
+; RV32IA-NEXT:    call __atomic_exchange_8
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
@@ -17645,7 +17645,7 @@ define i64 @atomicrmw_xchg_i64_acq_rel(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 4
-; RV64I-NEXT:    call __atomic_exchange_8@plt
+; RV64I-NEXT:    call __atomic_exchange_8
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -17669,7 +17669,7 @@ define i64 @atomicrmw_xchg_i64_seq_cst(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a3, 5
-; RV32I-NEXT:    call __atomic_exchange_8@plt
+; RV32I-NEXT:    call __atomic_exchange_8
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -17679,7 +17679,7 @@ define i64 @atomicrmw_xchg_i64_seq_cst(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    addi sp, sp, -16
 ; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    li a3, 5
-; RV32IA-NEXT:    call __atomic_exchange_8@plt
+; RV32IA-NEXT:    call __atomic_exchange_8
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
@@ -17689,7 +17689,7 @@ define i64 @atomicrmw_xchg_i64_seq_cst(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 5
-; RV64I-NEXT:    call __atomic_exchange_8@plt
+; RV64I-NEXT:    call __atomic_exchange_8
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -17713,7 +17713,7 @@ define i64 @atomicrmw_add_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a3, 0
-; RV32I-NEXT:    call __atomic_fetch_add_8@plt
+; RV32I-NEXT:    call __atomic_fetch_add_8
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -17723,7 +17723,7 @@ define i64 @atomicrmw_add_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    addi sp, sp, -16
 ; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    li a3, 0
-; RV32IA-NEXT:    call __atomic_fetch_add_8@plt
+; RV32IA-NEXT:    call __atomic_fetch_add_8
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
@@ -17733,7 +17733,7 @@ define i64 @atomicrmw_add_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 0
-; RV64I-NEXT:    call __atomic_fetch_add_8@plt
+; RV64I-NEXT:    call __atomic_fetch_add_8
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -17752,7 +17752,7 @@ define i64 @atomicrmw_add_i64_acquire(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a3, 2
-; RV32I-NEXT:    call __atomic_fetch_add_8@plt
+; RV32I-NEXT:    call __atomic_fetch_add_8
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -17762,7 +17762,7 @@ define i64 @atomicrmw_add_i64_acquire(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    addi sp, sp, -16
 ; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    li a3, 2
-; RV32IA-NEXT:    call __atomic_fetch_add_8@plt
+; RV32IA-NEXT:    call __atomic_fetch_add_8
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
@@ -17772,7 +17772,7 @@ define i64 @atomicrmw_add_i64_acquire(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 2
-; RV64I-NEXT:    call __atomic_fetch_add_8@plt
+; RV64I-NEXT:    call __atomic_fetch_add_8
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -17796,7 +17796,7 @@ define i64 @atomicrmw_add_i64_release(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a3, 3
-; RV32I-NEXT:    call __atomic_fetch_add_8@plt
+; RV32I-NEXT:    call __atomic_fetch_add_8
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -17806,7 +17806,7 @@ define i64 @atomicrmw_add_i64_release(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    addi sp, sp, -16
 ; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    li a3, 3
-; RV32IA-NEXT:    call __atomic_fetch_add_8@plt
+; RV32IA-NEXT:    call __atomic_fetch_add_8
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
@@ -17816,7 +17816,7 @@ define i64 @atomicrmw_add_i64_release(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 3
-; RV64I-NEXT:    call __atomic_fetch_add_8@plt
+; RV64I-NEXT:    call __atomic_fetch_add_8
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -17840,7 +17840,7 @@ define i64 @atomicrmw_add_i64_acq_rel(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a3, 4
-; RV32I-NEXT:    call __atomic_fetch_add_8@plt
+; RV32I-NEXT:    call __atomic_fetch_add_8
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -17850,7 +17850,7 @@ define i64 @atomicrmw_add_i64_acq_rel(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    addi sp, sp, -16
 ; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    li a3, 4
-; RV32IA-NEXT:    call __atomic_fetch_add_8@plt
+; RV32IA-NEXT:    call __atomic_fetch_add_8
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
@@ -17860,7 +17860,7 @@ define i64 @atomicrmw_add_i64_acq_rel(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 4
-; RV64I-NEXT:    call __atomic_fetch_add_8@plt
+; RV64I-NEXT:    call __atomic_fetch_add_8
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -17884,7 +17884,7 @@ define i64 @atomicrmw_add_i64_seq_cst(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a3, 5
-; RV32I-NEXT:    call __atomic_fetch_add_8@plt
+; RV32I-NEXT:    call __atomic_fetch_add_8
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -17894,7 +17894,7 @@ define i64 @atomicrmw_add_i64_seq_cst(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    addi sp, sp, -16
 ; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    li a3, 5
-; RV32IA-NEXT:    call __atomic_fetch_add_8@plt
+; RV32IA-NEXT:    call __atomic_fetch_add_8
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
@@ -17904,7 +17904,7 @@ define i64 @atomicrmw_add_i64_seq_cst(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 5
-; RV64I-NEXT:    call __atomic_fetch_add_8@plt
+; RV64I-NEXT:    call __atomic_fetch_add_8
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -17928,7 +17928,7 @@ define i64 @atomicrmw_sub_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a3, 0
-; RV32I-NEXT:    call __atomic_fetch_sub_8@plt
+; RV32I-NEXT:    call __atomic_fetch_sub_8
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -17938,7 +17938,7 @@ define i64 @atomicrmw_sub_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    addi sp, sp, -16
 ; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    li a3, 0
-; RV32IA-NEXT:    call __atomic_fetch_sub_8@plt
+; RV32IA-NEXT:    call __atomic_fetch_sub_8
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
@@ -17948,7 +17948,7 @@ define i64 @atomicrmw_sub_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 0
-; RV64I-NEXT:    call __atomic_fetch_sub_8@plt
+; RV64I-NEXT:    call __atomic_fetch_sub_8
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -17968,7 +17968,7 @@ define i64 @atomicrmw_sub_i64_acquire(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a3, 2
-; RV32I-NEXT:    call __atomic_fetch_sub_8@plt
+; RV32I-NEXT:    call __atomic_fetch_sub_8
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -17978,7 +17978,7 @@ define i64 @atomicrmw_sub_i64_acquire(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    addi sp, sp, -16
 ; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    li a3, 2
-; RV32IA-NEXT:    call __atomic_fetch_sub_8@plt
+; RV32IA-NEXT:    call __atomic_fetch_sub_8
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
@@ -17988,7 +17988,7 @@ define i64 @atomicrmw_sub_i64_acquire(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 2
-; RV64I-NEXT:    call __atomic_fetch_sub_8@plt
+; RV64I-NEXT:    call __atomic_fetch_sub_8
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -18014,7 +18014,7 @@ define i64 @atomicrmw_sub_i64_release(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a3, 3
-; RV32I-NEXT:    call __atomic_fetch_sub_8@plt
+; RV32I-NEXT:    call __atomic_fetch_sub_8
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -18024,7 +18024,7 @@ define i64 @atomicrmw_sub_i64_release(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    addi sp, sp, -16
 ; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    li a3, 3
-; RV32IA-NEXT:    call __atomic_fetch_sub_8@plt
+; RV32IA-NEXT:    call __atomic_fetch_sub_8
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
@@ -18034,7 +18034,7 @@ define i64 @atomicrmw_sub_i64_release(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 3
-; RV64I-NEXT:    call __atomic_fetch_sub_8@plt
+; RV64I-NEXT:    call __atomic_fetch_sub_8
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -18060,7 +18060,7 @@ define i64 @atomicrmw_sub_i64_acq_rel(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a3, 4
-; RV32I-NEXT:    call __atomic_fetch_sub_8@plt
+; RV32I-NEXT:    call __atomic_fetch_sub_8
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -18070,7 +18070,7 @@ define i64 @atomicrmw_sub_i64_acq_rel(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    addi sp, sp, -16
 ; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    li a3, 4
-; RV32IA-NEXT:    call __atomic_fetch_sub_8@plt
+; RV32IA-NEXT:    call __atomic_fetch_sub_8
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
@@ -18080,7 +18080,7 @@ define i64 @atomicrmw_sub_i64_acq_rel(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 4
-; RV64I-NEXT:    call __atomic_fetch_sub_8@plt
+; RV64I-NEXT:    call __atomic_fetch_sub_8
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -18106,7 +18106,7 @@ define i64 @atomicrmw_sub_i64_seq_cst(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a3, 5
-; RV32I-NEXT:    call __atomic_fetch_sub_8@plt
+; RV32I-NEXT:    call __atomic_fetch_sub_8
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -18116,7 +18116,7 @@ define i64 @atomicrmw_sub_i64_seq_cst(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    addi sp, sp, -16
 ; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    li a3, 5
-; RV32IA-NEXT:    call __atomic_fetch_sub_8@plt
+; RV32IA-NEXT:    call __atomic_fetch_sub_8
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
@@ -18126,7 +18126,7 @@ define i64 @atomicrmw_sub_i64_seq_cst(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 5
-; RV64I-NEXT:    call __atomic_fetch_sub_8@plt
+; RV64I-NEXT:    call __atomic_fetch_sub_8
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -18152,7 +18152,7 @@ define i64 @atomicrmw_and_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a3, 0
-; RV32I-NEXT:    call __atomic_fetch_and_8@plt
+; RV32I-NEXT:    call __atomic_fetch_and_8
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -18162,7 +18162,7 @@ define i64 @atomicrmw_and_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    addi sp, sp, -16
 ; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    li a3, 0
-; RV32IA-NEXT:    call __atomic_fetch_and_8@plt
+; RV32IA-NEXT:    call __atomic_fetch_and_8
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
@@ -18172,7 +18172,7 @@ define i64 @atomicrmw_and_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 0
-; RV64I-NEXT:    call __atomic_fetch_and_8@plt
+; RV64I-NEXT:    call __atomic_fetch_and_8
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -18191,7 +18191,7 @@ define i64 @atomicrmw_and_i64_acquire(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a3, 2
-; RV32I-NEXT:    call __atomic_fetch_and_8@plt
+; RV32I-NEXT:    call __atomic_fetch_and_8
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -18201,7 +18201,7 @@ define i64 @atomicrmw_and_i64_acquire(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    addi sp, sp, -16
 ; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    li a3, 2
-; RV32IA-NEXT:    call __atomic_fetch_and_8@plt
+; RV32IA-NEXT:    call __atomic_fetch_and_8
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
@@ -18211,7 +18211,7 @@ define i64 @atomicrmw_and_i64_acquire(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 2
-; RV64I-NEXT:    call __atomic_fetch_and_8@plt
+; RV64I-NEXT:    call __atomic_fetch_and_8
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -18235,7 +18235,7 @@ define i64 @atomicrmw_and_i64_release(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a3, 3
-; RV32I-NEXT:    call __atomic_fetch_and_8@plt
+; RV32I-NEXT:    call __atomic_fetch_and_8
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -18245,7 +18245,7 @@ define i64 @atomicrmw_and_i64_release(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    addi sp, sp, -16
 ; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    li a3, 3
-; RV32IA-NEXT:    call __atomic_fetch_and_8@plt
+; RV32IA-NEXT:    call __atomic_fetch_and_8
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
@@ -18255,7 +18255,7 @@ define i64 @atomicrmw_and_i64_release(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 3
-; RV64I-NEXT:    call __atomic_fetch_and_8@plt
+; RV64I-NEXT:    call __atomic_fetch_and_8
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -18279,7 +18279,7 @@ define i64 @atomicrmw_and_i64_acq_rel(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a3, 4
-; RV32I-NEXT:    call __atomic_fetch_and_8@plt
+; RV32I-NEXT:    call __atomic_fetch_and_8
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -18289,7 +18289,7 @@ define i64 @atomicrmw_and_i64_acq_rel(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    addi sp, sp, -16
 ; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    li a3, 4
-; RV32IA-NEXT:    call __atomic_fetch_and_8@plt
+; RV32IA-NEXT:    call __atomic_fetch_and_8
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
@@ -18299,7 +18299,7 @@ define i64 @atomicrmw_and_i64_acq_rel(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 4
-; RV64I-NEXT:    call __atomic_fetch_and_8@plt
+; RV64I-NEXT:    call __atomic_fetch_and_8
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -18323,7 +18323,7 @@ define i64 @atomicrmw_and_i64_seq_cst(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a3, 5
-; RV32I-NEXT:    call __atomic_fetch_and_8@plt
+; RV32I-NEXT:    call __atomic_fetch_and_8
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -18333,7 +18333,7 @@ define i64 @atomicrmw_and_i64_seq_cst(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    addi sp, sp, -16
 ; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    li a3, 5
-; RV32IA-NEXT:    call __atomic_fetch_and_8@plt
+; RV32IA-NEXT:    call __atomic_fetch_and_8
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
@@ -18343,7 +18343,7 @@ define i64 @atomicrmw_and_i64_seq_cst(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 5
-; RV64I-NEXT:    call __atomic_fetch_and_8@plt
+; RV64I-NEXT:    call __atomic_fetch_and_8
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -18367,7 +18367,7 @@ define i64 @atomicrmw_nand_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a3, 0
-; RV32I-NEXT:    call __atomic_fetch_nand_8@plt
+; RV32I-NEXT:    call __atomic_fetch_nand_8
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -18377,7 +18377,7 @@ define i64 @atomicrmw_nand_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    addi sp, sp, -16
 ; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    li a3, 0
-; RV32IA-NEXT:    call __atomic_fetch_nand_8@plt
+; RV32IA-NEXT:    call __atomic_fetch_nand_8
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
@@ -18387,7 +18387,7 @@ define i64 @atomicrmw_nand_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 0
-; RV64I-NEXT:    call __atomic_fetch_nand_8@plt
+; RV64I-NEXT:    call __atomic_fetch_nand_8
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -18413,7 +18413,7 @@ define i64 @atomicrmw_nand_i64_acquire(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a3, 2
-; RV32I-NEXT:    call __atomic_fetch_nand_8@plt
+; RV32I-NEXT:    call __atomic_fetch_nand_8
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -18423,7 +18423,7 @@ define i64 @atomicrmw_nand_i64_acquire(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    addi sp, sp, -16
 ; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    li a3, 2
-; RV32IA-NEXT:    call __atomic_fetch_nand_8@plt
+; RV32IA-NEXT:    call __atomic_fetch_nand_8
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
@@ -18433,7 +18433,7 @@ define i64 @atomicrmw_nand_i64_acquire(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 2
-; RV64I-NEXT:    call __atomic_fetch_nand_8@plt
+; RV64I-NEXT:    call __atomic_fetch_nand_8
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -18471,7 +18471,7 @@ define i64 @atomicrmw_nand_i64_release(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a3, 3
-; RV32I-NEXT:    call __atomic_fetch_nand_8@plt
+; RV32I-NEXT:    call __atomic_fetch_nand_8
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -18481,7 +18481,7 @@ define i64 @atomicrmw_nand_i64_release(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    addi sp, sp, -16
 ; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    li a3, 3
-; RV32IA-NEXT:    call __atomic_fetch_nand_8@plt
+; RV32IA-NEXT:    call __atomic_fetch_nand_8
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
@@ -18491,7 +18491,7 @@ define i64 @atomicrmw_nand_i64_release(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 3
-; RV64I-NEXT:    call __atomic_fetch_nand_8@plt
+; RV64I-NEXT:    call __atomic_fetch_nand_8
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -18529,7 +18529,7 @@ define i64 @atomicrmw_nand_i64_acq_rel(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a3, 4
-; RV32I-NEXT:    call __atomic_fetch_nand_8@plt
+; RV32I-NEXT:    call __atomic_fetch_nand_8
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -18539,7 +18539,7 @@ define i64 @atomicrmw_nand_i64_acq_rel(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    addi sp, sp, -16
 ; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    li a3, 4
-; RV32IA-NEXT:    call __atomic_fetch_nand_8@plt
+; RV32IA-NEXT:    call __atomic_fetch_nand_8
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
@@ -18549,7 +18549,7 @@ define i64 @atomicrmw_nand_i64_acq_rel(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 4
-; RV64I-NEXT:    call __atomic_fetch_nand_8@plt
+; RV64I-NEXT:    call __atomic_fetch_nand_8
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -18587,7 +18587,7 @@ define i64 @atomicrmw_nand_i64_seq_cst(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a3, 5
-; RV32I-NEXT:    call __atomic_fetch_nand_8@plt
+; RV32I-NEXT:    call __atomic_fetch_nand_8
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -18597,7 +18597,7 @@ define i64 @atomicrmw_nand_i64_seq_cst(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    addi sp, sp, -16
 ; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    li a3, 5
-; RV32IA-NEXT:    call __atomic_fetch_nand_8@plt
+; RV32IA-NEXT:    call __atomic_fetch_nand_8
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
@@ -18607,7 +18607,7 @@ define i64 @atomicrmw_nand_i64_seq_cst(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 5
-; RV64I-NEXT:    call __atomic_fetch_nand_8@plt
+; RV64I-NEXT:    call __atomic_fetch_nand_8
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -18633,7 +18633,7 @@ define i64 @atomicrmw_or_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a3, 0
-; RV32I-NEXT:    call __atomic_fetch_or_8@plt
+; RV32I-NEXT:    call __atomic_fetch_or_8
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -18643,7 +18643,7 @@ define i64 @atomicrmw_or_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    addi sp, sp, -16
 ; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    li a3, 0
-; RV32IA-NEXT:    call __atomic_fetch_or_8@plt
+; RV32IA-NEXT:    call __atomic_fetch_or_8
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
@@ -18653,7 +18653,7 @@ define i64 @atomicrmw_or_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 0
-; RV64I-NEXT:    call __atomic_fetch_or_8@plt
+; RV64I-NEXT:    call __atomic_fetch_or_8
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -18672,7 +18672,7 @@ define i64 @atomicrmw_or_i64_acquire(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a3, 2
-; RV32I-NEXT:    call __atomic_fetch_or_8@plt
+; RV32I-NEXT:    call __atomic_fetch_or_8
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -18682,7 +18682,7 @@ define i64 @atomicrmw_or_i64_acquire(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    addi sp, sp, -16
 ; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    li a3, 2
-; RV32IA-NEXT:    call __atomic_fetch_or_8@plt
+; RV32IA-NEXT:    call __atomic_fetch_or_8
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
@@ -18692,7 +18692,7 @@ define i64 @atomicrmw_or_i64_acquire(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 2
-; RV64I-NEXT:    call __atomic_fetch_or_8@plt
+; RV64I-NEXT:    call __atomic_fetch_or_8
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -18716,7 +18716,7 @@ define i64 @atomicrmw_or_i64_release(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a3, 3
-; RV32I-NEXT:    call __atomic_fetch_or_8@plt
+; RV32I-NEXT:    call __atomic_fetch_or_8
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -18726,7 +18726,7 @@ define i64 @atomicrmw_or_i64_release(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    addi sp, sp, -16
 ; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    li a3, 3
-; RV32IA-NEXT:    call __atomic_fetch_or_8@plt
+; RV32IA-NEXT:    call __atomic_fetch_or_8
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
@@ -18736,7 +18736,7 @@ define i64 @atomicrmw_or_i64_release(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 3
-; RV64I-NEXT:    call __atomic_fetch_or_8@plt
+; RV64I-NEXT:    call __atomic_fetch_or_8
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -18760,7 +18760,7 @@ define i64 @atomicrmw_or_i64_acq_rel(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a3, 4
-; RV32I-NEXT:    call __atomic_fetch_or_8@plt
+; RV32I-NEXT:    call __atomic_fetch_or_8
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -18770,7 +18770,7 @@ define i64 @atomicrmw_or_i64_acq_rel(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    addi sp, sp, -16
 ; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    li a3, 4
-; RV32IA-NEXT:    call __atomic_fetch_or_8@plt
+; RV32IA-NEXT:    call __atomic_fetch_or_8
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
@@ -18780,7 +18780,7 @@ define i64 @atomicrmw_or_i64_acq_rel(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 4
-; RV64I-NEXT:    call __atomic_fetch_or_8@plt
+; RV64I-NEXT:    call __atomic_fetch_or_8
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -18804,7 +18804,7 @@ define i64 @atomicrmw_or_i64_seq_cst(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a3, 5
-; RV32I-NEXT:    call __atomic_fetch_or_8@plt
+; RV32I-NEXT:    call __atomic_fetch_or_8
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -18814,7 +18814,7 @@ define i64 @atomicrmw_or_i64_seq_cst(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    addi sp, sp, -16
 ; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    li a3, 5
-; RV32IA-NEXT:    call __atomic_fetch_or_8@plt
+; RV32IA-NEXT:    call __atomic_fetch_or_8
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
@@ -18824,7 +18824,7 @@ define i64 @atomicrmw_or_i64_seq_cst(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 5
-; RV64I-NEXT:    call __atomic_fetch_or_8@plt
+; RV64I-NEXT:    call __atomic_fetch_or_8
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -18848,7 +18848,7 @@ define i64 @atomicrmw_xor_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a3, 0
-; RV32I-NEXT:    call __atomic_fetch_xor_8@plt
+; RV32I-NEXT:    call __atomic_fetch_xor_8
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -18858,7 +18858,7 @@ define i64 @atomicrmw_xor_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    addi sp, sp, -16
 ; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    li a3, 0
-; RV32IA-NEXT:    call __atomic_fetch_xor_8@plt
+; RV32IA-NEXT:    call __atomic_fetch_xor_8
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
@@ -18868,7 +18868,7 @@ define i64 @atomicrmw_xor_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 0
-; RV64I-NEXT:    call __atomic_fetch_xor_8@plt
+; RV64I-NEXT:    call __atomic_fetch_xor_8
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -18887,7 +18887,7 @@ define i64 @atomicrmw_xor_i64_acquire(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a3, 2
-; RV32I-NEXT:    call __atomic_fetch_xor_8@plt
+; RV32I-NEXT:    call __atomic_fetch_xor_8
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -18897,7 +18897,7 @@ define i64 @atomicrmw_xor_i64_acquire(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    addi sp, sp, -16
 ; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    li a3, 2
-; RV32IA-NEXT:    call __atomic_fetch_xor_8@plt
+; RV32IA-NEXT:    call __atomic_fetch_xor_8
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
@@ -18907,7 +18907,7 @@ define i64 @atomicrmw_xor_i64_acquire(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 2
-; RV64I-NEXT:    call __atomic_fetch_xor_8@plt
+; RV64I-NEXT:    call __atomic_fetch_xor_8
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -18931,7 +18931,7 @@ define i64 @atomicrmw_xor_i64_release(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a3, 3
-; RV32I-NEXT:    call __atomic_fetch_xor_8@plt
+; RV32I-NEXT:    call __atomic_fetch_xor_8
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -18941,7 +18941,7 @@ define i64 @atomicrmw_xor_i64_release(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    addi sp, sp, -16
 ; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    li a3, 3
-; RV32IA-NEXT:    call __atomic_fetch_xor_8@plt
+; RV32IA-NEXT:    call __atomic_fetch_xor_8
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
@@ -18951,7 +18951,7 @@ define i64 @atomicrmw_xor_i64_release(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 3
-; RV64I-NEXT:    call __atomic_fetch_xor_8@plt
+; RV64I-NEXT:    call __atomic_fetch_xor_8
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -18975,7 +18975,7 @@ define i64 @atomicrmw_xor_i64_acq_rel(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a3, 4
-; RV32I-NEXT:    call __atomic_fetch_xor_8@plt
+; RV32I-NEXT:    call __atomic_fetch_xor_8
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -18985,7 +18985,7 @@ define i64 @atomicrmw_xor_i64_acq_rel(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    addi sp, sp, -16
 ; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    li a3, 4
-; RV32IA-NEXT:    call __atomic_fetch_xor_8@plt
+; RV32IA-NEXT:    call __atomic_fetch_xor_8
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
@@ -18995,7 +18995,7 @@ define i64 @atomicrmw_xor_i64_acq_rel(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 4
-; RV64I-NEXT:    call __atomic_fetch_xor_8@plt
+; RV64I-NEXT:    call __atomic_fetch_xor_8
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -19019,7 +19019,7 @@ define i64 @atomicrmw_xor_i64_seq_cst(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a3, 5
-; RV32I-NEXT:    call __atomic_fetch_xor_8@plt
+; RV32I-NEXT:    call __atomic_fetch_xor_8
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -19029,7 +19029,7 @@ define i64 @atomicrmw_xor_i64_seq_cst(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    addi sp, sp, -16
 ; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    li a3, 5
-; RV32IA-NEXT:    call __atomic_fetch_xor_8@plt
+; RV32IA-NEXT:    call __atomic_fetch_xor_8
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
@@ -19039,7 +19039,7 @@ define i64 @atomicrmw_xor_i64_seq_cst(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 5
-; RV64I-NEXT:    call __atomic_fetch_xor_8@plt
+; RV64I-NEXT:    call __atomic_fetch_xor_8
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -19079,7 +19079,7 @@ define i64 @atomicrmw_max_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    li a4, 0
 ; RV32I-NEXT:    li a5, 0
-; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_8
 ; RV32I-NEXT:    lw a5, 12(sp)
 ; RV32I-NEXT:    lw a4, 8(sp)
 ; RV32I-NEXT:    bnez a0, .LBB220_7
@@ -19133,7 +19133,7 @@ define i64 @atomicrmw_max_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    mv a0, s0
 ; RV32IA-NEXT:    li a4, 0
 ; RV32IA-NEXT:    li a5, 0
-; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32IA-NEXT:    call __atomic_compare_exchange_8
 ; RV32IA-NEXT:    lw a5, 12(sp)
 ; RV32IA-NEXT:    lw a4, 8(sp)
 ; RV32IA-NEXT:    bnez a0, .LBB220_7
@@ -19183,7 +19183,7 @@ define i64 @atomicrmw_max_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    li a3, 0
 ; RV64I-NEXT:    li a4, 0
-; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_8
 ; RV64I-NEXT:    ld a3, 0(sp)
 ; RV64I-NEXT:    bnez a0, .LBB220_4
 ; RV64I-NEXT:  .LBB220_2: # %atomicrmw.start
@@ -19232,7 +19232,7 @@ define i64 @atomicrmw_max_i64_acquire(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    li a4, 2
 ; RV32I-NEXT:    li a5, 2
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_8
 ; RV32I-NEXT:    lw a5, 12(sp)
 ; RV32I-NEXT:    lw a4, 8(sp)
 ; RV32I-NEXT:    bnez a0, .LBB221_7
@@ -19286,7 +19286,7 @@ define i64 @atomicrmw_max_i64_acquire(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    li a4, 2
 ; RV32IA-NEXT:    li a5, 2
 ; RV32IA-NEXT:    mv a0, s0
-; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32IA-NEXT:    call __atomic_compare_exchange_8
 ; RV32IA-NEXT:    lw a5, 12(sp)
 ; RV32IA-NEXT:    lw a4, 8(sp)
 ; RV32IA-NEXT:    bnez a0, .LBB221_7
@@ -19336,7 +19336,7 @@ define i64 @atomicrmw_max_i64_acquire(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    li a3, 2
 ; RV64I-NEXT:    li a4, 2
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_8
 ; RV64I-NEXT:    ld a3, 0(sp)
 ; RV64I-NEXT:    bnez a0, .LBB221_4
 ; RV64I-NEXT:  .LBB221_2: # %atomicrmw.start
@@ -19390,7 +19390,7 @@ define i64 @atomicrmw_max_i64_release(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    li a4, 3
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    li a5, 0
-; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_8
 ; RV32I-NEXT:    lw a5, 12(sp)
 ; RV32I-NEXT:    lw a4, 8(sp)
 ; RV32I-NEXT:    bnez a0, .LBB222_7
@@ -19444,7 +19444,7 @@ define i64 @atomicrmw_max_i64_release(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    li a4, 3
 ; RV32IA-NEXT:    mv a0, s0
 ; RV32IA-NEXT:    li a5, 0
-; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32IA-NEXT:    call __atomic_compare_exchange_8
 ; RV32IA-NEXT:    lw a5, 12(sp)
 ; RV32IA-NEXT:    lw a4, 8(sp)
 ; RV32IA-NEXT:    bnez a0, .LBB222_7
@@ -19494,7 +19494,7 @@ define i64 @atomicrmw_max_i64_release(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    li a3, 3
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    li a4, 0
-; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_8
 ; RV64I-NEXT:    ld a3, 0(sp)
 ; RV64I-NEXT:    bnez a0, .LBB222_4
 ; RV64I-NEXT:  .LBB222_2: # %atomicrmw.start
@@ -19548,7 +19548,7 @@ define i64 @atomicrmw_max_i64_acq_rel(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    li a4, 4
 ; RV32I-NEXT:    li a5, 2
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_8
 ; RV32I-NEXT:    lw a5, 12(sp)
 ; RV32I-NEXT:    lw a4, 8(sp)
 ; RV32I-NEXT:    bnez a0, .LBB223_7
@@ -19602,7 +19602,7 @@ define i64 @atomicrmw_max_i64_acq_rel(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    li a4, 4
 ; RV32IA-NEXT:    li a5, 2
 ; RV32IA-NEXT:    mv a0, s0
-; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32IA-NEXT:    call __atomic_compare_exchange_8
 ; RV32IA-NEXT:    lw a5, 12(sp)
 ; RV32IA-NEXT:    lw a4, 8(sp)
 ; RV32IA-NEXT:    bnez a0, .LBB223_7
@@ -19652,7 +19652,7 @@ define i64 @atomicrmw_max_i64_acq_rel(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    li a3, 4
 ; RV64I-NEXT:    li a4, 2
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_8
 ; RV64I-NEXT:    ld a3, 0(sp)
 ; RV64I-NEXT:    bnez a0, .LBB223_4
 ; RV64I-NEXT:  .LBB223_2: # %atomicrmw.start
@@ -19706,7 +19706,7 @@ define i64 @atomicrmw_max_i64_seq_cst(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    li a4, 5
 ; RV32I-NEXT:    li a5, 5
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_8
 ; RV32I-NEXT:    lw a5, 12(sp)
 ; RV32I-NEXT:    lw a4, 8(sp)
 ; RV32I-NEXT:    bnez a0, .LBB224_7
@@ -19760,7 +19760,7 @@ define i64 @atomicrmw_max_i64_seq_cst(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    li a4, 5
 ; RV32IA-NEXT:    li a5, 5
 ; RV32IA-NEXT:    mv a0, s0
-; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32IA-NEXT:    call __atomic_compare_exchange_8
 ; RV32IA-NEXT:    lw a5, 12(sp)
 ; RV32IA-NEXT:    lw a4, 8(sp)
 ; RV32IA-NEXT:    bnez a0, .LBB224_7
@@ -19810,7 +19810,7 @@ define i64 @atomicrmw_max_i64_seq_cst(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    li a3, 5
 ; RV64I-NEXT:    li a4, 5
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_8
 ; RV64I-NEXT:    ld a3, 0(sp)
 ; RV64I-NEXT:    bnez a0, .LBB224_4
 ; RV64I-NEXT:  .LBB224_2: # %atomicrmw.start
@@ -19864,7 +19864,7 @@ define i64 @atomicrmw_min_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    li a4, 0
 ; RV32I-NEXT:    li a5, 0
-; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_8
 ; RV32I-NEXT:    lw a5, 12(sp)
 ; RV32I-NEXT:    lw a4, 8(sp)
 ; RV32I-NEXT:    bnez a0, .LBB225_7
@@ -19918,7 +19918,7 @@ define i64 @atomicrmw_min_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    mv a0, s0
 ; RV32IA-NEXT:    li a4, 0
 ; RV32IA-NEXT:    li a5, 0
-; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32IA-NEXT:    call __atomic_compare_exchange_8
 ; RV32IA-NEXT:    lw a5, 12(sp)
 ; RV32IA-NEXT:    lw a4, 8(sp)
 ; RV32IA-NEXT:    bnez a0, .LBB225_7
@@ -19968,7 +19968,7 @@ define i64 @atomicrmw_min_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    li a3, 0
 ; RV64I-NEXT:    li a4, 0
-; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_8
 ; RV64I-NEXT:    ld a3, 0(sp)
 ; RV64I-NEXT:    bnez a0, .LBB225_4
 ; RV64I-NEXT:  .LBB225_2: # %atomicrmw.start
@@ -20017,7 +20017,7 @@ define i64 @atomicrmw_min_i64_acquire(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    li a4, 2
 ; RV32I-NEXT:    li a5, 2
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_8
 ; RV32I-NEXT:    lw a5, 12(sp)
 ; RV32I-NEXT:    lw a4, 8(sp)
 ; RV32I-NEXT:    bnez a0, .LBB226_7
@@ -20071,7 +20071,7 @@ define i64 @atomicrmw_min_i64_acquire(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    li a4, 2
 ; RV32IA-NEXT:    li a5, 2
 ; RV32IA-NEXT:    mv a0, s0
-; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32IA-NEXT:    call __atomic_compare_exchange_8
 ; RV32IA-NEXT:    lw a5, 12(sp)
 ; RV32IA-NEXT:    lw a4, 8(sp)
 ; RV32IA-NEXT:    bnez a0, .LBB226_7
@@ -20121,7 +20121,7 @@ define i64 @atomicrmw_min_i64_acquire(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    li a3, 2
 ; RV64I-NEXT:    li a4, 2
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_8
 ; RV64I-NEXT:    ld a3, 0(sp)
 ; RV64I-NEXT:    bnez a0, .LBB226_4
 ; RV64I-NEXT:  .LBB226_2: # %atomicrmw.start
@@ -20175,7 +20175,7 @@ define i64 @atomicrmw_min_i64_release(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    li a4, 3
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    li a5, 0
-; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_8
 ; RV32I-NEXT:    lw a5, 12(sp)
 ; RV32I-NEXT:    lw a4, 8(sp)
 ; RV32I-NEXT:    bnez a0, .LBB227_7
@@ -20229,7 +20229,7 @@ define i64 @atomicrmw_min_i64_release(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    li a4, 3
 ; RV32IA-NEXT:    mv a0, s0
 ; RV32IA-NEXT:    li a5, 0
-; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32IA-NEXT:    call __atomic_compare_exchange_8
 ; RV32IA-NEXT:    lw a5, 12(sp)
 ; RV32IA-NEXT:    lw a4, 8(sp)
 ; RV32IA-NEXT:    bnez a0, .LBB227_7
@@ -20279,7 +20279,7 @@ define i64 @atomicrmw_min_i64_release(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    li a3, 3
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    li a4, 0
-; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_8
 ; RV64I-NEXT:    ld a3, 0(sp)
 ; RV64I-NEXT:    bnez a0, .LBB227_4
 ; RV64I-NEXT:  .LBB227_2: # %atomicrmw.start
@@ -20333,7 +20333,7 @@ define i64 @atomicrmw_min_i64_acq_rel(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    li a4, 4
 ; RV32I-NEXT:    li a5, 2
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_8
 ; RV32I-NEXT:    lw a5, 12(sp)
 ; RV32I-NEXT:    lw a4, 8(sp)
 ; RV32I-NEXT:    bnez a0, .LBB228_7
@@ -20387,7 +20387,7 @@ define i64 @atomicrmw_min_i64_acq_rel(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    li a4, 4
 ; RV32IA-NEXT:    li a5, 2
 ; RV32IA-NEXT:    mv a0, s0
-; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32IA-NEXT:    call __atomic_compare_exchange_8
 ; RV32IA-NEXT:    lw a5, 12(sp)
 ; RV32IA-NEXT:    lw a4, 8(sp)
 ; RV32IA-NEXT:    bnez a0, .LBB228_7
@@ -20437,7 +20437,7 @@ define i64 @atomicrmw_min_i64_acq_rel(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    li a3, 4
 ; RV64I-NEXT:    li a4, 2
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_8
 ; RV64I-NEXT:    ld a3, 0(sp)
 ; RV64I-NEXT:    bnez a0, .LBB228_4
 ; RV64I-NEXT:  .LBB228_2: # %atomicrmw.start
@@ -20491,7 +20491,7 @@ define i64 @atomicrmw_min_i64_seq_cst(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    li a4, 5
 ; RV32I-NEXT:    li a5, 5
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_8
 ; RV32I-NEXT:    lw a5, 12(sp)
 ; RV32I-NEXT:    lw a4, 8(sp)
 ; RV32I-NEXT:    bnez a0, .LBB229_7
@@ -20545,7 +20545,7 @@ define i64 @atomicrmw_min_i64_seq_cst(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    li a4, 5
 ; RV32IA-NEXT:    li a5, 5
 ; RV32IA-NEXT:    mv a0, s0
-; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32IA-NEXT:    call __atomic_compare_exchange_8
 ; RV32IA-NEXT:    lw a5, 12(sp)
 ; RV32IA-NEXT:    lw a4, 8(sp)
 ; RV32IA-NEXT:    bnez a0, .LBB229_7
@@ -20595,7 +20595,7 @@ define i64 @atomicrmw_min_i64_seq_cst(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    li a3, 5
 ; RV64I-NEXT:    li a4, 5
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_8
 ; RV64I-NEXT:    ld a3, 0(sp)
 ; RV64I-NEXT:    bnez a0, .LBB229_4
 ; RV64I-NEXT:  .LBB229_2: # %atomicrmw.start
@@ -20649,7 +20649,7 @@ define i64 @atomicrmw_umax_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    li a4, 0
 ; RV32I-NEXT:    li a5, 0
-; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_8
 ; RV32I-NEXT:    lw a5, 12(sp)
 ; RV32I-NEXT:    lw a4, 8(sp)
 ; RV32I-NEXT:    bnez a0, .LBB230_7
@@ -20703,7 +20703,7 @@ define i64 @atomicrmw_umax_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    mv a0, s0
 ; RV32IA-NEXT:    li a4, 0
 ; RV32IA-NEXT:    li a5, 0
-; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32IA-NEXT:    call __atomic_compare_exchange_8
 ; RV32IA-NEXT:    lw a5, 12(sp)
 ; RV32IA-NEXT:    lw a4, 8(sp)
 ; RV32IA-NEXT:    bnez a0, .LBB230_7
@@ -20753,7 +20753,7 @@ define i64 @atomicrmw_umax_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    li a3, 0
 ; RV64I-NEXT:    li a4, 0
-; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_8
 ; RV64I-NEXT:    ld a3, 0(sp)
 ; RV64I-NEXT:    bnez a0, .LBB230_4
 ; RV64I-NEXT:  .LBB230_2: # %atomicrmw.start
@@ -20802,7 +20802,7 @@ define i64 @atomicrmw_umax_i64_acquire(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    li a4, 2
 ; RV32I-NEXT:    li a5, 2
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_8
 ; RV32I-NEXT:    lw a5, 12(sp)
 ; RV32I-NEXT:    lw a4, 8(sp)
 ; RV32I-NEXT:    bnez a0, .LBB231_7
@@ -20856,7 +20856,7 @@ define i64 @atomicrmw_umax_i64_acquire(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    li a4, 2
 ; RV32IA-NEXT:    li a5, 2
 ; RV32IA-NEXT:    mv a0, s0
-; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32IA-NEXT:    call __atomic_compare_exchange_8
 ; RV32IA-NEXT:    lw a5, 12(sp)
 ; RV32IA-NEXT:    lw a4, 8(sp)
 ; RV32IA-NEXT:    bnez a0, .LBB231_7
@@ -20906,7 +20906,7 @@ define i64 @atomicrmw_umax_i64_acquire(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    li a3, 2
 ; RV64I-NEXT:    li a4, 2
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_8
 ; RV64I-NEXT:    ld a3, 0(sp)
 ; RV64I-NEXT:    bnez a0, .LBB231_4
 ; RV64I-NEXT:  .LBB231_2: # %atomicrmw.start
@@ -20960,7 +20960,7 @@ define i64 @atomicrmw_umax_i64_release(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    li a4, 3
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    li a5, 0
-; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_8
 ; RV32I-NEXT:    lw a5, 12(sp)
 ; RV32I-NEXT:    lw a4, 8(sp)
 ; RV32I-NEXT:    bnez a0, .LBB232_7
@@ -21014,7 +21014,7 @@ define i64 @atomicrmw_umax_i64_release(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    li a4, 3
 ; RV32IA-NEXT:    mv a0, s0
 ; RV32IA-NEXT:    li a5, 0
-; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32IA-NEXT:    call __atomic_compare_exchange_8
 ; RV32IA-NEXT:    lw a5, 12(sp)
 ; RV32IA-NEXT:    lw a4, 8(sp)
 ; RV32IA-NEXT:    bnez a0, .LBB232_7
@@ -21064,7 +21064,7 @@ define i64 @atomicrmw_umax_i64_release(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    li a3, 3
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    li a4, 0
-; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_8
 ; RV64I-NEXT:    ld a3, 0(sp)
 ; RV64I-NEXT:    bnez a0, .LBB232_4
 ; RV64I-NEXT:  .LBB232_2: # %atomicrmw.start
@@ -21118,7 +21118,7 @@ define i64 @atomicrmw_umax_i64_acq_rel(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    li a4, 4
 ; RV32I-NEXT:    li a5, 2
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_8
 ; RV32I-NEXT:    lw a5, 12(sp)
 ; RV32I-NEXT:    lw a4, 8(sp)
 ; RV32I-NEXT:    bnez a0, .LBB233_7
@@ -21172,7 +21172,7 @@ define i64 @atomicrmw_umax_i64_acq_rel(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    li a4, 4
 ; RV32IA-NEXT:    li a5, 2
 ; RV32IA-NEXT:    mv a0, s0
-; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32IA-NEXT:    call __atomic_compare_exchange_8
 ; RV32IA-NEXT:    lw a5, 12(sp)
 ; RV32IA-NEXT:    lw a4, 8(sp)
 ; RV32IA-NEXT:    bnez a0, .LBB233_7
@@ -21222,7 +21222,7 @@ define i64 @atomicrmw_umax_i64_acq_rel(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    li a3, 4
 ; RV64I-NEXT:    li a4, 2
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_8
 ; RV64I-NEXT:    ld a3, 0(sp)
 ; RV64I-NEXT:    bnez a0, .LBB233_4
 ; RV64I-NEXT:  .LBB233_2: # %atomicrmw.start
@@ -21276,7 +21276,7 @@ define i64 @atomicrmw_umax_i64_seq_cst(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    li a4, 5
 ; RV32I-NEXT:    li a5, 5
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_8
 ; RV32I-NEXT:    lw a5, 12(sp)
 ; RV32I-NEXT:    lw a4, 8(sp)
 ; RV32I-NEXT:    bnez a0, .LBB234_7
@@ -21330,7 +21330,7 @@ define i64 @atomicrmw_umax_i64_seq_cst(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    li a4, 5
 ; RV32IA-NEXT:    li a5, 5
 ; RV32IA-NEXT:    mv a0, s0
-; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32IA-NEXT:    call __atomic_compare_exchange_8
 ; RV32IA-NEXT:    lw a5, 12(sp)
 ; RV32IA-NEXT:    lw a4, 8(sp)
 ; RV32IA-NEXT:    bnez a0, .LBB234_7
@@ -21380,7 +21380,7 @@ define i64 @atomicrmw_umax_i64_seq_cst(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    li a3, 5
 ; RV64I-NEXT:    li a4, 5
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_8
 ; RV64I-NEXT:    ld a3, 0(sp)
 ; RV64I-NEXT:    bnez a0, .LBB234_4
 ; RV64I-NEXT:  .LBB234_2: # %atomicrmw.start
@@ -21434,7 +21434,7 @@ define i64 @atomicrmw_umin_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    li a4, 0
 ; RV32I-NEXT:    li a5, 0
-; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_8
 ; RV32I-NEXT:    lw a5, 12(sp)
 ; RV32I-NEXT:    lw a4, 8(sp)
 ; RV32I-NEXT:    bnez a0, .LBB235_7
@@ -21488,7 +21488,7 @@ define i64 @atomicrmw_umin_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    mv a0, s0
 ; RV32IA-NEXT:    li a4, 0
 ; RV32IA-NEXT:    li a5, 0
-; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32IA-NEXT:    call __atomic_compare_exchange_8
 ; RV32IA-NEXT:    lw a5, 12(sp)
 ; RV32IA-NEXT:    lw a4, 8(sp)
 ; RV32IA-NEXT:    bnez a0, .LBB235_7
@@ -21538,7 +21538,7 @@ define i64 @atomicrmw_umin_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    li a3, 0
 ; RV64I-NEXT:    li a4, 0
-; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_8
 ; RV64I-NEXT:    ld a3, 0(sp)
 ; RV64I-NEXT:    bnez a0, .LBB235_4
 ; RV64I-NEXT:  .LBB235_2: # %atomicrmw.start
@@ -21587,7 +21587,7 @@ define i64 @atomicrmw_umin_i64_acquire(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    li a4, 2
 ; RV32I-NEXT:    li a5, 2
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_8
 ; RV32I-NEXT:    lw a5, 12(sp)
 ; RV32I-NEXT:    lw a4, 8(sp)
 ; RV32I-NEXT:    bnez a0, .LBB236_7
@@ -21641,7 +21641,7 @@ define i64 @atomicrmw_umin_i64_acquire(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    li a4, 2
 ; RV32IA-NEXT:    li a5, 2
 ; RV32IA-NEXT:    mv a0, s0
-; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32IA-NEXT:    call __atomic_compare_exchange_8
 ; RV32IA-NEXT:    lw a5, 12(sp)
 ; RV32IA-NEXT:    lw a4, 8(sp)
 ; RV32IA-NEXT:    bnez a0, .LBB236_7
@@ -21691,7 +21691,7 @@ define i64 @atomicrmw_umin_i64_acquire(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    li a3, 2
 ; RV64I-NEXT:    li a4, 2
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_8
 ; RV64I-NEXT:    ld a3, 0(sp)
 ; RV64I-NEXT:    bnez a0, .LBB236_4
 ; RV64I-NEXT:  .LBB236_2: # %atomicrmw.start
@@ -21745,7 +21745,7 @@ define i64 @atomicrmw_umin_i64_release(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    li a4, 3
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    li a5, 0
-; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_8
 ; RV32I-NEXT:    lw a5, 12(sp)
 ; RV32I-NEXT:    lw a4, 8(sp)
 ; RV32I-NEXT:    bnez a0, .LBB237_7
@@ -21799,7 +21799,7 @@ define i64 @atomicrmw_umin_i64_release(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    li a4, 3
 ; RV32IA-NEXT:    mv a0, s0
 ; RV32IA-NEXT:    li a5, 0
-; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32IA-NEXT:    call __atomic_compare_exchange_8
 ; RV32IA-NEXT:    lw a5, 12(sp)
 ; RV32IA-NEXT:    lw a4, 8(sp)
 ; RV32IA-NEXT:    bnez a0, .LBB237_7
@@ -21849,7 +21849,7 @@ define i64 @atomicrmw_umin_i64_release(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    li a3, 3
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    li a4, 0
-; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_8
 ; RV64I-NEXT:    ld a3, 0(sp)
 ; RV64I-NEXT:    bnez a0, .LBB237_4
 ; RV64I-NEXT:  .LBB237_2: # %atomicrmw.start
@@ -21903,7 +21903,7 @@ define i64 @atomicrmw_umin_i64_acq_rel(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    li a4, 4
 ; RV32I-NEXT:    li a5, 2
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_8
 ; RV32I-NEXT:    lw a5, 12(sp)
 ; RV32I-NEXT:    lw a4, 8(sp)
 ; RV32I-NEXT:    bnez a0, .LBB238_7
@@ -21957,7 +21957,7 @@ define i64 @atomicrmw_umin_i64_acq_rel(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    li a4, 4
 ; RV32IA-NEXT:    li a5, 2
 ; RV32IA-NEXT:    mv a0, s0
-; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32IA-NEXT:    call __atomic_compare_exchange_8
 ; RV32IA-NEXT:    lw a5, 12(sp)
 ; RV32IA-NEXT:    lw a4, 8(sp)
 ; RV32IA-NEXT:    bnez a0, .LBB238_7
@@ -22007,7 +22007,7 @@ define i64 @atomicrmw_umin_i64_acq_rel(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    li a3, 4
 ; RV64I-NEXT:    li a4, 2
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_8
 ; RV64I-NEXT:    ld a3, 0(sp)
 ; RV64I-NEXT:    bnez a0, .LBB238_4
 ; RV64I-NEXT:  .LBB238_2: # %atomicrmw.start
@@ -22061,7 +22061,7 @@ define i64 @atomicrmw_umin_i64_seq_cst(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    li a4, 5
 ; RV32I-NEXT:    li a5, 5
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_8
 ; RV32I-NEXT:    lw a5, 12(sp)
 ; RV32I-NEXT:    lw a4, 8(sp)
 ; RV32I-NEXT:    bnez a0, .LBB239_7
@@ -22115,7 +22115,7 @@ define i64 @atomicrmw_umin_i64_seq_cst(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    li a4, 5
 ; RV32IA-NEXT:    li a5, 5
 ; RV32IA-NEXT:    mv a0, s0
-; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32IA-NEXT:    call __atomic_compare_exchange_8
 ; RV32IA-NEXT:    lw a5, 12(sp)
 ; RV32IA-NEXT:    lw a4, 8(sp)
 ; RV32IA-NEXT:    bnez a0, .LBB239_7
@@ -22165,7 +22165,7 @@ define i64 @atomicrmw_umin_i64_seq_cst(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    li a3, 5
 ; RV64I-NEXT:    li a4, 5
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_8
 ; RV64I-NEXT:    ld a3, 0(sp)
 ; RV64I-NEXT:    bnez a0, .LBB239_4
 ; RV64I-NEXT:  .LBB239_2: # %atomicrmw.start
diff --git a/llvm/test/CodeGen/RISCV/atomic-signext.ll b/llvm/test/CodeGen/RISCV/atomic-signext.ll
index 2739fde250ee27..ef0c27f3280105 100644
--- a/llvm/test/CodeGen/RISCV/atomic-signext.ll
+++ b/llvm/test/CodeGen/RISCV/atomic-signext.ll
@@ -14,7 +14,7 @@ define signext i8 @atomic_load_i8_unordered(ptr %a) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a1, 0
-; RV32I-NEXT:    call __atomic_load_1@plt
+; RV32I-NEXT:    call __atomic_load_1
 ; RV32I-NEXT:    slli a0, a0, 24
 ; RV32I-NEXT:    srai a0, a0, 24
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -31,7 +31,7 @@ define signext i8 @atomic_load_i8_unordered(ptr %a) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a1, 0
-; RV64I-NEXT:    call __atomic_load_1@plt
+; RV64I-NEXT:    call __atomic_load_1
 ; RV64I-NEXT:    slli a0, a0, 56
 ; RV64I-NEXT:    srai a0, a0, 56
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -52,7 +52,7 @@ define signext i16 @atomic_load_i16_unordered(ptr %a) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a1, 0
-; RV32I-NEXT:    call __atomic_load_2@plt
+; RV32I-NEXT:    call __atomic_load_2
 ; RV32I-NEXT:    slli a0, a0, 16
 ; RV32I-NEXT:    srai a0, a0, 16
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -69,7 +69,7 @@ define signext i16 @atomic_load_i16_unordered(ptr %a) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a1, 0
-; RV64I-NEXT:    call __atomic_load_2@plt
+; RV64I-NEXT:    call __atomic_load_2
 ; RV64I-NEXT:    slli a0, a0, 48
 ; RV64I-NEXT:    srai a0, a0, 48
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -90,7 +90,7 @@ define signext i32 @atomic_load_i32_unordered(ptr %a) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a1, 0
-; RV32I-NEXT:    call __atomic_load_4@plt
+; RV32I-NEXT:    call __atomic_load_4
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -105,7 +105,7 @@ define signext i32 @atomic_load_i32_unordered(ptr %a) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a1, 0
-; RV64I-NEXT:    call __atomic_load_4@plt
+; RV64I-NEXT:    call __atomic_load_4
 ; RV64I-NEXT:    sext.w a0, a0
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -126,7 +126,7 @@ define signext i8 @atomicrmw_xchg_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 0
-; RV32I-NEXT:    call __atomic_exchange_1@plt
+; RV32I-NEXT:    call __atomic_exchange_1
 ; RV32I-NEXT:    slli a0, a0, 24
 ; RV32I-NEXT:    srai a0, a0, 24
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -160,7 +160,7 @@ define signext i8 @atomicrmw_xchg_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 0
-; RV64I-NEXT:    call __atomic_exchange_1@plt
+; RV64I-NEXT:    call __atomic_exchange_1
 ; RV64I-NEXT:    slli a0, a0, 56
 ; RV64I-NEXT:    srai a0, a0, 56
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -198,7 +198,7 @@ define signext i8 @atomicrmw_add_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 0
-; RV32I-NEXT:    call __atomic_fetch_add_1@plt
+; RV32I-NEXT:    call __atomic_fetch_add_1
 ; RV32I-NEXT:    slli a0, a0, 24
 ; RV32I-NEXT:    srai a0, a0, 24
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -232,7 +232,7 @@ define signext i8 @atomicrmw_add_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 0
-; RV64I-NEXT:    call __atomic_fetch_add_1@plt
+; RV64I-NEXT:    call __atomic_fetch_add_1
 ; RV64I-NEXT:    slli a0, a0, 56
 ; RV64I-NEXT:    srai a0, a0, 56
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -270,7 +270,7 @@ define signext i8 @atomicrmw_sub_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 0
-; RV32I-NEXT:    call __atomic_fetch_sub_1@plt
+; RV32I-NEXT:    call __atomic_fetch_sub_1
 ; RV32I-NEXT:    slli a0, a0, 24
 ; RV32I-NEXT:    srai a0, a0, 24
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -304,7 +304,7 @@ define signext i8 @atomicrmw_sub_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 0
-; RV64I-NEXT:    call __atomic_fetch_sub_1@plt
+; RV64I-NEXT:    call __atomic_fetch_sub_1
 ; RV64I-NEXT:    slli a0, a0, 56
 ; RV64I-NEXT:    srai a0, a0, 56
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -342,7 +342,7 @@ define signext i8 @atomicrmw_and_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 0
-; RV32I-NEXT:    call __atomic_fetch_and_1@plt
+; RV32I-NEXT:    call __atomic_fetch_and_1
 ; RV32I-NEXT:    slli a0, a0, 24
 ; RV32I-NEXT:    srai a0, a0, 24
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -370,7 +370,7 @@ define signext i8 @atomicrmw_and_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 0
-; RV64I-NEXT:    call __atomic_fetch_and_1@plt
+; RV64I-NEXT:    call __atomic_fetch_and_1
 ; RV64I-NEXT:    slli a0, a0, 56
 ; RV64I-NEXT:    srai a0, a0, 56
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -402,7 +402,7 @@ define signext i8 @atomicrmw_nand_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 0
-; RV32I-NEXT:    call __atomic_fetch_nand_1@plt
+; RV32I-NEXT:    call __atomic_fetch_nand_1
 ; RV32I-NEXT:    slli a0, a0, 24
 ; RV32I-NEXT:    srai a0, a0, 24
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -437,7 +437,7 @@ define signext i8 @atomicrmw_nand_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 0
-; RV64I-NEXT:    call __atomic_fetch_nand_1@plt
+; RV64I-NEXT:    call __atomic_fetch_nand_1
 ; RV64I-NEXT:    slli a0, a0, 56
 ; RV64I-NEXT:    srai a0, a0, 56
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -476,7 +476,7 @@ define signext i8 @atomicrmw_or_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 0
-; RV32I-NEXT:    call __atomic_fetch_or_1@plt
+; RV32I-NEXT:    call __atomic_fetch_or_1
 ; RV32I-NEXT:    slli a0, a0, 24
 ; RV32I-NEXT:    srai a0, a0, 24
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -500,7 +500,7 @@ define signext i8 @atomicrmw_or_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 0
-; RV64I-NEXT:    call __atomic_fetch_or_1@plt
+; RV64I-NEXT:    call __atomic_fetch_or_1
 ; RV64I-NEXT:    slli a0, a0, 56
 ; RV64I-NEXT:    srai a0, a0, 56
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -528,7 +528,7 @@ define signext i8 @atomicrmw_xor_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 0
-; RV32I-NEXT:    call __atomic_fetch_xor_1@plt
+; RV32I-NEXT:    call __atomic_fetch_xor_1
 ; RV32I-NEXT:    slli a0, a0, 24
 ; RV32I-NEXT:    srai a0, a0, 24
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -552,7 +552,7 @@ define signext i8 @atomicrmw_xor_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 0
-; RV64I-NEXT:    call __atomic_fetch_xor_1@plt
+; RV64I-NEXT:    call __atomic_fetch_xor_1
 ; RV64I-NEXT:    slli a0, a0, 56
 ; RV64I-NEXT:    srai a0, a0, 56
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -595,7 +595,7 @@ define signext i8 @atomicrmw_max_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:    li a4, 0
-; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_1
 ; RV32I-NEXT:    lbu a3, 15(sp)
 ; RV32I-NEXT:    bnez a0, .LBB10_4
 ; RV32I-NEXT:  .LBB10_2: # %atomicrmw.start
@@ -669,7 +669,7 @@ define signext i8 @atomicrmw_max_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    li a3, 0
 ; RV64I-NEXT:    li a4, 0
-; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_1
 ; RV64I-NEXT:    lbu a3, 15(sp)
 ; RV64I-NEXT:    bnez a0, .LBB10_4
 ; RV64I-NEXT:  .LBB10_2: # %atomicrmw.start
@@ -747,7 +747,7 @@ define signext i8 @atomicrmw_min_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:    li a4, 0
-; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_1
 ; RV32I-NEXT:    lbu a3, 15(sp)
 ; RV32I-NEXT:    bnez a0, .LBB11_4
 ; RV32I-NEXT:  .LBB11_2: # %atomicrmw.start
@@ -821,7 +821,7 @@ define signext i8 @atomicrmw_min_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    li a3, 0
 ; RV64I-NEXT:    li a4, 0
-; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_1
 ; RV64I-NEXT:    lbu a3, 15(sp)
 ; RV64I-NEXT:    bnez a0, .LBB11_4
 ; RV64I-NEXT:  .LBB11_2: # %atomicrmw.start
@@ -898,7 +898,7 @@ define signext i8 @atomicrmw_umax_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:    li a4, 0
-; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_1
 ; RV32I-NEXT:    lbu a3, 15(sp)
 ; RV32I-NEXT:    bnez a0, .LBB12_4
 ; RV32I-NEXT:  .LBB12_2: # %atomicrmw.start
@@ -965,7 +965,7 @@ define signext i8 @atomicrmw_umax_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    li a3, 0
 ; RV64I-NEXT:    li a4, 0
-; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_1
 ; RV64I-NEXT:    lbu a3, 15(sp)
 ; RV64I-NEXT:    bnez a0, .LBB12_4
 ; RV64I-NEXT:  .LBB12_2: # %atomicrmw.start
@@ -1036,7 +1036,7 @@ define signext i8 @atomicrmw_umin_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:    li a4, 0
-; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_1
 ; RV32I-NEXT:    lbu a3, 15(sp)
 ; RV32I-NEXT:    bnez a0, .LBB13_4
 ; RV32I-NEXT:  .LBB13_2: # %atomicrmw.start
@@ -1103,7 +1103,7 @@ define signext i8 @atomicrmw_umin_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    li a3, 0
 ; RV64I-NEXT:    li a4, 0
-; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_1
 ; RV64I-NEXT:    lbu a3, 15(sp)
 ; RV64I-NEXT:    bnez a0, .LBB13_4
 ; RV64I-NEXT:  .LBB13_2: # %atomicrmw.start
@@ -1160,7 +1160,7 @@ define signext i16 @atomicrmw_xchg_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 0
-; RV32I-NEXT:    call __atomic_exchange_2@plt
+; RV32I-NEXT:    call __atomic_exchange_2
 ; RV32I-NEXT:    slli a0, a0, 16
 ; RV32I-NEXT:    srai a0, a0, 16
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -1195,7 +1195,7 @@ define signext i16 @atomicrmw_xchg_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 0
-; RV64I-NEXT:    call __atomic_exchange_2@plt
+; RV64I-NEXT:    call __atomic_exchange_2
 ; RV64I-NEXT:    slli a0, a0, 48
 ; RV64I-NEXT:    srai a0, a0, 48
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -1234,7 +1234,7 @@ define signext i16 @atomicrmw_add_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 0
-; RV32I-NEXT:    call __atomic_fetch_add_2@plt
+; RV32I-NEXT:    call __atomic_fetch_add_2
 ; RV32I-NEXT:    slli a0, a0, 16
 ; RV32I-NEXT:    srai a0, a0, 16
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -1269,7 +1269,7 @@ define signext i16 @atomicrmw_add_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 0
-; RV64I-NEXT:    call __atomic_fetch_add_2@plt
+; RV64I-NEXT:    call __atomic_fetch_add_2
 ; RV64I-NEXT:    slli a0, a0, 48
 ; RV64I-NEXT:    srai a0, a0, 48
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -1308,7 +1308,7 @@ define signext i16 @atomicrmw_sub_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 0
-; RV32I-NEXT:    call __atomic_fetch_sub_2@plt
+; RV32I-NEXT:    call __atomic_fetch_sub_2
 ; RV32I-NEXT:    slli a0, a0, 16
 ; RV32I-NEXT:    srai a0, a0, 16
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -1343,7 +1343,7 @@ define signext i16 @atomicrmw_sub_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 0
-; RV64I-NEXT:    call __atomic_fetch_sub_2@plt
+; RV64I-NEXT:    call __atomic_fetch_sub_2
 ; RV64I-NEXT:    slli a0, a0, 48
 ; RV64I-NEXT:    srai a0, a0, 48
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -1382,7 +1382,7 @@ define signext i16 @atomicrmw_and_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 0
-; RV32I-NEXT:    call __atomic_fetch_and_2@plt
+; RV32I-NEXT:    call __atomic_fetch_and_2
 ; RV32I-NEXT:    slli a0, a0, 16
 ; RV32I-NEXT:    srai a0, a0, 16
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -1411,7 +1411,7 @@ define signext i16 @atomicrmw_and_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 0
-; RV64I-NEXT:    call __atomic_fetch_and_2@plt
+; RV64I-NEXT:    call __atomic_fetch_and_2
 ; RV64I-NEXT:    slli a0, a0, 48
 ; RV64I-NEXT:    srai a0, a0, 48
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -1444,7 +1444,7 @@ define signext i16 @atomicrmw_nand_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 0
-; RV32I-NEXT:    call __atomic_fetch_nand_2@plt
+; RV32I-NEXT:    call __atomic_fetch_nand_2
 ; RV32I-NEXT:    slli a0, a0, 16
 ; RV32I-NEXT:    srai a0, a0, 16
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -1480,7 +1480,7 @@ define signext i16 @atomicrmw_nand_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 0
-; RV64I-NEXT:    call __atomic_fetch_nand_2@plt
+; RV64I-NEXT:    call __atomic_fetch_nand_2
 ; RV64I-NEXT:    slli a0, a0, 48
 ; RV64I-NEXT:    srai a0, a0, 48
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -1520,7 +1520,7 @@ define signext i16 @atomicrmw_or_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 0
-; RV32I-NEXT:    call __atomic_fetch_or_2@plt
+; RV32I-NEXT:    call __atomic_fetch_or_2
 ; RV32I-NEXT:    slli a0, a0, 16
 ; RV32I-NEXT:    srai a0, a0, 16
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -1545,7 +1545,7 @@ define signext i16 @atomicrmw_or_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 0
-; RV64I-NEXT:    call __atomic_fetch_or_2@plt
+; RV64I-NEXT:    call __atomic_fetch_or_2
 ; RV64I-NEXT:    slli a0, a0, 48
 ; RV64I-NEXT:    srai a0, a0, 48
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -1574,7 +1574,7 @@ define signext i16 @atomicrmw_xor_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 0
-; RV32I-NEXT:    call __atomic_fetch_xor_2@plt
+; RV32I-NEXT:    call __atomic_fetch_xor_2
 ; RV32I-NEXT:    slli a0, a0, 16
 ; RV32I-NEXT:    srai a0, a0, 16
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -1599,7 +1599,7 @@ define signext i16 @atomicrmw_xor_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 0
-; RV64I-NEXT:    call __atomic_fetch_xor_2@plt
+; RV64I-NEXT:    call __atomic_fetch_xor_2
 ; RV64I-NEXT:    slli a0, a0, 48
 ; RV64I-NEXT:    srai a0, a0, 48
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -1643,7 +1643,7 @@ define signext i16 @atomicrmw_max_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:    li a4, 0
-; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_2
 ; RV32I-NEXT:    lh a3, 14(sp)
 ; RV32I-NEXT:    bnez a0, .LBB21_4
 ; RV32I-NEXT:  .LBB21_2: # %atomicrmw.start
@@ -1719,7 +1719,7 @@ define signext i16 @atomicrmw_max_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    li a3, 0
 ; RV64I-NEXT:    li a4, 0
-; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_2
 ; RV64I-NEXT:    lh a3, 14(sp)
 ; RV64I-NEXT:    bnez a0, .LBB21_4
 ; RV64I-NEXT:  .LBB21_2: # %atomicrmw.start
@@ -1799,7 +1799,7 @@ define signext i16 @atomicrmw_min_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:    li a4, 0
-; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_2
 ; RV32I-NEXT:    lh a3, 14(sp)
 ; RV32I-NEXT:    bnez a0, .LBB22_4
 ; RV32I-NEXT:  .LBB22_2: # %atomicrmw.start
@@ -1875,7 +1875,7 @@ define signext i16 @atomicrmw_min_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    li a3, 0
 ; RV64I-NEXT:    li a4, 0
-; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_2
 ; RV64I-NEXT:    lh a3, 14(sp)
 ; RV64I-NEXT:    bnez a0, .LBB22_4
 ; RV64I-NEXT:  .LBB22_2: # %atomicrmw.start
@@ -1957,7 +1957,7 @@ define signext i16 @atomicrmw_umax_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    mv a0, s1
 ; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:    li a4, 0
-; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_2
 ; RV32I-NEXT:    lh a1, 10(sp)
 ; RV32I-NEXT:    bnez a0, .LBB23_4
 ; RV32I-NEXT:  .LBB23_2: # %atomicrmw.start
@@ -2029,7 +2029,7 @@ define signext i16 @atomicrmw_umax_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    mv a0, s1
 ; RV64I-NEXT:    li a3, 0
 ; RV64I-NEXT:    li a4, 0
-; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_2
 ; RV64I-NEXT:    lh a1, 6(sp)
 ; RV64I-NEXT:    bnez a0, .LBB23_4
 ; RV64I-NEXT:  .LBB23_2: # %atomicrmw.start
@@ -2105,7 +2105,7 @@ define signext i16 @atomicrmw_umin_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    mv a0, s1
 ; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:    li a4, 0
-; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_2
 ; RV32I-NEXT:    lh a1, 10(sp)
 ; RV32I-NEXT:    bnez a0, .LBB24_4
 ; RV32I-NEXT:  .LBB24_2: # %atomicrmw.start
@@ -2177,7 +2177,7 @@ define signext i16 @atomicrmw_umin_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    mv a0, s1
 ; RV64I-NEXT:    li a3, 0
 ; RV64I-NEXT:    li a4, 0
-; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_2
 ; RV64I-NEXT:    lh a1, 6(sp)
 ; RV64I-NEXT:    bnez a0, .LBB24_4
 ; RV64I-NEXT:  .LBB24_2: # %atomicrmw.start
@@ -2236,7 +2236,7 @@ define signext i32 @atomicrmw_xchg_i32_monotonic(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 0
-; RV32I-NEXT:    call __atomic_exchange_4@plt
+; RV32I-NEXT:    call __atomic_exchange_4
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -2251,7 +2251,7 @@ define signext i32 @atomicrmw_xchg_i32_monotonic(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 0
-; RV64I-NEXT:    call __atomic_exchange_4@plt
+; RV64I-NEXT:    call __atomic_exchange_4
 ; RV64I-NEXT:    sext.w a0, a0
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -2271,7 +2271,7 @@ define signext i32 @atomicrmw_add_i32_monotonic(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 0
-; RV32I-NEXT:    call __atomic_fetch_add_4@plt
+; RV32I-NEXT:    call __atomic_fetch_add_4
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -2286,7 +2286,7 @@ define signext i32 @atomicrmw_add_i32_monotonic(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 0
-; RV64I-NEXT:    call __atomic_fetch_add_4@plt
+; RV64I-NEXT:    call __atomic_fetch_add_4
 ; RV64I-NEXT:    sext.w a0, a0
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -2306,7 +2306,7 @@ define signext i32 @atomicrmw_sub_i32_monotonic(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 0
-; RV32I-NEXT:    call __atomic_fetch_sub_4@plt
+; RV32I-NEXT:    call __atomic_fetch_sub_4
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -2322,7 +2322,7 @@ define signext i32 @atomicrmw_sub_i32_monotonic(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 0
-; RV64I-NEXT:    call __atomic_fetch_sub_4@plt
+; RV64I-NEXT:    call __atomic_fetch_sub_4
 ; RV64I-NEXT:    sext.w a0, a0
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -2343,7 +2343,7 @@ define signext i32 @atomicrmw_and_i32_monotonic(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 0
-; RV32I-NEXT:    call __atomic_fetch_and_4@plt
+; RV32I-NEXT:    call __atomic_fetch_and_4
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -2358,7 +2358,7 @@ define signext i32 @atomicrmw_and_i32_monotonic(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 0
-; RV64I-NEXT:    call __atomic_fetch_and_4@plt
+; RV64I-NEXT:    call __atomic_fetch_and_4
 ; RV64I-NEXT:    sext.w a0, a0
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -2378,7 +2378,7 @@ define signext i32 @atomicrmw_nand_i32_monotonic(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 0
-; RV32I-NEXT:    call __atomic_fetch_nand_4@plt
+; RV32I-NEXT:    call __atomic_fetch_nand_4
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -2400,7 +2400,7 @@ define signext i32 @atomicrmw_nand_i32_monotonic(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 0
-; RV64I-NEXT:    call __atomic_fetch_nand_4@plt
+; RV64I-NEXT:    call __atomic_fetch_nand_4
 ; RV64I-NEXT:    sext.w a0, a0
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -2427,7 +2427,7 @@ define signext i32 @atomicrmw_or_i32_monotonic(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 0
-; RV32I-NEXT:    call __atomic_fetch_or_4@plt
+; RV32I-NEXT:    call __atomic_fetch_or_4
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -2442,7 +2442,7 @@ define signext i32 @atomicrmw_or_i32_monotonic(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 0
-; RV64I-NEXT:    call __atomic_fetch_or_4@plt
+; RV64I-NEXT:    call __atomic_fetch_or_4
 ; RV64I-NEXT:    sext.w a0, a0
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -2462,7 +2462,7 @@ define signext i32 @atomicrmw_xor_i32_monotonic(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 0
-; RV32I-NEXT:    call __atomic_fetch_xor_4@plt
+; RV32I-NEXT:    call __atomic_fetch_xor_4
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -2477,7 +2477,7 @@ define signext i32 @atomicrmw_xor_i32_monotonic(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 0
-; RV64I-NEXT:    call __atomic_fetch_xor_4@plt
+; RV64I-NEXT:    call __atomic_fetch_xor_4
 ; RV64I-NEXT:    sext.w a0, a0
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -2509,7 +2509,7 @@ define signext i32 @atomicrmw_max_i32_monotonic(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:    li a4, 0
-; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_4
 ; RV32I-NEXT:    lw a3, 0(sp)
 ; RV32I-NEXT:    bnez a0, .LBB32_4
 ; RV32I-NEXT:  .LBB32_2: # %atomicrmw.start
@@ -2552,7 +2552,7 @@ define signext i32 @atomicrmw_max_i32_monotonic(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    li a3, 0
 ; RV64I-NEXT:    li a4, 0
-; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_4
 ; RV64I-NEXT:    lw a3, 12(sp)
 ; RV64I-NEXT:    bnez a0, .LBB32_4
 ; RV64I-NEXT:  .LBB32_2: # %atomicrmw.start
@@ -2598,7 +2598,7 @@ define signext i32 @atomicrmw_min_i32_monotonic(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:    li a4, 0
-; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_4
 ; RV32I-NEXT:    lw a3, 0(sp)
 ; RV32I-NEXT:    bnez a0, .LBB33_4
 ; RV32I-NEXT:  .LBB33_2: # %atomicrmw.start
@@ -2641,7 +2641,7 @@ define signext i32 @atomicrmw_min_i32_monotonic(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    li a3, 0
 ; RV64I-NEXT:    li a4, 0
-; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_4
 ; RV64I-NEXT:    lw a3, 12(sp)
 ; RV64I-NEXT:    bnez a0, .LBB33_4
 ; RV64I-NEXT:  .LBB33_2: # %atomicrmw.start
@@ -2687,7 +2687,7 @@ define signext i32 @atomicrmw_umax_i32_monotonic(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:    li a4, 0
-; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_4
 ; RV32I-NEXT:    lw a3, 0(sp)
 ; RV32I-NEXT:    bnez a0, .LBB34_4
 ; RV32I-NEXT:  .LBB34_2: # %atomicrmw.start
@@ -2730,7 +2730,7 @@ define signext i32 @atomicrmw_umax_i32_monotonic(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    li a3, 0
 ; RV64I-NEXT:    li a4, 0
-; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_4
 ; RV64I-NEXT:    lw a3, 12(sp)
 ; RV64I-NEXT:    bnez a0, .LBB34_4
 ; RV64I-NEXT:  .LBB34_2: # %atomicrmw.start
@@ -2776,7 +2776,7 @@ define signext i32 @atomicrmw_umin_i32_monotonic(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:    li a4, 0
-; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_4
 ; RV32I-NEXT:    lw a3, 0(sp)
 ; RV32I-NEXT:    bnez a0, .LBB35_4
 ; RV32I-NEXT:  .LBB35_2: # %atomicrmw.start
@@ -2819,7 +2819,7 @@ define signext i32 @atomicrmw_umin_i32_monotonic(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    li a3, 0
 ; RV64I-NEXT:    li a4, 0
-; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_4
 ; RV64I-NEXT:    lw a3, 12(sp)
 ; RV64I-NEXT:    bnez a0, .LBB35_4
 ; RV64I-NEXT:  .LBB35_2: # %atomicrmw.start
@@ -2853,7 +2853,7 @@ define signext i64 @atomicrmw_xchg_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a3, 0
-; RV32I-NEXT:    call __atomic_exchange_8@plt
+; RV32I-NEXT:    call __atomic_exchange_8
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -2863,7 +2863,7 @@ define signext i64 @atomicrmw_xchg_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    addi sp, sp, -16
 ; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    li a3, 0
-; RV32IA-NEXT:    call __atomic_exchange_8@plt
+; RV32IA-NEXT:    call __atomic_exchange_8
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
@@ -2873,7 +2873,7 @@ define signext i64 @atomicrmw_xchg_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 0
-; RV64I-NEXT:    call __atomic_exchange_8@plt
+; RV64I-NEXT:    call __atomic_exchange_8
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -2892,7 +2892,7 @@ define signext i64 @atomicrmw_add_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a3, 0
-; RV32I-NEXT:    call __atomic_fetch_add_8@plt
+; RV32I-NEXT:    call __atomic_fetch_add_8
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -2902,7 +2902,7 @@ define signext i64 @atomicrmw_add_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    addi sp, sp, -16
 ; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    li a3, 0
-; RV32IA-NEXT:    call __atomic_fetch_add_8@plt
+; RV32IA-NEXT:    call __atomic_fetch_add_8
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
@@ -2912,7 +2912,7 @@ define signext i64 @atomicrmw_add_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 0
-; RV64I-NEXT:    call __atomic_fetch_add_8@plt
+; RV64I-NEXT:    call __atomic_fetch_add_8
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -2931,7 +2931,7 @@ define signext i64 @atomicrmw_sub_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a3, 0
-; RV32I-NEXT:    call __atomic_fetch_sub_8@plt
+; RV32I-NEXT:    call __atomic_fetch_sub_8
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -2941,7 +2941,7 @@ define signext i64 @atomicrmw_sub_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    addi sp, sp, -16
 ; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    li a3, 0
-; RV32IA-NEXT:    call __atomic_fetch_sub_8@plt
+; RV32IA-NEXT:    call __atomic_fetch_sub_8
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
@@ -2951,7 +2951,7 @@ define signext i64 @atomicrmw_sub_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 0
-; RV64I-NEXT:    call __atomic_fetch_sub_8@plt
+; RV64I-NEXT:    call __atomic_fetch_sub_8
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -2971,7 +2971,7 @@ define signext i64 @atomicrmw_and_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a3, 0
-; RV32I-NEXT:    call __atomic_fetch_and_8@plt
+; RV32I-NEXT:    call __atomic_fetch_and_8
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -2981,7 +2981,7 @@ define signext i64 @atomicrmw_and_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    addi sp, sp, -16
 ; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    li a3, 0
-; RV32IA-NEXT:    call __atomic_fetch_and_8@plt
+; RV32IA-NEXT:    call __atomic_fetch_and_8
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
@@ -2991,7 +2991,7 @@ define signext i64 @atomicrmw_and_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 0
-; RV64I-NEXT:    call __atomic_fetch_and_8@plt
+; RV64I-NEXT:    call __atomic_fetch_and_8
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -3010,7 +3010,7 @@ define signext i64 @atomicrmw_nand_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a3, 0
-; RV32I-NEXT:    call __atomic_fetch_nand_8@plt
+; RV32I-NEXT:    call __atomic_fetch_nand_8
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -3020,7 +3020,7 @@ define signext i64 @atomicrmw_nand_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    addi sp, sp, -16
 ; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    li a3, 0
-; RV32IA-NEXT:    call __atomic_fetch_nand_8@plt
+; RV32IA-NEXT:    call __atomic_fetch_nand_8
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
@@ -3030,7 +3030,7 @@ define signext i64 @atomicrmw_nand_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 0
-; RV64I-NEXT:    call __atomic_fetch_nand_8@plt
+; RV64I-NEXT:    call __atomic_fetch_nand_8
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -3056,7 +3056,7 @@ define signext i64 @atomicrmw_or_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a3, 0
-; RV32I-NEXT:    call __atomic_fetch_or_8@plt
+; RV32I-NEXT:    call __atomic_fetch_or_8
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -3066,7 +3066,7 @@ define signext i64 @atomicrmw_or_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    addi sp, sp, -16
 ; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    li a3, 0
-; RV32IA-NEXT:    call __atomic_fetch_or_8@plt
+; RV32IA-NEXT:    call __atomic_fetch_or_8
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
@@ -3076,7 +3076,7 @@ define signext i64 @atomicrmw_or_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 0
-; RV64I-NEXT:    call __atomic_fetch_or_8@plt
+; RV64I-NEXT:    call __atomic_fetch_or_8
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -3095,7 +3095,7 @@ define signext i64 @atomicrmw_xor_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a3, 0
-; RV32I-NEXT:    call __atomic_fetch_xor_8@plt
+; RV32I-NEXT:    call __atomic_fetch_xor_8
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -3105,7 +3105,7 @@ define signext i64 @atomicrmw_xor_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    addi sp, sp, -16
 ; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    li a3, 0
-; RV32IA-NEXT:    call __atomic_fetch_xor_8@plt
+; RV32IA-NEXT:    call __atomic_fetch_xor_8
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
@@ -3115,7 +3115,7 @@ define signext i64 @atomicrmw_xor_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 0
-; RV64I-NEXT:    call __atomic_fetch_xor_8@plt
+; RV64I-NEXT:    call __atomic_fetch_xor_8
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -3150,7 +3150,7 @@ define signext i64 @atomicrmw_max_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    li a4, 0
 ; RV32I-NEXT:    li a5, 0
-; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_8
 ; RV32I-NEXT:    lw a5, 12(sp)
 ; RV32I-NEXT:    lw a4, 8(sp)
 ; RV32I-NEXT:    bnez a0, .LBB43_7
@@ -3204,7 +3204,7 @@ define signext i64 @atomicrmw_max_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    mv a0, s0
 ; RV32IA-NEXT:    li a4, 0
 ; RV32IA-NEXT:    li a5, 0
-; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32IA-NEXT:    call __atomic_compare_exchange_8
 ; RV32IA-NEXT:    lw a5, 12(sp)
 ; RV32IA-NEXT:    lw a4, 8(sp)
 ; RV32IA-NEXT:    bnez a0, .LBB43_7
@@ -3254,7 +3254,7 @@ define signext i64 @atomicrmw_max_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    li a3, 0
 ; RV64I-NEXT:    li a4, 0
-; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_8
 ; RV64I-NEXT:    ld a3, 0(sp)
 ; RV64I-NEXT:    bnez a0, .LBB43_4
 ; RV64I-NEXT:  .LBB43_2: # %atomicrmw.start
@@ -3303,7 +3303,7 @@ define signext i64 @atomicrmw_min_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    li a4, 0
 ; RV32I-NEXT:    li a5, 0
-; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_8
 ; RV32I-NEXT:    lw a5, 12(sp)
 ; RV32I-NEXT:    lw a4, 8(sp)
 ; RV32I-NEXT:    bnez a0, .LBB44_7
@@ -3357,7 +3357,7 @@ define signext i64 @atomicrmw_min_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    mv a0, s0
 ; RV32IA-NEXT:    li a4, 0
 ; RV32IA-NEXT:    li a5, 0
-; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32IA-NEXT:    call __atomic_compare_exchange_8
 ; RV32IA-NEXT:    lw a5, 12(sp)
 ; RV32IA-NEXT:    lw a4, 8(sp)
 ; RV32IA-NEXT:    bnez a0, .LBB44_7
@@ -3407,7 +3407,7 @@ define signext i64 @atomicrmw_min_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    li a3, 0
 ; RV64I-NEXT:    li a4, 0
-; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_8
 ; RV64I-NEXT:    ld a3, 0(sp)
 ; RV64I-NEXT:    bnez a0, .LBB44_4
 ; RV64I-NEXT:  .LBB44_2: # %atomicrmw.start
@@ -3456,7 +3456,7 @@ define signext i64 @atomicrmw_umax_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    li a4, 0
 ; RV32I-NEXT:    li a5, 0
-; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_8
 ; RV32I-NEXT:    lw a5, 12(sp)
 ; RV32I-NEXT:    lw a4, 8(sp)
 ; RV32I-NEXT:    bnez a0, .LBB45_7
@@ -3510,7 +3510,7 @@ define signext i64 @atomicrmw_umax_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    mv a0, s0
 ; RV32IA-NEXT:    li a4, 0
 ; RV32IA-NEXT:    li a5, 0
-; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32IA-NEXT:    call __atomic_compare_exchange_8
 ; RV32IA-NEXT:    lw a5, 12(sp)
 ; RV32IA-NEXT:    lw a4, 8(sp)
 ; RV32IA-NEXT:    bnez a0, .LBB45_7
@@ -3560,7 +3560,7 @@ define signext i64 @atomicrmw_umax_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    li a3, 0
 ; RV64I-NEXT:    li a4, 0
-; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_8
 ; RV64I-NEXT:    ld a3, 0(sp)
 ; RV64I-NEXT:    bnez a0, .LBB45_4
 ; RV64I-NEXT:  .LBB45_2: # %atomicrmw.start
@@ -3609,7 +3609,7 @@ define signext i64 @atomicrmw_umin_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    li a4, 0
 ; RV32I-NEXT:    li a5, 0
-; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_8
 ; RV32I-NEXT:    lw a5, 12(sp)
 ; RV32I-NEXT:    lw a4, 8(sp)
 ; RV32I-NEXT:    bnez a0, .LBB46_7
@@ -3663,7 +3663,7 @@ define signext i64 @atomicrmw_umin_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    mv a0, s0
 ; RV32IA-NEXT:    li a4, 0
 ; RV32IA-NEXT:    li a5, 0
-; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32IA-NEXT:    call __atomic_compare_exchange_8
 ; RV32IA-NEXT:    lw a5, 12(sp)
 ; RV32IA-NEXT:    lw a4, 8(sp)
 ; RV32IA-NEXT:    bnez a0, .LBB46_7
@@ -3713,7 +3713,7 @@ define signext i64 @atomicrmw_umin_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    li a3, 0
 ; RV64I-NEXT:    li a4, 0
-; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_8
 ; RV64I-NEXT:    ld a3, 0(sp)
 ; RV64I-NEXT:    bnez a0, .LBB46_4
 ; RV64I-NEXT:  .LBB46_2: # %atomicrmw.start
@@ -3749,7 +3749,7 @@ define signext i8 @cmpxchg_i8_monotonic_monotonic_val0(ptr %ptr, i8 signext %cmp
 ; RV32I-NEXT:    addi a1, sp, 11
 ; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:    li a4, 0
-; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_1
 ; RV32I-NEXT:    lb a0, 11(sp)
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -3789,7 +3789,7 @@ define signext i8 @cmpxchg_i8_monotonic_monotonic_val0(ptr %ptr, i8 signext %cmp
 ; RV64I-NEXT:    addi a1, sp, 7
 ; RV64I-NEXT:    li a3, 0
 ; RV64I-NEXT:    li a4, 0
-; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_1
 ; RV64I-NEXT:    lb a0, 7(sp)
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -3834,7 +3834,7 @@ define i1 @cmpxchg_i8_monotonic_monotonic_val1(ptr %ptr, i8 signext %cmp, i8 sig
 ; RV32I-NEXT:    addi a1, sp, 11
 ; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:    li a4, 0
-; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -3873,7 +3873,7 @@ define i1 @cmpxchg_i8_monotonic_monotonic_val1(ptr %ptr, i8 signext %cmp, i8 sig
 ; RV64I-NEXT:    addi a1, sp, 7
 ; RV64I-NEXT:    li a3, 0
 ; RV64I-NEXT:    li a4, 0
-; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -3917,7 +3917,7 @@ define signext i16 @cmpxchg_i16_monotonic_monotonic_val0(ptr %ptr, i16 signext %
 ; RV32I-NEXT:    addi a1, sp, 10
 ; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:    li a4, 0
-; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_2
 ; RV32I-NEXT:    lh a0, 10(sp)
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -3958,7 +3958,7 @@ define signext i16 @cmpxchg_i16_monotonic_monotonic_val0(ptr %ptr, i16 signext %
 ; RV64I-NEXT:    addi a1, sp, 6
 ; RV64I-NEXT:    li a3, 0
 ; RV64I-NEXT:    li a4, 0
-; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_2
 ; RV64I-NEXT:    lh a0, 6(sp)
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -4004,7 +4004,7 @@ define i1 @cmpxchg_i16_monotonic_monotonic_val1(ptr %ptr, i16 signext %cmp, i16
 ; RV32I-NEXT:    addi a1, sp, 10
 ; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:    li a4, 0
-; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -4044,7 +4044,7 @@ define i1 @cmpxchg_i16_monotonic_monotonic_val1(ptr %ptr, i16 signext %cmp, i16
 ; RV64I-NEXT:    addi a1, sp, 6
 ; RV64I-NEXT:    li a3, 0
 ; RV64I-NEXT:    li a4, 0
-; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -4089,7 +4089,7 @@ define signext i32 @cmpxchg_i32_monotonic_monotonic_val0(ptr %ptr, i32 signext %
 ; RV32I-NEXT:    addi a1, sp, 8
 ; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:    li a4, 0
-; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_4
 ; RV32I-NEXT:    lw a0, 8(sp)
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -4115,7 +4115,7 @@ define signext i32 @cmpxchg_i32_monotonic_monotonic_val0(ptr %ptr, i32 signext %
 ; RV64I-NEXT:    addi a1, sp, 4
 ; RV64I-NEXT:    li a3, 0
 ; RV64I-NEXT:    li a4, 0
-; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_4
 ; RV64I-NEXT:    lw a0, 4(sp)
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -4146,7 +4146,7 @@ define i1 @cmpxchg_i32_monotonic_monotonic_val1(ptr %ptr, i32 signext %cmp, i32
 ; RV32I-NEXT:    addi a1, sp, 8
 ; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:    li a4, 0
-; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_4
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -4172,7 +4172,7 @@ define i1 @cmpxchg_i32_monotonic_monotonic_val1(ptr %ptr, i32 signext %cmp, i32
 ; RV64I-NEXT:    addi a1, sp, 4
 ; RV64I-NEXT:    li a3, 0
 ; RV64I-NEXT:    li a4, 0
-; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_4
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -4204,7 +4204,7 @@ define signext i32 @atomicrmw_xchg_i32_monotonic_crossbb(ptr %a, i1 %c) nounwind
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a1, 1
 ; RV32I-NEXT:    li a2, 0
-; RV32I-NEXT:    call __atomic_exchange_4@plt
+; RV32I-NEXT:    call __atomic_exchange_4
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -4239,7 +4239,7 @@ define signext i32 @atomicrmw_xchg_i32_monotonic_crossbb(ptr %a, i1 %c) nounwind
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a1, 1
 ; RV64I-NEXT:    li a2, 0
-; RV64I-NEXT:    call __atomic_exchange_4@plt
+; RV64I-NEXT:    call __atomic_exchange_4
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    sext.w a0, a0
@@ -4291,7 +4291,7 @@ define signext i32 @atomicrmw_add_i32_monotonic_crossbb(ptr %a, i1 %c) nounwind
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a1, 1
 ; RV32I-NEXT:    li a2, 0
-; RV32I-NEXT:    call __atomic_fetch_add_4@plt
+; RV32I-NEXT:    call __atomic_fetch_add_4
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -4326,7 +4326,7 @@ define signext i32 @atomicrmw_add_i32_monotonic_crossbb(ptr %a, i1 %c) nounwind
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a1, 1
 ; RV64I-NEXT:    li a2, 0
-; RV64I-NEXT:    call __atomic_fetch_add_4@plt
+; RV64I-NEXT:    call __atomic_fetch_add_4
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    sext.w a0, a0
@@ -4379,7 +4379,7 @@ define signext i32 @atomicrmw_sub_i32_monotonic_crossbb(ptr %a, i1 %c) nounwind
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a1, 1
 ; RV32I-NEXT:    li a2, 0
-; RV32I-NEXT:    call __atomic_fetch_sub_4@plt
+; RV32I-NEXT:    call __atomic_fetch_sub_4
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -4414,7 +4414,7 @@ define signext i32 @atomicrmw_sub_i32_monotonic_crossbb(ptr %a, i1 %c) nounwind
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a1, 1
 ; RV64I-NEXT:    li a2, 0
-; RV64I-NEXT:    call __atomic_fetch_sub_4@plt
+; RV64I-NEXT:    call __atomic_fetch_sub_4
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    sext.w a0, a0
@@ -4467,7 +4467,7 @@ define signext i32 @atomicrmw_and_i32_monotonic_crossbb(ptr %a, i1 %c) nounwind
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a1, 1
 ; RV32I-NEXT:    li a2, 0
-; RV32I-NEXT:    call __atomic_fetch_and_4@plt
+; RV32I-NEXT:    call __atomic_fetch_and_4
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -4502,7 +4502,7 @@ define signext i32 @atomicrmw_and_i32_monotonic_crossbb(ptr %a, i1 %c) nounwind
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a1, 1
 ; RV64I-NEXT:    li a2, 0
-; RV64I-NEXT:    call __atomic_fetch_and_4@plt
+; RV64I-NEXT:    call __atomic_fetch_and_4
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    sext.w a0, a0
@@ -4555,7 +4555,7 @@ define signext i32 @atomicrmw_nand_i32_monotonic_crossbb(ptr %a, i1 %c) nounwind
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a1, 1
 ; RV32I-NEXT:    li a2, 0
-; RV32I-NEXT:    call __atomic_fetch_nand_4@plt
+; RV32I-NEXT:    call __atomic_fetch_nand_4
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -4597,7 +4597,7 @@ define signext i32 @atomicrmw_nand_i32_monotonic_crossbb(ptr %a, i1 %c) nounwind
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a1, 1
 ; RV64I-NEXT:    li a2, 0
-; RV64I-NEXT:    call __atomic_fetch_nand_4@plt
+; RV64I-NEXT:    call __atomic_fetch_nand_4
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    sext.w a0, a0
@@ -4657,7 +4657,7 @@ define signext i32 @atomicrmw_or_i32_monotonic_crossbb(ptr %a, i1 %c) nounwind {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a1, 1
 ; RV32I-NEXT:    li a2, 0
-; RV32I-NEXT:    call __atomic_fetch_or_4@plt
+; RV32I-NEXT:    call __atomic_fetch_or_4
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -4692,7 +4692,7 @@ define signext i32 @atomicrmw_or_i32_monotonic_crossbb(ptr %a, i1 %c) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a1, 1
 ; RV64I-NEXT:    li a2, 0
-; RV64I-NEXT:    call __atomic_fetch_or_4@plt
+; RV64I-NEXT:    call __atomic_fetch_or_4
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    sext.w a0, a0
@@ -4745,7 +4745,7 @@ define signext i32 @atomicrmw_xor_i32_monotonic_crossbb(ptr %a, i1 %c) nounwind
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a1, 1
 ; RV32I-NEXT:    li a2, 0
-; RV32I-NEXT:    call __atomic_fetch_xor_4@plt
+; RV32I-NEXT:    call __atomic_fetch_xor_4
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -4780,7 +4780,7 @@ define signext i32 @atomicrmw_xor_i32_monotonic_crossbb(ptr %a, i1 %c) nounwind
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a1, 1
 ; RV64I-NEXT:    li a2, 0
-; RV64I-NEXT:    call __atomic_fetch_xor_4@plt
+; RV64I-NEXT:    call __atomic_fetch_xor_4
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    sext.w a0, a0
@@ -4842,7 +4842,7 @@ define signext i32 @atomicrmw_max_i32_monotonic_crossbb(ptr %a, i1 %c) nounwind
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:    li a4, 0
-; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_4
 ; RV32I-NEXT:    lw a1, 4(sp)
 ; RV32I-NEXT:    bnez a0, .LBB60_8
 ; RV32I-NEXT:  .LBB60_3: # %atomicrmw.start
@@ -4905,7 +4905,7 @@ define signext i32 @atomicrmw_max_i32_monotonic_crossbb(ptr %a, i1 %c) nounwind
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    li a3, 0
 ; RV64I-NEXT:    li a4, 0
-; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_4
 ; RV64I-NEXT:    lw a1, 12(sp)
 ; RV64I-NEXT:    bnez a0, .LBB60_8
 ; RV64I-NEXT:  .LBB60_3: # %atomicrmw.start
@@ -4989,7 +4989,7 @@ define signext i32 @atomicrmw_min_i32_monotonic_crossbb(ptr %a, i1 %c) nounwind
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:    li a4, 0
-; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_4
 ; RV32I-NEXT:    lw a1, 0(sp)
 ; RV32I-NEXT:    bnez a0, .LBB61_8
 ; RV32I-NEXT:  .LBB61_3: # %atomicrmw.start
@@ -5055,7 +5055,7 @@ define signext i32 @atomicrmw_min_i32_monotonic_crossbb(ptr %a, i1 %c) nounwind
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    li a3, 0
 ; RV64I-NEXT:    li a4, 0
-; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_4
 ; RV64I-NEXT:    lw a1, 4(sp)
 ; RV64I-NEXT:    bnez a0, .LBB61_8
 ; RV64I-NEXT:  .LBB61_3: # %atomicrmw.start
@@ -5138,7 +5138,7 @@ define signext i32 @atomicrmw_umax_i32_monotonic_crossbb(ptr %a, i1 %c) nounwind
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:    li a4, 0
-; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_4
 ; RV32I-NEXT:    lw a1, 4(sp)
 ; RV32I-NEXT:    beqz a0, .LBB62_2
 ; RV32I-NEXT:    j .LBB62_4
@@ -5188,7 +5188,7 @@ define signext i32 @atomicrmw_umax_i32_monotonic_crossbb(ptr %a, i1 %c) nounwind
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    li a3, 0
 ; RV64I-NEXT:    li a4, 0
-; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_4
 ; RV64I-NEXT:    lw a1, 12(sp)
 ; RV64I-NEXT:    bnez a0, .LBB62_6
 ; RV64I-NEXT:  .LBB62_3: # %atomicrmw.start
@@ -5266,7 +5266,7 @@ define signext i32 @atomicrmw_umin_i32_monotonic_crossbb(ptr %a, i1 %c) nounwind
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:    li a4, 0
-; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_4
 ; RV32I-NEXT:    lw a1, 0(sp)
 ; RV32I-NEXT:    bnez a0, .LBB63_8
 ; RV32I-NEXT:  .LBB63_3: # %atomicrmw.start
@@ -5334,7 +5334,7 @@ define signext i32 @atomicrmw_umin_i32_monotonic_crossbb(ptr %a, i1 %c) nounwind
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    li a3, 0
 ; RV64I-NEXT:    li a4, 0
-; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_4
 ; RV64I-NEXT:    lw a1, 4(sp)
 ; RV64I-NEXT:    bnez a0, .LBB63_8
 ; RV64I-NEXT:  .LBB63_3: # %atomicrmw.start
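
The i64 cases above take the libcall path even for RV32IA, since the A
extension only provides 32-bit AMOs on RV32, and the min/max/umin/umax
tests expand to a compare-exchange loop around
`__atomic_compare_exchange_{4,8}`. The shape of these inputs (signatures
come from the hunk headers; the body is a reconstruction):

  define signext i64 @atomicrmw_max_i64_monotonic(ptr %a, i64 %b) nounwind {
    %1 = atomicrmw max ptr %a, i64 %b monotonic
    ret i64 %1
  }
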
diff --git a/llvm/test/CodeGen/RISCV/atomicrmw-uinc-udec-wrap.ll b/llvm/test/CodeGen/RISCV/atomicrmw-uinc-udec-wrap.ll
index 5f15a9c0671021..aa962d68fc5285 100644
--- a/llvm/test/CodeGen/RISCV/atomicrmw-uinc-udec-wrap.ll
+++ b/llvm/test/CodeGen/RISCV/atomicrmw-uinc-udec-wrap.ll
@@ -39,7 +39,7 @@ define i8 @atomicrmw_uinc_wrap_i8(ptr %ptr, i8 %val) {
 ; RV32I-NEXT:    li a3, 5
 ; RV32I-NEXT:    li a4, 5
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_1
 ; RV32I-NEXT:    lbu a3, 3(sp)
 ; RV32I-NEXT:    beqz a0, .LBB0_1
 ; RV32I-NEXT:  # %bb.2: # %atomicrmw.end
@@ -113,7 +113,7 @@ define i8 @atomicrmw_uinc_wrap_i8(ptr %ptr, i8 %val) {
 ; RV64I-NEXT:    li a3, 5
 ; RV64I-NEXT:    li a4, 5
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_1
 ; RV64I-NEXT:    lbu a3, 7(sp)
 ; RV64I-NEXT:    beqz a0, .LBB0_1
 ; RV64I-NEXT:  # %bb.2: # %atomicrmw.end
@@ -195,7 +195,7 @@ define i16 @atomicrmw_uinc_wrap_i16(ptr %ptr, i16 %val) {
 ; RV32I-NEXT:    li a3, 5
 ; RV32I-NEXT:    li a4, 5
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_2
 ; RV32I-NEXT:    lh a3, 14(sp)
 ; RV32I-NEXT:    beqz a0, .LBB1_1
 ; RV32I-NEXT:  # %bb.2: # %atomicrmw.end
@@ -275,7 +275,7 @@ define i16 @atomicrmw_uinc_wrap_i16(ptr %ptr, i16 %val) {
 ; RV64I-NEXT:    li a3, 5
 ; RV64I-NEXT:    li a4, 5
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_2
 ; RV64I-NEXT:    lh a3, 14(sp)
 ; RV64I-NEXT:    beqz a0, .LBB1_1
 ; RV64I-NEXT:  # %bb.2: # %atomicrmw.end
@@ -354,7 +354,7 @@ define i32 @atomicrmw_uinc_wrap_i32(ptr %ptr, i32 %val) {
 ; RV32I-NEXT:    li a3, 5
 ; RV32I-NEXT:    li a4, 5
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_4
 ; RV32I-NEXT:    lw a3, 0(sp)
 ; RV32I-NEXT:    beqz a0, .LBB2_1
 ; RV32I-NEXT:  # %bb.2: # %atomicrmw.end
@@ -414,7 +414,7 @@ define i32 @atomicrmw_uinc_wrap_i32(ptr %ptr, i32 %val) {
 ; RV64I-NEXT:    li a3, 5
 ; RV64I-NEXT:    li a4, 5
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_4
 ; RV64I-NEXT:    lw a3, 4(sp)
 ; RV64I-NEXT:    beqz a0, .LBB2_1
 ; RV64I-NEXT:  # %bb.2: # %atomicrmw.end
@@ -490,7 +490,7 @@ define i64 @atomicrmw_uinc_wrap_i64(ptr %ptr, i64 %val) {
 ; RV32I-NEXT:    li a4, 5
 ; RV32I-NEXT:    li a5, 5
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_8
 ; RV32I-NEXT:    lw a5, 12(sp)
 ; RV32I-NEXT:    lw a4, 8(sp)
 ; RV32I-NEXT:    bnez a0, .LBB3_5
@@ -545,7 +545,7 @@ define i64 @atomicrmw_uinc_wrap_i64(ptr %ptr, i64 %val) {
 ; RV32IA-NEXT:    li a4, 5
 ; RV32IA-NEXT:    li a5, 5
 ; RV32IA-NEXT:    mv a0, s0
-; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32IA-NEXT:    call __atomic_compare_exchange_8
 ; RV32IA-NEXT:    lw a5, 12(sp)
 ; RV32IA-NEXT:    lw a4, 8(sp)
 ; RV32IA-NEXT:    bnez a0, .LBB3_5
@@ -589,7 +589,7 @@ define i64 @atomicrmw_uinc_wrap_i64(ptr %ptr, i64 %val) {
 ; RV64I-NEXT:    li a3, 5
 ; RV64I-NEXT:    li a4, 5
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_8
 ; RV64I-NEXT:    ld a3, 0(sp)
 ; RV64I-NEXT:    beqz a0, .LBB3_1
 ; RV64I-NEXT:  # %bb.2: # %atomicrmw.end
@@ -653,7 +653,7 @@ define i8 @atomicrmw_udec_wrap_i8(ptr %ptr, i8 %val) {
 ; RV32I-NEXT:    li a3, 5
 ; RV32I-NEXT:    li a4, 5
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_1
 ; RV32I-NEXT:    lbu a3, 15(sp)
 ; RV32I-NEXT:    bnez a0, .LBB4_4
 ; RV32I-NEXT:  .LBB4_2: # %atomicrmw.start
@@ -749,7 +749,7 @@ define i8 @atomicrmw_udec_wrap_i8(ptr %ptr, i8 %val) {
 ; RV64I-NEXT:    li a3, 5
 ; RV64I-NEXT:    li a4, 5
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_1
 ; RV64I-NEXT:    lbu a3, 15(sp)
 ; RV64I-NEXT:    bnez a0, .LBB4_4
 ; RV64I-NEXT:  .LBB4_2: # %atomicrmw.start
@@ -853,7 +853,7 @@ define i16 @atomicrmw_udec_wrap_i16(ptr %ptr, i16 %val) {
 ; RV32I-NEXT:    li a3, 5
 ; RV32I-NEXT:    li a4, 5
 ; RV32I-NEXT:    mv a0, s1
-; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_2
 ; RV32I-NEXT:    lh a1, 10(sp)
 ; RV32I-NEXT:    bnez a0, .LBB5_4
 ; RV32I-NEXT:  .LBB5_2: # %atomicrmw.start
@@ -955,7 +955,7 @@ define i16 @atomicrmw_udec_wrap_i16(ptr %ptr, i16 %val) {
 ; RV64I-NEXT:    li a3, 5
 ; RV64I-NEXT:    li a4, 5
 ; RV64I-NEXT:    mv a0, s1
-; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_2
 ; RV64I-NEXT:    lh a1, 6(sp)
 ; RV64I-NEXT:    bnez a0, .LBB5_4
 ; RV64I-NEXT:  .LBB5_2: # %atomicrmw.start
@@ -1054,7 +1054,7 @@ define i32 @atomicrmw_udec_wrap_i32(ptr %ptr, i32 %val) {
 ; RV32I-NEXT:    li a3, 5
 ; RV32I-NEXT:    li a4, 5
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_4
 ; RV32I-NEXT:    lw a3, 0(sp)
 ; RV32I-NEXT:    bnez a0, .LBB6_4
 ; RV32I-NEXT:  .LBB6_2: # %atomicrmw.start
@@ -1135,7 +1135,7 @@ define i32 @atomicrmw_udec_wrap_i32(ptr %ptr, i32 %val) {
 ; RV64I-NEXT:    li a3, 5
 ; RV64I-NEXT:    li a4, 5
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_4
 ; RV64I-NEXT:    lw a3, 12(sp)
 ; RV64I-NEXT:    bnez a0, .LBB6_4
 ; RV64I-NEXT:  .LBB6_2: # %atomicrmw.start
@@ -1224,7 +1224,7 @@ define i64 @atomicrmw_udec_wrap_i64(ptr %ptr, i64 %val) {
 ; RV32I-NEXT:    li a4, 5
 ; RV32I-NEXT:    li a5, 5
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32I-NEXT:    call __atomic_compare_exchange_8
 ; RV32I-NEXT:    lw a5, 12(sp)
 ; RV32I-NEXT:    lw a4, 8(sp)
 ; RV32I-NEXT:    bnez a0, .LBB7_7
@@ -1287,7 +1287,7 @@ define i64 @atomicrmw_udec_wrap_i64(ptr %ptr, i64 %val) {
 ; RV32IA-NEXT:    li a4, 5
 ; RV32IA-NEXT:    li a5, 5
 ; RV32IA-NEXT:    mv a0, s0
-; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32IA-NEXT:    call __atomic_compare_exchange_8
 ; RV32IA-NEXT:    lw a5, 12(sp)
 ; RV32IA-NEXT:    lw a4, 8(sp)
 ; RV32IA-NEXT:    bnez a0, .LBB7_7
@@ -1345,7 +1345,7 @@ define i64 @atomicrmw_udec_wrap_i64(ptr %ptr, i64 %val) {
 ; RV64I-NEXT:    li a3, 5
 ; RV64I-NEXT:    li a4, 5
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
+; RV64I-NEXT:    call __atomic_compare_exchange_8
 ; RV64I-NEXT:    ld a3, 0(sp)
 ; RV64I-NEXT:    bnez a0, .LBB7_4
 ; RV64I-NEXT:  .LBB7_2: # %atomicrmw.start
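
atomicrmw-uinc-udec-wrap.ll above exercises `uinc_wrap`/`udec_wrap`,
which always expand to a compare-exchange loop; the `li a3, 5`/`li a4, 5`
arguments are the seq_cst success/failure orders (__ATOMIC_SEQ_CST = 5)
passed to `__atomic_compare_exchange_N`. Reconstructed shape of one test
(only the signature appears in the hunk header):

  define i8 @atomicrmw_uinc_wrap_i8(ptr %ptr, i8 %val) {
    %result = atomicrmw uinc_wrap ptr %ptr, i8 %val seq_cst
    ret i8 %result
  }
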
diff --git a/llvm/test/CodeGen/RISCV/bf16-promote.ll b/llvm/test/CodeGen/RISCV/bf16-promote.ll
index c8fc84729da79e..c17450a80de963 100644
--- a/llvm/test/CodeGen/RISCV/bf16-promote.ll
+++ b/llvm/test/CodeGen/RISCV/bf16-promote.ll
@@ -45,7 +45,7 @@ define void @test_fptrunc_float(float %f, ptr %p) nounwind {
 ; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
 ; RV64-NEXT:    mv s0, a0
-; RV64-NEXT:    call __truncsfbf2@plt
+; RV64-NEXT:    call __truncsfbf2
 ; RV64-NEXT:    fmv.x.w a0, fa0
 ; RV64-NEXT:    sh a0, 0(s0)
 ; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -59,7 +59,7 @@ define void @test_fptrunc_float(float %f, ptr %p) nounwind {
 ; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    mv s0, a0
-; RV32-NEXT:    call __truncsfbf2@plt
+; RV32-NEXT:    call __truncsfbf2
 ; RV32-NEXT:    fmv.x.w a0, fa0
 ; RV32-NEXT:    sh a0, 0(s0)
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -78,7 +78,7 @@ define void @test_fptrunc_double(double %d, ptr %p) nounwind {
 ; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
 ; RV64-NEXT:    mv s0, a0
-; RV64-NEXT:    call __truncdfbf2@plt
+; RV64-NEXT:    call __truncdfbf2
 ; RV64-NEXT:    fmv.x.w a0, fa0
 ; RV64-NEXT:    sh a0, 0(s0)
 ; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -92,7 +92,7 @@ define void @test_fptrunc_double(double %d, ptr %p) nounwind {
 ; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    mv s0, a0
-; RV32-NEXT:    call __truncdfbf2@plt
+; RV32-NEXT:    call __truncdfbf2
 ; RV32-NEXT:    fmv.x.w a0, fa0
 ; RV32-NEXT:    sh a0, 0(s0)
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -118,7 +118,7 @@ define void @test_fadd(ptr %p, ptr %q) nounwind {
 ; RV64-NEXT:    slli a0, a0, 16
 ; RV64-NEXT:    fmv.w.x fa4, a0
 ; RV64-NEXT:    fadd.s fa0, fa4, fa5
-; RV64-NEXT:    call __truncsfbf2@plt
+; RV64-NEXT:    call __truncsfbf2
 ; RV64-NEXT:    fmv.x.w a0, fa0
 ; RV64-NEXT:    sh a0, 0(s0)
 ; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -139,7 +139,7 @@ define void @test_fadd(ptr %p, ptr %q) nounwind {
 ; RV32-NEXT:    slli a0, a0, 16
 ; RV32-NEXT:    fmv.w.x fa4, a0
 ; RV32-NEXT:    fadd.s fa0, fa4, fa5
-; RV32-NEXT:    call __truncsfbf2@plt
+; RV32-NEXT:    call __truncsfbf2
 ; RV32-NEXT:    fmv.x.w a0, fa0
 ; RV32-NEXT:    sh a0, 0(s0)
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -151,4 +151,4 @@ define void @test_fadd(ptr %p, ptr %q) nounwind {
   %r = fadd bfloat %a, %b
   store bfloat %r, ptr %p
   ret void
-}
\ No newline at end of file
+}
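
bf16-promote.ll above has no native bfloat arithmetic, so every
truncation to bfloat becomes a `__truncsfbf2`/`__truncdfbf2` libcall.
Reconstructed input for test_fptrunc_float (signature from the hunk
header; the body is a sketch):

  define void @test_fptrunc_float(float %f, ptr %p) nounwind {
    %a = fptrunc float %f to bfloat
    store bfloat %a, ptr %p
    ret void
  }
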
diff --git a/llvm/test/CodeGen/RISCV/bfloat-br-fcmp.ll b/llvm/test/CodeGen/RISCV/bfloat-br-fcmp.ll
index 24f26af9777019..165aa5f484f719 100644
--- a/llvm/test/CodeGen/RISCV/bfloat-br-fcmp.ll
+++ b/llvm/test/CodeGen/RISCV/bfloat-br-fcmp.ll
@@ -18,7 +18,7 @@ define void @br_fcmp_false(bfloat %a, bfloat %b) nounwind {
 ; RV32IZFBFMIN-NEXT:  .LBB0_2: # %if.else
 ; RV32IZFBFMIN-NEXT:    addi sp, sp, -16
 ; RV32IZFBFMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFBFMIN-NEXT:    call abort@plt
+; RV32IZFBFMIN-NEXT:    call abort
 ;
 ; RV64IZFBFMIN-LABEL: br_fcmp_false:
 ; RV64IZFBFMIN:       # %bb.0:
@@ -29,7 +29,7 @@ define void @br_fcmp_false(bfloat %a, bfloat %b) nounwind {
 ; RV64IZFBFMIN-NEXT:  .LBB0_2: # %if.else
 ; RV64IZFBFMIN-NEXT:    addi sp, sp, -16
 ; RV64IZFBFMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFBFMIN-NEXT:    call abort@plt
+; RV64IZFBFMIN-NEXT:    call abort
   %1 = fcmp false bfloat %a, %b
   br i1 %1, label %if.then, label %if.else
 if.then:
@@ -51,7 +51,7 @@ define void @br_fcmp_oeq(bfloat %a, bfloat %b) nounwind {
 ; RV32IZFBFMIN-NEXT:  .LBB1_2: # %if.then
 ; RV32IZFBFMIN-NEXT:    addi sp, sp, -16
 ; RV32IZFBFMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFBFMIN-NEXT:    call abort@plt
+; RV32IZFBFMIN-NEXT:    call abort
 ;
 ; RV64IZFBFMIN-LABEL: br_fcmp_oeq:
 ; RV64IZFBFMIN:       # %bb.0:
@@ -64,7 +64,7 @@ define void @br_fcmp_oeq(bfloat %a, bfloat %b) nounwind {
 ; RV64IZFBFMIN-NEXT:  .LBB1_2: # %if.then
 ; RV64IZFBFMIN-NEXT:    addi sp, sp, -16
 ; RV64IZFBFMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFBFMIN-NEXT:    call abort@plt
+; RV64IZFBFMIN-NEXT:    call abort
   %1 = fcmp oeq bfloat %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -86,7 +86,7 @@ define void @br_fcmp_oeq_alt(bfloat %a, bfloat %b) nounwind {
 ; RV32IZFBFMIN-NEXT:  .LBB2_2: # %if.then
 ; RV32IZFBFMIN-NEXT:    addi sp, sp, -16
 ; RV32IZFBFMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFBFMIN-NEXT:    call abort@plt
+; RV32IZFBFMIN-NEXT:    call abort
 ;
 ; RV64IZFBFMIN-LABEL: br_fcmp_oeq_alt:
 ; RV64IZFBFMIN:       # %bb.0:
@@ -99,7 +99,7 @@ define void @br_fcmp_oeq_alt(bfloat %a, bfloat %b) nounwind {
 ; RV64IZFBFMIN-NEXT:  .LBB2_2: # %if.then
 ; RV64IZFBFMIN-NEXT:    addi sp, sp, -16
 ; RV64IZFBFMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFBFMIN-NEXT:    call abort@plt
+; RV64IZFBFMIN-NEXT:    call abort
   %1 = fcmp oeq bfloat %a, %b
   br i1 %1, label %if.then, label %if.else
 if.then:
@@ -121,7 +121,7 @@ define void @br_fcmp_ogt(bfloat %a, bfloat %b) nounwind {
 ; RV32IZFBFMIN-NEXT:  .LBB3_2: # %if.then
 ; RV32IZFBFMIN-NEXT:    addi sp, sp, -16
 ; RV32IZFBFMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFBFMIN-NEXT:    call abort@plt
+; RV32IZFBFMIN-NEXT:    call abort
 ;
 ; RV64IZFBFMIN-LABEL: br_fcmp_ogt:
 ; RV64IZFBFMIN:       # %bb.0:
@@ -134,7 +134,7 @@ define void @br_fcmp_ogt(bfloat %a, bfloat %b) nounwind {
 ; RV64IZFBFMIN-NEXT:  .LBB3_2: # %if.then
 ; RV64IZFBFMIN-NEXT:    addi sp, sp, -16
 ; RV64IZFBFMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFBFMIN-NEXT:    call abort@plt
+; RV64IZFBFMIN-NEXT:    call abort
   %1 = fcmp ogt bfloat %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -156,7 +156,7 @@ define void @br_fcmp_oge(bfloat %a, bfloat %b) nounwind {
 ; RV32IZFBFMIN-NEXT:  .LBB4_2: # %if.then
 ; RV32IZFBFMIN-NEXT:    addi sp, sp, -16
 ; RV32IZFBFMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFBFMIN-NEXT:    call abort@plt
+; RV32IZFBFMIN-NEXT:    call abort
 ;
 ; RV64IZFBFMIN-LABEL: br_fcmp_oge:
 ; RV64IZFBFMIN:       # %bb.0:
@@ -169,7 +169,7 @@ define void @br_fcmp_oge(bfloat %a, bfloat %b) nounwind {
 ; RV64IZFBFMIN-NEXT:  .LBB4_2: # %if.then
 ; RV64IZFBFMIN-NEXT:    addi sp, sp, -16
 ; RV64IZFBFMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFBFMIN-NEXT:    call abort@plt
+; RV64IZFBFMIN-NEXT:    call abort
   %1 = fcmp oge bfloat %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -191,7 +191,7 @@ define void @br_fcmp_olt(bfloat %a, bfloat %b) nounwind {
 ; RV32IZFBFMIN-NEXT:  .LBB5_2: # %if.then
 ; RV32IZFBFMIN-NEXT:    addi sp, sp, -16
 ; RV32IZFBFMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFBFMIN-NEXT:    call abort@plt
+; RV32IZFBFMIN-NEXT:    call abort
 ;
 ; RV64IZFBFMIN-LABEL: br_fcmp_olt:
 ; RV64IZFBFMIN:       # %bb.0:
@@ -204,7 +204,7 @@ define void @br_fcmp_olt(bfloat %a, bfloat %b) nounwind {
 ; RV64IZFBFMIN-NEXT:  .LBB5_2: # %if.then
 ; RV64IZFBFMIN-NEXT:    addi sp, sp, -16
 ; RV64IZFBFMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFBFMIN-NEXT:    call abort@plt
+; RV64IZFBFMIN-NEXT:    call abort
   %1 = fcmp olt bfloat %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -226,7 +226,7 @@ define void @br_fcmp_ole(bfloat %a, bfloat %b) nounwind {
 ; RV32IZFBFMIN-NEXT:  .LBB6_2: # %if.then
 ; RV32IZFBFMIN-NEXT:    addi sp, sp, -16
 ; RV32IZFBFMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFBFMIN-NEXT:    call abort@plt
+; RV32IZFBFMIN-NEXT:    call abort
 ;
 ; RV64IZFBFMIN-LABEL: br_fcmp_ole:
 ; RV64IZFBFMIN:       # %bb.0:
@@ -239,7 +239,7 @@ define void @br_fcmp_ole(bfloat %a, bfloat %b) nounwind {
 ; RV64IZFBFMIN-NEXT:  .LBB6_2: # %if.then
 ; RV64IZFBFMIN-NEXT:    addi sp, sp, -16
 ; RV64IZFBFMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFBFMIN-NEXT:    call abort@plt
+; RV64IZFBFMIN-NEXT:    call abort
   %1 = fcmp ole bfloat %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -263,7 +263,7 @@ define void @br_fcmp_one(bfloat %a, bfloat %b) nounwind {
 ; RV32IZFBFMIN-NEXT:  .LBB7_2: # %if.then
 ; RV32IZFBFMIN-NEXT:    addi sp, sp, -16
 ; RV32IZFBFMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFBFMIN-NEXT:    call abort@plt
+; RV32IZFBFMIN-NEXT:    call abort
 ;
 ; RV64IZFBFMIN-LABEL: br_fcmp_one:
 ; RV64IZFBFMIN:       # %bb.0:
@@ -278,7 +278,7 @@ define void @br_fcmp_one(bfloat %a, bfloat %b) nounwind {
 ; RV64IZFBFMIN-NEXT:  .LBB7_2: # %if.then
 ; RV64IZFBFMIN-NEXT:    addi sp, sp, -16
 ; RV64IZFBFMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFBFMIN-NEXT:    call abort@plt
+; RV64IZFBFMIN-NEXT:    call abort
   %1 = fcmp one bfloat %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -302,7 +302,7 @@ define void @br_fcmp_ord(bfloat %a, bfloat %b) nounwind {
 ; RV32IZFBFMIN-NEXT:  .LBB8_2: # %if.then
 ; RV32IZFBFMIN-NEXT:    addi sp, sp, -16
 ; RV32IZFBFMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFBFMIN-NEXT:    call abort@plt
+; RV32IZFBFMIN-NEXT:    call abort
 ;
 ; RV64IZFBFMIN-LABEL: br_fcmp_ord:
 ; RV64IZFBFMIN:       # %bb.0:
@@ -317,7 +317,7 @@ define void @br_fcmp_ord(bfloat %a, bfloat %b) nounwind {
 ; RV64IZFBFMIN-NEXT:  .LBB8_2: # %if.then
 ; RV64IZFBFMIN-NEXT:    addi sp, sp, -16
 ; RV64IZFBFMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFBFMIN-NEXT:    call abort@plt
+; RV64IZFBFMIN-NEXT:    call abort
   %1 = fcmp ord bfloat %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -341,7 +341,7 @@ define void @br_fcmp_ueq(bfloat %a, bfloat %b) nounwind {
 ; RV32IZFBFMIN-NEXT:  .LBB9_2: # %if.then
 ; RV32IZFBFMIN-NEXT:    addi sp, sp, -16
 ; RV32IZFBFMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFBFMIN-NEXT:    call abort@plt
+; RV32IZFBFMIN-NEXT:    call abort
 ;
 ; RV64IZFBFMIN-LABEL: br_fcmp_ueq:
 ; RV64IZFBFMIN:       # %bb.0:
@@ -356,7 +356,7 @@ define void @br_fcmp_ueq(bfloat %a, bfloat %b) nounwind {
 ; RV64IZFBFMIN-NEXT:  .LBB9_2: # %if.then
 ; RV64IZFBFMIN-NEXT:    addi sp, sp, -16
 ; RV64IZFBFMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFBFMIN-NEXT:    call abort@plt
+; RV64IZFBFMIN-NEXT:    call abort
   %1 = fcmp ueq bfloat %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -378,7 +378,7 @@ define void @br_fcmp_ugt(bfloat %a, bfloat %b) nounwind {
 ; RV32IZFBFMIN-NEXT:  .LBB10_2: # %if.then
 ; RV32IZFBFMIN-NEXT:    addi sp, sp, -16
 ; RV32IZFBFMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFBFMIN-NEXT:    call abort@plt
+; RV32IZFBFMIN-NEXT:    call abort
 ;
 ; RV64IZFBFMIN-LABEL: br_fcmp_ugt:
 ; RV64IZFBFMIN:       # %bb.0:
@@ -391,7 +391,7 @@ define void @br_fcmp_ugt(bfloat %a, bfloat %b) nounwind {
 ; RV64IZFBFMIN-NEXT:  .LBB10_2: # %if.then
 ; RV64IZFBFMIN-NEXT:    addi sp, sp, -16
 ; RV64IZFBFMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFBFMIN-NEXT:    call abort@plt
+; RV64IZFBFMIN-NEXT:    call abort
   %1 = fcmp ugt bfloat %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -413,7 +413,7 @@ define void @br_fcmp_uge(bfloat %a, bfloat %b) nounwind {
 ; RV32IZFBFMIN-NEXT:  .LBB11_2: # %if.then
 ; RV32IZFBFMIN-NEXT:    addi sp, sp, -16
 ; RV32IZFBFMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFBFMIN-NEXT:    call abort@plt
+; RV32IZFBFMIN-NEXT:    call abort
 ;
 ; RV64IZFBFMIN-LABEL: br_fcmp_uge:
 ; RV64IZFBFMIN:       # %bb.0:
@@ -426,7 +426,7 @@ define void @br_fcmp_uge(bfloat %a, bfloat %b) nounwind {
 ; RV64IZFBFMIN-NEXT:  .LBB11_2: # %if.then
 ; RV64IZFBFMIN-NEXT:    addi sp, sp, -16
 ; RV64IZFBFMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFBFMIN-NEXT:    call abort@plt
+; RV64IZFBFMIN-NEXT:    call abort
   %1 = fcmp uge bfloat %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -448,7 +448,7 @@ define void @br_fcmp_ult(bfloat %a, bfloat %b) nounwind {
 ; RV32IZFBFMIN-NEXT:  .LBB12_2: # %if.then
 ; RV32IZFBFMIN-NEXT:    addi sp, sp, -16
 ; RV32IZFBFMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFBFMIN-NEXT:    call abort@plt
+; RV32IZFBFMIN-NEXT:    call abort
 ;
 ; RV64IZFBFMIN-LABEL: br_fcmp_ult:
 ; RV64IZFBFMIN:       # %bb.0:
@@ -461,7 +461,7 @@ define void @br_fcmp_ult(bfloat %a, bfloat %b) nounwind {
 ; RV64IZFBFMIN-NEXT:  .LBB12_2: # %if.then
 ; RV64IZFBFMIN-NEXT:    addi sp, sp, -16
 ; RV64IZFBFMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFBFMIN-NEXT:    call abort@plt
+; RV64IZFBFMIN-NEXT:    call abort
   %1 = fcmp ult bfloat %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -483,7 +483,7 @@ define void @br_fcmp_ule(bfloat %a, bfloat %b) nounwind {
 ; RV32IZFBFMIN-NEXT:  .LBB13_2: # %if.then
 ; RV32IZFBFMIN-NEXT:    addi sp, sp, -16
 ; RV32IZFBFMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFBFMIN-NEXT:    call abort@plt
+; RV32IZFBFMIN-NEXT:    call abort
 ;
 ; RV64IZFBFMIN-LABEL: br_fcmp_ule:
 ; RV64IZFBFMIN:       # %bb.0:
@@ -496,7 +496,7 @@ define void @br_fcmp_ule(bfloat %a, bfloat %b) nounwind {
 ; RV64IZFBFMIN-NEXT:  .LBB13_2: # %if.then
 ; RV64IZFBFMIN-NEXT:    addi sp, sp, -16
 ; RV64IZFBFMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFBFMIN-NEXT:    call abort@plt
+; RV64IZFBFMIN-NEXT:    call abort
   %1 = fcmp ule bfloat %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -518,7 +518,7 @@ define void @br_fcmp_une(bfloat %a, bfloat %b) nounwind {
 ; RV32IZFBFMIN-NEXT:  .LBB14_2: # %if.then
 ; RV32IZFBFMIN-NEXT:    addi sp, sp, -16
 ; RV32IZFBFMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFBFMIN-NEXT:    call abort@plt
+; RV32IZFBFMIN-NEXT:    call abort
 ;
 ; RV64IZFBFMIN-LABEL: br_fcmp_une:
 ; RV64IZFBFMIN:       # %bb.0:
@@ -531,7 +531,7 @@ define void @br_fcmp_une(bfloat %a, bfloat %b) nounwind {
 ; RV64IZFBFMIN-NEXT:  .LBB14_2: # %if.then
 ; RV64IZFBFMIN-NEXT:    addi sp, sp, -16
 ; RV64IZFBFMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFBFMIN-NEXT:    call abort@plt
+; RV64IZFBFMIN-NEXT:    call abort
   %1 = fcmp une bfloat %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -555,7 +555,7 @@ define void @br_fcmp_uno(bfloat %a, bfloat %b) nounwind {
 ; RV32IZFBFMIN-NEXT:  .LBB15_2: # %if.then
 ; RV32IZFBFMIN-NEXT:    addi sp, sp, -16
 ; RV32IZFBFMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFBFMIN-NEXT:    call abort@plt
+; RV32IZFBFMIN-NEXT:    call abort
 ;
 ; RV64IZFBFMIN-LABEL: br_fcmp_uno:
 ; RV64IZFBFMIN:       # %bb.0:
@@ -570,7 +570,7 @@ define void @br_fcmp_uno(bfloat %a, bfloat %b) nounwind {
 ; RV64IZFBFMIN-NEXT:  .LBB15_2: # %if.then
 ; RV64IZFBFMIN-NEXT:    addi sp, sp, -16
 ; RV64IZFBFMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFBFMIN-NEXT:    call abort@plt
+; RV64IZFBFMIN-NEXT:    call abort
   %1 = fcmp uno bfloat %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -590,7 +590,7 @@ define void @br_fcmp_true(bfloat %a, bfloat %b) nounwind {
 ; RV32IZFBFMIN-NEXT:  .LBB16_2: # %if.then
 ; RV32IZFBFMIN-NEXT:    addi sp, sp, -16
 ; RV32IZFBFMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFBFMIN-NEXT:    call abort@plt
+; RV32IZFBFMIN-NEXT:    call abort
 ;
 ; RV64IZFBFMIN-LABEL: br_fcmp_true:
 ; RV64IZFBFMIN:       # %bb.0:
@@ -601,7 +601,7 @@ define void @br_fcmp_true(bfloat %a, bfloat %b) nounwind {
 ; RV64IZFBFMIN-NEXT:  .LBB16_2: # %if.then
 ; RV64IZFBFMIN-NEXT:    addi sp, sp, -16
 ; RV64IZFBFMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFBFMIN-NEXT:    call abort@plt
+; RV64IZFBFMIN-NEXT:    call abort
   %1 = fcmp true bfloat %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
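
bfloat-br-fcmp.ll above covers every fcmp predicate on bfloat; the taken
branch of each test ends in a noreturn `call abort`, which is where the
`@plt` suffix used to be printed. The IR shape, mostly visible in the
hunk context (the `tail call`/`unreachable` tail is reconstructed):

  declare void @abort()

  define void @br_fcmp_oeq(bfloat %a, bfloat %b) nounwind {
    %1 = fcmp oeq bfloat %a, %b
    br i1 %1, label %if.then, label %if.else
  if.else:
    ret void
  if.then:
    tail call void @abort()
    unreachable
  }
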
diff --git a/llvm/test/CodeGen/RISCV/bfloat-convert.ll b/llvm/test/CodeGen/RISCV/bfloat-convert.ll
index bfa2c3bb4a8ba6..d533607ad54e38 100644
--- a/llvm/test/CodeGen/RISCV/bfloat-convert.ll
+++ b/llvm/test/CodeGen/RISCV/bfloat-convert.ll
@@ -419,7 +419,7 @@ define i64 @fcvt_l_bf16(bfloat %a) nounwind {
 ; CHECK32ZFBFMIN-NEXT:    addi sp, sp, -16
 ; CHECK32ZFBFMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; CHECK32ZFBFMIN-NEXT:    fcvt.s.bf16 fa0, fa0
-; CHECK32ZFBFMIN-NEXT:    call __fixsfdi@plt
+; CHECK32ZFBFMIN-NEXT:    call __fixsfdi
 ; CHECK32ZFBFMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; CHECK32ZFBFMIN-NEXT:    addi sp, sp, 16
 ; CHECK32ZFBFMIN-NEXT:    ret
@@ -431,7 +431,7 @@ define i64 @fcvt_l_bf16(bfloat %a) nounwind {
 ; RV32ID-NEXT:    fmv.x.w a0, fa0
 ; RV32ID-NEXT:    slli a0, a0, 16
 ; RV32ID-NEXT:    fmv.w.x fa0, a0
-; RV32ID-NEXT:    call __fixsfdi@plt
+; RV32ID-NEXT:    call __fixsfdi
 ; RV32ID-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32ID-NEXT:    addi sp, sp, 16
 ; RV32ID-NEXT:    ret
@@ -465,7 +465,7 @@ define i64 @fcvt_l_bf16_sat(bfloat %a) nounwind {
 ; RV32IZFBFMIN-NEXT:    fmv.w.x fa5, a0
 ; RV32IZFBFMIN-NEXT:    fle.s s0, fa5, fs0
 ; RV32IZFBFMIN-NEXT:    fmv.s fa0, fs0
-; RV32IZFBFMIN-NEXT:    call __fixsfdi@plt
+; RV32IZFBFMIN-NEXT:    call __fixsfdi
 ; RV32IZFBFMIN-NEXT:    lui a4, 524288
 ; RV32IZFBFMIN-NEXT:    lui a2, 524288
 ; RV32IZFBFMIN-NEXT:    beqz s0, .LBB10_2
@@ -504,7 +504,7 @@ define i64 @fcvt_l_bf16_sat(bfloat %a) nounwind {
 ; R32IDZFBFMIN-NEXT:    fmv.w.x fa5, a0
 ; R32IDZFBFMIN-NEXT:    fle.s s0, fa5, fs0
 ; R32IDZFBFMIN-NEXT:    fmv.s fa0, fs0
-; R32IDZFBFMIN-NEXT:    call __fixsfdi@plt
+; R32IDZFBFMIN-NEXT:    call __fixsfdi
 ; R32IDZFBFMIN-NEXT:    lui a4, 524288
 ; R32IDZFBFMIN-NEXT:    lui a2, 524288
 ; R32IDZFBFMIN-NEXT:    beqz s0, .LBB10_2
@@ -545,7 +545,7 @@ define i64 @fcvt_l_bf16_sat(bfloat %a) nounwind {
 ; RV32ID-NEXT:    fmv.w.x fa5, a0
 ; RV32ID-NEXT:    fle.s s0, fa5, fs0
 ; RV32ID-NEXT:    fmv.s fa0, fs0
-; RV32ID-NEXT:    call __fixsfdi@plt
+; RV32ID-NEXT:    call __fixsfdi
 ; RV32ID-NEXT:    lui a4, 524288
 ; RV32ID-NEXT:    lui a2, 524288
 ; RV32ID-NEXT:    beqz s0, .LBB10_2
@@ -606,7 +606,7 @@ define i64 @fcvt_lu_bf16(bfloat %a) nounwind {
 ; CHECK32ZFBFMIN-NEXT:    addi sp, sp, -16
 ; CHECK32ZFBFMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; CHECK32ZFBFMIN-NEXT:    fcvt.s.bf16 fa0, fa0
-; CHECK32ZFBFMIN-NEXT:    call __fixunssfdi@plt
+; CHECK32ZFBFMIN-NEXT:    call __fixunssfdi
 ; CHECK32ZFBFMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; CHECK32ZFBFMIN-NEXT:    addi sp, sp, 16
 ; CHECK32ZFBFMIN-NEXT:    ret
@@ -618,7 +618,7 @@ define i64 @fcvt_lu_bf16(bfloat %a) nounwind {
 ; RV32ID-NEXT:    fmv.x.w a0, fa0
 ; RV32ID-NEXT:    slli a0, a0, 16
 ; RV32ID-NEXT:    fmv.w.x fa0, a0
-; RV32ID-NEXT:    call __fixunssfdi@plt
+; RV32ID-NEXT:    call __fixunssfdi
 ; RV32ID-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32ID-NEXT:    addi sp, sp, 16
 ; RV32ID-NEXT:    ret
@@ -655,7 +655,7 @@ define i64 @fcvt_lu_bf16_sat(bfloat %a) nounwind {
 ; CHECK32ZFBFMIN-NEXT:    fmv.w.x fa5, zero
 ; CHECK32ZFBFMIN-NEXT:    fle.s a0, fa5, fa0
 ; CHECK32ZFBFMIN-NEXT:    neg s1, a0
-; CHECK32ZFBFMIN-NEXT:    call __fixunssfdi@plt
+; CHECK32ZFBFMIN-NEXT:    call __fixunssfdi
 ; CHECK32ZFBFMIN-NEXT:    and a0, s1, a0
 ; CHECK32ZFBFMIN-NEXT:    or a0, s0, a0
 ; CHECK32ZFBFMIN-NEXT:    and a1, s1, a1
@@ -682,7 +682,7 @@ define i64 @fcvt_lu_bf16_sat(bfloat %a) nounwind {
 ; RV32ID-NEXT:    fmv.w.x fa5, zero
 ; RV32ID-NEXT:    fle.s a0, fa5, fa0
 ; RV32ID-NEXT:    neg s1, a0
-; RV32ID-NEXT:    call __fixunssfdi@plt
+; RV32ID-NEXT:    call __fixunssfdi
 ; RV32ID-NEXT:    and a0, s1, a0
 ; RV32ID-NEXT:    or a0, s0, a0
 ; RV32ID-NEXT:    and a1, s1, a1
@@ -736,7 +736,7 @@ define bfloat @fcvt_bf16_si(i16 %a) nounwind {
 ; RV32ID-NEXT:    slli a0, a0, 16
 ; RV32ID-NEXT:    srai a0, a0, 16
 ; RV32ID-NEXT:    fcvt.s.w fa0, a0
-; RV32ID-NEXT:    call __truncsfbf2@plt
+; RV32ID-NEXT:    call __truncsfbf2
 ; RV32ID-NEXT:    fmv.x.w a0, fa0
 ; RV32ID-NEXT:    lui a1, 1048560
 ; RV32ID-NEXT:    or a0, a0, a1
@@ -760,7 +760,7 @@ define bfloat @fcvt_bf16_si(i16 %a) nounwind {
 ; RV64ID-NEXT:    slli a0, a0, 48
 ; RV64ID-NEXT:    srai a0, a0, 48
 ; RV64ID-NEXT:    fcvt.s.w fa0, a0
-; RV64ID-NEXT:    call __truncsfbf2@plt
+; RV64ID-NEXT:    call __truncsfbf2
 ; RV64ID-NEXT:    fmv.x.w a0, fa0
 ; RV64ID-NEXT:    lui a1, 1048560
 ; RV64ID-NEXT:    or a0, a0, a1
@@ -784,7 +784,7 @@ define bfloat @fcvt_bf16_si_signext(i16 signext %a) nounwind {
 ; RV32ID-NEXT:    addi sp, sp, -16
 ; RV32ID-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32ID-NEXT:    fcvt.s.w fa0, a0
-; RV32ID-NEXT:    call __truncsfbf2@plt
+; RV32ID-NEXT:    call __truncsfbf2
 ; RV32ID-NEXT:    fmv.x.w a0, fa0
 ; RV32ID-NEXT:    lui a1, 1048560
 ; RV32ID-NEXT:    or a0, a0, a1
@@ -804,7 +804,7 @@ define bfloat @fcvt_bf16_si_signext(i16 signext %a) nounwind {
 ; RV64ID-NEXT:    addi sp, sp, -16
 ; RV64ID-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64ID-NEXT:    fcvt.s.w fa0, a0
-; RV64ID-NEXT:    call __truncsfbf2@plt
+; RV64ID-NEXT:    call __truncsfbf2
 ; RV64ID-NEXT:    fmv.x.w a0, fa0
 ; RV64ID-NEXT:    lui a1, 1048560
 ; RV64ID-NEXT:    or a0, a0, a1
@@ -832,7 +832,7 @@ define bfloat @fcvt_bf16_ui(i16 %a) nounwind {
 ; RV32ID-NEXT:    slli a0, a0, 16
 ; RV32ID-NEXT:    srli a0, a0, 16
 ; RV32ID-NEXT:    fcvt.s.wu fa0, a0
-; RV32ID-NEXT:    call __truncsfbf2@plt
+; RV32ID-NEXT:    call __truncsfbf2
 ; RV32ID-NEXT:    fmv.x.w a0, fa0
 ; RV32ID-NEXT:    lui a1, 1048560
 ; RV32ID-NEXT:    or a0, a0, a1
@@ -856,7 +856,7 @@ define bfloat @fcvt_bf16_ui(i16 %a) nounwind {
 ; RV64ID-NEXT:    slli a0, a0, 48
 ; RV64ID-NEXT:    srli a0, a0, 48
 ; RV64ID-NEXT:    fcvt.s.wu fa0, a0
-; RV64ID-NEXT:    call __truncsfbf2@plt
+; RV64ID-NEXT:    call __truncsfbf2
 ; RV64ID-NEXT:    fmv.x.w a0, fa0
 ; RV64ID-NEXT:    lui a1, 1048560
 ; RV64ID-NEXT:    or a0, a0, a1
@@ -880,7 +880,7 @@ define bfloat @fcvt_bf16_ui_zeroext(i16 zeroext %a) nounwind {
 ; RV32ID-NEXT:    addi sp, sp, -16
 ; RV32ID-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32ID-NEXT:    fcvt.s.wu fa0, a0
-; RV32ID-NEXT:    call __truncsfbf2@plt
+; RV32ID-NEXT:    call __truncsfbf2
 ; RV32ID-NEXT:    fmv.x.w a0, fa0
 ; RV32ID-NEXT:    lui a1, 1048560
 ; RV32ID-NEXT:    or a0, a0, a1
@@ -900,7 +900,7 @@ define bfloat @fcvt_bf16_ui_zeroext(i16 zeroext %a) nounwind {
 ; RV64ID-NEXT:    addi sp, sp, -16
 ; RV64ID-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64ID-NEXT:    fcvt.s.wu fa0, a0
-; RV64ID-NEXT:    call __truncsfbf2@plt
+; RV64ID-NEXT:    call __truncsfbf2
 ; RV64ID-NEXT:    fmv.x.w a0, fa0
 ; RV64ID-NEXT:    lui a1, 1048560
 ; RV64ID-NEXT:    or a0, a0, a1
@@ -924,7 +924,7 @@ define bfloat @fcvt_bf16_w(i32 %a) nounwind {
 ; RV32ID-NEXT:    addi sp, sp, -16
 ; RV32ID-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32ID-NEXT:    fcvt.s.w fa0, a0
-; RV32ID-NEXT:    call __truncsfbf2@plt
+; RV32ID-NEXT:    call __truncsfbf2
 ; RV32ID-NEXT:    fmv.x.w a0, fa0
 ; RV32ID-NEXT:    lui a1, 1048560
 ; RV32ID-NEXT:    or a0, a0, a1
@@ -945,7 +945,7 @@ define bfloat @fcvt_bf16_w(i32 %a) nounwind {
 ; RV64ID-NEXT:    addi sp, sp, -16
 ; RV64ID-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64ID-NEXT:    fcvt.s.w fa0, a0
-; RV64ID-NEXT:    call __truncsfbf2@plt
+; RV64ID-NEXT:    call __truncsfbf2
 ; RV64ID-NEXT:    fmv.x.w a0, fa0
 ; RV64ID-NEXT:    lui a1, 1048560
 ; RV64ID-NEXT:    or a0, a0, a1
@@ -971,7 +971,7 @@ define bfloat @fcvt_bf16_w_load(ptr %p) nounwind {
 ; RV32ID-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32ID-NEXT:    lw a0, 0(a0)
 ; RV32ID-NEXT:    fcvt.s.w fa0, a0
-; RV32ID-NEXT:    call __truncsfbf2@plt
+; RV32ID-NEXT:    call __truncsfbf2
 ; RV32ID-NEXT:    fmv.x.w a0, fa0
 ; RV32ID-NEXT:    lui a1, 1048560
 ; RV32ID-NEXT:    or a0, a0, a1
@@ -993,7 +993,7 @@ define bfloat @fcvt_bf16_w_load(ptr %p) nounwind {
 ; RV64ID-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64ID-NEXT:    lw a0, 0(a0)
 ; RV64ID-NEXT:    fcvt.s.w fa0, a0
-; RV64ID-NEXT:    call __truncsfbf2@plt
+; RV64ID-NEXT:    call __truncsfbf2
 ; RV64ID-NEXT:    fmv.x.w a0, fa0
 ; RV64ID-NEXT:    lui a1, 1048560
 ; RV64ID-NEXT:    or a0, a0, a1
@@ -1018,7 +1018,7 @@ define bfloat @fcvt_bf16_wu(i32 %a) nounwind {
 ; RV32ID-NEXT:    addi sp, sp, -16
 ; RV32ID-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32ID-NEXT:    fcvt.s.wu fa0, a0
-; RV32ID-NEXT:    call __truncsfbf2@plt
+; RV32ID-NEXT:    call __truncsfbf2
 ; RV32ID-NEXT:    fmv.x.w a0, fa0
 ; RV32ID-NEXT:    lui a1, 1048560
 ; RV32ID-NEXT:    or a0, a0, a1
@@ -1040,7 +1040,7 @@ define bfloat @fcvt_bf16_wu(i32 %a) nounwind {
 ; RV64ID-NEXT:    addi sp, sp, -16
 ; RV64ID-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64ID-NEXT:    fcvt.s.wu fa0, a0
-; RV64ID-NEXT:    call __truncsfbf2@plt
+; RV64ID-NEXT:    call __truncsfbf2
 ; RV64ID-NEXT:    fmv.x.w a0, fa0
 ; RV64ID-NEXT:    lui a1, 1048560
 ; RV64ID-NEXT:    or a0, a0, a1
@@ -1066,7 +1066,7 @@ define bfloat @fcvt_bf16_wu_load(ptr %p) nounwind {
 ; RV32ID-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32ID-NEXT:    lw a0, 0(a0)
 ; RV32ID-NEXT:    fcvt.s.wu fa0, a0
-; RV32ID-NEXT:    call __truncsfbf2@plt
+; RV32ID-NEXT:    call __truncsfbf2
 ; RV32ID-NEXT:    fmv.x.w a0, fa0
 ; RV32ID-NEXT:    lui a1, 1048560
 ; RV32ID-NEXT:    or a0, a0, a1
@@ -1088,7 +1088,7 @@ define bfloat @fcvt_bf16_wu_load(ptr %p) nounwind {
 ; RV64ID-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64ID-NEXT:    lwu a0, 0(a0)
 ; RV64ID-NEXT:    fcvt.s.wu fa0, a0
-; RV64ID-NEXT:    call __truncsfbf2@plt
+; RV64ID-NEXT:    call __truncsfbf2
 ; RV64ID-NEXT:    fmv.x.w a0, fa0
 ; RV64ID-NEXT:    lui a1, 1048560
 ; RV64ID-NEXT:    or a0, a0, a1
@@ -1123,7 +1123,7 @@ define bfloat @fcvt_bf16_s(float %a) nounwind {
 ; RV32ID:       # %bb.0:
 ; RV32ID-NEXT:    addi sp, sp, -16
 ; RV32ID-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-NEXT:    call __truncsfbf2@plt
+; RV32ID-NEXT:    call __truncsfbf2
 ; RV32ID-NEXT:    fmv.x.w a0, fa0
 ; RV32ID-NEXT:    lui a1, 1048560
 ; RV32ID-NEXT:    or a0, a0, a1
@@ -1141,7 +1141,7 @@ define bfloat @fcvt_bf16_s(float %a) nounwind {
 ; RV64ID:       # %bb.0:
 ; RV64ID-NEXT:    addi sp, sp, -16
 ; RV64ID-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-NEXT:    call __truncsfbf2@plt
+; RV64ID-NEXT:    call __truncsfbf2
 ; RV64ID-NEXT:    fmv.x.w a0, fa0
 ; RV64ID-NEXT:    lui a1, 1048560
 ; RV64ID-NEXT:    or a0, a0, a1
@@ -1186,7 +1186,7 @@ define bfloat @fcvt_bf16_d(double %a) nounwind {
 ; RV32IZFBFMIN:       # %bb.0:
 ; RV32IZFBFMIN-NEXT:    addi sp, sp, -16
 ; RV32IZFBFMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFBFMIN-NEXT:    call __truncdfbf2@plt
+; RV32IZFBFMIN-NEXT:    call __truncdfbf2
 ; RV32IZFBFMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFBFMIN-NEXT:    addi sp, sp, 16
 ; RV32IZFBFMIN-NEXT:    ret
@@ -1201,7 +1201,7 @@ define bfloat @fcvt_bf16_d(double %a) nounwind {
 ; RV32ID:       # %bb.0:
 ; RV32ID-NEXT:    addi sp, sp, -16
 ; RV32ID-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-NEXT:    call __truncdfbf2@plt
+; RV32ID-NEXT:    call __truncdfbf2
 ; RV32ID-NEXT:    fmv.x.w a0, fa0
 ; RV32ID-NEXT:    lui a1, 1048560
 ; RV32ID-NEXT:    or a0, a0, a1
@@ -1214,7 +1214,7 @@ define bfloat @fcvt_bf16_d(double %a) nounwind {
 ; RV64IZFBFMIN:       # %bb.0:
 ; RV64IZFBFMIN-NEXT:    addi sp, sp, -16
 ; RV64IZFBFMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFBFMIN-NEXT:    call __truncdfbf2@plt
+; RV64IZFBFMIN-NEXT:    call __truncdfbf2
 ; RV64IZFBFMIN-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFBFMIN-NEXT:    addi sp, sp, 16
 ; RV64IZFBFMIN-NEXT:    ret
@@ -1229,7 +1229,7 @@ define bfloat @fcvt_bf16_d(double %a) nounwind {
 ; RV64ID:       # %bb.0:
 ; RV64ID-NEXT:    addi sp, sp, -16
 ; RV64ID-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-NEXT:    call __truncdfbf2@plt
+; RV64ID-NEXT:    call __truncdfbf2
 ; RV64ID-NEXT:    fmv.x.w a0, fa0
 ; RV64ID-NEXT:    lui a1, 1048560
 ; RV64ID-NEXT:    or a0, a0, a1
@@ -1247,7 +1247,7 @@ define double @fcvt_d_bf16(bfloat %a) nounwind {
 ; RV32IZFBFMIN-NEXT:    addi sp, sp, -16
 ; RV32IZFBFMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFBFMIN-NEXT:    fcvt.s.bf16 fa0, fa0
-; RV32IZFBFMIN-NEXT:    call __extendsfdf2@plt
+; RV32IZFBFMIN-NEXT:    call __extendsfdf2
 ; RV32IZFBFMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFBFMIN-NEXT:    addi sp, sp, 16
 ; RV32IZFBFMIN-NEXT:    ret
@@ -1271,7 +1271,7 @@ define double @fcvt_d_bf16(bfloat %a) nounwind {
 ; RV64IZFBFMIN-NEXT:    addi sp, sp, -16
 ; RV64IZFBFMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZFBFMIN-NEXT:    fcvt.s.bf16 fa0, fa0
-; RV64IZFBFMIN-NEXT:    call __extendsfdf2@plt
+; RV64IZFBFMIN-NEXT:    call __extendsfdf2
 ; RV64IZFBFMIN-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFBFMIN-NEXT:    addi sp, sp, 16
 ; RV64IZFBFMIN-NEXT:    ret
@@ -1363,7 +1363,7 @@ define signext i32 @fcvt_bf16_w_demanded_bits(i32 signext %0, ptr %1) nounwind {
 ; RV32ID-NEXT:    mv s0, a1
 ; RV32ID-NEXT:    addi s1, a0, 1
 ; RV32ID-NEXT:    fcvt.s.w fa0, s1
-; RV32ID-NEXT:    call __truncsfbf2@plt
+; RV32ID-NEXT:    call __truncsfbf2
 ; RV32ID-NEXT:    fmv.x.w a0, fa0
 ; RV32ID-NEXT:    sh a0, 0(s0)
 ; RV32ID-NEXT:    mv a0, s1
@@ -1390,7 +1390,7 @@ define signext i32 @fcvt_bf16_w_demanded_bits(i32 signext %0, ptr %1) nounwind {
 ; RV64ID-NEXT:    mv s0, a1
 ; RV64ID-NEXT:    addiw s1, a0, 1
 ; RV64ID-NEXT:    fcvt.s.w fa0, s1
-; RV64ID-NEXT:    call __truncsfbf2@plt
+; RV64ID-NEXT:    call __truncsfbf2
 ; RV64ID-NEXT:    fmv.x.w a0, fa0
 ; RV64ID-NEXT:    sh a0, 0(s0)
 ; RV64ID-NEXT:    mv a0, s1
@@ -1423,7 +1423,7 @@ define signext i32 @fcvt_bf16_wu_demanded_bits(i32 signext %0, ptr %1) nounwind
 ; RV32ID-NEXT:    mv s0, a1
 ; RV32ID-NEXT:    addi s1, a0, 1
 ; RV32ID-NEXT:    fcvt.s.wu fa0, s1
-; RV32ID-NEXT:    call __truncsfbf2@plt
+; RV32ID-NEXT:    call __truncsfbf2
 ; RV32ID-NEXT:    fmv.x.w a0, fa0
 ; RV32ID-NEXT:    sh a0, 0(s0)
 ; RV32ID-NEXT:    mv a0, s1
@@ -1452,7 +1452,7 @@ define signext i32 @fcvt_bf16_wu_demanded_bits(i32 signext %0, ptr %1) nounwind
 ; RV64ID-NEXT:    mv s0, a1
 ; RV64ID-NEXT:    addiw s1, a0, 1
 ; RV64ID-NEXT:    fcvt.s.wu fa0, s1
-; RV64ID-NEXT:    call __truncsfbf2@plt
+; RV64ID-NEXT:    call __truncsfbf2
 ; RV64ID-NEXT:    fmv.x.w a0, fa0
 ; RV64ID-NEXT:    sh a0, 0(s0)
 ; RV64ID-NEXT:    mv a0, s1
diff --git a/llvm/test/CodeGen/RISCV/bfloat-frem.ll b/llvm/test/CodeGen/RISCV/bfloat-frem.ll
index fd6db9dfc3f78a..ac8b99d1ce6d74 100644
--- a/llvm/test/CodeGen/RISCV/bfloat-frem.ll
+++ b/llvm/test/CodeGen/RISCV/bfloat-frem.ll
@@ -11,7 +11,7 @@ define bfloat @frem_bf16(bfloat %a, bfloat %b) nounwind {
 ; RV32IZFBFMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFBFMIN-NEXT:    fcvt.s.bf16 fa0, fa0
 ; RV32IZFBFMIN-NEXT:    fcvt.s.bf16 fa1, fa1
-; RV32IZFBFMIN-NEXT:    call fmodf@plt
+; RV32IZFBFMIN-NEXT:    call fmodf
 ; RV32IZFBFMIN-NEXT:    fcvt.bf16.s fa0, fa0
 ; RV32IZFBFMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFBFMIN-NEXT:    addi sp, sp, 16
@@ -23,7 +23,7 @@ define bfloat @frem_bf16(bfloat %a, bfloat %b) nounwind {
 ; RV64IZFBFMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZFBFMIN-NEXT:    fcvt.s.bf16 fa0, fa0
 ; RV64IZFBFMIN-NEXT:    fcvt.s.bf16 fa1, fa1
-; RV64IZFBFMIN-NEXT:    call fmodf@plt
+; RV64IZFBFMIN-NEXT:    call fmodf
 ; RV64IZFBFMIN-NEXT:    fcvt.bf16.s fa0, fa0
 ; RV64IZFBFMIN-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFBFMIN-NEXT:    addi sp, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/bfloat-mem.ll b/llvm/test/CodeGen/RISCV/bfloat-mem.ll
index 1119611b821223..4b6c0c29d660be 100644
--- a/llvm/test/CodeGen/RISCV/bfloat-mem.ll
+++ b/llvm/test/CodeGen/RISCV/bfloat-mem.ll
@@ -109,7 +109,7 @@ define bfloat @flh_stack(bfloat %a) nounwind {
 ; RV32IZFBFMIN-NEXT:    fsw fs0, 8(sp) # 4-byte Folded Spill
 ; RV32IZFBFMIN-NEXT:    fmv.s fs0, fa0
 ; RV32IZFBFMIN-NEXT:    addi a0, sp, 4
-; RV32IZFBFMIN-NEXT:    call notdead@plt
+; RV32IZFBFMIN-NEXT:    call notdead
 ; RV32IZFBFMIN-NEXT:    flh fa5, 4(sp)
 ; RV32IZFBFMIN-NEXT:    fcvt.s.bf16 fa4, fs0
 ; RV32IZFBFMIN-NEXT:    fcvt.s.bf16 fa5, fa5
@@ -127,7 +127,7 @@ define bfloat @flh_stack(bfloat %a) nounwind {
 ; RV64IZFBFMIN-NEXT:    fsw fs0, 4(sp) # 4-byte Folded Spill
 ; RV64IZFBFMIN-NEXT:    fmv.s fs0, fa0
 ; RV64IZFBFMIN-NEXT:    mv a0, sp
-; RV64IZFBFMIN-NEXT:    call notdead@plt
+; RV64IZFBFMIN-NEXT:    call notdead
 ; RV64IZFBFMIN-NEXT:    flh fa5, 0(sp)
 ; RV64IZFBFMIN-NEXT:    fcvt.s.bf16 fa4, fs0
 ; RV64IZFBFMIN-NEXT:    fcvt.s.bf16 fa5, fa5
@@ -155,7 +155,7 @@ define dso_local void @fsh_stack(bfloat %a, bfloat %b) nounwind {
 ; RV32IZFBFMIN-NEXT:    fcvt.bf16.s fa5, fa5
 ; RV32IZFBFMIN-NEXT:    fsh fa5, 8(sp)
 ; RV32IZFBFMIN-NEXT:    addi a0, sp, 8
-; RV32IZFBFMIN-NEXT:    call notdead@plt
+; RV32IZFBFMIN-NEXT:    call notdead
 ; RV32IZFBFMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFBFMIN-NEXT:    addi sp, sp, 16
 ; RV32IZFBFMIN-NEXT:    ret
@@ -170,7 +170,7 @@ define dso_local void @fsh_stack(bfloat %a, bfloat %b) nounwind {
 ; RV64IZFBFMIN-NEXT:    fcvt.bf16.s fa5, fa5
 ; RV64IZFBFMIN-NEXT:    fsh fa5, 4(sp)
 ; RV64IZFBFMIN-NEXT:    addi a0, sp, 4
-; RV64IZFBFMIN-NEXT:    call notdead@plt
+; RV64IZFBFMIN-NEXT:    call notdead
 ; RV64IZFBFMIN-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFBFMIN-NEXT:    addi sp, sp, 16
 ; RV64IZFBFMIN-NEXT:    ret
diff --git a/llvm/test/CodeGen/RISCV/bfloat.ll b/llvm/test/CodeGen/RISCV/bfloat.ll
index d62f35388123f7..9dc8ce6be1ea61 100644
--- a/llvm/test/CodeGen/RISCV/bfloat.ll
+++ b/llvm/test/CodeGen/RISCV/bfloat.ll
@@ -11,7 +11,7 @@ define bfloat @float_to_bfloat(float %a) nounwind {
 ; RV32I-ILP32:       # %bb.0:
 ; RV32I-ILP32-NEXT:    addi sp, sp, -16
 ; RV32I-ILP32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-ILP32-NEXT:    call __truncsfbf2@plt
+; RV32I-ILP32-NEXT:    call __truncsfbf2
 ; RV32I-ILP32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-ILP32-NEXT:    addi sp, sp, 16
 ; RV32I-ILP32-NEXT:    ret
@@ -20,7 +20,7 @@ define bfloat @float_to_bfloat(float %a) nounwind {
 ; RV64I-LP64:       # %bb.0:
 ; RV64I-LP64-NEXT:    addi sp, sp, -16
 ; RV64I-LP64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-LP64-NEXT:    call __truncsfbf2@plt
+; RV64I-LP64-NEXT:    call __truncsfbf2
 ; RV64I-LP64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-LP64-NEXT:    addi sp, sp, 16
 ; RV64I-LP64-NEXT:    ret
@@ -29,7 +29,7 @@ define bfloat @float_to_bfloat(float %a) nounwind {
 ; RV32ID-ILP32:       # %bb.0:
 ; RV32ID-ILP32-NEXT:    addi sp, sp, -16
 ; RV32ID-ILP32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-ILP32-NEXT:    call __truncsfbf2@plt
+; RV32ID-ILP32-NEXT:    call __truncsfbf2
 ; RV32ID-ILP32-NEXT:    lui a1, 1048560
 ; RV32ID-ILP32-NEXT:    or a0, a0, a1
 ; RV32ID-ILP32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -40,7 +40,7 @@ define bfloat @float_to_bfloat(float %a) nounwind {
 ; RV64ID-LP64:       # %bb.0:
 ; RV64ID-LP64-NEXT:    addi sp, sp, -16
 ; RV64ID-LP64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-LP64-NEXT:    call __truncsfbf2@plt
+; RV64ID-LP64-NEXT:    call __truncsfbf2
 ; RV64ID-LP64-NEXT:    lui a1, 1048560
 ; RV64ID-LP64-NEXT:    or a0, a0, a1
 ; RV64ID-LP64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -51,7 +51,7 @@ define bfloat @float_to_bfloat(float %a) nounwind {
 ; RV32ID-ILP32D:       # %bb.0:
 ; RV32ID-ILP32D-NEXT:    addi sp, sp, -16
 ; RV32ID-ILP32D-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-ILP32D-NEXT:    call __truncsfbf2@plt
+; RV32ID-ILP32D-NEXT:    call __truncsfbf2
 ; RV32ID-ILP32D-NEXT:    fmv.x.w a0, fa0
 ; RV32ID-ILP32D-NEXT:    lui a1, 1048560
 ; RV32ID-ILP32D-NEXT:    or a0, a0, a1
@@ -64,7 +64,7 @@ define bfloat @float_to_bfloat(float %a) nounwind {
 ; RV64ID-LP64D:       # %bb.0:
 ; RV64ID-LP64D-NEXT:    addi sp, sp, -16
 ; RV64ID-LP64D-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-LP64D-NEXT:    call __truncsfbf2@plt
+; RV64ID-LP64D-NEXT:    call __truncsfbf2
 ; RV64ID-LP64D-NEXT:    fmv.x.w a0, fa0
 ; RV64ID-LP64D-NEXT:    lui a1, 1048560
 ; RV64ID-LP64D-NEXT:    or a0, a0, a1
@@ -81,7 +81,7 @@ define bfloat @double_to_bfloat(double %a) nounwind {
 ; RV32I-ILP32:       # %bb.0:
 ; RV32I-ILP32-NEXT:    addi sp, sp, -16
 ; RV32I-ILP32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-ILP32-NEXT:    call __truncdfbf2@plt
+; RV32I-ILP32-NEXT:    call __truncdfbf2
 ; RV32I-ILP32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-ILP32-NEXT:    addi sp, sp, 16
 ; RV32I-ILP32-NEXT:    ret
@@ -90,7 +90,7 @@ define bfloat @double_to_bfloat(double %a) nounwind {
 ; RV64I-LP64:       # %bb.0:
 ; RV64I-LP64-NEXT:    addi sp, sp, -16
 ; RV64I-LP64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-LP64-NEXT:    call __truncdfbf2@plt
+; RV64I-LP64-NEXT:    call __truncdfbf2
 ; RV64I-LP64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-LP64-NEXT:    addi sp, sp, 16
 ; RV64I-LP64-NEXT:    ret
@@ -99,7 +99,7 @@ define bfloat @double_to_bfloat(double %a) nounwind {
 ; RV32ID-ILP32:       # %bb.0:
 ; RV32ID-ILP32-NEXT:    addi sp, sp, -16
 ; RV32ID-ILP32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-ILP32-NEXT:    call __truncdfbf2@plt
+; RV32ID-ILP32-NEXT:    call __truncdfbf2
 ; RV32ID-ILP32-NEXT:    lui a1, 1048560
 ; RV32ID-ILP32-NEXT:    or a0, a0, a1
 ; RV32ID-ILP32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -110,7 +110,7 @@ define bfloat @double_to_bfloat(double %a) nounwind {
 ; RV64ID-LP64:       # %bb.0:
 ; RV64ID-LP64-NEXT:    addi sp, sp, -16
 ; RV64ID-LP64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-LP64-NEXT:    call __truncdfbf2@plt
+; RV64ID-LP64-NEXT:    call __truncdfbf2
 ; RV64ID-LP64-NEXT:    lui a1, 1048560
 ; RV64ID-LP64-NEXT:    or a0, a0, a1
 ; RV64ID-LP64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -121,7 +121,7 @@ define bfloat @double_to_bfloat(double %a) nounwind {
 ; RV32ID-ILP32D:       # %bb.0:
 ; RV32ID-ILP32D-NEXT:    addi sp, sp, -16
 ; RV32ID-ILP32D-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-ILP32D-NEXT:    call __truncdfbf2@plt
+; RV32ID-ILP32D-NEXT:    call __truncdfbf2
 ; RV32ID-ILP32D-NEXT:    fmv.x.w a0, fa0
 ; RV32ID-ILP32D-NEXT:    lui a1, 1048560
 ; RV32ID-ILP32D-NEXT:    or a0, a0, a1
@@ -134,7 +134,7 @@ define bfloat @double_to_bfloat(double %a) nounwind {
 ; RV64ID-LP64D:       # %bb.0:
 ; RV64ID-LP64D-NEXT:    addi sp, sp, -16
 ; RV64ID-LP64D-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-LP64D-NEXT:    call __truncdfbf2@plt
+; RV64ID-LP64D-NEXT:    call __truncdfbf2
 ; RV64ID-LP64D-NEXT:    fmv.x.w a0, fa0
 ; RV64ID-LP64D-NEXT:    lui a1, 1048560
 ; RV64ID-LP64D-NEXT:    or a0, a0, a1
@@ -190,7 +190,7 @@ define double @bfloat_to_double(bfloat %a) nounwind {
 ; RV32I-ILP32-NEXT:    addi sp, sp, -16
 ; RV32I-ILP32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-ILP32-NEXT:    slli a0, a0, 16
-; RV32I-ILP32-NEXT:    call __extendsfdf2@plt
+; RV32I-ILP32-NEXT:    call __extendsfdf2
 ; RV32I-ILP32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-ILP32-NEXT:    addi sp, sp, 16
 ; RV32I-ILP32-NEXT:    ret
@@ -200,7 +200,7 @@ define double @bfloat_to_double(bfloat %a) nounwind {
 ; RV64I-LP64-NEXT:    addi sp, sp, -16
 ; RV64I-LP64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-LP64-NEXT:    slliw a0, a0, 16
-; RV64I-LP64-NEXT:    call __extendsfdf2@plt
+; RV64I-LP64-NEXT:    call __extendsfdf2
 ; RV64I-LP64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-LP64-NEXT:    addi sp, sp, 16
 ; RV64I-LP64-NEXT:    ret
@@ -319,8 +319,8 @@ define bfloat @bfloat_add(bfloat %a, bfloat %b) nounwind {
 ; RV32I-ILP32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-ILP32-NEXT:    slli a0, a0, 16
 ; RV32I-ILP32-NEXT:    slli a1, a1, 16
-; RV32I-ILP32-NEXT:    call __addsf3@plt
-; RV32I-ILP32-NEXT:    call __truncsfbf2@plt
+; RV32I-ILP32-NEXT:    call __addsf3
+; RV32I-ILP32-NEXT:    call __truncsfbf2
 ; RV32I-ILP32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-ILP32-NEXT:    addi sp, sp, 16
 ; RV32I-ILP32-NEXT:    ret
@@ -331,8 +331,8 @@ define bfloat @bfloat_add(bfloat %a, bfloat %b) nounwind {
 ; RV64I-LP64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-LP64-NEXT:    slliw a0, a0, 16
 ; RV64I-LP64-NEXT:    slliw a1, a1, 16
-; RV64I-LP64-NEXT:    call __addsf3@plt
-; RV64I-LP64-NEXT:    call __truncsfbf2@plt
+; RV64I-LP64-NEXT:    call __addsf3
+; RV64I-LP64-NEXT:    call __truncsfbf2
 ; RV64I-LP64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-LP64-NEXT:    addi sp, sp, 16
 ; RV64I-LP64-NEXT:    ret
@@ -347,7 +347,7 @@ define bfloat @bfloat_add(bfloat %a, bfloat %b) nounwind {
 ; RV32ID-ILP32-NEXT:    fmv.w.x fa4, a0
 ; RV32ID-ILP32-NEXT:    fadd.s fa5, fa4, fa5
 ; RV32ID-ILP32-NEXT:    fmv.x.w a0, fa5
-; RV32ID-ILP32-NEXT:    call __truncsfbf2@plt
+; RV32ID-ILP32-NEXT:    call __truncsfbf2
 ; RV32ID-ILP32-NEXT:    lui a1, 1048560
 ; RV32ID-ILP32-NEXT:    or a0, a0, a1
 ; RV32ID-ILP32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -364,7 +364,7 @@ define bfloat @bfloat_add(bfloat %a, bfloat %b) nounwind {
 ; RV64ID-LP64-NEXT:    fmv.w.x fa4, a0
 ; RV64ID-LP64-NEXT:    fadd.s fa5, fa4, fa5
 ; RV64ID-LP64-NEXT:    fmv.x.w a0, fa5
-; RV64ID-LP64-NEXT:    call __truncsfbf2@plt
+; RV64ID-LP64-NEXT:    call __truncsfbf2
 ; RV64ID-LP64-NEXT:    lui a1, 1048560
 ; RV64ID-LP64-NEXT:    or a0, a0, a1
 ; RV64ID-LP64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -382,7 +382,7 @@ define bfloat @bfloat_add(bfloat %a, bfloat %b) nounwind {
 ; RV32ID-ILP32D-NEXT:    slli a0, a0, 16
 ; RV32ID-ILP32D-NEXT:    fmv.w.x fa4, a0
 ; RV32ID-ILP32D-NEXT:    fadd.s fa0, fa4, fa5
-; RV32ID-ILP32D-NEXT:    call __truncsfbf2@plt
+; RV32ID-ILP32D-NEXT:    call __truncsfbf2
 ; RV32ID-ILP32D-NEXT:    fmv.x.w a0, fa0
 ; RV32ID-ILP32D-NEXT:    lui a1, 1048560
 ; RV32ID-ILP32D-NEXT:    or a0, a0, a1
@@ -402,7 +402,7 @@ define bfloat @bfloat_add(bfloat %a, bfloat %b) nounwind {
 ; RV64ID-LP64D-NEXT:    slli a0, a0, 16
 ; RV64ID-LP64D-NEXT:    fmv.w.x fa4, a0
 ; RV64ID-LP64D-NEXT:    fadd.s fa0, fa4, fa5
-; RV64ID-LP64D-NEXT:    call __truncsfbf2@plt
+; RV64ID-LP64D-NEXT:    call __truncsfbf2
 ; RV64ID-LP64D-NEXT:    fmv.x.w a0, fa0
 ; RV64ID-LP64D-NEXT:    lui a1, 1048560
 ; RV64ID-LP64D-NEXT:    or a0, a0, a1
@@ -423,8 +423,8 @@ define bfloat @bfloat_load(ptr %a) nounwind {
 ; RV32I-ILP32-NEXT:    lh a2, 6(a0)
 ; RV32I-ILP32-NEXT:    slli a0, a1, 16
 ; RV32I-ILP32-NEXT:    slli a1, a2, 16
-; RV32I-ILP32-NEXT:    call __addsf3@plt
-; RV32I-ILP32-NEXT:    call __truncsfbf2@plt
+; RV32I-ILP32-NEXT:    call __addsf3
+; RV32I-ILP32-NEXT:    call __truncsfbf2
 ; RV32I-ILP32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-ILP32-NEXT:    addi sp, sp, 16
 ; RV32I-ILP32-NEXT:    ret
@@ -437,8 +437,8 @@ define bfloat @bfloat_load(ptr %a) nounwind {
 ; RV64I-LP64-NEXT:    lh a2, 6(a0)
 ; RV64I-LP64-NEXT:    slliw a0, a1, 16
 ; RV64I-LP64-NEXT:    slliw a1, a2, 16
-; RV64I-LP64-NEXT:    call __addsf3@plt
-; RV64I-LP64-NEXT:    call __truncsfbf2@plt
+; RV64I-LP64-NEXT:    call __addsf3
+; RV64I-LP64-NEXT:    call __truncsfbf2
 ; RV64I-LP64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-LP64-NEXT:    addi sp, sp, 16
 ; RV64I-LP64-NEXT:    ret
@@ -455,7 +455,7 @@ define bfloat @bfloat_load(ptr %a) nounwind {
 ; RV32ID-ILP32-NEXT:    fmv.w.x fa4, a0
 ; RV32ID-ILP32-NEXT:    fadd.s fa5, fa4, fa5
 ; RV32ID-ILP32-NEXT:    fmv.x.w a0, fa5
-; RV32ID-ILP32-NEXT:    call __truncsfbf2@plt
+; RV32ID-ILP32-NEXT:    call __truncsfbf2
 ; RV32ID-ILP32-NEXT:    lui a1, 1048560
 ; RV32ID-ILP32-NEXT:    or a0, a0, a1
 ; RV32ID-ILP32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -474,7 +474,7 @@ define bfloat @bfloat_load(ptr %a) nounwind {
 ; RV64ID-LP64-NEXT:    fmv.w.x fa4, a0
 ; RV64ID-LP64-NEXT:    fadd.s fa5, fa4, fa5
 ; RV64ID-LP64-NEXT:    fmv.x.w a0, fa5
-; RV64ID-LP64-NEXT:    call __truncsfbf2@plt
+; RV64ID-LP64-NEXT:    call __truncsfbf2
 ; RV64ID-LP64-NEXT:    lui a1, 1048560
 ; RV64ID-LP64-NEXT:    or a0, a0, a1
 ; RV64ID-LP64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -492,7 +492,7 @@ define bfloat @bfloat_load(ptr %a) nounwind {
 ; RV32ID-ILP32D-NEXT:    slli a0, a0, 16
 ; RV32ID-ILP32D-NEXT:    fmv.w.x fa4, a0
 ; RV32ID-ILP32D-NEXT:    fadd.s fa0, fa4, fa5
-; RV32ID-ILP32D-NEXT:    call __truncsfbf2@plt
+; RV32ID-ILP32D-NEXT:    call __truncsfbf2
 ; RV32ID-ILP32D-NEXT:    fmv.x.w a0, fa0
 ; RV32ID-ILP32D-NEXT:    lui a1, 1048560
 ; RV32ID-ILP32D-NEXT:    or a0, a0, a1
@@ -512,7 +512,7 @@ define bfloat @bfloat_load(ptr %a) nounwind {
 ; RV64ID-LP64D-NEXT:    slli a0, a0, 16
 ; RV64ID-LP64D-NEXT:    fmv.w.x fa4, a0
 ; RV64ID-LP64D-NEXT:    fadd.s fa0, fa4, fa5
-; RV64ID-LP64D-NEXT:    call __truncsfbf2@plt
+; RV64ID-LP64D-NEXT:    call __truncsfbf2
 ; RV64ID-LP64D-NEXT:    fmv.x.w a0, fa0
 ; RV64ID-LP64D-NEXT:    lui a1, 1048560
 ; RV64ID-LP64D-NEXT:    or a0, a0, a1
@@ -536,8 +536,8 @@ define void @bfloat_store(ptr %a, bfloat %b, bfloat %c) nounwind {
 ; RV32I-ILP32-NEXT:    mv s0, a0
 ; RV32I-ILP32-NEXT:    slli a0, a1, 16
 ; RV32I-ILP32-NEXT:    slli a1, a2, 16
-; RV32I-ILP32-NEXT:    call __addsf3@plt
-; RV32I-ILP32-NEXT:    call __truncsfbf2@plt
+; RV32I-ILP32-NEXT:    call __addsf3
+; RV32I-ILP32-NEXT:    call __truncsfbf2
 ; RV32I-ILP32-NEXT:    sh a0, 0(s0)
 ; RV32I-ILP32-NEXT:    sh a0, 16(s0)
 ; RV32I-ILP32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -553,8 +553,8 @@ define void @bfloat_store(ptr %a, bfloat %b, bfloat %c) nounwind {
 ; RV64I-LP64-NEXT:    mv s0, a0
 ; RV64I-LP64-NEXT:    slliw a0, a1, 16
 ; RV64I-LP64-NEXT:    slliw a1, a2, 16
-; RV64I-LP64-NEXT:    call __addsf3@plt
-; RV64I-LP64-NEXT:    call __truncsfbf2@plt
+; RV64I-LP64-NEXT:    call __addsf3
+; RV64I-LP64-NEXT:    call __truncsfbf2
 ; RV64I-LP64-NEXT:    sh a0, 0(s0)
 ; RV64I-LP64-NEXT:    sh a0, 16(s0)
 ; RV64I-LP64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -574,7 +574,7 @@ define void @bfloat_store(ptr %a, bfloat %b, bfloat %c) nounwind {
 ; RV32ID-ILP32-NEXT:    fmv.w.x fa4, a1
 ; RV32ID-ILP32-NEXT:    fadd.s fa5, fa4, fa5
 ; RV32ID-ILP32-NEXT:    fmv.x.w a0, fa5
-; RV32ID-ILP32-NEXT:    call __truncsfbf2@plt
+; RV32ID-ILP32-NEXT:    call __truncsfbf2
 ; RV32ID-ILP32-NEXT:    sh a0, 0(s0)
 ; RV32ID-ILP32-NEXT:    sh a0, 16(s0)
 ; RV32ID-ILP32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -594,7 +594,7 @@ define void @bfloat_store(ptr %a, bfloat %b, bfloat %c) nounwind {
 ; RV64ID-LP64-NEXT:    fmv.w.x fa4, a1
 ; RV64ID-LP64-NEXT:    fadd.s fa5, fa4, fa5
 ; RV64ID-LP64-NEXT:    fmv.x.w a0, fa5
-; RV64ID-LP64-NEXT:    call __truncsfbf2@plt
+; RV64ID-LP64-NEXT:    call __truncsfbf2
 ; RV64ID-LP64-NEXT:    sh a0, 0(s0)
 ; RV64ID-LP64-NEXT:    sh a0, 16(s0)
 ; RV64ID-LP64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -615,7 +615,7 @@ define void @bfloat_store(ptr %a, bfloat %b, bfloat %c) nounwind {
 ; RV32ID-ILP32D-NEXT:    slli a0, a0, 16
 ; RV32ID-ILP32D-NEXT:    fmv.w.x fa4, a0
 ; RV32ID-ILP32D-NEXT:    fadd.s fa0, fa4, fa5
-; RV32ID-ILP32D-NEXT:    call __truncsfbf2@plt
+; RV32ID-ILP32D-NEXT:    call __truncsfbf2
 ; RV32ID-ILP32D-NEXT:    fmv.x.w a0, fa0
 ; RV32ID-ILP32D-NEXT:    sh a0, 0(s0)
 ; RV32ID-ILP32D-NEXT:    sh a0, 16(s0)
@@ -637,7 +637,7 @@ define void @bfloat_store(ptr %a, bfloat %b, bfloat %c) nounwind {
 ; RV64ID-LP64D-NEXT:    slli a0, a0, 16
 ; RV64ID-LP64D-NEXT:    fmv.w.x fa4, a0
 ; RV64ID-LP64D-NEXT:    fadd.s fa0, fa4, fa5
-; RV64ID-LP64D-NEXT:    call __truncsfbf2@plt
+; RV64ID-LP64D-NEXT:    call __truncsfbf2
 ; RV64ID-LP64D-NEXT:    fmv.x.w a0, fa0
 ; RV64ID-LP64D-NEXT:    sh a0, 0(s0)
 ; RV64ID-LP64D-NEXT:    sh a0, 16(s0)
diff --git a/llvm/test/CodeGen/RISCV/bittest.ll b/llvm/test/CodeGen/RISCV/bittest.ll
index a05c518bbf3a01..d280e5ee46b7c0 100644
--- a/llvm/test/CodeGen/RISCV/bittest.ll
+++ b/llvm/test/CodeGen/RISCV/bittest.ll
@@ -452,7 +452,7 @@ define void @bittest_switch(i32 signext %0) {
 ; RV32I-NEXT:    andi a0, a0, 1
 ; RV32I-NEXT:    beqz a0, .LBB14_3
 ; RV32I-NEXT:  # %bb.2:
-; RV32I-NEXT:    tail bar@plt
+; RV32I-NEXT:    tail bar
 ; RV32I-NEXT:  .LBB14_3:
 ; RV32I-NEXT:    ret
 ;
@@ -468,7 +468,7 @@ define void @bittest_switch(i32 signext %0) {
 ; RV64I-NEXT:    andi a0, a0, 1
 ; RV64I-NEXT:    beqz a0, .LBB14_3
 ; RV64I-NEXT:  # %bb.2:
-; RV64I-NEXT:    tail bar@plt
+; RV64I-NEXT:    tail bar
 ; RV64I-NEXT:  .LBB14_3:
 ; RV64I-NEXT:    ret
 ;
@@ -482,7 +482,7 @@ define void @bittest_switch(i32 signext %0) {
 ; RV32ZBS-NEXT:    bext a0, a1, a0
 ; RV32ZBS-NEXT:    beqz a0, .LBB14_3
 ; RV32ZBS-NEXT:  # %bb.2:
-; RV32ZBS-NEXT:    tail bar@plt
+; RV32ZBS-NEXT:    tail bar
 ; RV32ZBS-NEXT:  .LBB14_3:
 ; RV32ZBS-NEXT:    ret
 ;
@@ -497,7 +497,7 @@ define void @bittest_switch(i32 signext %0) {
 ; RV64ZBS-NEXT:    bext a0, a1, a0
 ; RV64ZBS-NEXT:    beqz a0, .LBB14_3
 ; RV64ZBS-NEXT:  # %bb.2:
-; RV64ZBS-NEXT:    tail bar@plt
+; RV64ZBS-NEXT:    tail bar
 ; RV64ZBS-NEXT:  .LBB14_3:
 ; RV64ZBS-NEXT:    ret
 ;
@@ -512,7 +512,7 @@ define void @bittest_switch(i32 signext %0) {
 ; RV32XTHEADBS-NEXT:    andi a0, a0, 1
 ; RV32XTHEADBS-NEXT:    beqz a0, .LBB14_3
 ; RV32XTHEADBS-NEXT:  # %bb.2:
-; RV32XTHEADBS-NEXT:    tail bar@plt
+; RV32XTHEADBS-NEXT:    tail bar
 ; RV32XTHEADBS-NEXT:  .LBB14_3:
 ; RV32XTHEADBS-NEXT:    ret
 ;
@@ -528,7 +528,7 @@ define void @bittest_switch(i32 signext %0) {
 ; RV64XTHEADBS-NEXT:    andi a0, a0, 1
 ; RV64XTHEADBS-NEXT:    beqz a0, .LBB14_3
 ; RV64XTHEADBS-NEXT:  # %bb.2:
-; RV64XTHEADBS-NEXT:    tail bar@plt
+; RV64XTHEADBS-NEXT:    tail bar
 ; RV64XTHEADBS-NEXT:  .LBB14_3:
 ; RV64XTHEADBS-NEXT:    ret
   switch i32 %0, label %3 [
@@ -1243,7 +1243,7 @@ define void @bit_10_z_branch_i32(i32 signext %0) {
 ; CHECK-NEXT:    andi a0, a0, 1024
 ; CHECK-NEXT:    bnez a0, .LBB37_2
 ; CHECK-NEXT:  # %bb.1:
-; CHECK-NEXT:    tail bar@plt
+; CHECK-NEXT:    tail bar
 ; CHECK-NEXT:  .LBB37_2:
 ; CHECK-NEXT:    ret
   %2 = and i32 %0, 1024
@@ -1264,7 +1264,7 @@ define void @bit_10_nz_branch_i32(i32 signext %0) {
 ; CHECK-NEXT:    andi a0, a0, 1024
 ; CHECK-NEXT:    beqz a0, .LBB38_2
 ; CHECK-NEXT:  # %bb.1:
-; CHECK-NEXT:    tail bar@plt
+; CHECK-NEXT:    tail bar
 ; CHECK-NEXT:  .LBB38_2:
 ; CHECK-NEXT:    ret
   %2 = and i32 %0, 1024
@@ -1285,7 +1285,7 @@ define void @bit_11_z_branch_i32(i32 signext %0) {
 ; RV32-NEXT:    slli a0, a0, 20
 ; RV32-NEXT:    bltz a0, .LBB39_2
 ; RV32-NEXT:  # %bb.1:
-; RV32-NEXT:    tail bar@plt
+; RV32-NEXT:    tail bar
 ; RV32-NEXT:  .LBB39_2:
 ; RV32-NEXT:    ret
 ;
@@ -1294,7 +1294,7 @@ define void @bit_11_z_branch_i32(i32 signext %0) {
 ; RV64-NEXT:    slli a0, a0, 52
 ; RV64-NEXT:    bltz a0, .LBB39_2
 ; RV64-NEXT:  # %bb.1:
-; RV64-NEXT:    tail bar@plt
+; RV64-NEXT:    tail bar
 ; RV64-NEXT:  .LBB39_2:
 ; RV64-NEXT:    ret
   %2 = and i32 %0, 2048
@@ -1315,7 +1315,7 @@ define void @bit_11_nz_branch_i32(i32 signext %0) {
 ; RV32-NEXT:    slli a0, a0, 20
 ; RV32-NEXT:    bgez a0, .LBB40_2
 ; RV32-NEXT:  # %bb.1:
-; RV32-NEXT:    tail bar@plt
+; RV32-NEXT:    tail bar
 ; RV32-NEXT:  .LBB40_2:
 ; RV32-NEXT:    ret
 ;
@@ -1324,7 +1324,7 @@ define void @bit_11_nz_branch_i32(i32 signext %0) {
 ; RV64-NEXT:    slli a0, a0, 52
 ; RV64-NEXT:    bgez a0, .LBB40_2
 ; RV64-NEXT:  # %bb.1:
-; RV64-NEXT:    tail bar@plt
+; RV64-NEXT:    tail bar
 ; RV64-NEXT:  .LBB40_2:
 ; RV64-NEXT:    ret
   %2 = and i32 %0, 2048
@@ -1345,7 +1345,7 @@ define void @bit_24_z_branch_i32(i32 signext %0) {
 ; RV32-NEXT:    slli a0, a0, 7
 ; RV32-NEXT:    bltz a0, .LBB41_2
 ; RV32-NEXT:  # %bb.1:
-; RV32-NEXT:    tail bar@plt
+; RV32-NEXT:    tail bar
 ; RV32-NEXT:  .LBB41_2:
 ; RV32-NEXT:    ret
 ;
@@ -1354,7 +1354,7 @@ define void @bit_24_z_branch_i32(i32 signext %0) {
 ; RV64-NEXT:    slli a0, a0, 39
 ; RV64-NEXT:    bltz a0, .LBB41_2
 ; RV64-NEXT:  # %bb.1:
-; RV64-NEXT:    tail bar@plt
+; RV64-NEXT:    tail bar
 ; RV64-NEXT:  .LBB41_2:
 ; RV64-NEXT:    ret
   %2 = and i32 %0, 16777216
@@ -1375,7 +1375,7 @@ define void @bit_24_nz_branch_i32(i32 signext %0) {
 ; RV32-NEXT:    slli a0, a0, 7
 ; RV32-NEXT:    bgez a0, .LBB42_2
 ; RV32-NEXT:  # %bb.1:
-; RV32-NEXT:    tail bar@plt
+; RV32-NEXT:    tail bar
 ; RV32-NEXT:  .LBB42_2:
 ; RV32-NEXT:    ret
 ;
@@ -1384,7 +1384,7 @@ define void @bit_24_nz_branch_i32(i32 signext %0) {
 ; RV64-NEXT:    slli a0, a0, 39
 ; RV64-NEXT:    bgez a0, .LBB42_2
 ; RV64-NEXT:  # %bb.1:
-; RV64-NEXT:    tail bar@plt
+; RV64-NEXT:    tail bar
 ; RV64-NEXT:  .LBB42_2:
 ; RV64-NEXT:    ret
   %2 = and i32 %0, 16777216
@@ -1404,7 +1404,7 @@ define void @bit_31_z_branch_i32(i32 signext %0) {
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    bltz a0, .LBB43_2
 ; RV32-NEXT:  # %bb.1:
-; RV32-NEXT:    tail bar@plt
+; RV32-NEXT:    tail bar
 ; RV32-NEXT:  .LBB43_2:
 ; RV32-NEXT:    ret
 ;
@@ -1414,7 +1414,7 @@ define void @bit_31_z_branch_i32(i32 signext %0) {
 ; RV64-NEXT:    and a0, a0, a1
 ; RV64-NEXT:    bnez a0, .LBB43_2
 ; RV64-NEXT:  # %bb.1:
-; RV64-NEXT:    tail bar@plt
+; RV64-NEXT:    tail bar
 ; RV64-NEXT:  .LBB43_2:
 ; RV64-NEXT:    ret
   %2 = and i32 %0, 2147483648
@@ -1434,7 +1434,7 @@ define void @bit_31_nz_branch_i32(i32 signext %0) {
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    bgez a0, .LBB44_2
 ; RV32-NEXT:  # %bb.1:
-; RV32-NEXT:    tail bar@plt
+; RV32-NEXT:    tail bar
 ; RV32-NEXT:  .LBB44_2:
 ; RV32-NEXT:    ret
 ;
@@ -1444,7 +1444,7 @@ define void @bit_31_nz_branch_i32(i32 signext %0) {
 ; RV64-NEXT:    and a0, a0, a1
 ; RV64-NEXT:    beqz a0, .LBB44_2
 ; RV64-NEXT:  # %bb.1:
-; RV64-NEXT:    tail bar@plt
+; RV64-NEXT:    tail bar
 ; RV64-NEXT:  .LBB44_2:
 ; RV64-NEXT:    ret
   %2 = and i32 %0, 2147483648
@@ -1465,7 +1465,7 @@ define void @bit_10_z_branch_i64(i64 %0) {
 ; CHECK-NEXT:    andi a0, a0, 1024
 ; CHECK-NEXT:    bnez a0, .LBB45_2
 ; CHECK-NEXT:  # %bb.1:
-; CHECK-NEXT:    tail bar@plt
+; CHECK-NEXT:    tail bar
 ; CHECK-NEXT:  .LBB45_2:
 ; CHECK-NEXT:    ret
   %2 = and i64 %0, 1024
@@ -1486,7 +1486,7 @@ define void @bit_10_nz_branch_i64(i64 %0) {
 ; CHECK-NEXT:    andi a0, a0, 1024
 ; CHECK-NEXT:    beqz a0, .LBB46_2
 ; CHECK-NEXT:  # %bb.1:
-; CHECK-NEXT:    tail bar@plt
+; CHECK-NEXT:    tail bar
 ; CHECK-NEXT:  .LBB46_2:
 ; CHECK-NEXT:    ret
   %2 = and i64 %0, 1024
@@ -1507,7 +1507,7 @@ define void @bit_11_z_branch_i64(i64 %0) {
 ; RV32-NEXT:    slli a0, a0, 20
 ; RV32-NEXT:    bltz a0, .LBB47_2
 ; RV32-NEXT:  # %bb.1:
-; RV32-NEXT:    tail bar@plt
+; RV32-NEXT:    tail bar
 ; RV32-NEXT:  .LBB47_2:
 ; RV32-NEXT:    ret
 ;
@@ -1516,7 +1516,7 @@ define void @bit_11_z_branch_i64(i64 %0) {
 ; RV64-NEXT:    slli a0, a0, 52
 ; RV64-NEXT:    bltz a0, .LBB47_2
 ; RV64-NEXT:  # %bb.1:
-; RV64-NEXT:    tail bar@plt
+; RV64-NEXT:    tail bar
 ; RV64-NEXT:  .LBB47_2:
 ; RV64-NEXT:    ret
   %2 = and i64 %0, 2048
@@ -1537,7 +1537,7 @@ define void @bit_11_nz_branch_i64(i64 %0) {
 ; RV32-NEXT:    slli a0, a0, 20
 ; RV32-NEXT:    bgez a0, .LBB48_2
 ; RV32-NEXT:  # %bb.1:
-; RV32-NEXT:    tail bar@plt
+; RV32-NEXT:    tail bar
 ; RV32-NEXT:  .LBB48_2:
 ; RV32-NEXT:    ret
 ;
@@ -1546,7 +1546,7 @@ define void @bit_11_nz_branch_i64(i64 %0) {
 ; RV64-NEXT:    slli a0, a0, 52
 ; RV64-NEXT:    bgez a0, .LBB48_2
 ; RV64-NEXT:  # %bb.1:
-; RV64-NEXT:    tail bar@plt
+; RV64-NEXT:    tail bar
 ; RV64-NEXT:  .LBB48_2:
 ; RV64-NEXT:    ret
   %2 = and i64 %0, 2048
@@ -1567,7 +1567,7 @@ define void @bit_24_z_branch_i64(i64 %0) {
 ; RV32-NEXT:    slli a0, a0, 7
 ; RV32-NEXT:    bltz a0, .LBB49_2
 ; RV32-NEXT:  # %bb.1:
-; RV32-NEXT:    tail bar@plt
+; RV32-NEXT:    tail bar
 ; RV32-NEXT:  .LBB49_2:
 ; RV32-NEXT:    ret
 ;
@@ -1576,7 +1576,7 @@ define void @bit_24_z_branch_i64(i64 %0) {
 ; RV64-NEXT:    slli a0, a0, 39
 ; RV64-NEXT:    bltz a0, .LBB49_2
 ; RV64-NEXT:  # %bb.1:
-; RV64-NEXT:    tail bar@plt
+; RV64-NEXT:    tail bar
 ; RV64-NEXT:  .LBB49_2:
 ; RV64-NEXT:    ret
   %2 = and i64 %0, 16777216
@@ -1597,7 +1597,7 @@ define void @bit_24_nz_branch_i64(i64 %0) {
 ; RV32-NEXT:    slli a0, a0, 7
 ; RV32-NEXT:    bgez a0, .LBB50_2
 ; RV32-NEXT:  # %bb.1:
-; RV32-NEXT:    tail bar@plt
+; RV32-NEXT:    tail bar
 ; RV32-NEXT:  .LBB50_2:
 ; RV32-NEXT:    ret
 ;
@@ -1606,7 +1606,7 @@ define void @bit_24_nz_branch_i64(i64 %0) {
 ; RV64-NEXT:    slli a0, a0, 39
 ; RV64-NEXT:    bgez a0, .LBB50_2
 ; RV64-NEXT:  # %bb.1:
-; RV64-NEXT:    tail bar@plt
+; RV64-NEXT:    tail bar
 ; RV64-NEXT:  .LBB50_2:
 ; RV64-NEXT:    ret
   %2 = and i64 %0, 16777216
@@ -1626,7 +1626,7 @@ define void @bit_31_z_branch_i64(i64 %0) {
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    bltz a0, .LBB51_2
 ; RV32-NEXT:  # %bb.1:
-; RV32-NEXT:    tail bar@plt
+; RV32-NEXT:    tail bar
 ; RV32-NEXT:  .LBB51_2:
 ; RV32-NEXT:    ret
 ;
@@ -1635,7 +1635,7 @@ define void @bit_31_z_branch_i64(i64 %0) {
 ; RV64-NEXT:    slli a0, a0, 32
 ; RV64-NEXT:    bltz a0, .LBB51_2
 ; RV64-NEXT:  # %bb.1:
-; RV64-NEXT:    tail bar@plt
+; RV64-NEXT:    tail bar
 ; RV64-NEXT:  .LBB51_2:
 ; RV64-NEXT:    ret
   %2 = and i64 %0, 2147483648
@@ -1655,7 +1655,7 @@ define void @bit_31_nz_branch_i64(i64 %0) {
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    bgez a0, .LBB52_2
 ; RV32-NEXT:  # %bb.1:
-; RV32-NEXT:    tail bar@plt
+; RV32-NEXT:    tail bar
 ; RV32-NEXT:  .LBB52_2:
 ; RV32-NEXT:    ret
 ;
@@ -1664,7 +1664,7 @@ define void @bit_31_nz_branch_i64(i64 %0) {
 ; RV64-NEXT:    slli a0, a0, 32
 ; RV64-NEXT:    bgez a0, .LBB52_2
 ; RV64-NEXT:  # %bb.1:
-; RV64-NEXT:    tail bar@plt
+; RV64-NEXT:    tail bar
 ; RV64-NEXT:  .LBB52_2:
 ; RV64-NEXT:    ret
   %2 = and i64 %0, 2147483648
@@ -1685,7 +1685,7 @@ define void @bit_32_z_branch_i64(i64 %0) {
 ; RV32-NEXT:    andi a1, a1, 1
 ; RV32-NEXT:    bnez a1, .LBB53_2
 ; RV32-NEXT:  # %bb.1:
-; RV32-NEXT:    tail bar@plt
+; RV32-NEXT:    tail bar
 ; RV32-NEXT:  .LBB53_2:
 ; RV32-NEXT:    ret
 ;
@@ -1694,7 +1694,7 @@ define void @bit_32_z_branch_i64(i64 %0) {
 ; RV64-NEXT:    slli a0, a0, 31
 ; RV64-NEXT:    bltz a0, .LBB53_2
 ; RV64-NEXT:  # %bb.1:
-; RV64-NEXT:    tail bar@plt
+; RV64-NEXT:    tail bar
 ; RV64-NEXT:  .LBB53_2:
 ; RV64-NEXT:    ret
   %2 = and i64 %0, 4294967296
@@ -1715,7 +1715,7 @@ define void @bit_32_nz_branch_i64(i64 %0) {
 ; RV32-NEXT:    andi a1, a1, 1
 ; RV32-NEXT:    beqz a1, .LBB54_2
 ; RV32-NEXT:  # %bb.1:
-; RV32-NEXT:    tail bar@plt
+; RV32-NEXT:    tail bar
 ; RV32-NEXT:  .LBB54_2:
 ; RV32-NEXT:    ret
 ;
@@ -1724,7 +1724,7 @@ define void @bit_32_nz_branch_i64(i64 %0) {
 ; RV64-NEXT:    slli a0, a0, 31
 ; RV64-NEXT:    bgez a0, .LBB54_2
 ; RV64-NEXT:  # %bb.1:
-; RV64-NEXT:    tail bar@plt
+; RV64-NEXT:    tail bar
 ; RV64-NEXT:  .LBB54_2:
 ; RV64-NEXT:    ret
   %2 = and i64 %0, 4294967296
@@ -1745,7 +1745,7 @@ define void @bit_62_z_branch_i64(i64 %0) {
 ; RV32-NEXT:    slli a1, a1, 1
 ; RV32-NEXT:    bltz a1, .LBB55_2
 ; RV32-NEXT:  # %bb.1:
-; RV32-NEXT:    tail bar@plt
+; RV32-NEXT:    tail bar
 ; RV32-NEXT:  .LBB55_2:
 ; RV32-NEXT:    ret
 ;
@@ -1754,7 +1754,7 @@ define void @bit_62_z_branch_i64(i64 %0) {
 ; RV64-NEXT:    slli a0, a0, 1
 ; RV64-NEXT:    bltz a0, .LBB55_2
 ; RV64-NEXT:  # %bb.1:
-; RV64-NEXT:    tail bar@plt
+; RV64-NEXT:    tail bar
 ; RV64-NEXT:  .LBB55_2:
 ; RV64-NEXT:    ret
   %2 = and i64 %0, 4611686018427387904
@@ -1775,7 +1775,7 @@ define void @bit_62_nz_branch_i64(i64 %0) {
 ; RV32-NEXT:    slli a1, a1, 1
 ; RV32-NEXT:    bgez a1, .LBB56_2
 ; RV32-NEXT:  # %bb.1:
-; RV32-NEXT:    tail bar@plt
+; RV32-NEXT:    tail bar
 ; RV32-NEXT:  .LBB56_2:
 ; RV32-NEXT:    ret
 ;
@@ -1784,7 +1784,7 @@ define void @bit_62_nz_branch_i64(i64 %0) {
 ; RV64-NEXT:    slli a0, a0, 1
 ; RV64-NEXT:    bgez a0, .LBB56_2
 ; RV64-NEXT:  # %bb.1:
-; RV64-NEXT:    tail bar@plt
+; RV64-NEXT:    tail bar
 ; RV64-NEXT:  .LBB56_2:
 ; RV64-NEXT:    ret
   %2 = and i64 %0, 4611686018427387904
@@ -1804,7 +1804,7 @@ define void @bit_63_z_branch_i64(i64 %0) {
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    bltz a1, .LBB57_2
 ; RV32-NEXT:  # %bb.1:
-; RV32-NEXT:    tail bar@plt
+; RV32-NEXT:    tail bar
 ; RV32-NEXT:  .LBB57_2:
 ; RV32-NEXT:    ret
 ;
@@ -1812,7 +1812,7 @@ define void @bit_63_z_branch_i64(i64 %0) {
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    bltz a0, .LBB57_2
 ; RV64-NEXT:  # %bb.1:
-; RV64-NEXT:    tail bar@plt
+; RV64-NEXT:    tail bar
 ; RV64-NEXT:  .LBB57_2:
 ; RV64-NEXT:    ret
   %2 = and i64 %0, 9223372036854775808
@@ -1832,7 +1832,7 @@ define void @bit_63_nz_branch_i64(i64 %0) {
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    bgez a1, .LBB58_2
 ; RV32-NEXT:  # %bb.1:
-; RV32-NEXT:    tail bar@plt
+; RV32-NEXT:    tail bar
 ; RV32-NEXT:  .LBB58_2:
 ; RV32-NEXT:    ret
 ;
@@ -1840,7 +1840,7 @@ define void @bit_63_nz_branch_i64(i64 %0) {
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    bgez a0, .LBB58_2
 ; RV64-NEXT:  # %bb.1:
-; RV64-NEXT:    tail bar@plt
+; RV64-NEXT:    tail bar
 ; RV64-NEXT:  .LBB58_2:
 ; RV64-NEXT:    ret
   %2 = and i64 %0, 9223372036854775808
@@ -2675,7 +2675,7 @@ define void @bit_10_1_z_branch_i32(i32 signext %0) {
 ; CHECK-NEXT:  # %bb.1:
 ; CHECK-NEXT:    ret
 ; CHECK-NEXT:  .LBB89_2:
-; CHECK-NEXT:    tail bar@plt
+; CHECK-NEXT:    tail bar
   %2 = and i32 %0, 1023
   %3 = icmp eq i32 %2, 0
   br i1 %3, label %4, label %5
@@ -2694,7 +2694,7 @@ define void @bit_10_1_nz_branch_i32(i32 signext %0) {
 ; CHECK-NEXT:    andi a0, a0, 1023
 ; CHECK-NEXT:    beqz a0, .LBB90_2
 ; CHECK-NEXT:  # %bb.1:
-; CHECK-NEXT:    tail bar@plt
+; CHECK-NEXT:    tail bar
 ; CHECK-NEXT:  .LBB90_2:
 ; CHECK-NEXT:    ret
   %2 = and i32 %0, 1023
@@ -2717,7 +2717,7 @@ define void @bit_11_1_z_branch_i32(i32 signext %0) {
 ; CHECK-NEXT:  # %bb.1:
 ; CHECK-NEXT:    ret
 ; CHECK-NEXT:  .LBB91_2:
-; CHECK-NEXT:    tail bar@plt
+; CHECK-NEXT:    tail bar
   %2 = and i32 %0, 2047
   %3 = icmp eq i32 %2, 0
   br i1 %3, label %4, label %5
@@ -2736,7 +2736,7 @@ define void @bit_11_1_nz_branch_i32(i32 signext %0) {
 ; CHECK-NEXT:    andi a0, a0, 2047
 ; CHECK-NEXT:    beqz a0, .LBB92_2
 ; CHECK-NEXT:  # %bb.1:
-; CHECK-NEXT:    tail bar@plt
+; CHECK-NEXT:    tail bar
 ; CHECK-NEXT:  .LBB92_2:
 ; CHECK-NEXT:    ret
   %2 = and i32 %0, 2047
@@ -2759,7 +2759,7 @@ define void @bit_16_1_z_branch_i32(i32 signext %0) {
 ; RV32-NEXT:  # %bb.1:
 ; RV32-NEXT:    ret
 ; RV32-NEXT:  .LBB93_2:
-; RV32-NEXT:    tail bar@plt
+; RV32-NEXT:    tail bar
 ;
 ; RV64-LABEL: bit_16_1_z_branch_i32:
 ; RV64:       # %bb.0:
@@ -2768,7 +2768,7 @@ define void @bit_16_1_z_branch_i32(i32 signext %0) {
 ; RV64-NEXT:  # %bb.1:
 ; RV64-NEXT:    ret
 ; RV64-NEXT:  .LBB93_2:
-; RV64-NEXT:    tail bar@plt
+; RV64-NEXT:    tail bar
   %2 = and i32 %0, 65535
   %3 = icmp eq i32 %2, 0
   br i1 %3, label %4, label %5
@@ -2787,7 +2787,7 @@ define void @bit_16_1_nz_branch_i32(i32 signext %0) {
 ; RV32-NEXT:    slli a0, a0, 16
 ; RV32-NEXT:    beqz a0, .LBB94_2
 ; RV32-NEXT:  # %bb.1:
-; RV32-NEXT:    tail bar@plt
+; RV32-NEXT:    tail bar
 ; RV32-NEXT:  .LBB94_2:
 ; RV32-NEXT:    ret
 ;
@@ -2796,7 +2796,7 @@ define void @bit_16_1_nz_branch_i32(i32 signext %0) {
 ; RV64-NEXT:    slli a0, a0, 48
 ; RV64-NEXT:    beqz a0, .LBB94_2
 ; RV64-NEXT:  # %bb.1:
-; RV64-NEXT:    tail bar@plt
+; RV64-NEXT:    tail bar
 ; RV64-NEXT:  .LBB94_2:
 ; RV64-NEXT:    ret
   %2 = and i32 %0, 65535
@@ -2819,7 +2819,7 @@ define void @bit_24_1_z_branch_i32(i32 signext %0) {
 ; RV32-NEXT:  # %bb.1:
 ; RV32-NEXT:    ret
 ; RV32-NEXT:  .LBB95_2:
-; RV32-NEXT:    tail bar@plt
+; RV32-NEXT:    tail bar
 ;
 ; RV64-LABEL: bit_24_1_z_branch_i32:
 ; RV64:       # %bb.0:
@@ -2828,7 +2828,7 @@ define void @bit_24_1_z_branch_i32(i32 signext %0) {
 ; RV64-NEXT:  # %bb.1:
 ; RV64-NEXT:    ret
 ; RV64-NEXT:  .LBB95_2:
-; RV64-NEXT:    tail bar@plt
+; RV64-NEXT:    tail bar
   %2 = and i32 %0, 16777215
   %3 = icmp eq i32 %2, 0
   br i1 %3, label %4, label %5
@@ -2847,7 +2847,7 @@ define void @bit_24_1_nz_branch_i32(i32 signext %0) {
 ; RV32-NEXT:    slli a0, a0, 8
 ; RV32-NEXT:    beqz a0, .LBB96_2
 ; RV32-NEXT:  # %bb.1:
-; RV32-NEXT:    tail bar@plt
+; RV32-NEXT:    tail bar
 ; RV32-NEXT:  .LBB96_2:
 ; RV32-NEXT:    ret
 ;
@@ -2856,7 +2856,7 @@ define void @bit_24_1_nz_branch_i32(i32 signext %0) {
 ; RV64-NEXT:    slli a0, a0, 40
 ; RV64-NEXT:    beqz a0, .LBB96_2
 ; RV64-NEXT:  # %bb.1:
-; RV64-NEXT:    tail bar@plt
+; RV64-NEXT:    tail bar
 ; RV64-NEXT:  .LBB96_2:
 ; RV64-NEXT:    ret
   %2 = and i32 %0, 16777215
@@ -2879,7 +2879,7 @@ define void @bit_31_1_z_branch_i32(i32 signext %0) {
 ; RV32-NEXT:  # %bb.1:
 ; RV32-NEXT:    ret
 ; RV32-NEXT:  .LBB97_2:
-; RV32-NEXT:    tail bar@plt
+; RV32-NEXT:    tail bar
 ;
 ; RV64-LABEL: bit_31_1_z_branch_i32:
 ; RV64:       # %bb.0:
@@ -2888,7 +2888,7 @@ define void @bit_31_1_z_branch_i32(i32 signext %0) {
 ; RV64-NEXT:  # %bb.1:
 ; RV64-NEXT:    ret
 ; RV64-NEXT:  .LBB97_2:
-; RV64-NEXT:    tail bar@plt
+; RV64-NEXT:    tail bar
   %2 = and i32 %0, 2147483647
   %3 = icmp eq i32 %2, 0
   br i1 %3, label %4, label %5
@@ -2907,7 +2907,7 @@ define void @bit_31_1_nz_branch_i32(i32 signext %0) {
 ; RV32-NEXT:    slli a0, a0, 1
 ; RV32-NEXT:    beqz a0, .LBB98_2
 ; RV32-NEXT:  # %bb.1:
-; RV32-NEXT:    tail bar@plt
+; RV32-NEXT:    tail bar
 ; RV32-NEXT:  .LBB98_2:
 ; RV32-NEXT:    ret
 ;
@@ -2916,7 +2916,7 @@ define void @bit_31_1_nz_branch_i32(i32 signext %0) {
 ; RV64-NEXT:    slli a0, a0, 33
 ; RV64-NEXT:    beqz a0, .LBB98_2
 ; RV64-NEXT:  # %bb.1:
-; RV64-NEXT:    tail bar@plt
+; RV64-NEXT:    tail bar
 ; RV64-NEXT:  .LBB98_2:
 ; RV64-NEXT:    ret
   %2 = and i32 %0, 2147483647
@@ -2938,7 +2938,7 @@ define void @bit_32_1_z_branch_i32(i32 signext %0) {
 ; CHECK-NEXT:  # %bb.1:
 ; CHECK-NEXT:    ret
 ; CHECK-NEXT:  .LBB99_2:
-; CHECK-NEXT:    tail bar@plt
+; CHECK-NEXT:    tail bar
   %2 = and i32 %0, 4294967295
   %3 = icmp eq i32 %2, 0
   br i1 %3, label %4, label %5
@@ -2956,7 +2956,7 @@ define void @bit_32_1_nz_branch_i32(i32 signext %0) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    beqz a0, .LBB100_2
 ; CHECK-NEXT:  # %bb.1:
-; CHECK-NEXT:    tail bar@plt
+; CHECK-NEXT:    tail bar
 ; CHECK-NEXT:  .LBB100_2:
 ; CHECK-NEXT:    ret
   %2 = and i32 %0, 4294967295
@@ -2980,7 +2980,7 @@ define void @bit_10_1_z_branch_i64(i64 %0) {
 ; CHECK-NEXT:  # %bb.1:
 ; CHECK-NEXT:    ret
 ; CHECK-NEXT:  .LBB101_2:
-; CHECK-NEXT:    tail bar@plt
+; CHECK-NEXT:    tail bar
   %2 = and i64 %0, 1023
   %3 = icmp eq i64 %2, 0
   br i1 %3, label %4, label %5
@@ -2999,7 +2999,7 @@ define void @bit_10_1_nz_branch_i64(i64 %0) {
 ; CHECK-NEXT:    andi a0, a0, 1023
 ; CHECK-NEXT:    beqz a0, .LBB102_2
 ; CHECK-NEXT:  # %bb.1:
-; CHECK-NEXT:    tail bar@plt
+; CHECK-NEXT:    tail bar
 ; CHECK-NEXT:  .LBB102_2:
 ; CHECK-NEXT:    ret
   %2 = and i64 %0, 1023
@@ -3022,7 +3022,7 @@ define void @bit_11_1_z_branch_i64(i64 %0) {
 ; CHECK-NEXT:  # %bb.1:
 ; CHECK-NEXT:    ret
 ; CHECK-NEXT:  .LBB103_2:
-; CHECK-NEXT:    tail bar@plt
+; CHECK-NEXT:    tail bar
   %2 = and i64 %0, 2047
   %3 = icmp eq i64 %2, 0
   br i1 %3, label %4, label %5
@@ -3041,7 +3041,7 @@ define void @bit_11_1_nz_branch_i64(i64 %0) {
 ; CHECK-NEXT:    andi a0, a0, 2047
 ; CHECK-NEXT:    beqz a0, .LBB104_2
 ; CHECK-NEXT:  # %bb.1:
-; CHECK-NEXT:    tail bar@plt
+; CHECK-NEXT:    tail bar
 ; CHECK-NEXT:  .LBB104_2:
 ; CHECK-NEXT:    ret
   %2 = and i64 %0, 2047
@@ -3064,7 +3064,7 @@ define void @bit_16_1_z_branch_i64(i64 %0) {
 ; RV32-NEXT:  # %bb.1:
 ; RV32-NEXT:    ret
 ; RV32-NEXT:  .LBB105_2:
-; RV32-NEXT:    tail bar@plt
+; RV32-NEXT:    tail bar
 ;
 ; RV64-LABEL: bit_16_1_z_branch_i64:
 ; RV64:       # %bb.0:
@@ -3073,7 +3073,7 @@ define void @bit_16_1_z_branch_i64(i64 %0) {
 ; RV64-NEXT:  # %bb.1:
 ; RV64-NEXT:    ret
 ; RV64-NEXT:  .LBB105_2:
-; RV64-NEXT:    tail bar@plt
+; RV64-NEXT:    tail bar
   %2 = and i64 %0, 65535
   %3 = icmp eq i64 %2, 0
   br i1 %3, label %4, label %5
@@ -3092,7 +3092,7 @@ define void @bit_16_1_nz_branch_i64(i64 %0) {
 ; RV32-NEXT:    slli a0, a0, 16
 ; RV32-NEXT:    beqz a0, .LBB106_2
 ; RV32-NEXT:  # %bb.1:
-; RV32-NEXT:    tail bar@plt
+; RV32-NEXT:    tail bar
 ; RV32-NEXT:  .LBB106_2:
 ; RV32-NEXT:    ret
 ;
@@ -3101,7 +3101,7 @@ define void @bit_16_1_nz_branch_i64(i64 %0) {
 ; RV64-NEXT:    slli a0, a0, 48
 ; RV64-NEXT:    beqz a0, .LBB106_2
 ; RV64-NEXT:  # %bb.1:
-; RV64-NEXT:    tail bar@plt
+; RV64-NEXT:    tail bar
 ; RV64-NEXT:  .LBB106_2:
 ; RV64-NEXT:    ret
   %2 = and i64 %0, 65535
@@ -3124,7 +3124,7 @@ define void @bit_24_1_z_branch_i64(i64 %0) {
 ; RV32-NEXT:  # %bb.1:
 ; RV32-NEXT:    ret
 ; RV32-NEXT:  .LBB107_2:
-; RV32-NEXT:    tail bar@plt
+; RV32-NEXT:    tail bar
 ;
 ; RV64-LABEL: bit_24_1_z_branch_i64:
 ; RV64:       # %bb.0:
@@ -3133,7 +3133,7 @@ define void @bit_24_1_z_branch_i64(i64 %0) {
 ; RV64-NEXT:  # %bb.1:
 ; RV64-NEXT:    ret
 ; RV64-NEXT:  .LBB107_2:
-; RV64-NEXT:    tail bar@plt
+; RV64-NEXT:    tail bar
   %2 = and i64 %0, 16777215
   %3 = icmp eq i64 %2, 0
   br i1 %3, label %4, label %5
@@ -3152,7 +3152,7 @@ define void @bit_24_1_nz_branch_i64(i64 %0) {
 ; RV32-NEXT:    slli a0, a0, 8
 ; RV32-NEXT:    beqz a0, .LBB108_2
 ; RV32-NEXT:  # %bb.1:
-; RV32-NEXT:    tail bar@plt
+; RV32-NEXT:    tail bar
 ; RV32-NEXT:  .LBB108_2:
 ; RV32-NEXT:    ret
 ;
@@ -3161,7 +3161,7 @@ define void @bit_24_1_nz_branch_i64(i64 %0) {
 ; RV64-NEXT:    slli a0, a0, 40
 ; RV64-NEXT:    beqz a0, .LBB108_2
 ; RV64-NEXT:  # %bb.1:
-; RV64-NEXT:    tail bar@plt
+; RV64-NEXT:    tail bar
 ; RV64-NEXT:  .LBB108_2:
 ; RV64-NEXT:    ret
   %2 = and i64 %0, 16777215
@@ -3184,7 +3184,7 @@ define void @bit_31_1_z_branch_i64(i64 %0) {
 ; RV32-NEXT:  # %bb.1:
 ; RV32-NEXT:    ret
 ; RV32-NEXT:  .LBB109_2:
-; RV32-NEXT:    tail bar@plt
+; RV32-NEXT:    tail bar
 ;
 ; RV64-LABEL: bit_31_1_z_branch_i64:
 ; RV64:       # %bb.0:
@@ -3193,7 +3193,7 @@ define void @bit_31_1_z_branch_i64(i64 %0) {
 ; RV64-NEXT:  # %bb.1:
 ; RV64-NEXT:    ret
 ; RV64-NEXT:  .LBB109_2:
-; RV64-NEXT:    tail bar@plt
+; RV64-NEXT:    tail bar
   %2 = and i64 %0, 2147483647
   %3 = icmp eq i64 %2, 0
   br i1 %3, label %4, label %5
@@ -3212,7 +3212,7 @@ define void @bit_31_1_nz_branch_i64(i64 %0) {
 ; RV32-NEXT:    slli a0, a0, 1
 ; RV32-NEXT:    beqz a0, .LBB110_2
 ; RV32-NEXT:  # %bb.1:
-; RV32-NEXT:    tail bar@plt
+; RV32-NEXT:    tail bar
 ; RV32-NEXT:  .LBB110_2:
 ; RV32-NEXT:    ret
 ;
@@ -3221,7 +3221,7 @@ define void @bit_31_1_nz_branch_i64(i64 %0) {
 ; RV64-NEXT:    slli a0, a0, 33
 ; RV64-NEXT:    beqz a0, .LBB110_2
 ; RV64-NEXT:  # %bb.1:
-; RV64-NEXT:    tail bar@plt
+; RV64-NEXT:    tail bar
 ; RV64-NEXT:  .LBB110_2:
 ; RV64-NEXT:    ret
   %2 = and i64 %0, 2147483647
@@ -3243,7 +3243,7 @@ define void @bit_32_1_z_branch_i64(i64 %0) {
 ; RV32-NEXT:  # %bb.1:
 ; RV32-NEXT:    ret
 ; RV32-NEXT:  .LBB111_2:
-; RV32-NEXT:    tail bar@plt
+; RV32-NEXT:    tail bar
 ;
 ; RV64-LABEL: bit_32_1_z_branch_i64:
 ; RV64:       # %bb.0:
@@ -3252,7 +3252,7 @@ define void @bit_32_1_z_branch_i64(i64 %0) {
 ; RV64-NEXT:  # %bb.1:
 ; RV64-NEXT:    ret
 ; RV64-NEXT:  .LBB111_2:
-; RV64-NEXT:    tail bar@plt
+; RV64-NEXT:    tail bar
   %2 = and i64 %0, 4294967295
   %3 = icmp eq i64 %2, 0
   br i1 %3, label %4, label %5
@@ -3270,7 +3270,7 @@ define void @bit_32_1_nz_branch_i64(i64 %0) {
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    beqz a0, .LBB112_2
 ; RV32-NEXT:  # %bb.1:
-; RV32-NEXT:    tail bar@plt
+; RV32-NEXT:    tail bar
 ; RV32-NEXT:  .LBB112_2:
 ; RV32-NEXT:    ret
 ;
@@ -3279,7 +3279,7 @@ define void @bit_32_1_nz_branch_i64(i64 %0) {
 ; RV64-NEXT:    sext.w a0, a0
 ; RV64-NEXT:    beqz a0, .LBB112_2
 ; RV64-NEXT:  # %bb.1:
-; RV64-NEXT:    tail bar@plt
+; RV64-NEXT:    tail bar
 ; RV64-NEXT:  .LBB112_2:
 ; RV64-NEXT:    ret
   %2 = and i64 %0, 4294967295
@@ -3304,7 +3304,7 @@ define void @bit_62_1_z_branch_i64(i64 %0) {
 ; RV32-NEXT:  # %bb.1:
 ; RV32-NEXT:    ret
 ; RV32-NEXT:  .LBB113_2:
-; RV32-NEXT:    tail bar@plt
+; RV32-NEXT:    tail bar
 ;
 ; RV64-LABEL: bit_62_1_z_branch_i64:
 ; RV64:       # %bb.0:
@@ -3313,7 +3313,7 @@ define void @bit_62_1_z_branch_i64(i64 %0) {
 ; RV64-NEXT:  # %bb.1:
 ; RV64-NEXT:    ret
 ; RV64-NEXT:  .LBB113_2:
-; RV64-NEXT:    tail bar@plt
+; RV64-NEXT:    tail bar
   %2 = and i64 %0, 4611686018427387903
   %3 = icmp eq i64 %2, 0
   br i1 %3, label %4, label %5
@@ -3334,7 +3334,7 @@ define void @bit_62_1_nz_branch_i64(i64 %0) {
 ; RV32-NEXT:    or a0, a0, a1
 ; RV32-NEXT:    beqz a0, .LBB114_2
 ; RV32-NEXT:  # %bb.1:
-; RV32-NEXT:    tail bar@plt
+; RV32-NEXT:    tail bar
 ; RV32-NEXT:  .LBB114_2:
 ; RV32-NEXT:    ret
 ;
@@ -3343,7 +3343,7 @@ define void @bit_62_1_nz_branch_i64(i64 %0) {
 ; RV64-NEXT:    slli a0, a0, 2
 ; RV64-NEXT:    beqz a0, .LBB114_2
 ; RV64-NEXT:  # %bb.1:
-; RV64-NEXT:    tail bar@plt
+; RV64-NEXT:    tail bar
 ; RV64-NEXT:  .LBB114_2:
 ; RV64-NEXT:    ret
   %2 = and i64 %0, 4611686018427387903
@@ -3368,7 +3368,7 @@ define void @bit_63_1_z_branch_i64(i64 %0) {
 ; RV32I-NEXT:  # %bb.1:
 ; RV32I-NEXT:    ret
 ; RV32I-NEXT:  .LBB115_2:
-; RV32I-NEXT:    tail bar@plt
+; RV32I-NEXT:    tail bar
 ;
 ; RV64-LABEL: bit_63_1_z_branch_i64:
 ; RV64:       # %bb.0:
@@ -3377,7 +3377,7 @@ define void @bit_63_1_z_branch_i64(i64 %0) {
 ; RV64-NEXT:  # %bb.1:
 ; RV64-NEXT:    ret
 ; RV64-NEXT:  .LBB115_2:
-; RV64-NEXT:    tail bar@plt
+; RV64-NEXT:    tail bar
 ;
 ; RV32ZBS-LABEL: bit_63_1_z_branch_i64:
 ; RV32ZBS:       # %bb.0:
@@ -3387,7 +3387,7 @@ define void @bit_63_1_z_branch_i64(i64 %0) {
 ; RV32ZBS-NEXT:  # %bb.1:
 ; RV32ZBS-NEXT:    ret
 ; RV32ZBS-NEXT:  .LBB115_2:
-; RV32ZBS-NEXT:    tail bar@plt
+; RV32ZBS-NEXT:    tail bar
 ;
 ; RV32XTHEADBS-LABEL: bit_63_1_z_branch_i64:
 ; RV32XTHEADBS:       # %bb.0:
@@ -3398,7 +3398,7 @@ define void @bit_63_1_z_branch_i64(i64 %0) {
 ; RV32XTHEADBS-NEXT:  # %bb.1:
 ; RV32XTHEADBS-NEXT:    ret
 ; RV32XTHEADBS-NEXT:  .LBB115_2:
-; RV32XTHEADBS-NEXT:    tail bar@plt
+; RV32XTHEADBS-NEXT:    tail bar
   %2 = and i64 %0, 9223372036854775807
   %3 = icmp eq i64 %2, 0
   br i1 %3, label %4, label %5
@@ -3419,7 +3419,7 @@ define void @bit_63_1_nz_branch_i64(i64 %0) {
 ; RV32I-NEXT:    or a0, a0, a1
 ; RV32I-NEXT:    beqz a0, .LBB116_2
 ; RV32I-NEXT:  # %bb.1:
-; RV32I-NEXT:    tail bar@plt
+; RV32I-NEXT:    tail bar
 ; RV32I-NEXT:  .LBB116_2:
 ; RV32I-NEXT:    ret
 ;
@@ -3428,7 +3428,7 @@ define void @bit_63_1_nz_branch_i64(i64 %0) {
 ; RV64-NEXT:    slli a0, a0, 1
 ; RV64-NEXT:    beqz a0, .LBB116_2
 ; RV64-NEXT:  # %bb.1:
-; RV64-NEXT:    tail bar@plt
+; RV64-NEXT:    tail bar
 ; RV64-NEXT:  .LBB116_2:
 ; RV64-NEXT:    ret
 ;
@@ -3438,7 +3438,7 @@ define void @bit_63_1_nz_branch_i64(i64 %0) {
 ; RV32ZBS-NEXT:    or a0, a0, a1
 ; RV32ZBS-NEXT:    beqz a0, .LBB116_2
 ; RV32ZBS-NEXT:  # %bb.1:
-; RV32ZBS-NEXT:    tail bar@plt
+; RV32ZBS-NEXT:    tail bar
 ; RV32ZBS-NEXT:  .LBB116_2:
 ; RV32ZBS-NEXT:    ret
 ;
@@ -3449,7 +3449,7 @@ define void @bit_63_1_nz_branch_i64(i64 %0) {
 ; RV32XTHEADBS-NEXT:    or a0, a0, a1
 ; RV32XTHEADBS-NEXT:    beqz a0, .LBB116_2
 ; RV32XTHEADBS-NEXT:  # %bb.1:
-; RV32XTHEADBS-NEXT:    tail bar@plt
+; RV32XTHEADBS-NEXT:    tail bar
 ; RV32XTHEADBS-NEXT:  .LBB116_2:
 ; RV32XTHEADBS-NEXT:    ret
   %2 = and i64 %0, 9223372036854775807
@@ -3472,7 +3472,7 @@ define void @bit_64_1_z_branch_i64(i64 %0) {
 ; RV32-NEXT:  # %bb.1:
 ; RV32-NEXT:    ret
 ; RV32-NEXT:  .LBB117_2:
-; RV32-NEXT:    tail bar@plt
+; RV32-NEXT:    tail bar
 ;
 ; RV64-LABEL: bit_64_1_z_branch_i64:
 ; RV64:       # %bb.0:
@@ -3480,7 +3480,7 @@ define void @bit_64_1_z_branch_i64(i64 %0) {
 ; RV64-NEXT:  # %bb.1:
 ; RV64-NEXT:    ret
 ; RV64-NEXT:  .LBB117_2:
-; RV64-NEXT:    tail bar@plt
+; RV64-NEXT:    tail bar
   %2 = and i64 %0, 18446744073709551615
   %3 = icmp eq i64 %2, 0
   br i1 %3, label %4, label %5
@@ -3499,7 +3499,7 @@ define void @bit_64_1_nz_branch_i64(i64 %0) {
 ; RV32-NEXT:    or a0, a0, a1
 ; RV32-NEXT:    beqz a0, .LBB118_2
 ; RV32-NEXT:  # %bb.1:
-; RV32-NEXT:    tail bar@plt
+; RV32-NEXT:    tail bar
 ; RV32-NEXT:  .LBB118_2:
 ; RV32-NEXT:    ret
 ;
@@ -3507,7 +3507,7 @@ define void @bit_64_1_nz_branch_i64(i64 %0) {
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    beqz a0, .LBB118_2
 ; RV64-NEXT:  # %bb.1:
-; RV64-NEXT:    tail bar@plt
+; RV64-NEXT:    tail bar
 ; RV64-NEXT:  .LBB118_2:
 ; RV64-NEXT:    ret
   %2 = and i64 %0, 18446744073709551615
diff --git a/llvm/test/CodeGen/RISCV/byval.ll b/llvm/test/CodeGen/RISCV/byval.ll
index d300542e080752..9151f3b03e7c28 100644
--- a/llvm/test/CodeGen/RISCV/byval.ll
+++ b/llvm/test/CodeGen/RISCV/byval.ll
@@ -32,7 +32,7 @@ define void @caller() nounwind {
 ; RV32I-NEXT:    lw a0, 4(a0)
 ; RV32I-NEXT:    sw a0, 16(sp)
 ; RV32I-NEXT:    addi a0, sp, 12
-; RV32I-NEXT:    call callee@plt
+; RV32I-NEXT:    call callee
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 32
 ; RV32I-NEXT:    ret
diff --git a/llvm/test/CodeGen/RISCV/callee-saved-fpr32s.ll b/llvm/test/CodeGen/RISCV/callee-saved-fpr32s.ll
index 1aa7783fedd2ae..79e80dac8241fb 100644
--- a/llvm/test/CodeGen/RISCV/callee-saved-fpr32s.ll
+++ b/llvm/test/CodeGen/RISCV/callee-saved-fpr32s.ll
@@ -629,7 +629,7 @@ define void @caller() nounwind {
 ; ILP32-NEXT:    fsw fa5, 8(sp) # 4-byte Folded Spill
 ; ILP32-NEXT:    flw fa5, 124(s1)
 ; ILP32-NEXT:    fsw fa5, 4(sp) # 4-byte Folded Spill
-; ILP32-NEXT:    call callee@plt
+; ILP32-NEXT:    call callee
 ; ILP32-NEXT:    flw fa5, 4(sp) # 4-byte Folded Reload
 ; ILP32-NEXT:    fsw fa5, 124(s1)
 ; ILP32-NEXT:    flw fa5, 8(sp) # 4-byte Folded Reload
@@ -772,7 +772,7 @@ define void @caller() nounwind {
 ; LP64-NEXT:    fsw fa5, 12(sp) # 4-byte Folded Spill
 ; LP64-NEXT:    flw fa5, 124(s1)
 ; LP64-NEXT:    fsw fa5, 8(sp) # 4-byte Folded Spill
-; LP64-NEXT:    call callee@plt
+; LP64-NEXT:    call callee
 ; LP64-NEXT:    flw fa5, 8(sp) # 4-byte Folded Reload
 ; LP64-NEXT:    fsw fa5, 124(s1)
 ; LP64-NEXT:    flw fa5, 12(sp) # 4-byte Folded Reload
@@ -915,7 +915,7 @@ define void @caller() nounwind {
 ; ILP32F-NEXT:    flw fs5, 116(s1)
 ; ILP32F-NEXT:    flw fs6, 120(s1)
 ; ILP32F-NEXT:    flw fs7, 124(s1)
-; ILP32F-NEXT:    call callee@plt
+; ILP32F-NEXT:    call callee
 ; ILP32F-NEXT:    fsw fs7, 124(s1)
 ; ILP32F-NEXT:    fsw fs6, 120(s1)
 ; ILP32F-NEXT:    fsw fs5, 116(s1)
@@ -1058,7 +1058,7 @@ define void @caller() nounwind {
 ; LP64F-NEXT:    flw fs5, 116(s1)
 ; LP64F-NEXT:    flw fs6, 120(s1)
 ; LP64F-NEXT:    flw fs7, 124(s1)
-; LP64F-NEXT:    call callee@plt
+; LP64F-NEXT:    call callee
 ; LP64F-NEXT:    fsw fs7, 124(s1)
 ; LP64F-NEXT:    fsw fs6, 120(s1)
 ; LP64F-NEXT:    fsw fs5, 116(s1)
@@ -1201,7 +1201,7 @@ define void @caller() nounwind {
 ; ILP32D-NEXT:    flw fs5, 116(s1)
 ; ILP32D-NEXT:    flw fs6, 120(s1)
 ; ILP32D-NEXT:    flw fs7, 124(s1)
-; ILP32D-NEXT:    call callee@plt
+; ILP32D-NEXT:    call callee
 ; ILP32D-NEXT:    fsw fs7, 124(s1)
 ; ILP32D-NEXT:    fsw fs6, 120(s1)
 ; ILP32D-NEXT:    fsw fs5, 116(s1)
@@ -1344,7 +1344,7 @@ define void @caller() nounwind {
 ; LP64D-NEXT:    flw fs5, 116(s1)
 ; LP64D-NEXT:    flw fs6, 120(s1)
 ; LP64D-NEXT:    flw fs7, 124(s1)
-; LP64D-NEXT:    call callee@plt
+; LP64D-NEXT:    call callee
 ; LP64D-NEXT:    fsw fs7, 124(s1)
 ; LP64D-NEXT:    fsw fs6, 120(s1)
 ; LP64D-NEXT:    fsw fs5, 116(s1)
diff --git a/llvm/test/CodeGen/RISCV/callee-saved-fpr64s.ll b/llvm/test/CodeGen/RISCV/callee-saved-fpr64s.ll
index 40076316bca892..abfa26e8a4f251 100644
--- a/llvm/test/CodeGen/RISCV/callee-saved-fpr64s.ll
+++ b/llvm/test/CodeGen/RISCV/callee-saved-fpr64s.ll
@@ -433,7 +433,7 @@ define void @caller() nounwind {
 ; ILP32-NEXT:    fsd fa5, 8(sp) # 8-byte Folded Spill
 ; ILP32-NEXT:    fld fa5, 248(s1)
 ; ILP32-NEXT:    fsd fa5, 0(sp) # 8-byte Folded Spill
-; ILP32-NEXT:    call callee@plt
+; ILP32-NEXT:    call callee
 ; ILP32-NEXT:    fld fa5, 0(sp) # 8-byte Folded Reload
 ; ILP32-NEXT:    fsd fa5, 248(s1)
 ; ILP32-NEXT:    fld fa5, 8(sp) # 8-byte Folded Reload
@@ -576,7 +576,7 @@ define void @caller() nounwind {
 ; LP64-NEXT:    fsd fa5, 16(sp) # 8-byte Folded Spill
 ; LP64-NEXT:    fld fa5, 248(s1)
 ; LP64-NEXT:    fsd fa5, 8(sp) # 8-byte Folded Spill
-; LP64-NEXT:    call callee@plt
+; LP64-NEXT:    call callee
 ; LP64-NEXT:    fld fa5, 8(sp) # 8-byte Folded Reload
 ; LP64-NEXT:    fsd fa5, 248(s1)
 ; LP64-NEXT:    fld fa5, 16(sp) # 8-byte Folded Reload
@@ -719,7 +719,7 @@ define void @caller() nounwind {
 ; ILP32D-NEXT:    fld fs5, 232(s1)
 ; ILP32D-NEXT:    fld fs6, 240(s1)
 ; ILP32D-NEXT:    fld fs7, 248(s1)
-; ILP32D-NEXT:    call callee@plt
+; ILP32D-NEXT:    call callee
 ; ILP32D-NEXT:    fsd fs7, 248(s1)
 ; ILP32D-NEXT:    fsd fs6, 240(s1)
 ; ILP32D-NEXT:    fsd fs5, 232(s1)
@@ -862,7 +862,7 @@ define void @caller() nounwind {
 ; LP64D-NEXT:    fld fs5, 232(s1)
 ; LP64D-NEXT:    fld fs6, 240(s1)
 ; LP64D-NEXT:    fld fs7, 248(s1)
-; LP64D-NEXT:    call callee@plt
+; LP64D-NEXT:    call callee
 ; LP64D-NEXT:    fsd fs7, 248(s1)
 ; LP64D-NEXT:    fsd fs6, 240(s1)
 ; LP64D-NEXT:    fsd fs5, 232(s1)
diff --git a/llvm/test/CodeGen/RISCV/callee-saved-gprs.ll b/llvm/test/CodeGen/RISCV/callee-saved-gprs.ll
index 09ecbbc7e8feb8..6303a124567798 100644
--- a/llvm/test/CodeGen/RISCV/callee-saved-gprs.ll
+++ b/llvm/test/CodeGen/RISCV/callee-saved-gprs.ll
@@ -952,7 +952,7 @@ define void @caller() nounwind {
 ; RV32I-NEXT:    lw s11, 116(s5)
 ; RV32I-NEXT:    lw s1, 120(s5)
 ; RV32I-NEXT:    lw s2, 124(s5)
-; RV32I-NEXT:    call callee@plt
+; RV32I-NEXT:    call callee
 ; RV32I-NEXT:    sw s2, 124(s5)
 ; RV32I-NEXT:    sw s1, 120(s5)
 ; RV32I-NEXT:    sw s11, 116(s5)
@@ -1097,7 +1097,7 @@ define void @caller() nounwind {
 ; RV32I-WITH-FP-NEXT:    lw s4, 116(s6)
 ; RV32I-WITH-FP-NEXT:    lw s5, 120(s6)
 ; RV32I-WITH-FP-NEXT:    lw s7, 124(s6)
-; RV32I-WITH-FP-NEXT:    call callee@plt
+; RV32I-WITH-FP-NEXT:    call callee
 ; RV32I-WITH-FP-NEXT:    sw s7, 124(s6)
 ; RV32I-WITH-FP-NEXT:    sw s5, 120(s6)
 ; RV32I-WITH-FP-NEXT:    sw s4, 116(s6)
@@ -1229,7 +1229,7 @@ define void @caller() nounwind {
 ; RV32IZCMP-NEXT:    lw s11, 116(s1)
 ; RV32IZCMP-NEXT:    lw s2, 120(s1)
 ; RV32IZCMP-NEXT:    lw s3, 124(s1)
-; RV32IZCMP-NEXT:    call callee@plt
+; RV32IZCMP-NEXT:    call callee
 ; RV32IZCMP-NEXT:    sw s3, 124(s1)
 ; RV32IZCMP-NEXT:    sw s2, 120(s1)
 ; RV32IZCMP-NEXT:    sw s11, 116(s1)
@@ -1361,7 +1361,7 @@ define void @caller() nounwind {
 ; RV32IZCMP-WITH-FP-NEXT:    lw s4, 116(s1)
 ; RV32IZCMP-WITH-FP-NEXT:    lw s5, 120(s1)
 ; RV32IZCMP-WITH-FP-NEXT:    lw s7, 124(s1)
-; RV32IZCMP-WITH-FP-NEXT:    call callee@plt
+; RV32IZCMP-WITH-FP-NEXT:    call callee
 ; RV32IZCMP-WITH-FP-NEXT:    sw s7, 124(s1)
 ; RV32IZCMP-WITH-FP-NEXT:    sw s5, 120(s1)
 ; RV32IZCMP-WITH-FP-NEXT:    sw s4, 116(s1)
@@ -1505,7 +1505,7 @@ define void @caller() nounwind {
 ; RV64I-NEXT:    lw s11, 116(s5)
 ; RV64I-NEXT:    lw s1, 120(s5)
 ; RV64I-NEXT:    lw s2, 124(s5)
-; RV64I-NEXT:    call callee@plt
+; RV64I-NEXT:    call callee
 ; RV64I-NEXT:    sw s2, 124(s5)
 ; RV64I-NEXT:    sw s1, 120(s5)
 ; RV64I-NEXT:    sw s11, 116(s5)
@@ -1650,7 +1650,7 @@ define void @caller() nounwind {
 ; RV64I-WITH-FP-NEXT:    lw s4, 116(s6)
 ; RV64I-WITH-FP-NEXT:    lw s5, 120(s6)
 ; RV64I-WITH-FP-NEXT:    lw s7, 124(s6)
-; RV64I-WITH-FP-NEXT:    call callee@plt
+; RV64I-WITH-FP-NEXT:    call callee
 ; RV64I-WITH-FP-NEXT:    sw s7, 124(s6)
 ; RV64I-WITH-FP-NEXT:    sw s5, 120(s6)
 ; RV64I-WITH-FP-NEXT:    sw s4, 116(s6)
@@ -1782,7 +1782,7 @@ define void @caller() nounwind {
 ; RV64IZCMP-NEXT:    lw s11, 116(s1)
 ; RV64IZCMP-NEXT:    lw s2, 120(s1)
 ; RV64IZCMP-NEXT:    lw s3, 124(s1)
-; RV64IZCMP-NEXT:    call callee@plt
+; RV64IZCMP-NEXT:    call callee
 ; RV64IZCMP-NEXT:    sw s3, 124(s1)
 ; RV64IZCMP-NEXT:    sw s2, 120(s1)
 ; RV64IZCMP-NEXT:    sw s11, 116(s1)
@@ -1914,7 +1914,7 @@ define void @caller() nounwind {
 ; RV64IZCMP-WITH-FP-NEXT:    lw s4, 116(s1)
 ; RV64IZCMP-WITH-FP-NEXT:    lw s5, 120(s1)
 ; RV64IZCMP-WITH-FP-NEXT:    lw s7, 124(s1)
-; RV64IZCMP-WITH-FP-NEXT:    call callee@plt
+; RV64IZCMP-WITH-FP-NEXT:    call callee
 ; RV64IZCMP-WITH-FP-NEXT:    sw s7, 124(s1)
 ; RV64IZCMP-WITH-FP-NEXT:    sw s5, 120(s1)
 ; RV64IZCMP-WITH-FP-NEXT:    sw s4, 116(s1)
@@ -2279,7 +2279,7 @@ define void @varargs(...) {
 ; RV32I-NEXT:    sw a2, 24(sp)
 ; RV32I-NEXT:    sw a1, 20(sp)
 ; RV32I-NEXT:    sw a0, 16(sp)
-; RV32I-NEXT:    call callee@plt
+; RV32I-NEXT:    call callee
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 48
 ; RV32I-NEXT:    ret
@@ -2302,7 +2302,7 @@ define void @varargs(...) {
 ; RV32I-WITH-FP-NEXT:    sw a2, 8(s0)
 ; RV32I-WITH-FP-NEXT:    sw a1, 4(s0)
 ; RV32I-WITH-FP-NEXT:    sw a0, 0(s0)
-; RV32I-WITH-FP-NEXT:    call callee@plt
+; RV32I-WITH-FP-NEXT:    call callee
 ; RV32I-WITH-FP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-WITH-FP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; RV32I-WITH-FP-NEXT:    addi sp, sp, 48
@@ -2322,7 +2322,7 @@ define void @varargs(...) {
 ; RV32IZCMP-NEXT:    sw a2, 24(sp)
 ; RV32IZCMP-NEXT:    sw a1, 20(sp)
 ; RV32IZCMP-NEXT:    sw a0, 16(sp)
-; RV32IZCMP-NEXT:    call callee@plt
+; RV32IZCMP-NEXT:    call callee
 ; RV32IZCMP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZCMP-NEXT:    addi sp, sp, 48
 ; RV32IZCMP-NEXT:    ret
@@ -2345,7 +2345,7 @@ define void @varargs(...) {
 ; RV32IZCMP-WITH-FP-NEXT:    sw a2, 8(s0)
 ; RV32IZCMP-WITH-FP-NEXT:    sw a1, 4(s0)
 ; RV32IZCMP-WITH-FP-NEXT:    sw a0, 0(s0)
-; RV32IZCMP-WITH-FP-NEXT:    call callee@plt
+; RV32IZCMP-WITH-FP-NEXT:    call callee
 ; RV32IZCMP-WITH-FP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZCMP-WITH-FP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; RV32IZCMP-WITH-FP-NEXT:    addi sp, sp, 48
@@ -2365,7 +2365,7 @@ define void @varargs(...) {
 ; RV64I-NEXT:    sd a2, 32(sp)
 ; RV64I-NEXT:    sd a1, 24(sp)
 ; RV64I-NEXT:    sd a0, 16(sp)
-; RV64I-NEXT:    call callee@plt
+; RV64I-NEXT:    call callee
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 80
 ; RV64I-NEXT:    ret
@@ -2388,7 +2388,7 @@ define void @varargs(...) {
 ; RV64I-WITH-FP-NEXT:    sd a2, 16(s0)
 ; RV64I-WITH-FP-NEXT:    sd a1, 8(s0)
 ; RV64I-WITH-FP-NEXT:    sd a0, 0(s0)
-; RV64I-WITH-FP-NEXT:    call callee@plt
+; RV64I-WITH-FP-NEXT:    call callee
 ; RV64I-WITH-FP-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-WITH-FP-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
 ; RV64I-WITH-FP-NEXT:    addi sp, sp, 80
@@ -2408,7 +2408,7 @@ define void @varargs(...) {
 ; RV64IZCMP-NEXT:    sd a2, 32(sp)
 ; RV64IZCMP-NEXT:    sd a1, 24(sp)
 ; RV64IZCMP-NEXT:    sd a0, 16(sp)
-; RV64IZCMP-NEXT:    call callee@plt
+; RV64IZCMP-NEXT:    call callee
 ; RV64IZCMP-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZCMP-NEXT:    addi sp, sp, 80
 ; RV64IZCMP-NEXT:    ret
@@ -2431,7 +2431,7 @@ define void @varargs(...) {
 ; RV64IZCMP-WITH-FP-NEXT:    sd a2, 16(s0)
 ; RV64IZCMP-WITH-FP-NEXT:    sd a1, 8(s0)
 ; RV64IZCMP-WITH-FP-NEXT:    sd a0, 0(s0)
-; RV64IZCMP-WITH-FP-NEXT:    call callee@plt
+; RV64IZCMP-WITH-FP-NEXT:    call callee
 ; RV64IZCMP-WITH-FP-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZCMP-WITH-FP-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
 ; RV64IZCMP-WITH-FP-NEXT:    addi sp, sp, 80
diff --git a/llvm/test/CodeGen/RISCV/calling-conv-half.ll b/llvm/test/CodeGen/RISCV/calling-conv-half.ll
index ad4578bda344d2..c88b2bf596ca23 100644
--- a/llvm/test/CodeGen/RISCV/calling-conv-half.ll
+++ b/llvm/test/CodeGen/RISCV/calling-conv-half.ll
@@ -21,8 +21,8 @@ define i32 @callee_half_in_regs(i32 %a, half %b) nounwind {
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    slli a0, a1, 16
 ; RV32I-NEXT:    srli a0, a0, 16
-; RV32I-NEXT:    call __extendhfsf2@plt
-; RV32I-NEXT:    call __fixsfsi@plt
+; RV32I-NEXT:    call __extendhfsf2
+; RV32I-NEXT:    call __fixsfsi
 ; RV32I-NEXT:    add a0, s0, a0
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
@@ -37,8 +37,8 @@ define i32 @callee_half_in_regs(i32 %a, half %b) nounwind {
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    slli a0, a1, 48
 ; RV64I-NEXT:    srli a0, a0, 48
-; RV64I-NEXT:    call __extendhfsf2@plt
-; RV64I-NEXT:    call __fixsfdi@plt
+; RV64I-NEXT:    call __extendhfsf2
+; RV64I-NEXT:    call __fixsfdi
 ; RV64I-NEXT:    addw a0, s0, a0
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
@@ -52,7 +52,7 @@ define i32 @callee_half_in_regs(i32 %a, half %b) nounwind {
 ; RV32IF-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    mv s0, a0
 ; RV32IF-NEXT:    mv a0, a1
-; RV32IF-NEXT:    call __extendhfsf2@plt
+; RV32IF-NEXT:    call __extendhfsf2
 ; RV32IF-NEXT:    fmv.w.x fa5, a0
 ; RV32IF-NEXT:    fcvt.w.s a0, fa5, rtz
 ; RV32IF-NEXT:    add a0, s0, a0
@@ -68,7 +68,7 @@ define i32 @callee_half_in_regs(i32 %a, half %b) nounwind {
 ; RV64IF-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
 ; RV64IF-NEXT:    mv s0, a0
 ; RV64IF-NEXT:    mv a0, a1
-; RV64IF-NEXT:    call __extendhfsf2@plt
+; RV64IF-NEXT:    call __extendhfsf2
 ; RV64IF-NEXT:    fmv.w.x fa5, a0
 ; RV64IF-NEXT:    fcvt.l.s a0, fa5, rtz
 ; RV64IF-NEXT:    addw a0, s0, a0
@@ -83,7 +83,7 @@ define i32 @callee_half_in_regs(i32 %a, half %b) nounwind {
 ; RV32-ILP32F-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-ILP32F-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32-ILP32F-NEXT:    mv s0, a0
-; RV32-ILP32F-NEXT:    call __extendhfsf2@plt
+; RV32-ILP32F-NEXT:    call __extendhfsf2
 ; RV32-ILP32F-NEXT:    fcvt.w.s a0, fa0, rtz
 ; RV32-ILP32F-NEXT:    add a0, s0, a0
 ; RV32-ILP32F-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -97,7 +97,7 @@ define i32 @callee_half_in_regs(i32 %a, half %b) nounwind {
 ; RV64-LP64F-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-LP64F-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
 ; RV64-LP64F-NEXT:    mv s0, a0
-; RV64-LP64F-NEXT:    call __extendhfsf2@plt
+; RV64-LP64F-NEXT:    call __extendhfsf2
 ; RV64-LP64F-NEXT:    fcvt.l.s a0, fa0, rtz
 ; RV64-LP64F-NEXT:    addw a0, s0, a0
 ; RV64-LP64F-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -130,7 +130,7 @@ define i32 @caller_half_in_regs() nounwind {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a0, 1
 ; RV32I-NEXT:    lui a1, 4
-; RV32I-NEXT:    call callee_half_in_regs@plt
+; RV32I-NEXT:    call callee_half_in_regs
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -141,7 +141,7 @@ define i32 @caller_half_in_regs() nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a0, 1
 ; RV64I-NEXT:    lui a1, 4
-; RV64I-NEXT:    call callee_half_in_regs@plt
+; RV64I-NEXT:    call callee_half_in_regs
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -152,7 +152,7 @@ define i32 @caller_half_in_regs() nounwind {
 ; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    li a0, 1
 ; RV32IF-NEXT:    lui a1, 1048564
-; RV32IF-NEXT:    call callee_half_in_regs@plt
+; RV32IF-NEXT:    call callee_half_in_regs
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
@@ -165,7 +165,7 @@ define i32 @caller_half_in_regs() nounwind {
 ; RV64IF-NEXT:    fmv.w.x fa5, a0
 ; RV64IF-NEXT:    fmv.x.w a1, fa5
 ; RV64IF-NEXT:    li a0, 1
-; RV64IF-NEXT:    call callee_half_in_regs@plt
+; RV64IF-NEXT:    call callee_half_in_regs
 ; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
 ; RV64IF-NEXT:    ret
@@ -177,7 +177,7 @@ define i32 @caller_half_in_regs() nounwind {
 ; RV32-ILP32F-NEXT:    lui a0, 1048564
 ; RV32-ILP32F-NEXT:    fmv.w.x fa0, a0
 ; RV32-ILP32F-NEXT:    li a0, 1
-; RV32-ILP32F-NEXT:    call callee_half_in_regs@plt
+; RV32-ILP32F-NEXT:    call callee_half_in_regs
 ; RV32-ILP32F-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-ILP32F-NEXT:    addi sp, sp, 16
 ; RV32-ILP32F-NEXT:    ret
@@ -189,7 +189,7 @@ define i32 @caller_half_in_regs() nounwind {
 ; RV64-LP64F-NEXT:    lui a0, 1048564
 ; RV64-LP64F-NEXT:    fmv.w.x fa0, a0
 ; RV64-LP64F-NEXT:    li a0, 1
-; RV64-LP64F-NEXT:    call callee_half_in_regs@plt
+; RV64-LP64F-NEXT:    call callee_half_in_regs
 ; RV64-LP64F-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-LP64F-NEXT:    addi sp, sp, 16
 ; RV64-LP64F-NEXT:    ret
@@ -201,7 +201,7 @@ define i32 @caller_half_in_regs() nounwind {
 ; RV32-ILP32ZFHMIN-NEXT:    lui a0, 4
 ; RV32-ILP32ZFHMIN-NEXT:    fmv.h.x fa0, a0
 ; RV32-ILP32ZFHMIN-NEXT:    li a0, 1
-; RV32-ILP32ZFHMIN-NEXT:    call callee_half_in_regs@plt
+; RV32-ILP32ZFHMIN-NEXT:    call callee_half_in_regs
 ; RV32-ILP32ZFHMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-ILP32ZFHMIN-NEXT:    addi sp, sp, 16
 ; RV32-ILP32ZFHMIN-NEXT:    ret
@@ -213,7 +213,7 @@ define i32 @caller_half_in_regs() nounwind {
 ; RV64-LP64ZFHMIN-NEXT:    lui a0, 4
 ; RV64-LP64ZFHMIN-NEXT:    fmv.h.x fa0, a0
 ; RV64-LP64ZFHMIN-NEXT:    li a0, 1
-; RV64-LP64ZFHMIN-NEXT:    call callee_half_in_regs@plt
+; RV64-LP64ZFHMIN-NEXT:    call callee_half_in_regs
 ; RV64-LP64ZFHMIN-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-LP64ZFHMIN-NEXT:    addi sp, sp, 16
 ; RV64-LP64ZFHMIN-NEXT:    ret
@@ -229,8 +229,8 @@ define i32 @callee_half_on_stack(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f,
 ; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    lhu a0, 16(sp)
 ; RV32I-NEXT:    mv s0, a7
-; RV32I-NEXT:    call __extendhfsf2@plt
-; RV32I-NEXT:    call __fixsfsi@plt
+; RV32I-NEXT:    call __extendhfsf2
+; RV32I-NEXT:    call __fixsfsi
 ; RV32I-NEXT:    add a0, s0, a0
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
@@ -244,8 +244,8 @@ define i32 @callee_half_on_stack(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f,
 ; RV64I-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    lhu a0, 16(sp)
 ; RV64I-NEXT:    mv s0, a7
-; RV64I-NEXT:    call __extendhfsf2@plt
-; RV64I-NEXT:    call __fixsfdi@plt
+; RV64I-NEXT:    call __extendhfsf2
+; RV64I-NEXT:    call __fixsfdi
 ; RV64I-NEXT:    addw a0, s0, a0
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
@@ -259,7 +259,7 @@ define i32 @callee_half_on_stack(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f,
 ; RV32IF-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    lhu a0, 16(sp)
 ; RV32IF-NEXT:    mv s0, a7
-; RV32IF-NEXT:    call __extendhfsf2@plt
+; RV32IF-NEXT:    call __extendhfsf2
 ; RV32IF-NEXT:    fmv.w.x fa5, a0
 ; RV32IF-NEXT:    fcvt.w.s a0, fa5, rtz
 ; RV32IF-NEXT:    add a0, s0, a0
@@ -275,7 +275,7 @@ define i32 @callee_half_on_stack(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f,
 ; RV64IF-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
 ; RV64IF-NEXT:    lhu a0, 16(sp)
 ; RV64IF-NEXT:    mv s0, a7
-; RV64IF-NEXT:    call __extendhfsf2@plt
+; RV64IF-NEXT:    call __extendhfsf2
 ; RV64IF-NEXT:    fmv.w.x fa5, a0
 ; RV64IF-NEXT:    fcvt.l.s a0, fa5, rtz
 ; RV64IF-NEXT:    addw a0, s0, a0
@@ -290,7 +290,7 @@ define i32 @callee_half_on_stack(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f,
 ; RV32-ILP32F-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-ILP32F-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32-ILP32F-NEXT:    mv s0, a7
-; RV32-ILP32F-NEXT:    call __extendhfsf2@plt
+; RV32-ILP32F-NEXT:    call __extendhfsf2
 ; RV32-ILP32F-NEXT:    fcvt.w.s a0, fa0, rtz
 ; RV32-ILP32F-NEXT:    add a0, s0, a0
 ; RV32-ILP32F-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -304,7 +304,7 @@ define i32 @callee_half_on_stack(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f,
 ; RV64-LP64F-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-LP64F-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
 ; RV64-LP64F-NEXT:    mv s0, a7
-; RV64-LP64F-NEXT:    call __extendhfsf2@plt
+; RV64-LP64F-NEXT:    call __extendhfsf2
 ; RV64-LP64F-NEXT:    fcvt.l.s a0, fa0, rtz
 ; RV64-LP64F-NEXT:    addw a0, s0, a0
 ; RV64-LP64F-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -346,7 +346,7 @@ define i32 @caller_half_on_stack() nounwind {
 ; RV32I-NEXT:    li a6, 7
 ; RV32I-NEXT:    li a7, 8
 ; RV32I-NEXT:    sw t0, 0(sp)
-; RV32I-NEXT:    call callee_half_on_stack@plt
+; RV32I-NEXT:    call callee_half_on_stack
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -366,7 +366,7 @@ define i32 @caller_half_on_stack() nounwind {
 ; RV64I-NEXT:    li a6, 7
 ; RV64I-NEXT:    li a7, 8
 ; RV64I-NEXT:    sd t0, 0(sp)
-; RV64I-NEXT:    call callee_half_on_stack@plt
+; RV64I-NEXT:    call callee_half_on_stack
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -386,7 +386,7 @@ define i32 @caller_half_on_stack() nounwind {
 ; RV32IF-NEXT:    li a6, 7
 ; RV32IF-NEXT:    li a7, 8
 ; RV32IF-NEXT:    sw t0, 0(sp)
-; RV32IF-NEXT:    call callee_half_on_stack@plt
+; RV32IF-NEXT:    call callee_half_on_stack
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
@@ -406,7 +406,7 @@ define i32 @caller_half_on_stack() nounwind {
 ; RV64IF-NEXT:    li a6, 7
 ; RV64IF-NEXT:    li a7, 8
 ; RV64IF-NEXT:    sw t0, 0(sp)
-; RV64IF-NEXT:    call callee_half_on_stack@plt
+; RV64IF-NEXT:    call callee_half_on_stack
 ; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
 ; RV64IF-NEXT:    ret
@@ -425,7 +425,7 @@ define i32 @caller_half_on_stack() nounwind {
 ; RV32-ILP32F-NEXT:    li a5, 6
 ; RV32-ILP32F-NEXT:    li a6, 7
 ; RV32-ILP32F-NEXT:    li a7, 8
-; RV32-ILP32F-NEXT:    call callee_half_on_stack@plt
+; RV32-ILP32F-NEXT:    call callee_half_on_stack
 ; RV32-ILP32F-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-ILP32F-NEXT:    addi sp, sp, 16
 ; RV32-ILP32F-NEXT:    ret
@@ -444,7 +444,7 @@ define i32 @caller_half_on_stack() nounwind {
 ; RV64-LP64F-NEXT:    li a5, 6
 ; RV64-LP64F-NEXT:    li a6, 7
 ; RV64-LP64F-NEXT:    li a7, 8
-; RV64-LP64F-NEXT:    call callee_half_on_stack@plt
+; RV64-LP64F-NEXT:    call callee_half_on_stack
 ; RV64-LP64F-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-LP64F-NEXT:    addi sp, sp, 16
 ; RV64-LP64F-NEXT:    ret
@@ -463,7 +463,7 @@ define i32 @caller_half_on_stack() nounwind {
 ; RV32-ILP32ZFHMIN-NEXT:    li a5, 6
 ; RV32-ILP32ZFHMIN-NEXT:    li a6, 7
 ; RV32-ILP32ZFHMIN-NEXT:    li a7, 8
-; RV32-ILP32ZFHMIN-NEXT:    call callee_half_on_stack@plt
+; RV32-ILP32ZFHMIN-NEXT:    call callee_half_on_stack
 ; RV32-ILP32ZFHMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-ILP32ZFHMIN-NEXT:    addi sp, sp, 16
 ; RV32-ILP32ZFHMIN-NEXT:    ret
@@ -482,7 +482,7 @@ define i32 @caller_half_on_stack() nounwind {
 ; RV64-LP64ZFHMIN-NEXT:    li a5, 6
 ; RV64-LP64ZFHMIN-NEXT:    li a6, 7
 ; RV64-LP64ZFHMIN-NEXT:    li a7, 8
-; RV64-LP64ZFHMIN-NEXT:    call callee_half_on_stack@plt
+; RV64-LP64ZFHMIN-NEXT:    call callee_half_on_stack
 ; RV64-LP64ZFHMIN-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-LP64ZFHMIN-NEXT:    addi sp, sp, 16
 ; RV64-LP64ZFHMIN-NEXT:    ret
@@ -547,11 +547,11 @@ define i32 @caller_half_ret() nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call callee_half_ret@plt
+; RV32I-NEXT:    call callee_half_ret
 ; RV32I-NEXT:    slli a0, a0, 16
 ; RV32I-NEXT:    srli a0, a0, 16
-; RV32I-NEXT:    call __extendhfsf2@plt
-; RV32I-NEXT:    call __fixsfsi@plt
+; RV32I-NEXT:    call __extendhfsf2
+; RV32I-NEXT:    call __fixsfsi
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -560,11 +560,11 @@ define i32 @caller_half_ret() nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call callee_half_ret@plt
+; RV64I-NEXT:    call callee_half_ret
 ; RV64I-NEXT:    slli a0, a0, 48
 ; RV64I-NEXT:    srli a0, a0, 48
-; RV64I-NEXT:    call __extendhfsf2@plt
-; RV64I-NEXT:    call __fixsfdi@plt
+; RV64I-NEXT:    call __extendhfsf2
+; RV64I-NEXT:    call __fixsfdi
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -573,8 +573,8 @@ define i32 @caller_half_ret() nounwind {
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
 ; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT:    call callee_half_ret@plt
-; RV32IF-NEXT:    call __extendhfsf2@plt
+; RV32IF-NEXT:    call callee_half_ret
+; RV32IF-NEXT:    call __extendhfsf2
 ; RV32IF-NEXT:    fmv.w.x fa5, a0
 ; RV32IF-NEXT:    fcvt.w.s a0, fa5, rtz
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -585,8 +585,8 @@ define i32 @caller_half_ret() nounwind {
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    addi sp, sp, -16
 ; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT:    call callee_half_ret@plt
-; RV64IF-NEXT:    call __extendhfsf2@plt
+; RV64IF-NEXT:    call callee_half_ret
+; RV64IF-NEXT:    call __extendhfsf2
 ; RV64IF-NEXT:    fmv.w.x fa5, a0
 ; RV64IF-NEXT:    fcvt.l.s a0, fa5, rtz
 ; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -597,8 +597,8 @@ define i32 @caller_half_ret() nounwind {
 ; RV32-ILP32F:       # %bb.0:
 ; RV32-ILP32F-NEXT:    addi sp, sp, -16
 ; RV32-ILP32F-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32-ILP32F-NEXT:    call callee_half_ret@plt
-; RV32-ILP32F-NEXT:    call __extendhfsf2@plt
+; RV32-ILP32F-NEXT:    call callee_half_ret
+; RV32-ILP32F-NEXT:    call __extendhfsf2
 ; RV32-ILP32F-NEXT:    fcvt.w.s a0, fa0, rtz
 ; RV32-ILP32F-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-ILP32F-NEXT:    addi sp, sp, 16
@@ -608,8 +608,8 @@ define i32 @caller_half_ret() nounwind {
 ; RV64-LP64F:       # %bb.0:
 ; RV64-LP64F-NEXT:    addi sp, sp, -16
 ; RV64-LP64F-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64-LP64F-NEXT:    call callee_half_ret@plt
-; RV64-LP64F-NEXT:    call __extendhfsf2@plt
+; RV64-LP64F-NEXT:    call callee_half_ret
+; RV64-LP64F-NEXT:    call __extendhfsf2
 ; RV64-LP64F-NEXT:    fcvt.l.s a0, fa0, rtz
 ; RV64-LP64F-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-LP64F-NEXT:    addi sp, sp, 16
@@ -619,7 +619,7 @@ define i32 @caller_half_ret() nounwind {
 ; RV32-ILP32ZFHMIN:       # %bb.0:
 ; RV32-ILP32ZFHMIN-NEXT:    addi sp, sp, -16
 ; RV32-ILP32ZFHMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32-ILP32ZFHMIN-NEXT:    call callee_half_ret@plt
+; RV32-ILP32ZFHMIN-NEXT:    call callee_half_ret
 ; RV32-ILP32ZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; RV32-ILP32ZFHMIN-NEXT:    fcvt.w.s a0, fa5, rtz
 ; RV32-ILP32ZFHMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -630,7 +630,7 @@ define i32 @caller_half_ret() nounwind {
 ; RV64-LP64ZFHMIN:       # %bb.0:
 ; RV64-LP64ZFHMIN-NEXT:    addi sp, sp, -16
 ; RV64-LP64ZFHMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64-LP64ZFHMIN-NEXT:    call callee_half_ret@plt
+; RV64-LP64ZFHMIN-NEXT:    call callee_half_ret
 ; RV64-LP64ZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; RV64-LP64ZFHMIN-NEXT:    fcvt.w.s a0, fa5, rtz
 ; RV64-LP64ZFHMIN-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/calling-conv-ilp32-ilp32f-common.ll b/llvm/test/CodeGen/RISCV/calling-conv-ilp32-ilp32f-common.ll
index 24e2d31707e9e1..278187f62cd75e 100644
--- a/llvm/test/CodeGen/RISCV/calling-conv-ilp32-ilp32f-common.ll
+++ b/llvm/test/CodeGen/RISCV/calling-conv-ilp32-ilp32f-common.ll
@@ -26,7 +26,7 @@ define i32 @callee_double_in_regs(i32 %a, double %b) nounwind {
 ; RV32I-FPELIM-NEXT:    mv s0, a0
 ; RV32I-FPELIM-NEXT:    mv a0, a1
 ; RV32I-FPELIM-NEXT:    mv a1, a2
-; RV32I-FPELIM-NEXT:    call __fixdfsi@plt
+; RV32I-FPELIM-NEXT:    call __fixdfsi
 ; RV32I-FPELIM-NEXT:    add a0, s0, a0
 ; RV32I-FPELIM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-FPELIM-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
@@ -43,7 +43,7 @@ define i32 @callee_double_in_regs(i32 %a, double %b) nounwind {
 ; RV32I-WITHFP-NEXT:    mv s1, a0
 ; RV32I-WITHFP-NEXT:    mv a0, a1
 ; RV32I-WITHFP-NEXT:    mv a1, a2
-; RV32I-WITHFP-NEXT:    call __fixdfsi@plt
+; RV32I-WITHFP-NEXT:    call __fixdfsi
 ; RV32I-WITHFP-NEXT:    add a0, s1, a0
 ; RV32I-WITHFP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-WITHFP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
@@ -63,7 +63,7 @@ define i32 @caller_double_in_regs() nounwind {
 ; RV32I-FPELIM-NEXT:    li a0, 1
 ; RV32I-FPELIM-NEXT:    lui a2, 262144
 ; RV32I-FPELIM-NEXT:    li a1, 0
-; RV32I-FPELIM-NEXT:    call callee_double_in_regs@plt
+; RV32I-FPELIM-NEXT:    call callee_double_in_regs
 ; RV32I-FPELIM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-FPELIM-NEXT:    addi sp, sp, 16
 ; RV32I-FPELIM-NEXT:    ret
@@ -77,7 +77,7 @@ define i32 @caller_double_in_regs() nounwind {
 ; RV32I-WITHFP-NEXT:    li a0, 1
 ; RV32I-WITHFP-NEXT:    lui a2, 262144
 ; RV32I-WITHFP-NEXT:    li a1, 0
-; RV32I-WITHFP-NEXT:    call callee_double_in_regs@plt
+; RV32I-WITHFP-NEXT:    call callee_double_in_regs
 ; RV32I-WITHFP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-WITHFP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; RV32I-WITHFP-NEXT:    addi sp, sp, 16
@@ -180,7 +180,7 @@ define void @caller_aligned_stack() nounwind {
 ; RV32I-FPELIM-NEXT:    li a6, 4
 ; RV32I-FPELIM-NEXT:    li a7, 14
 ; RV32I-FPELIM-NEXT:    sw t0, 32(sp)
-; RV32I-FPELIM-NEXT:    call callee_aligned_stack@plt
+; RV32I-FPELIM-NEXT:    call callee_aligned_stack
 ; RV32I-FPELIM-NEXT:    lw ra, 60(sp) # 4-byte Folded Reload
 ; RV32I-FPELIM-NEXT:    addi sp, sp, 64
 ; RV32I-FPELIM-NEXT:    ret
@@ -226,7 +226,7 @@ define void @caller_aligned_stack() nounwind {
 ; RV32I-WITHFP-NEXT:    li a6, 4
 ; RV32I-WITHFP-NEXT:    li a7, 14
 ; RV32I-WITHFP-NEXT:    sw t0, -32(s0)
-; RV32I-WITHFP-NEXT:    call callee_aligned_stack@plt
+; RV32I-WITHFP-NEXT:    call callee_aligned_stack
 ; RV32I-WITHFP-NEXT:    lw ra, 60(sp) # 4-byte Folded Reload
 ; RV32I-WITHFP-NEXT:    lw s0, 56(sp) # 4-byte Folded Reload
 ; RV32I-WITHFP-NEXT:    addi sp, sp, 64
@@ -265,7 +265,7 @@ define i64 @caller_small_scalar_ret() nounwind {
 ; RV32I-FPELIM:       # %bb.0:
 ; RV32I-FPELIM-NEXT:    addi sp, sp, -16
 ; RV32I-FPELIM-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-FPELIM-NEXT:    call callee_small_scalar_ret@plt
+; RV32I-FPELIM-NEXT:    call callee_small_scalar_ret
 ; RV32I-FPELIM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-FPELIM-NEXT:    addi sp, sp, 16
 ; RV32I-FPELIM-NEXT:    ret
@@ -276,7 +276,7 @@ define i64 @caller_small_scalar_ret() nounwind {
 ; RV32I-WITHFP-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-WITHFP-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32I-WITHFP-NEXT:    addi s0, sp, 16
-; RV32I-WITHFP-NEXT:    call callee_small_scalar_ret@plt
+; RV32I-WITHFP-NEXT:    call callee_small_scalar_ret
 ; RV32I-WITHFP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-WITHFP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; RV32I-WITHFP-NEXT:    addi sp, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/calling-conv-ilp32-ilp32f-ilp32d-common.ll b/llvm/test/CodeGen/RISCV/calling-conv-ilp32-ilp32f-ilp32d-common.ll
index 649234efaad903..231ed159ab2061 100644
--- a/llvm/test/CodeGen/RISCV/calling-conv-ilp32-ilp32f-ilp32d-common.ll
+++ b/llvm/test/CodeGen/RISCV/calling-conv-ilp32-ilp32f-ilp32d-common.ll
@@ -54,7 +54,7 @@ define i32 @caller_i64_in_regs() nounwind {
 ; RV32I-FPELIM-NEXT:    li a0, 1
 ; RV32I-FPELIM-NEXT:    li a1, 2
 ; RV32I-FPELIM-NEXT:    li a2, 0
-; RV32I-FPELIM-NEXT:    call callee_i64_in_regs@plt
+; RV32I-FPELIM-NEXT:    call callee_i64_in_regs
 ; RV32I-FPELIM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-FPELIM-NEXT:    addi sp, sp, 16
 ; RV32I-FPELIM-NEXT:    ret
@@ -68,7 +68,7 @@ define i32 @caller_i64_in_regs() nounwind {
 ; RV32I-WITHFP-NEXT:    li a0, 1
 ; RV32I-WITHFP-NEXT:    li a1, 2
 ; RV32I-WITHFP-NEXT:    li a2, 0
-; RV32I-WITHFP-NEXT:    call callee_i64_in_regs@plt
+; RV32I-WITHFP-NEXT:    call callee_i64_in_regs
 ; RV32I-WITHFP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-WITHFP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; RV32I-WITHFP-NEXT:    addi sp, sp, 16
@@ -153,7 +153,7 @@ define i32 @caller_many_scalars() nounwind {
 ; RV32I-FPELIM-NEXT:    li a7, 7
 ; RV32I-FPELIM-NEXT:    sw zero, 0(sp)
 ; RV32I-FPELIM-NEXT:    li a4, 0
-; RV32I-FPELIM-NEXT:    call callee_many_scalars@plt
+; RV32I-FPELIM-NEXT:    call callee_many_scalars
 ; RV32I-FPELIM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-FPELIM-NEXT:    addi sp, sp, 16
 ; RV32I-FPELIM-NEXT:    ret
@@ -175,7 +175,7 @@ define i32 @caller_many_scalars() nounwind {
 ; RV32I-WITHFP-NEXT:    li a7, 7
 ; RV32I-WITHFP-NEXT:    sw zero, 0(sp)
 ; RV32I-WITHFP-NEXT:    li a4, 0
-; RV32I-WITHFP-NEXT:    call callee_many_scalars@plt
+; RV32I-WITHFP-NEXT:    call callee_many_scalars
 ; RV32I-WITHFP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-WITHFP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; RV32I-WITHFP-NEXT:    addi sp, sp, 16
@@ -257,7 +257,7 @@ define i32 @caller_large_scalars() nounwind {
 ; RV32I-FPELIM-NEXT:    addi a0, sp, 24
 ; RV32I-FPELIM-NEXT:    mv a1, sp
 ; RV32I-FPELIM-NEXT:    sw a2, 24(sp)
-; RV32I-FPELIM-NEXT:    call callee_large_scalars@plt
+; RV32I-FPELIM-NEXT:    call callee_large_scalars
 ; RV32I-FPELIM-NEXT:    lw ra, 44(sp) # 4-byte Folded Reload
 ; RV32I-FPELIM-NEXT:    addi sp, sp, 48
 ; RV32I-FPELIM-NEXT:    ret
@@ -280,7 +280,7 @@ define i32 @caller_large_scalars() nounwind {
 ; RV32I-WITHFP-NEXT:    addi a0, s0, -24
 ; RV32I-WITHFP-NEXT:    addi a1, s0, -48
 ; RV32I-WITHFP-NEXT:    sw a2, -24(s0)
-; RV32I-WITHFP-NEXT:    call callee_large_scalars@plt
+; RV32I-WITHFP-NEXT:    call callee_large_scalars
 ; RV32I-WITHFP-NEXT:    lw ra, 44(sp) # 4-byte Folded Reload
 ; RV32I-WITHFP-NEXT:    lw s0, 40(sp) # 4-byte Folded Reload
 ; RV32I-WITHFP-NEXT:    addi sp, sp, 48
@@ -375,7 +375,7 @@ define i32 @caller_large_scalars_exhausted_regs() nounwind {
 ; RV32I-FPELIM-NEXT:    li a6, 7
 ; RV32I-FPELIM-NEXT:    addi a7, sp, 40
 ; RV32I-FPELIM-NEXT:    sw zero, 44(sp)
-; RV32I-FPELIM-NEXT:    call callee_large_scalars_exhausted_regs@plt
+; RV32I-FPELIM-NEXT:    call callee_large_scalars_exhausted_regs
 ; RV32I-FPELIM-NEXT:    lw ra, 60(sp) # 4-byte Folded Reload
 ; RV32I-FPELIM-NEXT:    addi sp, sp, 64
 ; RV32I-FPELIM-NEXT:    ret
@@ -408,7 +408,7 @@ define i32 @caller_large_scalars_exhausted_regs() nounwind {
 ; RV32I-WITHFP-NEXT:    li a6, 7
 ; RV32I-WITHFP-NEXT:    addi a7, s0, -24
 ; RV32I-WITHFP-NEXT:    sw zero, -20(s0)
-; RV32I-WITHFP-NEXT:    call callee_large_scalars_exhausted_regs@plt
+; RV32I-WITHFP-NEXT:    call callee_large_scalars_exhausted_regs
 ; RV32I-WITHFP-NEXT:    lw ra, 60(sp) # 4-byte Folded Reload
 ; RV32I-WITHFP-NEXT:    lw s0, 56(sp) # 4-byte Folded Reload
 ; RV32I-WITHFP-NEXT:    addi sp, sp, 64
@@ -429,7 +429,7 @@ define i32 @caller_mixed_scalar_libcalls(i64 %a) nounwind {
 ; RV32I-FPELIM-NEXT:    mv a2, a1
 ; RV32I-FPELIM-NEXT:    mv a1, a0
 ; RV32I-FPELIM-NEXT:    addi a0, sp, 8
-; RV32I-FPELIM-NEXT:    call __floatditf@plt
+; RV32I-FPELIM-NEXT:    call __floatditf
 ; RV32I-FPELIM-NEXT:    lw a0, 8(sp)
 ; RV32I-FPELIM-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-FPELIM-NEXT:    addi sp, sp, 32
@@ -444,7 +444,7 @@ define i32 @caller_mixed_scalar_libcalls(i64 %a) nounwind {
 ; RV32I-WITHFP-NEXT:    mv a2, a1
 ; RV32I-WITHFP-NEXT:    mv a1, a0
 ; RV32I-WITHFP-NEXT:    addi a0, s0, -24
-; RV32I-WITHFP-NEXT:    call __floatditf@plt
+; RV32I-WITHFP-NEXT:    call __floatditf
 ; RV32I-WITHFP-NEXT:    lw a0, -24(s0)
 ; RV32I-WITHFP-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-WITHFP-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
@@ -493,7 +493,7 @@ define i32 @caller_small_coerced_struct() nounwind {
 ; RV32I-FPELIM-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-FPELIM-NEXT:    li a0, 1
 ; RV32I-FPELIM-NEXT:    li a1, 2
-; RV32I-FPELIM-NEXT:    call callee_small_coerced_struct@plt
+; RV32I-FPELIM-NEXT:    call callee_small_coerced_struct
 ; RV32I-FPELIM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-FPELIM-NEXT:    addi sp, sp, 16
 ; RV32I-FPELIM-NEXT:    ret
@@ -506,7 +506,7 @@ define i32 @caller_small_coerced_struct() nounwind {
 ; RV32I-WITHFP-NEXT:    addi s0, sp, 16
 ; RV32I-WITHFP-NEXT:    li a0, 1
 ; RV32I-WITHFP-NEXT:    li a1, 2
-; RV32I-WITHFP-NEXT:    call callee_small_coerced_struct@plt
+; RV32I-WITHFP-NEXT:    call callee_small_coerced_struct
 ; RV32I-WITHFP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-WITHFP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; RV32I-WITHFP-NEXT:    addi sp, sp, 16
@@ -565,7 +565,7 @@ define i32 @caller_large_struct() nounwind {
 ; RV32I-FPELIM-NEXT:    sw a2, 16(sp)
 ; RV32I-FPELIM-NEXT:    sw a3, 20(sp)
 ; RV32I-FPELIM-NEXT:    addi a0, sp, 8
-; RV32I-FPELIM-NEXT:    call callee_large_struct@plt
+; RV32I-FPELIM-NEXT:    call callee_large_struct
 ; RV32I-FPELIM-NEXT:    lw ra, 44(sp) # 4-byte Folded Reload
 ; RV32I-FPELIM-NEXT:    addi sp, sp, 48
 ; RV32I-FPELIM-NEXT:    ret
@@ -589,7 +589,7 @@ define i32 @caller_large_struct() nounwind {
 ; RV32I-WITHFP-NEXT:    sw a2, -32(s0)
 ; RV32I-WITHFP-NEXT:    sw a3, -28(s0)
 ; RV32I-WITHFP-NEXT:    addi a0, s0, -40
-; RV32I-WITHFP-NEXT:    call callee_large_struct@plt
+; RV32I-WITHFP-NEXT:    call callee_large_struct
 ; RV32I-WITHFP-NEXT:    lw ra, 44(sp) # 4-byte Folded Reload
 ; RV32I-WITHFP-NEXT:    lw s0, 40(sp) # 4-byte Folded Reload
 ; RV32I-WITHFP-NEXT:    addi sp, sp, 48
@@ -696,7 +696,7 @@ define void @caller_aligned_stack() nounwind {
 ; RV32I-FPELIM-NEXT:    li a6, 4
 ; RV32I-FPELIM-NEXT:    li a7, 14
 ; RV32I-FPELIM-NEXT:    sw t0, 32(sp)
-; RV32I-FPELIM-NEXT:    call callee_aligned_stack@plt
+; RV32I-FPELIM-NEXT:    call callee_aligned_stack
 ; RV32I-FPELIM-NEXT:    lw ra, 60(sp) # 4-byte Folded Reload
 ; RV32I-FPELIM-NEXT:    addi sp, sp, 64
 ; RV32I-FPELIM-NEXT:    ret
@@ -739,7 +739,7 @@ define void @caller_aligned_stack() nounwind {
 ; RV32I-WITHFP-NEXT:    li a6, 4
 ; RV32I-WITHFP-NEXT:    li a7, 14
 ; RV32I-WITHFP-NEXT:    sw t0, -32(s0)
-; RV32I-WITHFP-NEXT:    call callee_aligned_stack@plt
+; RV32I-WITHFP-NEXT:    call callee_aligned_stack
 ; RV32I-WITHFP-NEXT:    lw ra, 60(sp) # 4-byte Folded Reload
 ; RV32I-WITHFP-NEXT:    lw s0, 56(sp) # 4-byte Folded Reload
 ; RV32I-WITHFP-NEXT:    addi sp, sp, 64
@@ -782,7 +782,7 @@ define i32 @caller_small_scalar_ret() nounwind {
 ; RV32I-FPELIM:       # %bb.0:
 ; RV32I-FPELIM-NEXT:    addi sp, sp, -16
 ; RV32I-FPELIM-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-FPELIM-NEXT:    call callee_small_scalar_ret@plt
+; RV32I-FPELIM-NEXT:    call callee_small_scalar_ret
 ; RV32I-FPELIM-NEXT:    lui a2, 56
 ; RV32I-FPELIM-NEXT:    addi a2, a2, 580
 ; RV32I-FPELIM-NEXT:    xor a1, a1, a2
@@ -801,7 +801,7 @@ define i32 @caller_small_scalar_ret() nounwind {
 ; RV32I-WITHFP-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-WITHFP-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32I-WITHFP-NEXT:    addi s0, sp, 16
-; RV32I-WITHFP-NEXT:    call callee_small_scalar_ret@plt
+; RV32I-WITHFP-NEXT:    call callee_small_scalar_ret
 ; RV32I-WITHFP-NEXT:    lui a2, 56
 ; RV32I-WITHFP-NEXT:    addi a2, a2, 580
 ; RV32I-WITHFP-NEXT:    xor a1, a1, a2
@@ -849,7 +849,7 @@ define i32 @caller_small_struct_ret() nounwind {
 ; RV32I-FPELIM:       # %bb.0:
 ; RV32I-FPELIM-NEXT:    addi sp, sp, -16
 ; RV32I-FPELIM-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-FPELIM-NEXT:    call callee_small_struct_ret@plt
+; RV32I-FPELIM-NEXT:    call callee_small_struct_ret
 ; RV32I-FPELIM-NEXT:    add a0, a0, a1
 ; RV32I-FPELIM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-FPELIM-NEXT:    addi sp, sp, 16
@@ -861,7 +861,7 @@ define i32 @caller_small_struct_ret() nounwind {
 ; RV32I-WITHFP-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-WITHFP-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32I-WITHFP-NEXT:    addi s0, sp, 16
-; RV32I-WITHFP-NEXT:    call callee_small_struct_ret@plt
+; RV32I-WITHFP-NEXT:    call callee_small_struct_ret
 ; RV32I-WITHFP-NEXT:    add a0, a0, a1
 ; RV32I-WITHFP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-WITHFP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
@@ -911,7 +911,7 @@ define void @caller_large_scalar_ret() nounwind {
 ; RV32I-FPELIM-NEXT:    addi sp, sp, -32
 ; RV32I-FPELIM-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
 ; RV32I-FPELIM-NEXT:    mv a0, sp
-; RV32I-FPELIM-NEXT:    call callee_large_scalar_ret@plt
+; RV32I-FPELIM-NEXT:    call callee_large_scalar_ret
 ; RV32I-FPELIM-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-FPELIM-NEXT:    addi sp, sp, 32
 ; RV32I-FPELIM-NEXT:    ret
@@ -923,7 +923,7 @@ define void @caller_large_scalar_ret() nounwind {
 ; RV32I-WITHFP-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
 ; RV32I-WITHFP-NEXT:    addi s0, sp, 32
 ; RV32I-WITHFP-NEXT:    addi a0, s0, -32
-; RV32I-WITHFP-NEXT:    call callee_large_scalar_ret@plt
+; RV32I-WITHFP-NEXT:    call callee_large_scalar_ret
 ; RV32I-WITHFP-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-WITHFP-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
 ; RV32I-WITHFP-NEXT:    addi sp, sp, 32
@@ -981,7 +981,7 @@ define i32 @caller_large_struct_ret() nounwind {
 ; RV32I-FPELIM-NEXT:    addi sp, sp, -32
 ; RV32I-FPELIM-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
 ; RV32I-FPELIM-NEXT:    addi a0, sp, 8
-; RV32I-FPELIM-NEXT:    call callee_large_struct_ret@plt
+; RV32I-FPELIM-NEXT:    call callee_large_struct_ret
 ; RV32I-FPELIM-NEXT:    lw a0, 8(sp)
 ; RV32I-FPELIM-NEXT:    lw a1, 20(sp)
 ; RV32I-FPELIM-NEXT:    add a0, a0, a1
@@ -996,7 +996,7 @@ define i32 @caller_large_struct_ret() nounwind {
 ; RV32I-WITHFP-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
 ; RV32I-WITHFP-NEXT:    addi s0, sp, 32
 ; RV32I-WITHFP-NEXT:    addi a0, s0, -24
-; RV32I-WITHFP-NEXT:    call callee_large_struct_ret@plt
+; RV32I-WITHFP-NEXT:    call callee_large_struct_ret
 ; RV32I-WITHFP-NEXT:    lw a0, -24(s0)
 ; RV32I-WITHFP-NEXT:    lw a1, -12(s0)
 ; RV32I-WITHFP-NEXT:    add a0, a0, a1
diff --git a/llvm/test/CodeGen/RISCV/calling-conv-ilp32.ll b/llvm/test/CodeGen/RISCV/calling-conv-ilp32.ll
index 07acb9fd80a9c1..1dac139503ba75 100644
--- a/llvm/test/CodeGen/RISCV/calling-conv-ilp32.ll
+++ b/llvm/test/CodeGen/RISCV/calling-conv-ilp32.ll
@@ -20,7 +20,7 @@ define i32 @callee_float_in_regs(i32 %a, float %b) nounwind {
 ; RV32I-FPELIM-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32I-FPELIM-NEXT:    mv s0, a0
 ; RV32I-FPELIM-NEXT:    mv a0, a1
-; RV32I-FPELIM-NEXT:    call __fixsfsi@plt
+; RV32I-FPELIM-NEXT:    call __fixsfsi
 ; RV32I-FPELIM-NEXT:    add a0, s0, a0
 ; RV32I-FPELIM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-FPELIM-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
@@ -36,7 +36,7 @@ define i32 @callee_float_in_regs(i32 %a, float %b) nounwind {
 ; RV32I-WITHFP-NEXT:    addi s0, sp, 16
 ; RV32I-WITHFP-NEXT:    mv s1, a0
 ; RV32I-WITHFP-NEXT:    mv a0, a1
-; RV32I-WITHFP-NEXT:    call __fixsfsi@plt
+; RV32I-WITHFP-NEXT:    call __fixsfsi
 ; RV32I-WITHFP-NEXT:    add a0, s1, a0
 ; RV32I-WITHFP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-WITHFP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
@@ -55,7 +55,7 @@ define i32 @caller_float_in_regs() nounwind {
 ; RV32I-FPELIM-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-FPELIM-NEXT:    li a0, 1
 ; RV32I-FPELIM-NEXT:    lui a1, 262144
-; RV32I-FPELIM-NEXT:    call callee_float_in_regs@plt
+; RV32I-FPELIM-NEXT:    call callee_float_in_regs
 ; RV32I-FPELIM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-FPELIM-NEXT:    addi sp, sp, 16
 ; RV32I-FPELIM-NEXT:    ret
@@ -68,7 +68,7 @@ define i32 @caller_float_in_regs() nounwind {
 ; RV32I-WITHFP-NEXT:    addi s0, sp, 16
 ; RV32I-WITHFP-NEXT:    li a0, 1
 ; RV32I-WITHFP-NEXT:    lui a1, 262144
-; RV32I-WITHFP-NEXT:    call callee_float_in_regs@plt
+; RV32I-WITHFP-NEXT:    call callee_float_in_regs
 ; RV32I-WITHFP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-WITHFP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; RV32I-WITHFP-NEXT:    addi sp, sp, 16
@@ -117,7 +117,7 @@ define i32 @caller_float_on_stack() nounwind {
 ; RV32I-FPELIM-NEXT:    li a3, 0
 ; RV32I-FPELIM-NEXT:    li a5, 0
 ; RV32I-FPELIM-NEXT:    li a7, 0
-; RV32I-FPELIM-NEXT:    call callee_float_on_stack@plt
+; RV32I-FPELIM-NEXT:    call callee_float_on_stack
 ; RV32I-FPELIM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-FPELIM-NEXT:    addi sp, sp, 16
 ; RV32I-FPELIM-NEXT:    ret
@@ -138,7 +138,7 @@ define i32 @caller_float_on_stack() nounwind {
 ; RV32I-WITHFP-NEXT:    li a3, 0
 ; RV32I-WITHFP-NEXT:    li a5, 0
 ; RV32I-WITHFP-NEXT:    li a7, 0
-; RV32I-WITHFP-NEXT:    call callee_float_on_stack@plt
+; RV32I-WITHFP-NEXT:    call callee_float_on_stack
 ; RV32I-WITHFP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-WITHFP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; RV32I-WITHFP-NEXT:    addi sp, sp, 16
@@ -172,7 +172,7 @@ define i32 @caller_tiny_scalar_ret() nounwind {
 ; RV32I-FPELIM:       # %bb.0:
 ; RV32I-FPELIM-NEXT:    addi sp, sp, -16
 ; RV32I-FPELIM-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-FPELIM-NEXT:    call callee_tiny_scalar_ret@plt
+; RV32I-FPELIM-NEXT:    call callee_tiny_scalar_ret
 ; RV32I-FPELIM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-FPELIM-NEXT:    addi sp, sp, 16
 ; RV32I-FPELIM-NEXT:    ret
@@ -183,7 +183,7 @@ define i32 @caller_tiny_scalar_ret() nounwind {
 ; RV32I-WITHFP-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-WITHFP-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32I-WITHFP-NEXT:    addi s0, sp, 16
-; RV32I-WITHFP-NEXT:    call callee_tiny_scalar_ret@plt
+; RV32I-WITHFP-NEXT:    call callee_tiny_scalar_ret
 ; RV32I-WITHFP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-WITHFP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; RV32I-WITHFP-NEXT:    addi sp, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/calling-conv-ilp32d.ll b/llvm/test/CodeGen/RISCV/calling-conv-ilp32d.ll
index 4897170a82f2e2..bcceea7ac35b3e 100644
--- a/llvm/test/CodeGen/RISCV/calling-conv-ilp32d.ll
+++ b/llvm/test/CodeGen/RISCV/calling-conv-ilp32d.ll
@@ -25,7 +25,7 @@ define i32 @caller_double_in_fpr() nounwind {
 ; RV32-ILP32D-NEXT:    lui a0, %hi(.LCPI1_0)
 ; RV32-ILP32D-NEXT:    fld fa0, %lo(.LCPI1_0)(a0)
 ; RV32-ILP32D-NEXT:    li a0, 1
-; RV32-ILP32D-NEXT:    call callee_double_in_fpr@plt
+; RV32-ILP32D-NEXT:    call callee_double_in_fpr
 ; RV32-ILP32D-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-ILP32D-NEXT:    addi sp, sp, 16
 ; RV32-ILP32D-NEXT:    ret
@@ -63,7 +63,7 @@ define i32 @caller_double_in_fpr_exhausted_gprs() nounwind {
 ; RV32-ILP32D-NEXT:    li a3, 0
 ; RV32-ILP32D-NEXT:    li a5, 0
 ; RV32-ILP32D-NEXT:    li a7, 0
-; RV32-ILP32D-NEXT:    call callee_double_in_fpr_exhausted_gprs@plt
+; RV32-ILP32D-NEXT:    call callee_double_in_fpr_exhausted_gprs
 ; RV32-ILP32D-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-ILP32D-NEXT:    addi sp, sp, 16
 ; RV32-ILP32D-NEXT:    ret
@@ -114,7 +114,7 @@ define i32 @caller_double_in_gpr_exhausted_fprs() nounwind {
 ; RV32-ILP32D-NEXT:    fld fa7, %lo(.LCPI5_7)(a0)
 ; RV32-ILP32D-NEXT:    lui a1, 262688
 ; RV32-ILP32D-NEXT:    li a0, 0
-; RV32-ILP32D-NEXT:    call callee_double_in_gpr_exhausted_fprs@plt
+; RV32-ILP32D-NEXT:    call callee_double_in_gpr_exhausted_fprs
 ; RV32-ILP32D-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-ILP32D-NEXT:    addi sp, sp, 16
 ; RV32-ILP32D-NEXT:    ret
@@ -173,7 +173,7 @@ define i32 @caller_double_in_gpr_and_stack_almost_exhausted_gprs_fprs() nounwind
 ; RV32-ILP32D-NEXT:    li a3, 0
 ; RV32-ILP32D-NEXT:    li a5, 0
 ; RV32-ILP32D-NEXT:    li a7, 0
-; RV32-ILP32D-NEXT:    call callee_double_in_gpr_and_stack_almost_exhausted_gprs_fprs@plt
+; RV32-ILP32D-NEXT:    call callee_double_in_gpr_and_stack_almost_exhausted_gprs_fprs
 ; RV32-ILP32D-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-ILP32D-NEXT:    addi sp, sp, 16
 ; RV32-ILP32D-NEXT:    ret
@@ -230,7 +230,7 @@ define i32 @caller_double_on_stack_exhausted_gprs_fprs() nounwind {
 ; RV32-ILP32D-NEXT:    li a3, 0
 ; RV32-ILP32D-NEXT:    li a5, 0
 ; RV32-ILP32D-NEXT:    li a7, 0
-; RV32-ILP32D-NEXT:    call callee_double_on_stack_exhausted_gprs_fprs@plt
+; RV32-ILP32D-NEXT:    call callee_double_on_stack_exhausted_gprs_fprs
 ; RV32-ILP32D-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-ILP32D-NEXT:    addi sp, sp, 16
 ; RV32-ILP32D-NEXT:    ret
@@ -254,7 +254,7 @@ define i32 @caller_double_ret() nounwind {
 ; RV32-ILP32D:       # %bb.0:
 ; RV32-ILP32D-NEXT:    addi sp, sp, -16
 ; RV32-ILP32D-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32-ILP32D-NEXT:    call callee_double_ret@plt
+; RV32-ILP32D-NEXT:    call callee_double_ret
 ; RV32-ILP32D-NEXT:    fsd fa0, 0(sp)
 ; RV32-ILP32D-NEXT:    lw a0, 0(sp)
 ; RV32-ILP32D-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/calling-conv-ilp32f-ilp32d-common.ll b/llvm/test/CodeGen/RISCV/calling-conv-ilp32f-ilp32d-common.ll
index bb51f71358ad6d..b0d60a7aaa235f 100644
--- a/llvm/test/CodeGen/RISCV/calling-conv-ilp32f-ilp32d-common.ll
+++ b/llvm/test/CodeGen/RISCV/calling-conv-ilp32f-ilp32d-common.ll
@@ -28,7 +28,7 @@ define i32 @caller_float_in_fpr() nounwind {
 ; RV32-ILP32FD-NEXT:    lui a0, 262144
 ; RV32-ILP32FD-NEXT:    fmv.w.x fa0, a0
 ; RV32-ILP32FD-NEXT:    li a0, 1
-; RV32-ILP32FD-NEXT:    call callee_float_in_fpr@plt
+; RV32-ILP32FD-NEXT:    call callee_float_in_fpr
 ; RV32-ILP32FD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-ILP32FD-NEXT:    addi sp, sp, 16
 ; RV32-ILP32FD-NEXT:    ret
@@ -66,7 +66,7 @@ define i32 @caller_float_in_fpr_exhausted_gprs() nounwind {
 ; RV32-ILP32FD-NEXT:    li a3, 0
 ; RV32-ILP32FD-NEXT:    li a5, 0
 ; RV32-ILP32FD-NEXT:    li a7, 0
-; RV32-ILP32FD-NEXT:    call callee_float_in_fpr_exhausted_gprs@plt
+; RV32-ILP32FD-NEXT:    call callee_float_in_fpr_exhausted_gprs
 ; RV32-ILP32FD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-ILP32FD-NEXT:    addi sp, sp, 16
 ; RV32-ILP32FD-NEXT:    ret
@@ -112,7 +112,7 @@ define i32 @caller_float_in_gpr_exhausted_fprs() nounwind {
 ; RV32-ILP32FD-NEXT:    lui a0, 266240
 ; RV32-ILP32FD-NEXT:    fmv.w.x fa7, a0
 ; RV32-ILP32FD-NEXT:    lui a0, 266496
-; RV32-ILP32FD-NEXT:    call callee_float_in_gpr_exhausted_fprs@plt
+; RV32-ILP32FD-NEXT:    call callee_float_in_gpr_exhausted_fprs
 ; RV32-ILP32FD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-ILP32FD-NEXT:    addi sp, sp, 16
 ; RV32-ILP32FD-NEXT:    ret
@@ -167,7 +167,7 @@ define i32 @caller_float_on_stack_exhausted_gprs_fprs() nounwind {
 ; RV32-ILP32FD-NEXT:    li a3, 0
 ; RV32-ILP32FD-NEXT:    li a5, 0
 ; RV32-ILP32FD-NEXT:    li a7, 0
-; RV32-ILP32FD-NEXT:    call callee_float_on_stack_exhausted_gprs_fprs@plt
+; RV32-ILP32FD-NEXT:    call callee_float_on_stack_exhausted_gprs_fprs
 ; RV32-ILP32FD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-ILP32FD-NEXT:    addi sp, sp, 16
 ; RV32-ILP32FD-NEXT:    ret
@@ -191,7 +191,7 @@ define i32 @caller_float_ret() nounwind {
 ; RV32-ILP32FD:       # %bb.0:
 ; RV32-ILP32FD-NEXT:    addi sp, sp, -16
 ; RV32-ILP32FD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32-ILP32FD-NEXT:    call callee_float_ret@plt
+; RV32-ILP32FD-NEXT:    call callee_float_ret
 ; RV32-ILP32FD-NEXT:    fmv.x.w a0, fa0
 ; RV32-ILP32FD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-ILP32FD-NEXT:    addi sp, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-common.ll b/llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-common.ll
index f424e77182a913..a1d8ea66980dd2 100644
--- a/llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-common.ll
+++ b/llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-common.ll
@@ -17,7 +17,7 @@ define i64 @callee_double_in_regs(i64 %a, double %b) nounwind {
 ; RV64I-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    mv a0, a1
-; RV64I-NEXT:    call __fixdfdi@plt
+; RV64I-NEXT:    call __fixdfdi
 ; RV64I-NEXT:    add a0, s0, a0
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
@@ -36,7 +36,7 @@ define i64 @caller_double_in_regs() nounwind {
 ; RV64I-NEXT:    li a1, 1
 ; RV64I-NEXT:    slli a1, a1, 62
 ; RV64I-NEXT:    li a0, 1
-; RV64I-NEXT:    call callee_double_in_regs@plt
+; RV64I-NEXT:    call callee_double_in_regs
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -58,7 +58,7 @@ define i64 @caller_double_ret() nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call callee_double_ret@plt
+; RV64I-NEXT:    call callee_double_ret
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
diff --git a/llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-lp64d-common.ll b/llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-lp64d-common.ll
index c2690d15665e21..d8471129433027 100644
--- a/llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-lp64d-common.ll
+++ b/llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-lp64d-common.ll
@@ -35,7 +35,7 @@ define i64 @caller_i128_in_regs() nounwind {
 ; RV64I-NEXT:    li a0, 1
 ; RV64I-NEXT:    li a1, 2
 ; RV64I-NEXT:    li a2, 0
-; RV64I-NEXT:    call callee_i128_in_regs@plt
+; RV64I-NEXT:    call callee_i128_in_regs
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -93,7 +93,7 @@ define i32 @caller_many_scalars() nounwind {
 ; RV64I-NEXT:    li a7, 7
 ; RV64I-NEXT:    sd zero, 0(sp)
 ; RV64I-NEXT:    li a4, 0
-; RV64I-NEXT:    call callee_many_scalars@plt
+; RV64I-NEXT:    call callee_many_scalars
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 32
 ; RV64I-NEXT:    ret
@@ -145,7 +145,7 @@ define i64 @caller_large_scalars() nounwind {
 ; RV64I-NEXT:    addi a0, sp, 32
 ; RV64I-NEXT:    mv a1, sp
 ; RV64I-NEXT:    sd zero, 40(sp)
-; RV64I-NEXT:    call callee_large_scalars@plt
+; RV64I-NEXT:    call callee_large_scalars
 ; RV64I-NEXT:    ld ra, 72(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 80
 ; RV64I-NEXT:    ret
@@ -210,7 +210,7 @@ define i64 @caller_large_scalars_exhausted_regs() nounwind {
 ; RV64I-NEXT:    li a6, 7
 ; RV64I-NEXT:    addi a7, sp, 48
 ; RV64I-NEXT:    sd zero, 56(sp)
-; RV64I-NEXT:    call callee_large_scalars_exhausted_regs@plt
+; RV64I-NEXT:    call callee_large_scalars_exhausted_regs
 ; RV64I-NEXT:    ld ra, 88(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 96
 ; RV64I-NEXT:    ret
@@ -227,7 +227,7 @@ define i64 @caller_mixed_scalar_libcalls(i64 %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __floatditf@plt
+; RV64I-NEXT:    call __floatditf
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -261,7 +261,7 @@ define i64 @caller_small_coerced_struct() nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a0, 1
 ; RV64I-NEXT:    li a1, 2
-; RV64I-NEXT:    call callee_small_coerced_struct@plt
+; RV64I-NEXT:    call callee_small_coerced_struct
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -305,7 +305,7 @@ define i64 @caller_large_struct() nounwind {
 ; RV64I-NEXT:    sd a2, 24(sp)
 ; RV64I-NEXT:    sd a3, 32(sp)
 ; RV64I-NEXT:    addi a0, sp, 8
-; RV64I-NEXT:    call callee_large_struct@plt
+; RV64I-NEXT:    call callee_large_struct
 ; RV64I-NEXT:    ld ra, 72(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 80
 ; RV64I-NEXT:    ret
@@ -375,7 +375,7 @@ define void @caller_aligned_stack() nounwind {
 ; RV64I-NEXT:    li a7, 7
 ; RV64I-NEXT:    sd a6, 0(sp)
 ; RV64I-NEXT:    li a6, 0
-; RV64I-NEXT:    call callee_aligned_stack@plt
+; RV64I-NEXT:    call callee_aligned_stack
 ; RV64I-NEXT:    ld ra, 56(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 64
 ; RV64I-NEXT:    ret
@@ -400,7 +400,7 @@ define i64 @caller_small_scalar_ret() nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call callee_small_scalar_ret@plt
+; RV64I-NEXT:    call callee_small_scalar_ret
 ; RV64I-NEXT:    not a1, a1
 ; RV64I-NEXT:    xori a0, a0, -2
 ; RV64I-NEXT:    or a0, a0, a1
@@ -430,7 +430,7 @@ define i64 @caller_small_struct_ret() nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call callee_small_struct_ret@plt
+; RV64I-NEXT:    call callee_small_struct_ret
 ; RV64I-NEXT:    add a0, a0, a1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -465,7 +465,7 @@ define void @caller_large_scalar_ret() nounwind {
 ; RV64I-NEXT:    addi sp, sp, -48
 ; RV64I-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv a0, sp
-; RV64I-NEXT:    call callee_large_scalar_ret@plt
+; RV64I-NEXT:    call callee_large_scalar_ret
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 48
 ; RV64I-NEXT:    ret
@@ -507,7 +507,7 @@ define i64 @caller_large_struct_ret() nounwind {
 ; RV64I-NEXT:    addi sp, sp, -48
 ; RV64I-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a0, sp, 8
-; RV64I-NEXT:    call callee_large_struct_ret@plt
+; RV64I-NEXT:    call callee_large_struct_ret
 ; RV64I-NEXT:    ld a0, 8(sp)
 ; RV64I-NEXT:    ld a1, 32(sp)
 ; RV64I-NEXT:    add a0, a0, a1
diff --git a/llvm/test/CodeGen/RISCV/calling-conv-lp64.ll b/llvm/test/CodeGen/RISCV/calling-conv-lp64.ll
index bf98412bc315cf..c2db8fe5248fd4 100644
--- a/llvm/test/CodeGen/RISCV/calling-conv-lp64.ll
+++ b/llvm/test/CodeGen/RISCV/calling-conv-lp64.ll
@@ -22,7 +22,7 @@ define i64 @callee_float_in_regs(i64 %a, float %b) nounwind {
 ; RV64I-FPELIM-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
 ; RV64I-FPELIM-NEXT:    mv s0, a0
 ; RV64I-FPELIM-NEXT:    mv a0, a1
-; RV64I-FPELIM-NEXT:    call __fixsfdi@plt
+; RV64I-FPELIM-NEXT:    call __fixsfdi
 ; RV64I-FPELIM-NEXT:    add a0, s0, a0
 ; RV64I-FPELIM-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-FPELIM-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
@@ -38,7 +38,7 @@ define i64 @callee_float_in_regs(i64 %a, float %b) nounwind {
 ; RV64I-WITHFP-NEXT:    addi s0, sp, 32
 ; RV64I-WITHFP-NEXT:    mv s1, a0
 ; RV64I-WITHFP-NEXT:    mv a0, a1
-; RV64I-WITHFP-NEXT:    call __fixsfdi@plt
+; RV64I-WITHFP-NEXT:    call __fixsfdi
 ; RV64I-WITHFP-NEXT:    add a0, s1, a0
 ; RV64I-WITHFP-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-WITHFP-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
@@ -57,7 +57,7 @@ define i64 @caller_float_in_regs() nounwind {
 ; RV64I-FPELIM-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-FPELIM-NEXT:    li a0, 1
 ; RV64I-FPELIM-NEXT:    lui a1, 262144
-; RV64I-FPELIM-NEXT:    call callee_float_in_regs@plt
+; RV64I-FPELIM-NEXT:    call callee_float_in_regs
 ; RV64I-FPELIM-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-FPELIM-NEXT:    addi sp, sp, 16
 ; RV64I-FPELIM-NEXT:    ret
@@ -70,7 +70,7 @@ define i64 @caller_float_in_regs() nounwind {
 ; RV64I-WITHFP-NEXT:    addi s0, sp, 16
 ; RV64I-WITHFP-NEXT:    li a0, 1
 ; RV64I-WITHFP-NEXT:    lui a1, 262144
-; RV64I-WITHFP-NEXT:    call callee_float_in_regs@plt
+; RV64I-WITHFP-NEXT:    call callee_float_in_regs
 ; RV64I-WITHFP-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-WITHFP-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
 ; RV64I-WITHFP-NEXT:    addi sp, sp, 16
@@ -118,7 +118,7 @@ define i64 @caller_float_on_stack() nounwind {
 ; RV64I-FPELIM-NEXT:    li a3, 0
 ; RV64I-FPELIM-NEXT:    li a5, 0
 ; RV64I-FPELIM-NEXT:    li a7, 0
-; RV64I-FPELIM-NEXT:    call callee_float_on_stack@plt
+; RV64I-FPELIM-NEXT:    call callee_float_on_stack
 ; RV64I-FPELIM-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-FPELIM-NEXT:    addi sp, sp, 16
 ; RV64I-FPELIM-NEXT:    ret
@@ -139,7 +139,7 @@ define i64 @caller_float_on_stack() nounwind {
 ; RV64I-WITHFP-NEXT:    li a3, 0
 ; RV64I-WITHFP-NEXT:    li a5, 0
 ; RV64I-WITHFP-NEXT:    li a7, 0
-; RV64I-WITHFP-NEXT:    call callee_float_on_stack@plt
+; RV64I-WITHFP-NEXT:    call callee_float_on_stack
 ; RV64I-WITHFP-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-WITHFP-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; RV64I-WITHFP-NEXT:    addi sp, sp, 32
@@ -176,7 +176,7 @@ define i64 @caller_tiny_scalar_ret() nounwind {
 ; RV64I-FPELIM:       # %bb.0:
 ; RV64I-FPELIM-NEXT:    addi sp, sp, -16
 ; RV64I-FPELIM-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-FPELIM-NEXT:    call callee_tiny_scalar_ret@plt
+; RV64I-FPELIM-NEXT:    call callee_tiny_scalar_ret
 ; RV64I-FPELIM-NEXT:    sext.w a0, a0
 ; RV64I-FPELIM-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-FPELIM-NEXT:    addi sp, sp, 16
@@ -188,7 +188,7 @@ define i64 @caller_tiny_scalar_ret() nounwind {
 ; RV64I-WITHFP-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-WITHFP-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
 ; RV64I-WITHFP-NEXT:    addi s0, sp, 16
-; RV64I-WITHFP-NEXT:    call callee_tiny_scalar_ret@plt
+; RV64I-WITHFP-NEXT:    call callee_tiny_scalar_ret
 ; RV64I-WITHFP-NEXT:    sext.w a0, a0
 ; RV64I-WITHFP-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-WITHFP-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/calling-conv-rv32f-ilp32.ll b/llvm/test/CodeGen/RISCV/calling-conv-rv32f-ilp32.ll
index a74f7bbe2331b2..37d9eb6990b0e4 100644
--- a/llvm/test/CodeGen/RISCV/calling-conv-rv32f-ilp32.ll
+++ b/llvm/test/CodeGen/RISCV/calling-conv-rv32f-ilp32.ll
@@ -43,7 +43,7 @@ define float @caller_onstack_f32_noop(float %a) nounwind {
 ; RV32IF-NEXT:    li a3, 0
 ; RV32IF-NEXT:    li a5, 0
 ; RV32IF-NEXT:    li a7, 0
-; RV32IF-NEXT:    call onstack_f32_noop@plt
+; RV32IF-NEXT:    call onstack_f32_noop
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
@@ -70,7 +70,7 @@ define float @caller_onstack_f32_fadd(float %a, float %b) nounwind {
 ; RV32IF-NEXT:    li a3, 0
 ; RV32IF-NEXT:    li a5, 0
 ; RV32IF-NEXT:    li a7, 0
-; RV32IF-NEXT:    call onstack_f32_noop@plt
+; RV32IF-NEXT:    call onstack_f32_noop
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
diff --git a/llvm/test/CodeGen/RISCV/calling-conv-sext-zext.ll b/llvm/test/CodeGen/RISCV/calling-conv-sext-zext.ll
index ac060f9469ac9c..5bae6b1d7f5440 100644
--- a/llvm/test/CodeGen/RISCV/calling-conv-sext-zext.ll
+++ b/llvm/test/CodeGen/RISCV/calling-conv-sext-zext.ll
@@ -16,7 +16,7 @@ define void @pass_uint8_as_uint8(i8 zeroext %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call receive_uint8@plt
+; RV32I-NEXT:    call receive_uint8
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -31,7 +31,7 @@ define zeroext i8 @ret_callresult_uint8_as_uint8() nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call return_uint8@plt
+; RV32I-NEXT:    call return_uint8
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -57,7 +57,7 @@ define void @pass_uint8_as_sint8(i8 zeroext %a) nounwind {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    slli a0, a0, 24
 ; RV32I-NEXT:    srai a0, a0, 24
-; RV32I-NEXT:    call receive_sint8@plt
+; RV32I-NEXT:    call receive_sint8
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -71,7 +71,7 @@ define signext i8 @ret_callresult_uint8_as_sint8() nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call return_uint8@plt
+; RV32I-NEXT:    call return_uint8
 ; RV32I-NEXT:    slli a0, a0, 24
 ; RV32I-NEXT:    srai a0, a0, 24
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -96,7 +96,7 @@ define void @pass_uint8_as_anyint32(i8 zeroext %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call receive_anyint32@plt
+; RV32I-NEXT:    call receive_anyint32
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -110,7 +110,7 @@ define signext i32 @ret_callresult_uint8_as_anyint32() nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call return_uint8@plt
+; RV32I-NEXT:    call return_uint8
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -133,7 +133,7 @@ define void @pass_sint8_as_uint8(i8 signext %a) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    andi a0, a0, 255
-; RV32I-NEXT:    call receive_uint8@plt
+; RV32I-NEXT:    call receive_uint8
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -148,7 +148,7 @@ define zeroext i8 @ret_callresult_sint8_as_uint8() nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call return_sint8@plt
+; RV32I-NEXT:    call return_sint8
 ; RV32I-NEXT:    andi a0, a0, 255
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -169,7 +169,7 @@ define void @pass_sint8_as_sint8(i8 signext %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call receive_sint8@plt
+; RV32I-NEXT:    call receive_sint8
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -182,7 +182,7 @@ define signext i8 @ret_callresult_sint8_as_sint8() nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call return_sint8@plt
+; RV32I-NEXT:    call return_sint8
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -203,7 +203,7 @@ define void @pass_sint8_as_anyint32(i8 signext %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call receive_anyint32@plt
+; RV32I-NEXT:    call receive_anyint32
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -217,7 +217,7 @@ define signext i32 @ret_callresult_sint8_as_anyint32() nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call return_sint8@plt
+; RV32I-NEXT:    call return_sint8
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -241,7 +241,7 @@ define void @pass_anyint32_as_uint8(i32 signext %a) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    andi a0, a0, 255
-; RV32I-NEXT:    call receive_uint8@plt
+; RV32I-NEXT:    call receive_uint8
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -257,7 +257,7 @@ define zeroext i8 @ret_callresult_anyint32_as_uint8() nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call return_anyint32@plt
+; RV32I-NEXT:    call return_anyint32
 ; RV32I-NEXT:    andi a0, a0, 255
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -284,7 +284,7 @@ define void @pass_anyint32_as_sint8(i32 signext %a) nounwind {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    slli a0, a0, 24
 ; RV32I-NEXT:    srai a0, a0, 24
-; RV32I-NEXT:    call receive_sint8@plt
+; RV32I-NEXT:    call receive_sint8
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -298,7 +298,7 @@ define signext i8 @ret_callresult_anyint32_as_sint8() nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call return_anyint32@plt
+; RV32I-NEXT:    call return_anyint32
 ; RV32I-NEXT:    slli a0, a0, 24
 ; RV32I-NEXT:    srai a0, a0, 24
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -321,7 +321,7 @@ define void @pass_anyint32_as_anyint32(i32 signext %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call receive_anyint32@plt
+; RV32I-NEXT:    call receive_anyint32
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -334,7 +334,7 @@ define signext i32 @ret_callresult_anyint32_as_anyint32() nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call return_anyint32@plt
+; RV32I-NEXT:    call return_anyint32
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
diff --git a/llvm/test/CodeGen/RISCV/calling-conv-vector-on-stack.ll b/llvm/test/CodeGen/RISCV/calling-conv-vector-on-stack.ll
index 3e2af113652976..70cdb6cec2449f 100644
--- a/llvm/test/CodeGen/RISCV/calling-conv-vector-on-stack.ll
+++ b/llvm/test/CodeGen/RISCV/calling-conv-vector-on-stack.ll
@@ -31,7 +31,7 @@ define void @bar() nounwind {
 ; CHECK-NEXT:    li a6, 0
 ; CHECK-NEXT:    li a7, 0
 ; CHECK-NEXT:    vmv.v.i v16, 0
-; CHECK-NEXT:    call foo@plt
+; CHECK-NEXT:    call foo
 ; CHECK-NEXT:    addi sp, sp, 16
 ; CHECK-NEXT:    addi sp, s0, -96
 ; CHECK-NEXT:    ld ra, 88(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/calls.ll b/llvm/test/CodeGen/RISCV/calls.ll
index e3459875362d45..365f255dd82447 100644
--- a/llvm/test/CodeGen/RISCV/calls.ll
+++ b/llvm/test/CodeGen/RISCV/calls.ll
@@ -11,7 +11,7 @@ define i32 @test_call_external(i32 %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call external_function@plt
+; RV32I-NEXT:    call external_function
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -20,7 +20,7 @@ define i32 @test_call_external(i32 %a) nounwind {
 ; RV32I-PIC:       # %bb.0:
 ; RV32I-PIC-NEXT:    addi sp, sp, -16
 ; RV32I-PIC-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-PIC-NEXT:    call external_function@plt
+; RV32I-PIC-NEXT:    call external_function
 ; RV32I-PIC-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-PIC-NEXT:    addi sp, sp, 16
 ; RV32I-PIC-NEXT:    ret
@@ -71,7 +71,7 @@ define i32 @test_call_defined(i32 %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call defined_function@plt
+; RV32I-NEXT:    call defined_function
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -80,7 +80,7 @@ define i32 @test_call_defined(i32 %a) nounwind {
 ; RV32I-PIC:       # %bb.0:
 ; RV32I-PIC-NEXT:    addi sp, sp, -16
 ; RV32I-PIC-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-PIC-NEXT:    call defined_function@plt
+; RV32I-PIC-NEXT:    call defined_function
 ; RV32I-PIC-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-PIC-NEXT:    addi sp, sp, 16
 ; RV32I-PIC-NEXT:    ret
@@ -178,7 +178,7 @@ define i32 @test_call_fastcc(i32 %a, i32 %b) nounwind {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
-; RV32I-NEXT:    call fastcc_function@plt
+; RV32I-NEXT:    call fastcc_function
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
@@ -191,7 +191,7 @@ define i32 @test_call_fastcc(i32 %a, i32 %b) nounwind {
 ; RV32I-PIC-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-PIC-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32I-PIC-NEXT:    mv s0, a0
-; RV32I-PIC-NEXT:    call fastcc_function@plt
+; RV32I-PIC-NEXT:    call fastcc_function
 ; RV32I-PIC-NEXT:    mv a0, s0
 ; RV32I-PIC-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-PIC-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
@@ -219,7 +219,7 @@ define i32 @test_call_external_many_args(i32 %a) nounwind {
 ; RV32I-NEXT:    mv a5, a0
 ; RV32I-NEXT:    mv a6, a0
 ; RV32I-NEXT:    mv a7, a0
-; RV32I-NEXT:    call external_many_args@plt
+; RV32I-NEXT:    call external_many_args
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
@@ -241,7 +241,7 @@ define i32 @test_call_external_many_args(i32 %a) nounwind {
 ; RV32I-PIC-NEXT:    mv a5, a0
 ; RV32I-PIC-NEXT:    mv a6, a0
 ; RV32I-PIC-NEXT:    mv a7, a0
-; RV32I-PIC-NEXT:    call external_many_args@plt
+; RV32I-PIC-NEXT:    call external_many_args
 ; RV32I-PIC-NEXT:    mv a0, s0
 ; RV32I-PIC-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-PIC-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
@@ -282,7 +282,7 @@ define i32 @test_call_defined_many_args(i32 %a) nounwind {
 ; RV32I-NEXT:    mv a5, a0
 ; RV32I-NEXT:    mv a6, a0
 ; RV32I-NEXT:    mv a7, a0
-; RV32I-NEXT:    call defined_many_args@plt
+; RV32I-NEXT:    call defined_many_args
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -300,7 +300,7 @@ define i32 @test_call_defined_many_args(i32 %a) nounwind {
 ; RV32I-PIC-NEXT:    mv a5, a0
 ; RV32I-PIC-NEXT:    mv a6, a0
 ; RV32I-PIC-NEXT:    mv a7, a0
-; RV32I-PIC-NEXT:    call defined_many_args@plt
+; RV32I-PIC-NEXT:    call defined_many_args
 ; RV32I-PIC-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-PIC-NEXT:    addi sp, sp, 16
 ; RV32I-PIC-NEXT:    ret
diff --git a/llvm/test/CodeGen/RISCV/cm_mvas_mvsa.ll b/llvm/test/CodeGen/RISCV/cm_mvas_mvsa.ll
index 7992c2cda28d5b..2103c3e60b591c 100644
--- a/llvm/test/CodeGen/RISCV/cm_mvas_mvsa.ll
+++ b/llvm/test/CodeGen/RISCV/cm_mvas_mvsa.ll
@@ -21,11 +21,11 @@ define i32 @zcmp_mv(i32 %num, i32 %f) nounwind {
 ; CHECK32I-NEXT:    sw s2, 0(sp) # 4-byte Folded Spill
 ; CHECK32I-NEXT:    mv s0, a1
 ; CHECK32I-NEXT:    mv s1, a0
-; CHECK32I-NEXT:    call func@plt
+; CHECK32I-NEXT:    call func
 ; CHECK32I-NEXT:    mv s2, a0
 ; CHECK32I-NEXT:    mv a0, s1
 ; CHECK32I-NEXT:    mv a1, s0
-; CHECK32I-NEXT:    call func@plt
+; CHECK32I-NEXT:    call func
 ; CHECK32I-NEXT:    add a0, s2, s0
 ; CHECK32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; CHECK32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
@@ -38,10 +38,10 @@ define i32 @zcmp_mv(i32 %num, i32 %f) nounwind {
 ; CHECK32ZCMP:       # %bb.0:
 ; CHECK32ZCMP-NEXT:    cm.push {ra, s0-s2}, -16
 ; CHECK32ZCMP-NEXT:    cm.mvsa01 s1, s0
-; CHECK32ZCMP-NEXT:    call func@plt
+; CHECK32ZCMP-NEXT:    call func
 ; CHECK32ZCMP-NEXT:    mv s2, a0
 ; CHECK32ZCMP-NEXT:    cm.mva01s s1, s0
-; CHECK32ZCMP-NEXT:    call func@plt
+; CHECK32ZCMP-NEXT:    call func
 ; CHECK32ZCMP-NEXT:    add a0, s2, s0
 ; CHECK32ZCMP-NEXT:    cm.popret {ra, s0-s2}, 16
 ;
@@ -54,11 +54,11 @@ define i32 @zcmp_mv(i32 %num, i32 %f) nounwind {
 ; CHECK64I-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
 ; CHECK64I-NEXT:    mv s0, a1
 ; CHECK64I-NEXT:    mv s1, a0
-; CHECK64I-NEXT:    call func@plt
+; CHECK64I-NEXT:    call func
 ; CHECK64I-NEXT:    mv s2, a0
 ; CHECK64I-NEXT:    mv a0, s1
 ; CHECK64I-NEXT:    mv a1, s0
-; CHECK64I-NEXT:    call func@plt
+; CHECK64I-NEXT:    call func
 ; CHECK64I-NEXT:    addw a0, s2, s0
 ; CHECK64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; CHECK64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
@@ -71,10 +71,10 @@ define i32 @zcmp_mv(i32 %num, i32 %f) nounwind {
 ; CHECK64ZCMP:       # %bb.0:
 ; CHECK64ZCMP-NEXT:    cm.push {ra, s0-s2}, -32
 ; CHECK64ZCMP-NEXT:    cm.mvsa01 s1, s0
-; CHECK64ZCMP-NEXT:    call func@plt
+; CHECK64ZCMP-NEXT:    call func
 ; CHECK64ZCMP-NEXT:    mv s2, a0
 ; CHECK64ZCMP-NEXT:    cm.mva01s s1, s0
-; CHECK64ZCMP-NEXT:    call func@plt
+; CHECK64ZCMP-NEXT:    call func
 ; CHECK64ZCMP-NEXT:    addw a0, s2, s0
 ; CHECK64ZCMP-NEXT:    cm.popret {ra, s0-s2}, 32
   %call = call i32 @func(i32 %num, i32 %f)
@@ -91,15 +91,15 @@ define i32 @not_zcmp_mv(i32 %num, i32 %f) nounwind {
 ; CHECK32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; CHECK32I-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
 ; CHECK32I-NEXT:    mv s0, a1
-; CHECK32I-NEXT:    call foo@plt
+; CHECK32I-NEXT:    call foo
 ; CHECK32I-NEXT:    mv s1, a0
 ; CHECK32I-NEXT:    mv a0, s0
-; CHECK32I-NEXT:    call foo@plt
+; CHECK32I-NEXT:    call foo
 ; CHECK32I-NEXT:    mv a0, s1
-; CHECK32I-NEXT:    call foo@plt
+; CHECK32I-NEXT:    call foo
 ; CHECK32I-NEXT:    li a0, 1
 ; CHECK32I-NEXT:    mv a1, s0
-; CHECK32I-NEXT:    call func@plt
+; CHECK32I-NEXT:    call func
 ; CHECK32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; CHECK32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; CHECK32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
@@ -110,15 +110,15 @@ define i32 @not_zcmp_mv(i32 %num, i32 %f) nounwind {
 ; CHECK32ZCMP:       # %bb.0:
 ; CHECK32ZCMP-NEXT:    cm.push {ra, s0-s1}, -16
 ; CHECK32ZCMP-NEXT:    mv s0, a1
-; CHECK32ZCMP-NEXT:    call foo@plt
+; CHECK32ZCMP-NEXT:    call foo
 ; CHECK32ZCMP-NEXT:    mv s1, a0
 ; CHECK32ZCMP-NEXT:    mv a0, s0
-; CHECK32ZCMP-NEXT:    call foo@plt
+; CHECK32ZCMP-NEXT:    call foo
 ; CHECK32ZCMP-NEXT:    mv a0, s1
-; CHECK32ZCMP-NEXT:    call foo@plt
+; CHECK32ZCMP-NEXT:    call foo
 ; CHECK32ZCMP-NEXT:    li a0, 1
 ; CHECK32ZCMP-NEXT:    mv a1, s0
-; CHECK32ZCMP-NEXT:    call func@plt
+; CHECK32ZCMP-NEXT:    call func
 ; CHECK32ZCMP-NEXT:    cm.popret {ra, s0-s1}, 16
 ;
 ; CHECK64I-LABEL: not_zcmp_mv:
@@ -128,15 +128,15 @@ define i32 @not_zcmp_mv(i32 %num, i32 %f) nounwind {
 ; CHECK64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
 ; CHECK64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
 ; CHECK64I-NEXT:    mv s0, a1
-; CHECK64I-NEXT:    call foo@plt
+; CHECK64I-NEXT:    call foo
 ; CHECK64I-NEXT:    mv s1, a0
 ; CHECK64I-NEXT:    mv a0, s0
-; CHECK64I-NEXT:    call foo@plt
+; CHECK64I-NEXT:    call foo
 ; CHECK64I-NEXT:    mv a0, s1
-; CHECK64I-NEXT:    call foo@plt
+; CHECK64I-NEXT:    call foo
 ; CHECK64I-NEXT:    li a0, 1
 ; CHECK64I-NEXT:    mv a1, s0
-; CHECK64I-NEXT:    call func@plt
+; CHECK64I-NEXT:    call func
 ; CHECK64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; CHECK64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; CHECK64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
@@ -147,15 +147,15 @@ define i32 @not_zcmp_mv(i32 %num, i32 %f) nounwind {
 ; CHECK64ZCMP:       # %bb.0:
 ; CHECK64ZCMP-NEXT:    cm.push {ra, s0-s1}, -32
 ; CHECK64ZCMP-NEXT:    mv s0, a1
-; CHECK64ZCMP-NEXT:    call foo@plt
+; CHECK64ZCMP-NEXT:    call foo
 ; CHECK64ZCMP-NEXT:    mv s1, a0
 ; CHECK64ZCMP-NEXT:    mv a0, s0
-; CHECK64ZCMP-NEXT:    call foo@plt
+; CHECK64ZCMP-NEXT:    call foo
 ; CHECK64ZCMP-NEXT:    mv a0, s1
-; CHECK64ZCMP-NEXT:    call foo@plt
+; CHECK64ZCMP-NEXT:    call foo
 ; CHECK64ZCMP-NEXT:    li a0, 1
 ; CHECK64ZCMP-NEXT:    mv a1, s0
-; CHECK64ZCMP-NEXT:    call func@plt
+; CHECK64ZCMP-NEXT:    call func
 ; CHECK64ZCMP-NEXT:    cm.popret {ra, s0-s1}, 32
   %call = call i32 @foo(i32 %num)
   %call1 = call i32 @foo(i32 %f)
diff --git a/llvm/test/CodeGen/RISCV/condops.ll b/llvm/test/CodeGen/RISCV/condops.ll
index bce6707781c079..23f219c2487ebc 100644
--- a/llvm/test/CodeGen/RISCV/condops.ll
+++ b/llvm/test/CodeGen/RISCV/condops.ll
@@ -3092,7 +3092,7 @@ define void @sextw_removal_maskc(i1 %c, i32 signext %arg, i32 signext %arg1) nou
 ; RV32I-NEXT:  .LBB56_1: # %bb2
 ; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    mv a0, s1
-; RV32I-NEXT:    call bar@plt
+; RV32I-NEXT:    call bar
 ; RV32I-NEXT:    sll s1, s1, s0
 ; RV32I-NEXT:    bnez a0, .LBB56_1
 ; RV32I-NEXT:  # %bb.2: # %bb7
@@ -3115,7 +3115,7 @@ define void @sextw_removal_maskc(i1 %c, i32 signext %arg, i32 signext %arg1) nou
 ; RV64I-NEXT:  .LBB56_1: # %bb2
 ; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    mv a0, s1
-; RV64I-NEXT:    call bar@plt
+; RV64I-NEXT:    call bar
 ; RV64I-NEXT:    sllw s1, s1, s0
 ; RV64I-NEXT:    bnez a0, .LBB56_1
 ; RV64I-NEXT:  # %bb.2: # %bb7
@@ -3137,7 +3137,7 @@ define void @sextw_removal_maskc(i1 %c, i32 signext %arg, i32 signext %arg1) nou
 ; RV64XVENTANACONDOPS-NEXT:  .LBB56_1: # %bb2
 ; RV64XVENTANACONDOPS-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64XVENTANACONDOPS-NEXT:    mv a0, s1
-; RV64XVENTANACONDOPS-NEXT:    call bar@plt
+; RV64XVENTANACONDOPS-NEXT:    call bar
 ; RV64XVENTANACONDOPS-NEXT:    sllw s1, s1, s0
 ; RV64XVENTANACONDOPS-NEXT:    bnez a0, .LBB56_1
 ; RV64XVENTANACONDOPS-NEXT:  # %bb.2: # %bb7
@@ -3160,7 +3160,7 @@ define void @sextw_removal_maskc(i1 %c, i32 signext %arg, i32 signext %arg1) nou
 ; RV64XTHEADCONDMOV-NEXT:  .LBB56_1: # %bb2
 ; RV64XTHEADCONDMOV-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64XTHEADCONDMOV-NEXT:    sext.w a0, s1
-; RV64XTHEADCONDMOV-NEXT:    call bar@plt
+; RV64XTHEADCONDMOV-NEXT:    call bar
 ; RV64XTHEADCONDMOV-NEXT:    sllw s1, s1, s0
 ; RV64XTHEADCONDMOV-NEXT:    bnez a0, .LBB56_1
 ; RV64XTHEADCONDMOV-NEXT:  # %bb.2: # %bb7
@@ -3182,7 +3182,7 @@ define void @sextw_removal_maskc(i1 %c, i32 signext %arg, i32 signext %arg1) nou
 ; RV32ZICOND-NEXT:  .LBB56_1: # %bb2
 ; RV32ZICOND-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32ZICOND-NEXT:    mv a0, s1
-; RV32ZICOND-NEXT:    call bar@plt
+; RV32ZICOND-NEXT:    call bar
 ; RV32ZICOND-NEXT:    sll s1, s1, s0
 ; RV32ZICOND-NEXT:    bnez a0, .LBB56_1
 ; RV32ZICOND-NEXT:  # %bb.2: # %bb7
@@ -3204,7 +3204,7 @@ define void @sextw_removal_maskc(i1 %c, i32 signext %arg, i32 signext %arg1) nou
 ; RV64ZICOND-NEXT:  .LBB56_1: # %bb2
 ; RV64ZICOND-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64ZICOND-NEXT:    mv a0, s1
-; RV64ZICOND-NEXT:    call bar@plt
+; RV64ZICOND-NEXT:    call bar
 ; RV64ZICOND-NEXT:    sllw s1, s1, s0
 ; RV64ZICOND-NEXT:    bnez a0, .LBB56_1
 ; RV64ZICOND-NEXT:  # %bb.2: # %bb7
@@ -3243,7 +3243,7 @@ define void @sextw_removal_maskcn(i1 %c, i32 signext %arg, i32 signext %arg1) no
 ; RV32I-NEXT:  .LBB57_1: # %bb2
 ; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    mv a0, s1
-; RV32I-NEXT:    call bar@plt
+; RV32I-NEXT:    call bar
 ; RV32I-NEXT:    sll s1, s1, s0
 ; RV32I-NEXT:    bnez a0, .LBB57_1
 ; RV32I-NEXT:  # %bb.2: # %bb7
@@ -3266,7 +3266,7 @@ define void @sextw_removal_maskcn(i1 %c, i32 signext %arg, i32 signext %arg1) no
 ; RV64I-NEXT:  .LBB57_1: # %bb2
 ; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    mv a0, s1
-; RV64I-NEXT:    call bar@plt
+; RV64I-NEXT:    call bar
 ; RV64I-NEXT:    sllw s1, s1, s0
 ; RV64I-NEXT:    bnez a0, .LBB57_1
 ; RV64I-NEXT:  # %bb.2: # %bb7
@@ -3288,7 +3288,7 @@ define void @sextw_removal_maskcn(i1 %c, i32 signext %arg, i32 signext %arg1) no
 ; RV64XVENTANACONDOPS-NEXT:  .LBB57_1: # %bb2
 ; RV64XVENTANACONDOPS-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64XVENTANACONDOPS-NEXT:    mv a0, s1
-; RV64XVENTANACONDOPS-NEXT:    call bar@plt
+; RV64XVENTANACONDOPS-NEXT:    call bar
 ; RV64XVENTANACONDOPS-NEXT:    sllw s1, s1, s0
 ; RV64XVENTANACONDOPS-NEXT:    bnez a0, .LBB57_1
 ; RV64XVENTANACONDOPS-NEXT:  # %bb.2: # %bb7
@@ -3311,7 +3311,7 @@ define void @sextw_removal_maskcn(i1 %c, i32 signext %arg, i32 signext %arg1) no
 ; RV64XTHEADCONDMOV-NEXT:  .LBB57_1: # %bb2
 ; RV64XTHEADCONDMOV-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64XTHEADCONDMOV-NEXT:    sext.w a0, s1
-; RV64XTHEADCONDMOV-NEXT:    call bar@plt
+; RV64XTHEADCONDMOV-NEXT:    call bar
 ; RV64XTHEADCONDMOV-NEXT:    sllw s1, s1, s0
 ; RV64XTHEADCONDMOV-NEXT:    bnez a0, .LBB57_1
 ; RV64XTHEADCONDMOV-NEXT:  # %bb.2: # %bb7
@@ -3333,7 +3333,7 @@ define void @sextw_removal_maskcn(i1 %c, i32 signext %arg, i32 signext %arg1) no
 ; RV32ZICOND-NEXT:  .LBB57_1: # %bb2
 ; RV32ZICOND-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32ZICOND-NEXT:    mv a0, s1
-; RV32ZICOND-NEXT:    call bar@plt
+; RV32ZICOND-NEXT:    call bar
 ; RV32ZICOND-NEXT:    sll s1, s1, s0
 ; RV32ZICOND-NEXT:    bnez a0, .LBB57_1
 ; RV32ZICOND-NEXT:  # %bb.2: # %bb7
@@ -3355,7 +3355,7 @@ define void @sextw_removal_maskcn(i1 %c, i32 signext %arg, i32 signext %arg1) no
 ; RV64ZICOND-NEXT:  .LBB57_1: # %bb2
 ; RV64ZICOND-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64ZICOND-NEXT:    mv a0, s1
-; RV64ZICOND-NEXT:    call bar@plt
+; RV64ZICOND-NEXT:    call bar
 ; RV64ZICOND-NEXT:    sllw s1, s1, s0
 ; RV64ZICOND-NEXT:    bnez a0, .LBB57_1
 ; RV64ZICOND-NEXT:  # %bb.2: # %bb7
@@ -3505,7 +3505,7 @@ define signext i16 @numsignbits(i16 signext %0, i16 signext %1, i16 signext %2,
 ; RV32I-NEXT:    beqz a1, .LBB60_4
 ; RV32I-NEXT:  # %bb.3:
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call bat@plt
+; RV32I-NEXT:    call bat
 ; RV32I-NEXT:  .LBB60_4:
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -3526,7 +3526,7 @@ define signext i16 @numsignbits(i16 signext %0, i16 signext %1, i16 signext %2,
 ; RV64I-NEXT:    beqz a1, .LBB60_4
 ; RV64I-NEXT:  # %bb.3:
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call bat@plt
+; RV64I-NEXT:    call bat
 ; RV64I-NEXT:  .LBB60_4:
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -3545,7 +3545,7 @@ define signext i16 @numsignbits(i16 signext %0, i16 signext %1, i16 signext %2,
 ; RV64XVENTANACONDOPS-NEXT:    beqz a1, .LBB60_2
 ; RV64XVENTANACONDOPS-NEXT:  # %bb.1:
 ; RV64XVENTANACONDOPS-NEXT:    mv a0, s0
-; RV64XVENTANACONDOPS-NEXT:    call bat@plt
+; RV64XVENTANACONDOPS-NEXT:    call bat
 ; RV64XVENTANACONDOPS-NEXT:  .LBB60_2:
 ; RV64XVENTANACONDOPS-NEXT:    mv a0, s0
 ; RV64XVENTANACONDOPS-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -3563,7 +3563,7 @@ define signext i16 @numsignbits(i16 signext %0, i16 signext %1, i16 signext %2,
 ; RV64XTHEADCONDMOV-NEXT:    beqz a1, .LBB60_2
 ; RV64XTHEADCONDMOV-NEXT:  # %bb.1:
 ; RV64XTHEADCONDMOV-NEXT:    mv a0, s0
-; RV64XTHEADCONDMOV-NEXT:    call bat@plt
+; RV64XTHEADCONDMOV-NEXT:    call bat
 ; RV64XTHEADCONDMOV-NEXT:  .LBB60_2:
 ; RV64XTHEADCONDMOV-NEXT:    mv a0, s0
 ; RV64XTHEADCONDMOV-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -3582,7 +3582,7 @@ define signext i16 @numsignbits(i16 signext %0, i16 signext %1, i16 signext %2,
 ; RV32ZICOND-NEXT:    beqz a1, .LBB60_2
 ; RV32ZICOND-NEXT:  # %bb.1:
 ; RV32ZICOND-NEXT:    mv a0, s0
-; RV32ZICOND-NEXT:    call bat@plt
+; RV32ZICOND-NEXT:    call bat
 ; RV32ZICOND-NEXT:  .LBB60_2:
 ; RV32ZICOND-NEXT:    mv a0, s0
 ; RV32ZICOND-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -3601,7 +3601,7 @@ define signext i16 @numsignbits(i16 signext %0, i16 signext %1, i16 signext %2,
 ; RV64ZICOND-NEXT:    beqz a1, .LBB60_2
 ; RV64ZICOND-NEXT:  # %bb.1:
 ; RV64ZICOND-NEXT:    mv a0, s0
-; RV64ZICOND-NEXT:    call bat@plt
+; RV64ZICOND-NEXT:    call bat
 ; RV64ZICOND-NEXT:  .LBB60_2:
 ; RV64ZICOND-NEXT:    mv a0, s0
 ; RV64ZICOND-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/copysign-casts.ll b/llvm/test/CodeGen/RISCV/copysign-casts.ll
index 931db00d06f8b6..accd52369fa1cd 100644
--- a/llvm/test/CodeGen/RISCV/copysign-casts.ll
+++ b/llvm/test/CodeGen/RISCV/copysign-casts.ll
@@ -164,7 +164,7 @@ define double @fold_promote_d_h(double %a, half %b) nounwind {
 ; RV32IFD-NEXT:    fsd fs0, 0(sp) # 8-byte Folded Spill
 ; RV32IFD-NEXT:    fmv.d fs0, fa0
 ; RV32IFD-NEXT:    fmv.s fa0, fa1
-; RV32IFD-NEXT:    call __extendhfsf2@plt
+; RV32IFD-NEXT:    call __extendhfsf2
 ; RV32IFD-NEXT:    fcvt.d.s fa5, fa0
 ; RV32IFD-NEXT:    fsgnj.d fa0, fs0, fa5
 ; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -179,7 +179,7 @@ define double @fold_promote_d_h(double %a, half %b) nounwind {
 ; RV64IFD-NEXT:    fsd fs0, 0(sp) # 8-byte Folded Spill
 ; RV64IFD-NEXT:    fmv.d fs0, fa0
 ; RV64IFD-NEXT:    fmv.s fa0, fa1
-; RV64IFD-NEXT:    call __extendhfsf2@plt
+; RV64IFD-NEXT:    call __extendhfsf2
 ; RV64IFD-NEXT:    fcvt.d.s fa5, fa0
 ; RV64IFD-NEXT:    fsgnj.d fa0, fs0, fa5
 ; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -265,7 +265,7 @@ define float @fold_promote_f_h(float %a, half %b) nounwind {
 ; RV32IF-NEXT:    fsw fs0, 8(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    fmv.s fs0, fa0
 ; RV32IF-NEXT:    fmv.s fa0, fa1
-; RV32IF-NEXT:    call __extendhfsf2@plt
+; RV32IF-NEXT:    call __extendhfsf2
 ; RV32IF-NEXT:    fsgnj.s fa0, fs0, fa0
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    flw fs0, 8(sp) # 4-byte Folded Reload
@@ -279,7 +279,7 @@ define float @fold_promote_f_h(float %a, half %b) nounwind {
 ; RV32IFD-NEXT:    fsd fs0, 0(sp) # 8-byte Folded Spill
 ; RV32IFD-NEXT:    fmv.s fs0, fa0
 ; RV32IFD-NEXT:    fmv.s fa0, fa1
-; RV32IFD-NEXT:    call __extendhfsf2@plt
+; RV32IFD-NEXT:    call __extendhfsf2
 ; RV32IFD-NEXT:    fsgnj.s fa0, fs0, fa0
 ; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    fld fs0, 0(sp) # 8-byte Folded Reload
@@ -293,7 +293,7 @@ define float @fold_promote_f_h(float %a, half %b) nounwind {
 ; RV64IFD-NEXT:    fsd fs0, 0(sp) # 8-byte Folded Spill
 ; RV64IFD-NEXT:    fmv.s fs0, fa0
 ; RV64IFD-NEXT:    fmv.s fa0, fa1
-; RV64IFD-NEXT:    call __extendhfsf2@plt
+; RV64IFD-NEXT:    call __extendhfsf2
 ; RV64IFD-NEXT:    fsgnj.s fa0, fs0, fa0
 ; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    fld fs0, 0(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll b/llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll
index da67176e3f0ca0..455e6e54c9b396 100644
--- a/llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll
+++ b/llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll
@@ -244,7 +244,7 @@ define i32 @test_cttz_i32(i32 %a) nounwind {
 ; RV32I-NEXT:    and a0, a0, a1
 ; RV32I-NEXT:    lui a1, 30667
 ; RV32I-NEXT:    addi a1, a1, 1329
-; RV32I-NEXT:    call __mulsi3@plt
+; RV32I-NEXT:    call __mulsi3
 ; RV32I-NEXT:    srli a0, a0, 27
 ; RV32I-NEXT:    lui a1, %hi(.LCPI2_0)
 ; RV32I-NEXT:    addi a1, a1, %lo(.LCPI2_0)
@@ -268,7 +268,7 @@ define i32 @test_cttz_i32(i32 %a) nounwind {
 ; RV64I-NEXT:    and a0, a0, a1
 ; RV64I-NEXT:    lui a1, 30667
 ; RV64I-NEXT:    addiw a1, a1, 1329
-; RV64I-NEXT:    call __muldi3@plt
+; RV64I-NEXT:    call __muldi3
 ; RV64I-NEXT:    srliw a0, a0, 27
 ; RV64I-NEXT:    lui a1, %hi(.LCPI2_0)
 ; RV64I-NEXT:    addi a1, a1, %lo(.LCPI2_0)
@@ -381,14 +381,14 @@ define i64 @test_cttz_i64(i64 %a) nounwind {
 ; RV32I-NEXT:    lui a1, 30667
 ; RV32I-NEXT:    addi s3, a1, 1329
 ; RV32I-NEXT:    mv a1, s3
-; RV32I-NEXT:    call __mulsi3@plt
+; RV32I-NEXT:    call __mulsi3
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    lui a0, %hi(.LCPI3_0)
 ; RV32I-NEXT:    addi s4, a0, %lo(.LCPI3_0)
 ; RV32I-NEXT:    neg a0, s2
 ; RV32I-NEXT:    and a0, s2, a0
 ; RV32I-NEXT:    mv a1, s3
-; RV32I-NEXT:    call __mulsi3@plt
+; RV32I-NEXT:    call __mulsi3
 ; RV32I-NEXT:    bnez s2, .LBB3_3
 ; RV32I-NEXT:  # %bb.1:
 ; RV32I-NEXT:    li a0, 32
@@ -426,7 +426,7 @@ define i64 @test_cttz_i64(i64 %a) nounwind {
 ; RV64I-NEXT:    and a0, a0, a1
 ; RV64I-NEXT:    lui a1, %hi(.LCPI3_0)
 ; RV64I-NEXT:    ld a1, %lo(.LCPI3_0)(a1)
-; RV64I-NEXT:    call __muldi3@plt
+; RV64I-NEXT:    call __muldi3
 ; RV64I-NEXT:    srli a0, a0, 58
 ; RV64I-NEXT:    lui a1, %hi(.LCPI3_1)
 ; RV64I-NEXT:    addi a1, a1, %lo(.LCPI3_1)
@@ -706,7 +706,7 @@ define i32 @test_cttz_i32_zero_undef(i32 %a) nounwind {
 ; RV32I-NEXT:    and a0, a0, a1
 ; RV32I-NEXT:    lui a1, 30667
 ; RV32I-NEXT:    addi a1, a1, 1329
-; RV32I-NEXT:    call __mulsi3@plt
+; RV32I-NEXT:    call __mulsi3
 ; RV32I-NEXT:    srli a0, a0, 27
 ; RV32I-NEXT:    lui a1, %hi(.LCPI6_0)
 ; RV32I-NEXT:    addi a1, a1, %lo(.LCPI6_0)
@@ -724,7 +724,7 @@ define i32 @test_cttz_i32_zero_undef(i32 %a) nounwind {
 ; RV64I-NEXT:    and a0, a0, a1
 ; RV64I-NEXT:    lui a1, 30667
 ; RV64I-NEXT:    addiw a1, a1, 1329
-; RV64I-NEXT:    call __muldi3@plt
+; RV64I-NEXT:    call __muldi3
 ; RV64I-NEXT:    srliw a0, a0, 27
 ; RV64I-NEXT:    lui a1, %hi(.LCPI6_0)
 ; RV64I-NEXT:    addi a1, a1, %lo(.LCPI6_0)
@@ -812,14 +812,14 @@ define i64 @test_cttz_i64_zero_undef(i64 %a) nounwind {
 ; RV32I-NEXT:    lui a1, 30667
 ; RV32I-NEXT:    addi s3, a1, 1329
 ; RV32I-NEXT:    mv a1, s3
-; RV32I-NEXT:    call __mulsi3@plt
+; RV32I-NEXT:    call __mulsi3
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lui a0, %hi(.LCPI7_0)
 ; RV32I-NEXT:    addi s4, a0, %lo(.LCPI7_0)
 ; RV32I-NEXT:    neg a0, s1
 ; RV32I-NEXT:    and a0, s1, a0
 ; RV32I-NEXT:    mv a1, s3
-; RV32I-NEXT:    call __mulsi3@plt
+; RV32I-NEXT:    call __mulsi3
 ; RV32I-NEXT:    bnez s2, .LBB7_2
 ; RV32I-NEXT:  # %bb.1:
 ; RV32I-NEXT:    srli a0, a0, 27
@@ -850,7 +850,7 @@ define i64 @test_cttz_i64_zero_undef(i64 %a) nounwind {
 ; RV64I-NEXT:    and a0, a0, a1
 ; RV64I-NEXT:    lui a1, %hi(.LCPI7_0)
 ; RV64I-NEXT:    ld a1, %lo(.LCPI7_0)(a1)
-; RV64I-NEXT:    call __muldi3@plt
+; RV64I-NEXT:    call __muldi3
 ; RV64I-NEXT:    srli a0, a0, 58
 ; RV64I-NEXT:    lui a1, %hi(.LCPI7_1)
 ; RV64I-NEXT:    addi a1, a1, %lo(.LCPI7_1)
@@ -1191,7 +1191,7 @@ define i32 @test_ctlz_i32(i32 %a) nounwind {
 ; RV32I-NEXT:    and a0, a0, a1
 ; RV32I-NEXT:    lui a1, 4112
 ; RV32I-NEXT:    addi a1, a1, 257
-; RV32I-NEXT:    call __mulsi3@plt
+; RV32I-NEXT:    call __mulsi3
 ; RV32I-NEXT:    srli a0, a0, 24
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -1236,7 +1236,7 @@ define i32 @test_ctlz_i32(i32 %a) nounwind {
 ; RV64I-NEXT:    and a0, a0, a1
 ; RV64I-NEXT:    lui a1, 4112
 ; RV64I-NEXT:    addiw a1, a1, 257
-; RV64I-NEXT:    call __muldi3@plt
+; RV64I-NEXT:    call __muldi3
 ; RV64I-NEXT:    srliw a0, a0, 24
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -1395,7 +1395,7 @@ define i64 @test_ctlz_i64(i64 %a) nounwind {
 ; RV32I-NEXT:    lui a1, 4112
 ; RV32I-NEXT:    addi s3, a1, 257
 ; RV32I-NEXT:    mv a1, s3
-; RV32I-NEXT:    call __mulsi3@plt
+; RV32I-NEXT:    call __mulsi3
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    srli a0, s2, 1
 ; RV32I-NEXT:    or a0, s2, a0
@@ -1419,7 +1419,7 @@ define i64 @test_ctlz_i64(i64 %a) nounwind {
 ; RV32I-NEXT:    add a0, a0, a1
 ; RV32I-NEXT:    and a0, a0, s6
 ; RV32I-NEXT:    mv a1, s3
-; RV32I-NEXT:    call __mulsi3@plt
+; RV32I-NEXT:    call __mulsi3
 ; RV32I-NEXT:    bnez s0, .LBB11_2
 ; RV32I-NEXT:  # %bb.1:
 ; RV32I-NEXT:    srli a0, a0, 24
@@ -1485,7 +1485,7 @@ define i64 @test_ctlz_i64(i64 %a) nounwind {
 ; RV64I-NEXT:    addiw a1, a1, 257
 ; RV64I-NEXT:    slli a2, a1, 32
 ; RV64I-NEXT:    add a1, a1, a2
-; RV64I-NEXT:    call __muldi3@plt
+; RV64I-NEXT:    call __muldi3
 ; RV64I-NEXT:    srli a0, a0, 56
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -1862,7 +1862,7 @@ define i32 @test_ctlz_i32_zero_undef(i32 %a) nounwind {
 ; RV32I-NEXT:    and a0, a0, a1
 ; RV32I-NEXT:    lui a1, 4112
 ; RV32I-NEXT:    addi a1, a1, 257
-; RV32I-NEXT:    call __mulsi3@plt
+; RV32I-NEXT:    call __mulsi3
 ; RV32I-NEXT:    srli a0, a0, 24
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -1901,7 +1901,7 @@ define i32 @test_ctlz_i32_zero_undef(i32 %a) nounwind {
 ; RV64I-NEXT:    and a0, a0, a1
 ; RV64I-NEXT:    lui a1, 4112
 ; RV64I-NEXT:    addiw a1, a1, 257
-; RV64I-NEXT:    call __muldi3@plt
+; RV64I-NEXT:    call __muldi3
 ; RV64I-NEXT:    srliw a0, a0, 24
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -2046,7 +2046,7 @@ define i64 @test_ctlz_i64_zero_undef(i64 %a) nounwind {
 ; RV32I-NEXT:    lui a1, 4112
 ; RV32I-NEXT:    addi s3, a1, 257
 ; RV32I-NEXT:    mv a1, s3
-; RV32I-NEXT:    call __mulsi3@plt
+; RV32I-NEXT:    call __mulsi3
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    srli a0, s2, 1
 ; RV32I-NEXT:    or a0, s2, a0
@@ -2070,7 +2070,7 @@ define i64 @test_ctlz_i64_zero_undef(i64 %a) nounwind {
 ; RV32I-NEXT:    add a0, a0, a1
 ; RV32I-NEXT:    and a0, a0, s6
 ; RV32I-NEXT:    mv a1, s3
-; RV32I-NEXT:    call __mulsi3@plt
+; RV32I-NEXT:    call __mulsi3
 ; RV32I-NEXT:    bnez s0, .LBB15_2
 ; RV32I-NEXT:  # %bb.1:
 ; RV32I-NEXT:    srli a0, a0, 24
@@ -2134,7 +2134,7 @@ define i64 @test_ctlz_i64_zero_undef(i64 %a) nounwind {
 ; RV64I-NEXT:    addiw a1, a1, 257
 ; RV64I-NEXT:    slli a2, a1, 32
 ; RV64I-NEXT:    add a1, a1, a2
-; RV64I-NEXT:    call __muldi3@plt
+; RV64I-NEXT:    call __muldi3
 ; RV64I-NEXT:    srli a0, a0, 56
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -2484,7 +2484,7 @@ define i32 @test_ctpop_i32(i32 %a) nounwind {
 ; RV32I-NEXT:    and a0, a0, a1
 ; RV32I-NEXT:    lui a1, 4112
 ; RV32I-NEXT:    addi a1, a1, 257
-; RV32I-NEXT:    call __mulsi3@plt
+; RV32I-NEXT:    call __mulsi3
 ; RV32I-NEXT:    srli a0, a0, 24
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -2512,7 +2512,7 @@ define i32 @test_ctpop_i32(i32 %a) nounwind {
 ; RV64I-NEXT:    and a0, a0, a1
 ; RV64I-NEXT:    lui a1, 4112
 ; RV64I-NEXT:    addiw a1, a1, 257
-; RV64I-NEXT:    call __muldi3@plt
+; RV64I-NEXT:    call __muldi3
 ; RV64I-NEXT:    srliw a0, a0, 24
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -2598,7 +2598,7 @@ define i32 @test_ctpop_i32(i32 %a) nounwind {
 ; RV32XTHEADBB-NEXT:    and a0, a0, a1
 ; RV32XTHEADBB-NEXT:    lui a1, 4112
 ; RV32XTHEADBB-NEXT:    addi a1, a1, 257
-; RV32XTHEADBB-NEXT:    call __mulsi3@plt
+; RV32XTHEADBB-NEXT:    call __mulsi3
 ; RV32XTHEADBB-NEXT:    srli a0, a0, 24
 ; RV32XTHEADBB-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32XTHEADBB-NEXT:    addi sp, sp, 16
@@ -2626,7 +2626,7 @@ define i32 @test_ctpop_i32(i32 %a) nounwind {
 ; RV64XTHEADBB-NEXT:    and a0, a0, a1
 ; RV64XTHEADBB-NEXT:    lui a1, 4112
 ; RV64XTHEADBB-NEXT:    addiw a1, a1, 257
-; RV64XTHEADBB-NEXT:    call __muldi3@plt
+; RV64XTHEADBB-NEXT:    call __muldi3
 ; RV64XTHEADBB-NEXT:    srliw a0, a0, 24
 ; RV64XTHEADBB-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64XTHEADBB-NEXT:    addi sp, sp, 16
@@ -2666,7 +2666,7 @@ define i64 @test_ctpop_i64(i64 %a) nounwind {
 ; RV32I-NEXT:    lui a1, 4112
 ; RV32I-NEXT:    addi s1, a1, 257
 ; RV32I-NEXT:    mv a1, s1
-; RV32I-NEXT:    call __mulsi3@plt
+; RV32I-NEXT:    call __mulsi3
 ; RV32I-NEXT:    srli s5, a0, 24
 ; RV32I-NEXT:    srli a0, s0, 1
 ; RV32I-NEXT:    and a0, a0, s2
@@ -2679,7 +2679,7 @@ define i64 @test_ctpop_i64(i64 %a) nounwind {
 ; RV32I-NEXT:    add a0, a0, a1
 ; RV32I-NEXT:    and a0, a0, s4
 ; RV32I-NEXT:    mv a1, s1
-; RV32I-NEXT:    call __mulsi3@plt
+; RV32I-NEXT:    call __mulsi3
 ; RV32I-NEXT:    srli a0, a0, 24
 ; RV32I-NEXT:    add a0, a0, s5
 ; RV32I-NEXT:    li a1, 0
@@ -2723,7 +2723,7 @@ define i64 @test_ctpop_i64(i64 %a) nounwind {
 ; RV64I-NEXT:    addiw a1, a1, 257
 ; RV64I-NEXT:    slli a2, a1, 32
 ; RV64I-NEXT:    add a1, a1, a2
-; RV64I-NEXT:    call __muldi3@plt
+; RV64I-NEXT:    call __muldi3
 ; RV64I-NEXT:    srli a0, a0, 56
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -2842,7 +2842,7 @@ define i64 @test_ctpop_i64(i64 %a) nounwind {
 ; RV32XTHEADBB-NEXT:    lui a1, 4112
 ; RV32XTHEADBB-NEXT:    addi s1, a1, 257
 ; RV32XTHEADBB-NEXT:    mv a1, s1
-; RV32XTHEADBB-NEXT:    call __mulsi3@plt
+; RV32XTHEADBB-NEXT:    call __mulsi3
 ; RV32XTHEADBB-NEXT:    srli s5, a0, 24
 ; RV32XTHEADBB-NEXT:    srli a0, s0, 1
 ; RV32XTHEADBB-NEXT:    and a0, a0, s2
@@ -2855,7 +2855,7 @@ define i64 @test_ctpop_i64(i64 %a) nounwind {
 ; RV32XTHEADBB-NEXT:    add a0, a0, a1
 ; RV32XTHEADBB-NEXT:    and a0, a0, s4
 ; RV32XTHEADBB-NEXT:    mv a1, s1
-; RV32XTHEADBB-NEXT:    call __mulsi3@plt
+; RV32XTHEADBB-NEXT:    call __mulsi3
 ; RV32XTHEADBB-NEXT:    srli a0, a0, 24
 ; RV32XTHEADBB-NEXT:    add a0, a0, s5
 ; RV32XTHEADBB-NEXT:    li a1, 0
@@ -2899,7 +2899,7 @@ define i64 @test_ctpop_i64(i64 %a) nounwind {
 ; RV64XTHEADBB-NEXT:    addiw a1, a1, 257
 ; RV64XTHEADBB-NEXT:    slli a2, a1, 32
 ; RV64XTHEADBB-NEXT:    add a1, a1, a2
-; RV64XTHEADBB-NEXT:    call __muldi3@plt
+; RV64XTHEADBB-NEXT:    call __muldi3
 ; RV64XTHEADBB-NEXT:    srli a0, a0, 56
 ; RV64XTHEADBB-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64XTHEADBB-NEXT:    addi sp, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/ctz_zero_return_test.ll b/llvm/test/CodeGen/RISCV/ctz_zero_return_test.ll
index 9bfd30daf7d4ee..a60fd26f49594c 100644
--- a/llvm/test/CodeGen/RISCV/ctz_zero_return_test.ll
+++ b/llvm/test/CodeGen/RISCV/ctz_zero_return_test.ll
@@ -46,14 +46,14 @@ define signext i32 @ctz_dereferencing_pointer(i64* %b) nounwind {
 ; RV32I-NEXT:    lui a1, 30667
 ; RV32I-NEXT:    addi s1, a1, 1329
 ; RV32I-NEXT:    mv a1, s1
-; RV32I-NEXT:    call __mulsi3@plt
+; RV32I-NEXT:    call __mulsi3
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lui a0, %hi(.LCPI0_0)
 ; RV32I-NEXT:    addi s3, a0, %lo(.LCPI0_0)
 ; RV32I-NEXT:    neg a0, s4
 ; RV32I-NEXT:    and a0, s4, a0
 ; RV32I-NEXT:    mv a1, s1
-; RV32I-NEXT:    call __mulsi3@plt
+; RV32I-NEXT:    call __mulsi3
 ; RV32I-NEXT:    bnez s4, .LBB0_3
 ; RV32I-NEXT:  # %bb.1: # %entry
 ; RV32I-NEXT:    li a0, 32
@@ -91,7 +91,7 @@ define signext i32 @ctz_dereferencing_pointer(i64* %b) nounwind {
 ; RV64I-NEXT:    and a0, s0, a0
 ; RV64I-NEXT:    lui a1, %hi(.LCPI0_0)
 ; RV64I-NEXT:    ld a1, %lo(.LCPI0_0)(a1)
-; RV64I-NEXT:    call __muldi3@plt
+; RV64I-NEXT:    call __muldi3
 ; RV64I-NEXT:    srli a0, a0, 58
 ; RV64I-NEXT:    lui a1, %hi(.LCPI0_1)
 ; RV64I-NEXT:    addi a1, a1, %lo(.LCPI0_1)
@@ -144,7 +144,7 @@ define i64 @ctz_dereferencing_pointer_zext(i32* %b) nounwind {
 ; RV32I-NEXT:    and a0, s0, a0
 ; RV32I-NEXT:    lui a1, 30667
 ; RV32I-NEXT:    addi a1, a1, 1329
-; RV32I-NEXT:    call __mulsi3@plt
+; RV32I-NEXT:    call __mulsi3
 ; RV32I-NEXT:    srli a0, a0, 27
 ; RV32I-NEXT:    lui a1, %hi(.LCPI1_0)
 ; RV32I-NEXT:    addi a1, a1, %lo(.LCPI1_0)
@@ -170,7 +170,7 @@ define i64 @ctz_dereferencing_pointer_zext(i32* %b) nounwind {
 ; RV64I-NEXT:    and a0, s0, a0
 ; RV64I-NEXT:    lui a1, 30667
 ; RV64I-NEXT:    addiw a1, a1, 1329
-; RV64I-NEXT:    call __muldi3@plt
+; RV64I-NEXT:    call __muldi3
 ; RV64I-NEXT:    srliw a0, a0, 27
 ; RV64I-NEXT:    lui a1, %hi(.LCPI1_0)
 ; RV64I-NEXT:    addi a1, a1, %lo(.LCPI1_0)
@@ -220,7 +220,7 @@ define signext i32 @ctz1(i32 signext %x) nounwind {
 ; RV32I-NEXT:    and a0, s0, a0
 ; RV32I-NEXT:    lui a1, 30667
 ; RV32I-NEXT:    addi a1, a1, 1329
-; RV32I-NEXT:    call __mulsi3@plt
+; RV32I-NEXT:    call __mulsi3
 ; RV32I-NEXT:    srli a0, a0, 27
 ; RV32I-NEXT:    lui a1, %hi(.LCPI2_0)
 ; RV32I-NEXT:    addi a1, a1, %lo(.LCPI2_0)
@@ -245,7 +245,7 @@ define signext i32 @ctz1(i32 signext %x) nounwind {
 ; RV64I-NEXT:    and a0, s0, a0
 ; RV64I-NEXT:    lui a1, 30667
 ; RV64I-NEXT:    addiw a1, a1, 1329
-; RV64I-NEXT:    call __muldi3@plt
+; RV64I-NEXT:    call __muldi3
 ; RV64I-NEXT:    srliw a0, a0, 27
 ; RV64I-NEXT:    lui a1, %hi(.LCPI2_0)
 ; RV64I-NEXT:    addi a1, a1, %lo(.LCPI2_0)
@@ -293,7 +293,7 @@ define signext i32 @ctz1_flipped(i32 signext %x) nounwind {
 ; RV32I-NEXT:    and a0, s0, a0
 ; RV32I-NEXT:    lui a1, 30667
 ; RV32I-NEXT:    addi a1, a1, 1329
-; RV32I-NEXT:    call __mulsi3@plt
+; RV32I-NEXT:    call __mulsi3
 ; RV32I-NEXT:    srli a0, a0, 27
 ; RV32I-NEXT:    lui a1, %hi(.LCPI3_0)
 ; RV32I-NEXT:    addi a1, a1, %lo(.LCPI3_0)
@@ -318,7 +318,7 @@ define signext i32 @ctz1_flipped(i32 signext %x) nounwind {
 ; RV64I-NEXT:    and a0, s0, a0
 ; RV64I-NEXT:    lui a1, 30667
 ; RV64I-NEXT:    addiw a1, a1, 1329
-; RV64I-NEXT:    call __muldi3@plt
+; RV64I-NEXT:    call __muldi3
 ; RV64I-NEXT:    srliw a0, a0, 27
 ; RV64I-NEXT:    lui a1, %hi(.LCPI3_0)
 ; RV64I-NEXT:    addi a1, a1, %lo(.LCPI3_0)
@@ -364,7 +364,7 @@ define signext i32 @ctz2(i32 signext %x) nounwind {
 ; RV32I-NEXT:    and a0, a0, a1
 ; RV32I-NEXT:    lui a1, 30667
 ; RV32I-NEXT:    addi a1, a1, 1329
-; RV32I-NEXT:    call __mulsi3@plt
+; RV32I-NEXT:    call __mulsi3
 ; RV32I-NEXT:    srli a0, a0, 27
 ; RV32I-NEXT:    lui a1, %hi(.LCPI4_0)
 ; RV32I-NEXT:    addi a1, a1, %lo(.LCPI4_0)
@@ -387,7 +387,7 @@ define signext i32 @ctz2(i32 signext %x) nounwind {
 ; RV64I-NEXT:    and a0, a0, a1
 ; RV64I-NEXT:    lui a1, 30667
 ; RV64I-NEXT:    addiw a1, a1, 1329
-; RV64I-NEXT:    call __muldi3@plt
+; RV64I-NEXT:    call __muldi3
 ; RV64I-NEXT:    srliw a0, a0, 27
 ; RV64I-NEXT:    lui a1, %hi(.LCPI4_0)
 ; RV64I-NEXT:    addi a1, a1, %lo(.LCPI4_0)
@@ -429,7 +429,7 @@ define signext i32 @ctz3(i32 signext %x) nounwind {
 ; RV32I-NEXT:    and a0, a0, a1
 ; RV32I-NEXT:    lui a1, 30667
 ; RV32I-NEXT:    addi a1, a1, 1329
-; RV32I-NEXT:    call __mulsi3@plt
+; RV32I-NEXT:    call __mulsi3
 ; RV32I-NEXT:    srli a0, a0, 27
 ; RV32I-NEXT:    lui a1, %hi(.LCPI5_0)
 ; RV32I-NEXT:    addi a1, a1, %lo(.LCPI5_0)
@@ -452,7 +452,7 @@ define signext i32 @ctz3(i32 signext %x) nounwind {
 ; RV64I-NEXT:    and a0, a0, a1
 ; RV64I-NEXT:    lui a1, 30667
 ; RV64I-NEXT:    addiw a1, a1, 1329
-; RV64I-NEXT:    call __muldi3@plt
+; RV64I-NEXT:    call __muldi3
 ; RV64I-NEXT:    srliw a0, a0, 27
 ; RV64I-NEXT:    lui a1, %hi(.LCPI5_0)
 ; RV64I-NEXT:    addi a1, a1, %lo(.LCPI5_0)
@@ -509,14 +509,14 @@ define signext i32 @ctz4(i64 %b) nounwind {
 ; RV32I-NEXT:    lui a1, 30667
 ; RV32I-NEXT:    addi s3, a1, 1329
 ; RV32I-NEXT:    mv a1, s3
-; RV32I-NEXT:    call __mulsi3@plt
+; RV32I-NEXT:    call __mulsi3
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    lui a0, %hi(.LCPI6_0)
 ; RV32I-NEXT:    addi s4, a0, %lo(.LCPI6_0)
 ; RV32I-NEXT:    neg a0, s2
 ; RV32I-NEXT:    and a0, s2, a0
 ; RV32I-NEXT:    mv a1, s3
-; RV32I-NEXT:    call __mulsi3@plt
+; RV32I-NEXT:    call __mulsi3
 ; RV32I-NEXT:    bnez s2, .LBB6_3
 ; RV32I-NEXT:  # %bb.1: # %entry
 ; RV32I-NEXT:    li a0, 32
@@ -554,7 +554,7 @@ define signext i32 @ctz4(i64 %b) nounwind {
 ; RV64I-NEXT:    and a0, s0, a0
 ; RV64I-NEXT:    lui a1, %hi(.LCPI6_0)
 ; RV64I-NEXT:    ld a1, %lo(.LCPI6_0)(a1)
-; RV64I-NEXT:    call __muldi3@plt
+; RV64I-NEXT:    call __muldi3
 ; RV64I-NEXT:    srli a0, a0, 58
 ; RV64I-NEXT:    lui a1, %hi(.LCPI6_1)
 ; RV64I-NEXT:    addi a1, a1, %lo(.LCPI6_1)
@@ -643,7 +643,7 @@ define signext i32 @ctlz(i64 %b) nounwind {
 ; RV32I-NEXT:    lui a1, 4112
 ; RV32I-NEXT:    addi s3, a1, 257
 ; RV32I-NEXT:    mv a1, s3
-; RV32I-NEXT:    call __mulsi3@plt
+; RV32I-NEXT:    call __mulsi3
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    srli a0, s2, 1
 ; RV32I-NEXT:    or a0, s2, a0
@@ -667,7 +667,7 @@ define signext i32 @ctlz(i64 %b) nounwind {
 ; RV32I-NEXT:    add a0, a0, a1
 ; RV32I-NEXT:    and a0, a0, s6
 ; RV32I-NEXT:    mv a1, s3
-; RV32I-NEXT:    call __mulsi3@plt
+; RV32I-NEXT:    call __mulsi3
 ; RV32I-NEXT:    bnez s0, .LBB7_2
 ; RV32I-NEXT:  # %bb.1: # %entry
 ; RV32I-NEXT:    srli a0, a0, 24
@@ -731,7 +731,7 @@ define signext i32 @ctlz(i64 %b) nounwind {
 ; RV64I-NEXT:    addiw a1, a1, 257
 ; RV64I-NEXT:    slli a2, a1, 32
 ; RV64I-NEXT:    add a1, a1, a2
-; RV64I-NEXT:    call __muldi3@plt
+; RV64I-NEXT:    call __muldi3
 ; RV64I-NEXT:    slli a0, a0, 2
 ; RV64I-NEXT:    srli a0, a0, 58
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -772,7 +772,7 @@ define signext i32 @ctz5(i32 signext %x) nounwind {
 ; RV32I-NEXT:    and a0, s0, a0
 ; RV32I-NEXT:    lui a1, 30667
 ; RV32I-NEXT:    addi a1, a1, 1329
-; RV32I-NEXT:    call __mulsi3@plt
+; RV32I-NEXT:    call __mulsi3
 ; RV32I-NEXT:    srli a0, a0, 27
 ; RV32I-NEXT:    lui a1, %hi(.LCPI8_0)
 ; RV32I-NEXT:    addi a1, a1, %lo(.LCPI8_0)
@@ -797,7 +797,7 @@ define signext i32 @ctz5(i32 signext %x) nounwind {
 ; RV64I-NEXT:    and a0, s0, a0
 ; RV64I-NEXT:    lui a1, 30667
 ; RV64I-NEXT:    addiw a1, a1, 1329
-; RV64I-NEXT:    call __muldi3@plt
+; RV64I-NEXT:    call __muldi3
 ; RV64I-NEXT:    srliw a0, a0, 27
 ; RV64I-NEXT:    lui a1, %hi(.LCPI8_0)
 ; RV64I-NEXT:    addi a1, a1, %lo(.LCPI8_0)
@@ -845,7 +845,7 @@ define signext i32 @ctz6(i32 signext %x) nounwind {
 ; RV32I-NEXT:    and a0, s0, a0
 ; RV32I-NEXT:    lui a1, 30667
 ; RV32I-NEXT:    addi a1, a1, 1329
-; RV32I-NEXT:    call __mulsi3@plt
+; RV32I-NEXT:    call __mulsi3
 ; RV32I-NEXT:    srli a0, a0, 27
 ; RV32I-NEXT:    lui a1, %hi(.LCPI9_0)
 ; RV32I-NEXT:    addi a1, a1, %lo(.LCPI9_0)
@@ -870,7 +870,7 @@ define signext i32 @ctz6(i32 signext %x) nounwind {
 ; RV64I-NEXT:    and a0, s0, a0
 ; RV64I-NEXT:    lui a1, 30667
 ; RV64I-NEXT:    addiw a1, a1, 1329
-; RV64I-NEXT:    call __muldi3@plt
+; RV64I-NEXT:    call __muldi3
 ; RV64I-NEXT:    srliw a0, a0, 27
 ; RV64I-NEXT:    lui a1, %hi(.LCPI9_0)
 ; RV64I-NEXT:    addi a1, a1, %lo(.LCPI9_0)
@@ -923,7 +923,7 @@ define signext i32 @globalVar() nounwind {
 ; RV32I-NEXT:    and a0, s0, a0
 ; RV32I-NEXT:    lui a1, 30667
 ; RV32I-NEXT:    addi a1, a1, 1329
-; RV32I-NEXT:    call __mulsi3@plt
+; RV32I-NEXT:    call __mulsi3
 ; RV32I-NEXT:    srli a0, a0, 27
 ; RV32I-NEXT:    lui a1, %hi(.LCPI10_0)
 ; RV32I-NEXT:    addi a1, a1, %lo(.LCPI10_0)
@@ -949,7 +949,7 @@ define signext i32 @globalVar() nounwind {
 ; RV64I-NEXT:    and a0, s0, a0
 ; RV64I-NEXT:    lui a1, 30667
 ; RV64I-NEXT:    addiw a1, a1, 1329
-; RV64I-NEXT:    call __muldi3@plt
+; RV64I-NEXT:    call __muldi3
 ; RV64I-NEXT:    srliw a0, a0, 27
 ; RV64I-NEXT:    lui a1, %hi(.LCPI10_0)
 ; RV64I-NEXT:    addi a1, a1, %lo(.LCPI10_0)
diff --git a/llvm/test/CodeGen/RISCV/div-by-constant.ll b/llvm/test/CodeGen/RISCV/div-by-constant.ll
index bf19bbd8b13145..91ac7c5ddae3ff 100644
--- a/llvm/test/CodeGen/RISCV/div-by-constant.ll
+++ b/llvm/test/CodeGen/RISCV/div-by-constant.ll
@@ -121,7 +121,7 @@ define i64 @udiv64_constant_add(i64 %a) nounwind {
 ; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    li a2, 7
 ; RV32-NEXT:    li a3, 0
-; RV32-NEXT:    call __udivdi3@plt
+; RV32-NEXT:    call __udivdi3
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
@@ -383,7 +383,7 @@ define i64 @sdiv64_constant_no_srai(i64 %a) nounwind {
 ; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    li a2, 3
 ; RV32-NEXT:    li a3, 0
-; RV32-NEXT:    call __divdi3@plt
+; RV32-NEXT:    call __divdi3
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
@@ -407,7 +407,7 @@ define i64 @sdiv64_constant_srai(i64 %a) nounwind {
 ; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    li a2, 5
 ; RV32-NEXT:    li a3, 0
-; RV32-NEXT:    call __divdi3@plt
+; RV32-NEXT:    call __divdi3
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
@@ -432,7 +432,7 @@ define i64 @sdiv64_constant_add_srai(i64 %a) nounwind {
 ; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    li a2, 15
 ; RV32-NEXT:    li a3, 0
-; RV32-NEXT:    call __divdi3@plt
+; RV32-NEXT:    call __divdi3
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
@@ -460,7 +460,7 @@ define i64 @sdiv64_constant_sub_srai(i64 %a) nounwind {
 ; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    li a2, -3
 ; RV32-NEXT:    li a3, -1
-; RV32-NEXT:    call __divdi3@plt
+; RV32-NEXT:    call __divdi3
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
diff --git a/llvm/test/CodeGen/RISCV/div.ll b/llvm/test/CodeGen/RISCV/div.ll
index c455b439f0b1e5..99c83b99497dd3 100644
--- a/llvm/test/CodeGen/RISCV/div.ll
+++ b/llvm/test/CodeGen/RISCV/div.ll
@@ -11,7 +11,7 @@
 define i32 @udiv(i32 %a, i32 %b) nounwind {
 ; RV32I-LABEL: udiv:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    tail __udivsi3@plt
+; RV32I-NEXT:    tail __udivsi3
 ;
 ; RV32IM-LABEL: udiv:
 ; RV32IM:       # %bb.0:
@@ -26,7 +26,7 @@ define i32 @udiv(i32 %a, i32 %b) nounwind {
 ; RV64I-NEXT:    srli a0, a0, 32
 ; RV64I-NEXT:    slli a1, a1, 32
 ; RV64I-NEXT:    srli a1, a1, 32
-; RV64I-NEXT:    call __udivdi3@plt
+; RV64I-NEXT:    call __udivdi3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -43,7 +43,7 @@ define i32 @udiv_constant(i32 %a) nounwind {
 ; RV32I-LABEL: udiv_constant:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    li a1, 5
-; RV32I-NEXT:    tail __udivsi3@plt
+; RV32I-NEXT:    tail __udivsi3
 ;
 ; RV32IM-LABEL: udiv_constant:
 ; RV32IM:       # %bb.0:
@@ -60,7 +60,7 @@ define i32 @udiv_constant(i32 %a) nounwind {
 ; RV64I-NEXT:    slli a0, a0, 32
 ; RV64I-NEXT:    srli a0, a0, 32
 ; RV64I-NEXT:    li a1, 5
-; RV64I-NEXT:    call __udivdi3@plt
+; RV64I-NEXT:    call __udivdi3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -107,7 +107,7 @@ define i32 @udiv_constant_lhs(i32 %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    mv a1, a0
 ; RV32I-NEXT:    li a0, 10
-; RV32I-NEXT:    tail __udivsi3@plt
+; RV32I-NEXT:    tail __udivsi3
 ;
 ; RV32IM-LABEL: udiv_constant_lhs:
 ; RV32IM:       # %bb.0:
@@ -122,7 +122,7 @@ define i32 @udiv_constant_lhs(i32 %a) nounwind {
 ; RV64I-NEXT:    slli a0, a0, 32
 ; RV64I-NEXT:    srli a1, a0, 32
 ; RV64I-NEXT:    li a0, 10
-; RV64I-NEXT:    call __udivdi3@plt
+; RV64I-NEXT:    call __udivdi3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -141,7 +141,7 @@ define i64 @udiv64(i64 %a, i64 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __udivdi3@plt
+; RV32I-NEXT:    call __udivdi3
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -150,14 +150,14 @@ define i64 @udiv64(i64 %a, i64 %b) nounwind {
 ; RV32IM:       # %bb.0:
 ; RV32IM-NEXT:    addi sp, sp, -16
 ; RV32IM-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IM-NEXT:    call __udivdi3@plt
+; RV32IM-NEXT:    call __udivdi3
 ; RV32IM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IM-NEXT:    addi sp, sp, 16
 ; RV32IM-NEXT:    ret
 ;
 ; RV64I-LABEL: udiv64:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    tail __udivdi3@plt
+; RV64I-NEXT:    tail __udivdi3
 ;
 ; RV64IM-LABEL: udiv64:
 ; RV64IM:       # %bb.0:
@@ -174,7 +174,7 @@ define i64 @udiv64_constant(i64 %a) nounwind {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 5
 ; RV32I-NEXT:    li a3, 0
-; RV32I-NEXT:    call __udivdi3@plt
+; RV32I-NEXT:    call __udivdi3
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -206,7 +206,7 @@ define i64 @udiv64_constant(i64 %a) nounwind {
 ; RV64I-LABEL: udiv64_constant:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    li a1, 5
-; RV64I-NEXT:    tail __udivdi3@plt
+; RV64I-NEXT:    tail __udivdi3
 ;
 ; RV64IM-LABEL: udiv64_constant:
 ; RV64IM:       # %bb.0:
@@ -230,7 +230,7 @@ define i64 @udiv64_constant_lhs(i64 %a) nounwind {
 ; RV32I-NEXT:    mv a2, a0
 ; RV32I-NEXT:    li a0, 10
 ; RV32I-NEXT:    li a1, 0
-; RV32I-NEXT:    call __udivdi3@plt
+; RV32I-NEXT:    call __udivdi3
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -243,7 +243,7 @@ define i64 @udiv64_constant_lhs(i64 %a) nounwind {
 ; RV32IM-NEXT:    mv a2, a0
 ; RV32IM-NEXT:    li a0, 10
 ; RV32IM-NEXT:    li a1, 0
-; RV32IM-NEXT:    call __udivdi3@plt
+; RV32IM-NEXT:    call __udivdi3
 ; RV32IM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IM-NEXT:    addi sp, sp, 16
 ; RV32IM-NEXT:    ret
@@ -252,7 +252,7 @@ define i64 @udiv64_constant_lhs(i64 %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    mv a1, a0
 ; RV64I-NEXT:    li a0, 10
-; RV64I-NEXT:    tail __udivdi3@plt
+; RV64I-NEXT:    tail __udivdi3
 ;
 ; RV64IM-LABEL: udiv64_constant_lhs:
 ; RV64IM:       # %bb.0:
@@ -270,7 +270,7 @@ define i8 @udiv8(i8 %a, i8 %b) nounwind {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    andi a0, a0, 255
 ; RV32I-NEXT:    andi a1, a1, 255
-; RV32I-NEXT:    call __udivsi3@plt
+; RV32I-NEXT:    call __udivsi3
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -288,7 +288,7 @@ define i8 @udiv8(i8 %a, i8 %b) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    andi a0, a0, 255
 ; RV64I-NEXT:    andi a1, a1, 255
-; RV64I-NEXT:    call __udivdi3@plt
+; RV64I-NEXT:    call __udivdi3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -310,7 +310,7 @@ define i8 @udiv8_constant(i8 %a) nounwind {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    andi a0, a0, 255
 ; RV32I-NEXT:    li a1, 5
-; RV32I-NEXT:    call __udivsi3@plt
+; RV32I-NEXT:    call __udivsi3
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -329,7 +329,7 @@ define i8 @udiv8_constant(i8 %a) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    andi a0, a0, 255
 ; RV64I-NEXT:    li a1, 5
-; RV64I-NEXT:    call __udivdi3@plt
+; RV64I-NEXT:    call __udivdi3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -380,7 +380,7 @@ define i8 @udiv8_constant_lhs(i8 %a) nounwind {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    andi a1, a0, 255
 ; RV32I-NEXT:    li a0, 10
-; RV32I-NEXT:    call __udivsi3@plt
+; RV32I-NEXT:    call __udivsi3
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -398,7 +398,7 @@ define i8 @udiv8_constant_lhs(i8 %a) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    andi a1, a0, 255
 ; RV64I-NEXT:    li a0, 10
-; RV64I-NEXT:    call __udivdi3@plt
+; RV64I-NEXT:    call __udivdi3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -422,7 +422,7 @@ define i16 @udiv16(i16 %a, i16 %b) nounwind {
 ; RV32I-NEXT:    addi a2, a2, -1
 ; RV32I-NEXT:    and a0, a0, a2
 ; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    call __udivsi3@plt
+; RV32I-NEXT:    call __udivsi3
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -444,7 +444,7 @@ define i16 @udiv16(i16 %a, i16 %b) nounwind {
 ; RV64I-NEXT:    addiw a2, a2, -1
 ; RV64I-NEXT:    and a0, a0, a2
 ; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    call __udivdi3@plt
+; RV64I-NEXT:    call __udivdi3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -469,7 +469,7 @@ define i16 @udiv16_constant(i16 %a) nounwind {
 ; RV32I-NEXT:    slli a0, a0, 16
 ; RV32I-NEXT:    srli a0, a0, 16
 ; RV32I-NEXT:    li a1, 5
-; RV32I-NEXT:    call __udivsi3@plt
+; RV32I-NEXT:    call __udivsi3
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -489,7 +489,7 @@ define i16 @udiv16_constant(i16 %a) nounwind {
 ; RV64I-NEXT:    slli a0, a0, 48
 ; RV64I-NEXT:    srli a0, a0, 48
 ; RV64I-NEXT:    li a1, 5
-; RV64I-NEXT:    call __udivdi3@plt
+; RV64I-NEXT:    call __udivdi3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -542,7 +542,7 @@ define i16 @udiv16_constant_lhs(i16 %a) nounwind {
 ; RV32I-NEXT:    slli a0, a0, 16
 ; RV32I-NEXT:    srli a1, a0, 16
 ; RV32I-NEXT:    li a0, 10
-; RV32I-NEXT:    call __udivsi3@plt
+; RV32I-NEXT:    call __udivsi3
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -562,7 +562,7 @@ define i16 @udiv16_constant_lhs(i16 %a) nounwind {
 ; RV64I-NEXT:    slli a0, a0, 48
 ; RV64I-NEXT:    srli a1, a0, 48
 ; RV64I-NEXT:    li a0, 10
-; RV64I-NEXT:    call __udivdi3@plt
+; RV64I-NEXT:    call __udivdi3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -581,7 +581,7 @@ define i16 @udiv16_constant_lhs(i16 %a) nounwind {
 define i32 @sdiv(i32 %a, i32 %b) nounwind {
 ; RV32I-LABEL: sdiv:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    tail __divsi3@plt
+; RV32I-NEXT:    tail __divsi3
 ;
 ; RV32IM-LABEL: sdiv:
 ; RV32IM:       # %bb.0:
@@ -594,7 +594,7 @@ define i32 @sdiv(i32 %a, i32 %b) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sext.w a0, a0
 ; RV64I-NEXT:    sext.w a1, a1
-; RV64I-NEXT:    call __divdi3@plt
+; RV64I-NEXT:    call __divdi3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -611,7 +611,7 @@ define i32 @sdiv_constant(i32 %a) nounwind {
 ; RV32I-LABEL: sdiv_constant:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    li a1, 5
-; RV32I-NEXT:    tail __divsi3@plt
+; RV32I-NEXT:    tail __divsi3
 ;
 ; RV32IM-LABEL: sdiv_constant:
 ; RV32IM:       # %bb.0:
@@ -629,7 +629,7 @@ define i32 @sdiv_constant(i32 %a) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sext.w a0, a0
 ; RV64I-NEXT:    li a1, 5
-; RV64I-NEXT:    call __divdi3@plt
+; RV64I-NEXT:    call __divdi3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -725,7 +725,7 @@ define i32 @sdiv_constant_lhs(i32 %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    mv a1, a0
 ; RV32I-NEXT:    li a0, -10
-; RV32I-NEXT:    tail __divsi3@plt
+; RV32I-NEXT:    tail __divsi3
 ;
 ; RV32IM-LABEL: sdiv_constant_lhs:
 ; RV32IM:       # %bb.0:
@@ -739,7 +739,7 @@ define i32 @sdiv_constant_lhs(i32 %a) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sext.w a1, a0
 ; RV64I-NEXT:    li a0, -10
-; RV64I-NEXT:    call __divdi3@plt
+; RV64I-NEXT:    call __divdi3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -758,7 +758,7 @@ define i64 @sdiv64(i64 %a, i64 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __divdi3@plt
+; RV32I-NEXT:    call __divdi3
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -767,14 +767,14 @@ define i64 @sdiv64(i64 %a, i64 %b) nounwind {
 ; RV32IM:       # %bb.0:
 ; RV32IM-NEXT:    addi sp, sp, -16
 ; RV32IM-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IM-NEXT:    call __divdi3@plt
+; RV32IM-NEXT:    call __divdi3
 ; RV32IM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IM-NEXT:    addi sp, sp, 16
 ; RV32IM-NEXT:    ret
 ;
 ; RV64I-LABEL: sdiv64:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    tail __divdi3@plt
+; RV64I-NEXT:    tail __divdi3
 ;
 ; RV64IM-LABEL: sdiv64:
 ; RV64IM:       # %bb.0:
@@ -791,7 +791,7 @@ define i64 @sdiv64_constant(i64 %a) nounwind {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 5
 ; RV32I-NEXT:    li a3, 0
-; RV32I-NEXT:    call __divdi3@plt
+; RV32I-NEXT:    call __divdi3
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -802,7 +802,7 @@ define i64 @sdiv64_constant(i64 %a) nounwind {
 ; RV32IM-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IM-NEXT:    li a2, 5
 ; RV32IM-NEXT:    li a3, 0
-; RV32IM-NEXT:    call __divdi3@plt
+; RV32IM-NEXT:    call __divdi3
 ; RV32IM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IM-NEXT:    addi sp, sp, 16
 ; RV32IM-NEXT:    ret
@@ -810,7 +810,7 @@ define i64 @sdiv64_constant(i64 %a) nounwind {
 ; RV64I-LABEL: sdiv64_constant:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    li a1, 5
-; RV64I-NEXT:    tail __divdi3@plt
+; RV64I-NEXT:    tail __divdi3
 ;
 ; RV64IM-LABEL: sdiv64_constant:
 ; RV64IM:       # %bb.0:
@@ -834,7 +834,7 @@ define i64 @sdiv64_constant_lhs(i64 %a) nounwind {
 ; RV32I-NEXT:    mv a2, a0
 ; RV32I-NEXT:    li a0, 10
 ; RV32I-NEXT:    li a1, 0
-; RV32I-NEXT:    call __divdi3@plt
+; RV32I-NEXT:    call __divdi3
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -847,7 +847,7 @@ define i64 @sdiv64_constant_lhs(i64 %a) nounwind {
 ; RV32IM-NEXT:    mv a2, a0
 ; RV32IM-NEXT:    li a0, 10
 ; RV32IM-NEXT:    li a1, 0
-; RV32IM-NEXT:    call __divdi3@plt
+; RV32IM-NEXT:    call __divdi3
 ; RV32IM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IM-NEXT:    addi sp, sp, 16
 ; RV32IM-NEXT:    ret
@@ -856,7 +856,7 @@ define i64 @sdiv64_constant_lhs(i64 %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    mv a1, a0
 ; RV64I-NEXT:    li a0, 10
-; RV64I-NEXT:    tail __divdi3@plt
+; RV64I-NEXT:    tail __divdi3
 ;
 ; RV64IM-LABEL: sdiv64_constant_lhs:
 ; RV64IM:       # %bb.0:
@@ -878,7 +878,7 @@ define i64 @sdiv64_sext_operands(i32 %a, i32 %b) nounwind {
 ; RV32I-NEXT:    mv a2, a1
 ; RV32I-NEXT:    srai a1, a0, 31
 ; RV32I-NEXT:    srai a3, a2, 31
-; RV32I-NEXT:    call __divdi3@plt
+; RV32I-NEXT:    call __divdi3
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -890,7 +890,7 @@ define i64 @sdiv64_sext_operands(i32 %a, i32 %b) nounwind {
 ; RV32IM-NEXT:    mv a2, a1
 ; RV32IM-NEXT:    srai a1, a0, 31
 ; RV32IM-NEXT:    srai a3, a2, 31
-; RV32IM-NEXT:    call __divdi3@plt
+; RV32IM-NEXT:    call __divdi3
 ; RV32IM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IM-NEXT:    addi sp, sp, 16
 ; RV32IM-NEXT:    ret
@@ -899,7 +899,7 @@ define i64 @sdiv64_sext_operands(i32 %a, i32 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    sext.w a0, a0
 ; RV64I-NEXT:    sext.w a1, a1
-; RV64I-NEXT:    tail __divdi3@plt
+; RV64I-NEXT:    tail __divdi3
 ;
 ; RV64IM-LABEL: sdiv64_sext_operands:
 ; RV64IM:       # %bb.0:
@@ -922,7 +922,7 @@ define i8 @sdiv8(i8 %a, i8 %b) nounwind {
 ; RV32I-NEXT:    srai a0, a0, 24
 ; RV32I-NEXT:    slli a1, a1, 24
 ; RV32I-NEXT:    srai a1, a1, 24
-; RV32I-NEXT:    call __divsi3@plt
+; RV32I-NEXT:    call __divsi3
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -944,7 +944,7 @@ define i8 @sdiv8(i8 %a, i8 %b) nounwind {
 ; RV64I-NEXT:    srai a0, a0, 56
 ; RV64I-NEXT:    slli a1, a1, 56
 ; RV64I-NEXT:    srai a1, a1, 56
-; RV64I-NEXT:    call __divdi3@plt
+; RV64I-NEXT:    call __divdi3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -969,7 +969,7 @@ define i8 @sdiv8_constant(i8 %a) nounwind {
 ; RV32I-NEXT:    slli a0, a0, 24
 ; RV32I-NEXT:    srai a0, a0, 24
 ; RV32I-NEXT:    li a1, 5
-; RV32I-NEXT:    call __divsi3@plt
+; RV32I-NEXT:    call __divsi3
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -993,7 +993,7 @@ define i8 @sdiv8_constant(i8 %a) nounwind {
 ; RV64I-NEXT:    slli a0, a0, 56
 ; RV64I-NEXT:    srai a0, a0, 56
 ; RV64I-NEXT:    li a1, 5
-; RV64I-NEXT:    call __divdi3@plt
+; RV64I-NEXT:    call __divdi3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1069,7 +1069,7 @@ define i8 @sdiv8_constant_lhs(i8 %a) nounwind {
 ; RV32I-NEXT:    slli a0, a0, 24
 ; RV32I-NEXT:    srai a1, a0, 24
 ; RV32I-NEXT:    li a0, -10
-; RV32I-NEXT:    call __divsi3@plt
+; RV32I-NEXT:    call __divsi3
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1089,7 +1089,7 @@ define i8 @sdiv8_constant_lhs(i8 %a) nounwind {
 ; RV64I-NEXT:    slli a0, a0, 56
 ; RV64I-NEXT:    srai a1, a0, 56
 ; RV64I-NEXT:    li a0, -10
-; RV64I-NEXT:    call __divdi3@plt
+; RV64I-NEXT:    call __divdi3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1114,7 +1114,7 @@ define i16 @sdiv16(i16 %a, i16 %b) nounwind {
 ; RV32I-NEXT:    srai a0, a0, 16
 ; RV32I-NEXT:    slli a1, a1, 16
 ; RV32I-NEXT:    srai a1, a1, 16
-; RV32I-NEXT:    call __divsi3@plt
+; RV32I-NEXT:    call __divsi3
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1136,7 +1136,7 @@ define i16 @sdiv16(i16 %a, i16 %b) nounwind {
 ; RV64I-NEXT:    srai a0, a0, 48
 ; RV64I-NEXT:    slli a1, a1, 48
 ; RV64I-NEXT:    srai a1, a1, 48
-; RV64I-NEXT:    call __divdi3@plt
+; RV64I-NEXT:    call __divdi3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1161,7 +1161,7 @@ define i16 @sdiv16_constant(i16 %a) nounwind {
 ; RV32I-NEXT:    slli a0, a0, 16
 ; RV32I-NEXT:    srai a0, a0, 16
 ; RV32I-NEXT:    li a1, 5
-; RV32I-NEXT:    call __divsi3@plt
+; RV32I-NEXT:    call __divsi3
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1185,7 +1185,7 @@ define i16 @sdiv16_constant(i16 %a) nounwind {
 ; RV64I-NEXT:    slli a0, a0, 48
 ; RV64I-NEXT:    srai a0, a0, 48
 ; RV64I-NEXT:    li a1, 5
-; RV64I-NEXT:    call __divdi3@plt
+; RV64I-NEXT:    call __divdi3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1261,7 +1261,7 @@ define i16 @sdiv16_constant_lhs(i16 %a) nounwind {
 ; RV32I-NEXT:    slli a0, a0, 16
 ; RV32I-NEXT:    srai a1, a0, 16
 ; RV32I-NEXT:    li a0, -10
-; RV32I-NEXT:    call __divsi3@plt
+; RV32I-NEXT:    call __divsi3
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1281,7 +1281,7 @@ define i16 @sdiv16_constant_lhs(i16 %a) nounwind {
 ; RV64I-NEXT:    slli a0, a0, 48
 ; RV64I-NEXT:    srai a1, a0, 48
 ; RV64I-NEXT:    li a0, -10
-; RV64I-NEXT:    call __divdi3@plt
+; RV64I-NEXT:    call __divdi3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
diff --git a/llvm/test/CodeGen/RISCV/double-arith-strict.ll b/llvm/test/CodeGen/RISCV/double-arith-strict.ll
index c324cc88f84b97..186175537772b5 100644
--- a/llvm/test/CodeGen/RISCV/double-arith-strict.ll
+++ b/llvm/test/CodeGen/RISCV/double-arith-strict.ll
@@ -50,7 +50,7 @@ define double @fadd_d(double %a, double %b) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __adddf3@plt
+; RV32I-NEXT:    call __adddf3
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -59,7 +59,7 @@ define double @fadd_d(double %a, double %b) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __adddf3@plt
+; RV64I-NEXT:    call __adddf3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -102,7 +102,7 @@ define double @fsub_d(double %a, double %b) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __subdf3@plt
+; RV32I-NEXT:    call __subdf3
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -111,7 +111,7 @@ define double @fsub_d(double %a, double %b) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __subdf3@plt
+; RV64I-NEXT:    call __subdf3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -154,7 +154,7 @@ define double @fmul_d(double %a, double %b) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __muldf3@plt
+; RV32I-NEXT:    call __muldf3
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -163,7 +163,7 @@ define double @fmul_d(double %a, double %b) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __muldf3@plt
+; RV64I-NEXT:    call __muldf3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -206,7 +206,7 @@ define double @fdiv_d(double %a, double %b) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __divdf3@plt
+; RV32I-NEXT:    call __divdf3
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -215,7 +215,7 @@ define double @fdiv_d(double %a, double %b) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __divdf3@plt
+; RV64I-NEXT:    call __divdf3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -254,7 +254,7 @@ define double @fsqrt_d(double %a) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call sqrt@plt
+; RV32I-NEXT:    call sqrt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -263,7 +263,7 @@ define double @fsqrt_d(double %a) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call sqrt@plt
+; RV64I-NEXT:    call sqrt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -277,7 +277,7 @@ define double @fmin_d(double %a, double %b) nounwind strictfp {
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
 ; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    call fmin@plt
+; RV32IFD-NEXT:    call fmin
 ; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
@@ -286,7 +286,7 @@ define double @fmin_d(double %a, double %b) nounwind strictfp {
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    addi sp, sp, -16
 ; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT:    call fmin@plt
+; RV64IFD-NEXT:    call fmin
 ; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
@@ -295,7 +295,7 @@ define double @fmin_d(double %a, double %b) nounwind strictfp {
 ; RV32IZFINXZDINX:       # %bb.0:
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT:    call fmin@plt
+; RV32IZFINXZDINX-NEXT:    call fmin
 ; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINXZDINX-NEXT:    ret
@@ -304,7 +304,7 @@ define double @fmin_d(double %a, double %b) nounwind strictfp {
 ; RV64IZFINXZDINX:       # %bb.0:
 ; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINXZDINX-NEXT:    call fmin@plt
+; RV64IZFINXZDINX-NEXT:    call fmin
 ; RV64IZFINXZDINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV64IZFINXZDINX-NEXT:    ret
@@ -313,7 +313,7 @@ define double @fmin_d(double %a, double %b) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call fmin@plt
+; RV32I-NEXT:    call fmin
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -322,7 +322,7 @@ define double @fmin_d(double %a, double %b) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call fmin@plt
+; RV64I-NEXT:    call fmin
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -336,7 +336,7 @@ define double @fmax_d(double %a, double %b) nounwind strictfp {
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
 ; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    call fmax@plt
+; RV32IFD-NEXT:    call fmax
 ; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
@@ -345,7 +345,7 @@ define double @fmax_d(double %a, double %b) nounwind strictfp {
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    addi sp, sp, -16
 ; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT:    call fmax@plt
+; RV64IFD-NEXT:    call fmax
 ; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
@@ -354,7 +354,7 @@ define double @fmax_d(double %a, double %b) nounwind strictfp {
 ; RV32IZFINXZDINX:       # %bb.0:
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT:    call fmax@plt
+; RV32IZFINXZDINX-NEXT:    call fmax
 ; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINXZDINX-NEXT:    ret
@@ -363,7 +363,7 @@ define double @fmax_d(double %a, double %b) nounwind strictfp {
 ; RV64IZFINXZDINX:       # %bb.0:
 ; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINXZDINX-NEXT:    call fmax@plt
+; RV64IZFINXZDINX-NEXT:    call fmax
 ; RV64IZFINXZDINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV64IZFINXZDINX-NEXT:    ret
@@ -372,7 +372,7 @@ define double @fmax_d(double %a, double %b) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call fmax@plt
+; RV32I-NEXT:    call fmax
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -381,7 +381,7 @@ define double @fmax_d(double %a, double %b) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call fmax@plt
+; RV64I-NEXT:    call fmax
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -428,7 +428,7 @@ define double @fmadd_d(double %a, double %b, double %c) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call fma@plt
+; RV32I-NEXT:    call fma
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -437,7 +437,7 @@ define double @fmadd_d(double %a, double %b, double %c) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call fma@plt
+; RV64I-NEXT:    call fma
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -508,7 +508,7 @@ define double @fmsub_d(double %a, double %b, double %c) nounwind strictfp {
 ; RV32I-NEXT:    mv a1, a5
 ; RV32I-NEXT:    li a2, 0
 ; RV32I-NEXT:    li a3, 0
-; RV32I-NEXT:    call __adddf3@plt
+; RV32I-NEXT:    call __adddf3
 ; RV32I-NEXT:    mv a4, a0
 ; RV32I-NEXT:    lui a5, 524288
 ; RV32I-NEXT:    xor a5, a1, a5
@@ -516,7 +516,7 @@ define double @fmsub_d(double %a, double %b, double %c) nounwind strictfp {
 ; RV32I-NEXT:    mv a1, s2
 ; RV32I-NEXT:    mv a2, s1
 ; RV32I-NEXT:    mv a3, s0
-; RV32I-NEXT:    call fma@plt
+; RV32I-NEXT:    call fma
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
@@ -535,13 +535,13 @@ define double @fmsub_d(double %a, double %b, double %c) nounwind strictfp {
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    mv a0, a2
 ; RV64I-NEXT:    li a1, 0
-; RV64I-NEXT:    call __adddf3@plt
+; RV64I-NEXT:    call __adddf3
 ; RV64I-NEXT:    li a1, -1
 ; RV64I-NEXT:    slli a1, a1, 63
 ; RV64I-NEXT:    xor a2, a0, a1
 ; RV64I-NEXT:    mv a0, s1
 ; RV64I-NEXT:    mv a1, s0
-; RV64I-NEXT:    call fma@plt
+; RV64I-NEXT:    call fma
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
@@ -619,14 +619,14 @@ define double @fnmadd_d(double %a, double %b, double %c) nounwind strictfp {
 ; RV32I-NEXT:    mv s3, a2
 ; RV32I-NEXT:    li a2, 0
 ; RV32I-NEXT:    li a3, 0
-; RV32I-NEXT:    call __adddf3@plt
+; RV32I-NEXT:    call __adddf3
 ; RV32I-NEXT:    mv s4, a0
 ; RV32I-NEXT:    mv s5, a1
 ; RV32I-NEXT:    mv a0, s1
 ; RV32I-NEXT:    mv a1, s0
 ; RV32I-NEXT:    li a2, 0
 ; RV32I-NEXT:    li a3, 0
-; RV32I-NEXT:    call __adddf3@plt
+; RV32I-NEXT:    call __adddf3
 ; RV32I-NEXT:    mv a4, a0
 ; RV32I-NEXT:    lui a5, 524288
 ; RV32I-NEXT:    xor a2, s5, a5
@@ -635,7 +635,7 @@ define double @fnmadd_d(double %a, double %b, double %c) nounwind strictfp {
 ; RV32I-NEXT:    mv a1, a2
 ; RV32I-NEXT:    mv a2, s3
 ; RV32I-NEXT:    mv a3, s2
-; RV32I-NEXT:    call fma@plt
+; RV32I-NEXT:    call fma
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
@@ -656,18 +656,18 @@ define double @fnmadd_d(double %a, double %b, double %c) nounwind strictfp {
 ; RV64I-NEXT:    mv s0, a2
 ; RV64I-NEXT:    mv s1, a1
 ; RV64I-NEXT:    li a1, 0
-; RV64I-NEXT:    call __adddf3@plt
+; RV64I-NEXT:    call __adddf3
 ; RV64I-NEXT:    mv s2, a0
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    li a1, 0
-; RV64I-NEXT:    call __adddf3@plt
+; RV64I-NEXT:    call __adddf3
 ; RV64I-NEXT:    li a1, -1
 ; RV64I-NEXT:    slli a2, a1, 63
 ; RV64I-NEXT:    xor a1, s2, a2
 ; RV64I-NEXT:    xor a2, a0, a2
 ; RV64I-NEXT:    mv a0, a1
 ; RV64I-NEXT:    mv a1, s1
-; RV64I-NEXT:    call fma@plt
+; RV64I-NEXT:    call fma
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
@@ -750,14 +750,14 @@ define double @fnmadd_d_2(double %a, double %b, double %c) nounwind strictfp {
 ; RV32I-NEXT:    mv a1, a3
 ; RV32I-NEXT:    li a2, 0
 ; RV32I-NEXT:    li a3, 0
-; RV32I-NEXT:    call __adddf3@plt
+; RV32I-NEXT:    call __adddf3
 ; RV32I-NEXT:    mv s4, a0
 ; RV32I-NEXT:    mv s5, a1
 ; RV32I-NEXT:    mv a0, s1
 ; RV32I-NEXT:    mv a1, s0
 ; RV32I-NEXT:    li a2, 0
 ; RV32I-NEXT:    li a3, 0
-; RV32I-NEXT:    call __adddf3@plt
+; RV32I-NEXT:    call __adddf3
 ; RV32I-NEXT:    mv a4, a0
 ; RV32I-NEXT:    lui a5, 524288
 ; RV32I-NEXT:    xor a3, s5, a5
@@ -765,7 +765,7 @@ define double @fnmadd_d_2(double %a, double %b, double %c) nounwind strictfp {
 ; RV32I-NEXT:    mv a0, s3
 ; RV32I-NEXT:    mv a1, s2
 ; RV32I-NEXT:    mv a2, s4
-; RV32I-NEXT:    call fma@plt
+; RV32I-NEXT:    call fma
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
@@ -787,17 +787,17 @@ define double @fnmadd_d_2(double %a, double %b, double %c) nounwind strictfp {
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    mv a0, a1
 ; RV64I-NEXT:    li a1, 0
-; RV64I-NEXT:    call __adddf3@plt
+; RV64I-NEXT:    call __adddf3
 ; RV64I-NEXT:    mv s2, a0
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    li a1, 0
-; RV64I-NEXT:    call __adddf3@plt
+; RV64I-NEXT:    call __adddf3
 ; RV64I-NEXT:    li a1, -1
 ; RV64I-NEXT:    slli a2, a1, 63
 ; RV64I-NEXT:    xor a1, s2, a2
 ; RV64I-NEXT:    xor a2, a0, a2
 ; RV64I-NEXT:    mv a0, s1
-; RV64I-NEXT:    call fma@plt
+; RV64I-NEXT:    call fma
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
@@ -872,14 +872,14 @@ define double @fnmsub_d(double %a, double %b, double %c) nounwind strictfp {
 ; RV32I-NEXT:    mv s3, a2
 ; RV32I-NEXT:    li a2, 0
 ; RV32I-NEXT:    li a3, 0
-; RV32I-NEXT:    call __adddf3@plt
+; RV32I-NEXT:    call __adddf3
 ; RV32I-NEXT:    lui a2, 524288
 ; RV32I-NEXT:    xor a1, a1, a2
 ; RV32I-NEXT:    mv a2, s3
 ; RV32I-NEXT:    mv a3, s2
 ; RV32I-NEXT:    mv a4, s1
 ; RV32I-NEXT:    mv a5, s0
-; RV32I-NEXT:    call fma@plt
+; RV32I-NEXT:    call fma
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
@@ -897,13 +897,13 @@ define double @fnmsub_d(double %a, double %b, double %c) nounwind strictfp {
 ; RV64I-NEXT:    mv s0, a2
 ; RV64I-NEXT:    mv s1, a1
 ; RV64I-NEXT:    li a1, 0
-; RV64I-NEXT:    call __adddf3@plt
+; RV64I-NEXT:    call __adddf3
 ; RV64I-NEXT:    li a1, -1
 ; RV64I-NEXT:    slli a1, a1, 63
 ; RV64I-NEXT:    xor a0, a0, a1
 ; RV64I-NEXT:    mv a1, s1
 ; RV64I-NEXT:    mv a2, s0
-; RV64I-NEXT:    call fma@plt
+; RV64I-NEXT:    call fma
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
@@ -977,7 +977,7 @@ define double @fnmsub_d_2(double %a, double %b, double %c) nounwind strictfp {
 ; RV32I-NEXT:    mv a1, a3
 ; RV32I-NEXT:    li a2, 0
 ; RV32I-NEXT:    li a3, 0
-; RV32I-NEXT:    call __adddf3@plt
+; RV32I-NEXT:    call __adddf3
 ; RV32I-NEXT:    mv a2, a0
 ; RV32I-NEXT:    lui a3, 524288
 ; RV32I-NEXT:    xor a3, a1, a3
@@ -985,7 +985,7 @@ define double @fnmsub_d_2(double %a, double %b, double %c) nounwind strictfp {
 ; RV32I-NEXT:    mv a1, s2
 ; RV32I-NEXT:    mv a4, s1
 ; RV32I-NEXT:    mv a5, s0
-; RV32I-NEXT:    call fma@plt
+; RV32I-NEXT:    call fma
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
@@ -1004,13 +1004,13 @@ define double @fnmsub_d_2(double %a, double %b, double %c) nounwind strictfp {
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    mv a0, a1
 ; RV64I-NEXT:    li a1, 0
-; RV64I-NEXT:    call __adddf3@plt
+; RV64I-NEXT:    call __adddf3
 ; RV64I-NEXT:    li a1, -1
 ; RV64I-NEXT:    slli a1, a1, 63
 ; RV64I-NEXT:    xor a1, a0, a1
 ; RV64I-NEXT:    mv a0, s1
 ; RV64I-NEXT:    mv a2, s0
-; RV64I-NEXT:    call fma@plt
+; RV64I-NEXT:    call fma
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/double-arith.ll b/llvm/test/CodeGen/RISCV/double-arith.ll
index 7e2964ef68b13a..82ddf06187d383 100644
--- a/llvm/test/CodeGen/RISCV/double-arith.ll
+++ b/llvm/test/CodeGen/RISCV/double-arith.ll
@@ -51,7 +51,7 @@ define double @fadd_d(double %a, double %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __adddf3@plt
+; RV32I-NEXT:    call __adddf3
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -60,7 +60,7 @@ define double @fadd_d(double %a, double %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __adddf3@plt
+; RV64I-NEXT:    call __adddf3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -102,7 +102,7 @@ define double @fsub_d(double %a, double %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __subdf3@plt
+; RV32I-NEXT:    call __subdf3
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -111,7 +111,7 @@ define double @fsub_d(double %a, double %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __subdf3@plt
+; RV64I-NEXT:    call __subdf3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -153,7 +153,7 @@ define double @fmul_d(double %a, double %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __muldf3@plt
+; RV32I-NEXT:    call __muldf3
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -162,7 +162,7 @@ define double @fmul_d(double %a, double %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __muldf3@plt
+; RV64I-NEXT:    call __muldf3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -204,7 +204,7 @@ define double @fdiv_d(double %a, double %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __divdf3@plt
+; RV32I-NEXT:    call __divdf3
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -213,7 +213,7 @@ define double @fdiv_d(double %a, double %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __divdf3@plt
+; RV64I-NEXT:    call __divdf3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -253,7 +253,7 @@ define double @fsqrt_d(double %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call sqrt@plt
+; RV32I-NEXT:    call sqrt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -262,7 +262,7 @@ define double @fsqrt_d(double %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call sqrt@plt
+; RV64I-NEXT:    call sqrt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -359,11 +359,11 @@ define i32 @fneg_d(double %a, double %b) nounwind {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv a2, a0
 ; RV32I-NEXT:    mv a3, a1
-; RV32I-NEXT:    call __adddf3@plt
+; RV32I-NEXT:    call __adddf3
 ; RV32I-NEXT:    lui a3, 524288
 ; RV32I-NEXT:    xor a3, a1, a3
 ; RV32I-NEXT:    mv a2, a0
-; RV32I-NEXT:    call __eqdf2@plt
+; RV32I-NEXT:    call __eqdf2
 ; RV32I-NEXT:    seqz a0, a0
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -374,11 +374,11 @@ define i32 @fneg_d(double %a, double %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv a1, a0
-; RV64I-NEXT:    call __adddf3@plt
+; RV64I-NEXT:    call __adddf3
 ; RV64I-NEXT:    li a1, -1
 ; RV64I-NEXT:    slli a1, a1, 63
 ; RV64I-NEXT:    xor a1, a0, a1
-; RV64I-NEXT:    call __eqdf2@plt
+; RV64I-NEXT:    call __eqdf2
 ; RV64I-NEXT:    seqz a0, a0
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -494,12 +494,12 @@ define double @fabs_d(double %a, double %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __adddf3@plt
+; RV32I-NEXT:    call __adddf3
 ; RV32I-NEXT:    mv a3, a1
 ; RV32I-NEXT:    slli a1, a1, 1
 ; RV32I-NEXT:    srli a1, a1, 1
 ; RV32I-NEXT:    mv a2, a0
-; RV32I-NEXT:    call __adddf3@plt
+; RV32I-NEXT:    call __adddf3
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -508,11 +508,11 @@ define double @fabs_d(double %a, double %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __adddf3@plt
+; RV64I-NEXT:    call __adddf3
 ; RV64I-NEXT:    mv a1, a0
 ; RV64I-NEXT:    slli a0, a0, 1
 ; RV64I-NEXT:    srli a0, a0, 1
-; RV64I-NEXT:    call __adddf3@plt
+; RV64I-NEXT:    call __adddf3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -558,7 +558,7 @@ define double @fmin_d(double %a, double %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call fmin@plt
+; RV32I-NEXT:    call fmin
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -567,7 +567,7 @@ define double @fmin_d(double %a, double %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call fmin@plt
+; RV64I-NEXT:    call fmin
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -611,7 +611,7 @@ define double @fmax_d(double %a, double %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call fmax@plt
+; RV32I-NEXT:    call fmax
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -620,7 +620,7 @@ define double @fmax_d(double %a, double %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call fmax@plt
+; RV64I-NEXT:    call fmax
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -668,7 +668,7 @@ define double @fmadd_d(double %a, double %b, double %c) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call fma@plt
+; RV32I-NEXT:    call fma
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -677,7 +677,7 @@ define double @fmadd_d(double %a, double %b, double %c) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call fma@plt
+; RV64I-NEXT:    call fma
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -747,7 +747,7 @@ define double @fmsub_d(double %a, double %b, double %c) nounwind {
 ; RV32I-NEXT:    mv a1, a5
 ; RV32I-NEXT:    li a2, 0
 ; RV32I-NEXT:    li a3, 0
-; RV32I-NEXT:    call __adddf3@plt
+; RV32I-NEXT:    call __adddf3
 ; RV32I-NEXT:    mv a4, a0
 ; RV32I-NEXT:    lui a5, 524288
 ; RV32I-NEXT:    xor a5, a1, a5
@@ -755,7 +755,7 @@ define double @fmsub_d(double %a, double %b, double %c) nounwind {
 ; RV32I-NEXT:    mv a1, s2
 ; RV32I-NEXT:    mv a2, s1
 ; RV32I-NEXT:    mv a3, s0
-; RV32I-NEXT:    call fma@plt
+; RV32I-NEXT:    call fma
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
@@ -774,13 +774,13 @@ define double @fmsub_d(double %a, double %b, double %c) nounwind {
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    mv a0, a2
 ; RV64I-NEXT:    li a1, 0
-; RV64I-NEXT:    call __adddf3@plt
+; RV64I-NEXT:    call __adddf3
 ; RV64I-NEXT:    li a1, -1
 ; RV64I-NEXT:    slli a1, a1, 63
 ; RV64I-NEXT:    xor a2, a0, a1
 ; RV64I-NEXT:    mv a0, s1
 ; RV64I-NEXT:    mv a1, s0
-; RV64I-NEXT:    call fma@plt
+; RV64I-NEXT:    call fma
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
@@ -858,14 +858,14 @@ define double @fnmadd_d(double %a, double %b, double %c) nounwind {
 ; RV32I-NEXT:    mv s3, a2
 ; RV32I-NEXT:    li a2, 0
 ; RV32I-NEXT:    li a3, 0
-; RV32I-NEXT:    call __adddf3@plt
+; RV32I-NEXT:    call __adddf3
 ; RV32I-NEXT:    mv s4, a0
 ; RV32I-NEXT:    mv s5, a1
 ; RV32I-NEXT:    mv a0, s1
 ; RV32I-NEXT:    mv a1, s0
 ; RV32I-NEXT:    li a2, 0
 ; RV32I-NEXT:    li a3, 0
-; RV32I-NEXT:    call __adddf3@plt
+; RV32I-NEXT:    call __adddf3
 ; RV32I-NEXT:    mv a4, a0
 ; RV32I-NEXT:    lui a5, 524288
 ; RV32I-NEXT:    xor a2, s5, a5
@@ -874,7 +874,7 @@ define double @fnmadd_d(double %a, double %b, double %c) nounwind {
 ; RV32I-NEXT:    mv a1, a2
 ; RV32I-NEXT:    mv a2, s3
 ; RV32I-NEXT:    mv a3, s2
-; RV32I-NEXT:    call fma@plt
+; RV32I-NEXT:    call fma
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
@@ -895,18 +895,18 @@ define double @fnmadd_d(double %a, double %b, double %c) nounwind {
 ; RV64I-NEXT:    mv s0, a2
 ; RV64I-NEXT:    mv s1, a1
 ; RV64I-NEXT:    li a1, 0
-; RV64I-NEXT:    call __adddf3@plt
+; RV64I-NEXT:    call __adddf3
 ; RV64I-NEXT:    mv s2, a0
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    li a1, 0
-; RV64I-NEXT:    call __adddf3@plt
+; RV64I-NEXT:    call __adddf3
 ; RV64I-NEXT:    li a1, -1
 ; RV64I-NEXT:    slli a2, a1, 63
 ; RV64I-NEXT:    xor a1, s2, a2
 ; RV64I-NEXT:    xor a2, a0, a2
 ; RV64I-NEXT:    mv a0, a1
 ; RV64I-NEXT:    mv a1, s1
-; RV64I-NEXT:    call fma@plt
+; RV64I-NEXT:    call fma
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
@@ -989,14 +989,14 @@ define double @fnmadd_d_2(double %a, double %b, double %c) nounwind {
 ; RV32I-NEXT:    mv a1, a3
 ; RV32I-NEXT:    li a2, 0
 ; RV32I-NEXT:    li a3, 0
-; RV32I-NEXT:    call __adddf3@plt
+; RV32I-NEXT:    call __adddf3
 ; RV32I-NEXT:    mv s4, a0
 ; RV32I-NEXT:    mv s5, a1
 ; RV32I-NEXT:    mv a0, s1
 ; RV32I-NEXT:    mv a1, s0
 ; RV32I-NEXT:    li a2, 0
 ; RV32I-NEXT:    li a3, 0
-; RV32I-NEXT:    call __adddf3@plt
+; RV32I-NEXT:    call __adddf3
 ; RV32I-NEXT:    mv a4, a0
 ; RV32I-NEXT:    lui a5, 524288
 ; RV32I-NEXT:    xor a3, s5, a5
@@ -1004,7 +1004,7 @@ define double @fnmadd_d_2(double %a, double %b, double %c) nounwind {
 ; RV32I-NEXT:    mv a0, s3
 ; RV32I-NEXT:    mv a1, s2
 ; RV32I-NEXT:    mv a2, s4
-; RV32I-NEXT:    call fma@plt
+; RV32I-NEXT:    call fma
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
@@ -1026,17 +1026,17 @@ define double @fnmadd_d_2(double %a, double %b, double %c) nounwind {
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    mv a0, a1
 ; RV64I-NEXT:    li a1, 0
-; RV64I-NEXT:    call __adddf3@plt
+; RV64I-NEXT:    call __adddf3
 ; RV64I-NEXT:    mv s2, a0
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    li a1, 0
-; RV64I-NEXT:    call __adddf3@plt
+; RV64I-NEXT:    call __adddf3
 ; RV64I-NEXT:    li a1, -1
 ; RV64I-NEXT:    slli a2, a1, 63
 ; RV64I-NEXT:    xor a1, s2, a2
 ; RV64I-NEXT:    xor a2, a0, a2
 ; RV64I-NEXT:    mv a0, s1
-; RV64I-NEXT:    call fma@plt
+; RV64I-NEXT:    call fma
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
@@ -1095,7 +1095,7 @@ define double @fnmadd_d_3(double %a, double %b, double %c) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call fma@plt
+; RV32I-NEXT:    call fma
 ; RV32I-NEXT:    lui a2, 524288
 ; RV32I-NEXT:    xor a1, a1, a2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -1106,7 +1106,7 @@ define double @fnmadd_d_3(double %a, double %b, double %c) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call fma@plt
+; RV64I-NEXT:    call fma
 ; RV64I-NEXT:    li a1, -1
 ; RV64I-NEXT:    slli a1, a1, 63
 ; RV64I-NEXT:    xor a0, a0, a1
@@ -1162,7 +1162,7 @@ define double @fnmadd_nsz(double %a, double %b, double %c) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call fma@plt
+; RV32I-NEXT:    call fma
 ; RV32I-NEXT:    lui a2, 524288
 ; RV32I-NEXT:    xor a1, a1, a2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -1173,7 +1173,7 @@ define double @fnmadd_nsz(double %a, double %b, double %c) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call fma@plt
+; RV64I-NEXT:    call fma
 ; RV64I-NEXT:    li a1, -1
 ; RV64I-NEXT:    slli a1, a1, 63
 ; RV64I-NEXT:    xor a0, a0, a1
@@ -1245,14 +1245,14 @@ define double @fnmsub_d(double %a, double %b, double %c) nounwind {
 ; RV32I-NEXT:    mv s3, a2
 ; RV32I-NEXT:    li a2, 0
 ; RV32I-NEXT:    li a3, 0
-; RV32I-NEXT:    call __adddf3@plt
+; RV32I-NEXT:    call __adddf3
 ; RV32I-NEXT:    lui a2, 524288
 ; RV32I-NEXT:    xor a1, a1, a2
 ; RV32I-NEXT:    mv a2, s3
 ; RV32I-NEXT:    mv a3, s2
 ; RV32I-NEXT:    mv a4, s1
 ; RV32I-NEXT:    mv a5, s0
-; RV32I-NEXT:    call fma@plt
+; RV32I-NEXT:    call fma
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
@@ -1270,13 +1270,13 @@ define double @fnmsub_d(double %a, double %b, double %c) nounwind {
 ; RV64I-NEXT:    mv s0, a2
 ; RV64I-NEXT:    mv s1, a1
 ; RV64I-NEXT:    li a1, 0
-; RV64I-NEXT:    call __adddf3@plt
+; RV64I-NEXT:    call __adddf3
 ; RV64I-NEXT:    li a1, -1
 ; RV64I-NEXT:    slli a1, a1, 63
 ; RV64I-NEXT:    xor a0, a0, a1
 ; RV64I-NEXT:    mv a1, s1
 ; RV64I-NEXT:    mv a2, s0
-; RV64I-NEXT:    call fma@plt
+; RV64I-NEXT:    call fma
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
@@ -1350,7 +1350,7 @@ define double @fnmsub_d_2(double %a, double %b, double %c) nounwind {
 ; RV32I-NEXT:    mv a1, a3
 ; RV32I-NEXT:    li a2, 0
 ; RV32I-NEXT:    li a3, 0
-; RV32I-NEXT:    call __adddf3@plt
+; RV32I-NEXT:    call __adddf3
 ; RV32I-NEXT:    mv a2, a0
 ; RV32I-NEXT:    lui a3, 524288
 ; RV32I-NEXT:    xor a3, a1, a3
@@ -1358,7 +1358,7 @@ define double @fnmsub_d_2(double %a, double %b, double %c) nounwind {
 ; RV32I-NEXT:    mv a1, s2
 ; RV32I-NEXT:    mv a4, s1
 ; RV32I-NEXT:    mv a5, s0
-; RV32I-NEXT:    call fma@plt
+; RV32I-NEXT:    call fma
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
@@ -1377,13 +1377,13 @@ define double @fnmsub_d_2(double %a, double %b, double %c) nounwind {
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    mv a0, a1
 ; RV64I-NEXT:    li a1, 0
-; RV64I-NEXT:    call __adddf3@plt
+; RV64I-NEXT:    call __adddf3
 ; RV64I-NEXT:    li a1, -1
 ; RV64I-NEXT:    slli a1, a1, 63
 ; RV64I-NEXT:    xor a1, a0, a1
 ; RV64I-NEXT:    mv a0, s1
 ; RV64I-NEXT:    mv a2, s0
-; RV64I-NEXT:    call fma@plt
+; RV64I-NEXT:    call fma
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
@@ -1437,10 +1437,10 @@ define double @fmadd_d_contract(double %a, double %b, double %c) nounwind {
 ; RV32I-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a5
 ; RV32I-NEXT:    mv s1, a4
-; RV32I-NEXT:    call __muldf3@plt
+; RV32I-NEXT:    call __muldf3
 ; RV32I-NEXT:    mv a2, s1
 ; RV32I-NEXT:    mv a3, s0
-; RV32I-NEXT:    call __adddf3@plt
+; RV32I-NEXT:    call __adddf3
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
@@ -1453,9 +1453,9 @@ define double @fmadd_d_contract(double %a, double %b, double %c) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a2
-; RV64I-NEXT:    call __muldf3@plt
+; RV64I-NEXT:    call __muldf3
 ; RV64I-NEXT:    mv a1, s0
-; RV64I-NEXT:    call __adddf3@plt
+; RV64I-NEXT:    call __adddf3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -1529,17 +1529,17 @@ define double @fmsub_d_contract(double %a, double %b, double %c) nounwind {
 ; RV32I-NEXT:    mv a1, a5
 ; RV32I-NEXT:    li a2, 0
 ; RV32I-NEXT:    li a3, 0
-; RV32I-NEXT:    call __adddf3@plt
+; RV32I-NEXT:    call __adddf3
 ; RV32I-NEXT:    mv s4, a0
 ; RV32I-NEXT:    mv s5, a1
 ; RV32I-NEXT:    mv a0, s3
 ; RV32I-NEXT:    mv a1, s2
 ; RV32I-NEXT:    mv a2, s1
 ; RV32I-NEXT:    mv a3, s0
-; RV32I-NEXT:    call __muldf3@plt
+; RV32I-NEXT:    call __muldf3
 ; RV32I-NEXT:    mv a2, s4
 ; RV32I-NEXT:    mv a3, s5
-; RV32I-NEXT:    call __subdf3@plt
+; RV32I-NEXT:    call __subdf3
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
@@ -1561,13 +1561,13 @@ define double @fmsub_d_contract(double %a, double %b, double %c) nounwind {
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    mv a0, a2
 ; RV64I-NEXT:    li a1, 0
-; RV64I-NEXT:    call __adddf3@plt
+; RV64I-NEXT:    call __adddf3
 ; RV64I-NEXT:    mv s2, a0
 ; RV64I-NEXT:    mv a0, s1
 ; RV64I-NEXT:    mv a1, s0
-; RV64I-NEXT:    call __muldf3@plt
+; RV64I-NEXT:    call __muldf3
 ; RV64I-NEXT:    mv a1, s2
-; RV64I-NEXT:    call __subdf3@plt
+; RV64I-NEXT:    call __subdf3
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
@@ -1650,33 +1650,33 @@ define double @fnmadd_d_contract(double %a, double %b, double %c) nounwind {
 ; RV32I-NEXT:    mv s3, a2
 ; RV32I-NEXT:    li a2, 0
 ; RV32I-NEXT:    li a3, 0
-; RV32I-NEXT:    call __adddf3@plt
+; RV32I-NEXT:    call __adddf3
 ; RV32I-NEXT:    mv s4, a0
 ; RV32I-NEXT:    mv s5, a1
 ; RV32I-NEXT:    mv a0, s3
 ; RV32I-NEXT:    mv a1, s2
 ; RV32I-NEXT:    li a2, 0
 ; RV32I-NEXT:    li a3, 0
-; RV32I-NEXT:    call __adddf3@plt
+; RV32I-NEXT:    call __adddf3
 ; RV32I-NEXT:    mv s2, a0
 ; RV32I-NEXT:    mv s3, a1
 ; RV32I-NEXT:    mv a0, s1
 ; RV32I-NEXT:    mv a1, s0
 ; RV32I-NEXT:    li a2, 0
 ; RV32I-NEXT:    li a3, 0
-; RV32I-NEXT:    call __adddf3@plt
+; RV32I-NEXT:    call __adddf3
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    mv s1, a1
 ; RV32I-NEXT:    mv a0, s4
 ; RV32I-NEXT:    mv a1, s5
 ; RV32I-NEXT:    mv a2, s2
 ; RV32I-NEXT:    mv a3, s3
-; RV32I-NEXT:    call __muldf3@plt
+; RV32I-NEXT:    call __muldf3
 ; RV32I-NEXT:    lui a2, 524288
 ; RV32I-NEXT:    xor a1, a1, a2
 ; RV32I-NEXT:    mv a2, s0
 ; RV32I-NEXT:    mv a3, s1
-; RV32I-NEXT:    call __subdf3@plt
+; RV32I-NEXT:    call __subdf3
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
@@ -1697,24 +1697,24 @@ define double @fnmadd_d_contract(double %a, double %b, double %c) nounwind {
 ; RV64I-NEXT:    mv s0, a2
 ; RV64I-NEXT:    mv s1, a1
 ; RV64I-NEXT:    li a1, 0
-; RV64I-NEXT:    call __adddf3@plt
+; RV64I-NEXT:    call __adddf3
 ; RV64I-NEXT:    mv s2, a0
 ; RV64I-NEXT:    mv a0, s1
 ; RV64I-NEXT:    li a1, 0
-; RV64I-NEXT:    call __adddf3@plt
+; RV64I-NEXT:    call __adddf3
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    li a1, 0
-; RV64I-NEXT:    call __adddf3@plt
+; RV64I-NEXT:    call __adddf3
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    mv a0, s2
 ; RV64I-NEXT:    mv a1, s1
-; RV64I-NEXT:    call __muldf3@plt
+; RV64I-NEXT:    call __muldf3
 ; RV64I-NEXT:    li a1, -1
 ; RV64I-NEXT:    slli a1, a1, 63
 ; RV64I-NEXT:    xor a0, a0, a1
 ; RV64I-NEXT:    mv a1, s0
-; RV64I-NEXT:    call __subdf3@plt
+; RV64I-NEXT:    call __subdf3
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
@@ -1796,24 +1796,24 @@ define double @fnmsub_d_contract(double %a, double %b, double %c) nounwind {
 ; RV32I-NEXT:    mv s3, a2
 ; RV32I-NEXT:    li a2, 0
 ; RV32I-NEXT:    li a3, 0
-; RV32I-NEXT:    call __adddf3@plt
+; RV32I-NEXT:    call __adddf3
 ; RV32I-NEXT:    mv s4, a0
 ; RV32I-NEXT:    mv s5, a1
 ; RV32I-NEXT:    mv a0, s3
 ; RV32I-NEXT:    mv a1, s2
 ; RV32I-NEXT:    li a2, 0
 ; RV32I-NEXT:    li a3, 0
-; RV32I-NEXT:    call __adddf3@plt
+; RV32I-NEXT:    call __adddf3
 ; RV32I-NEXT:    mv a2, a0
 ; RV32I-NEXT:    mv a3, a1
 ; RV32I-NEXT:    mv a0, s4
 ; RV32I-NEXT:    mv a1, s5
-; RV32I-NEXT:    call __muldf3@plt
+; RV32I-NEXT:    call __muldf3
 ; RV32I-NEXT:    mv a2, a0
 ; RV32I-NEXT:    mv a3, a1
 ; RV32I-NEXT:    mv a0, s1
 ; RV32I-NEXT:    mv a1, s0
-; RV32I-NEXT:    call __subdf3@plt
+; RV32I-NEXT:    call __subdf3
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
@@ -1834,17 +1834,17 @@ define double @fnmsub_d_contract(double %a, double %b, double %c) nounwind {
 ; RV64I-NEXT:    mv s0, a2
 ; RV64I-NEXT:    mv s1, a1
 ; RV64I-NEXT:    li a1, 0
-; RV64I-NEXT:    call __adddf3@plt
+; RV64I-NEXT:    call __adddf3
 ; RV64I-NEXT:    mv s2, a0
 ; RV64I-NEXT:    mv a0, s1
 ; RV64I-NEXT:    li a1, 0
-; RV64I-NEXT:    call __adddf3@plt
+; RV64I-NEXT:    call __adddf3
 ; RV64I-NEXT:    mv a1, a0
 ; RV64I-NEXT:    mv a0, s2
-; RV64I-NEXT:    call __muldf3@plt
+; RV64I-NEXT:    call __muldf3
 ; RV64I-NEXT:    mv a1, a0
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __subdf3@plt
+; RV64I-NEXT:    call __subdf3
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
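
The spelling change above is purely textual: with or without the suffix, the
expanded call carries a single R_RISCV_CALL_PLT relocation, so the encoded
object bytes come out the same. A quick way to confirm locally (a sketch; the
llvm-readobj output below is abbreviated and its exact formatting may differ):

  $ echo 'call __adddf3' | llvm-mc -triple=riscv32 -filetype=obj -o t.o
  $ llvm-readobj -r t.o
    ...
    0x0 R_RISCV_CALL_PLT __adddf3 0x0
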
diff --git a/llvm/test/CodeGen/RISCV/double-br-fcmp.ll b/llvm/test/CodeGen/RISCV/double-br-fcmp.ll
index f2206d5397f001..2c5505edb1fa2a 100644
--- a/llvm/test/CodeGen/RISCV/double-br-fcmp.ll
+++ b/llvm/test/CodeGen/RISCV/double-br-fcmp.ll
@@ -21,7 +21,7 @@ define void @br_fcmp_false(double %a, double %b) nounwind {
 ; RV32IFD-NEXT:  .LBB0_2: # %if.else
 ; RV32IFD-NEXT:    addi sp, sp, -16
 ; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    call abort at plt
+; RV32IFD-NEXT:    call abort
 ;
 ; RV64IFD-LABEL: br_fcmp_false:
 ; RV64IFD:       # %bb.0:
@@ -32,7 +32,7 @@ define void @br_fcmp_false(double %a, double %b) nounwind {
 ; RV64IFD-NEXT:  .LBB0_2: # %if.else
 ; RV64IFD-NEXT:    addi sp, sp, -16
 ; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT:    call abort at plt
+; RV64IFD-NEXT:    call abort
 ;
 ; RV32IZFINXZDINX-LABEL: br_fcmp_false:
 ; RV32IZFINXZDINX:       # %bb.0:
@@ -43,7 +43,7 @@ define void @br_fcmp_false(double %a, double %b) nounwind {
 ; RV32IZFINXZDINX-NEXT:  .LBB0_2: # %if.else
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT:    call abort at plt
+; RV32IZFINXZDINX-NEXT:    call abort
 ;
 ; RV64IZFINXZDINX-LABEL: br_fcmp_false:
 ; RV64IZFINXZDINX:       # %bb.0:
@@ -54,7 +54,7 @@ define void @br_fcmp_false(double %a, double %b) nounwind {
 ; RV64IZFINXZDINX-NEXT:  .LBB0_2: # %if.else
 ; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINXZDINX-NEXT:    call abort at plt
+; RV64IZFINXZDINX-NEXT:    call abort
   %1 = fcmp false double %a, %b
   br i1 %1, label %if.then, label %if.else
 if.then:
@@ -74,7 +74,7 @@ define void @br_fcmp_oeq(double %a, double %b) nounwind {
 ; RV32IFD-NEXT:  .LBB1_2: # %if.then
 ; RV32IFD-NEXT:    addi sp, sp, -16
 ; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    call abort at plt
+; RV32IFD-NEXT:    call abort
 ;
 ; RV64IFD-LABEL: br_fcmp_oeq:
 ; RV64IFD:       # %bb.0:
@@ -85,7 +85,7 @@ define void @br_fcmp_oeq(double %a, double %b) nounwind {
 ; RV64IFD-NEXT:  .LBB1_2: # %if.then
 ; RV64IFD-NEXT:    addi sp, sp, -16
 ; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT:    call abort at plt
+; RV64IFD-NEXT:    call abort
 ;
 ; RV32IZFINXZDINX-LABEL: br_fcmp_oeq:
 ; RV32IZFINXZDINX:       # %bb.0:
@@ -106,7 +106,7 @@ define void @br_fcmp_oeq(double %a, double %b) nounwind {
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINXZDINX-NEXT:    ret
 ; RV32IZFINXZDINX-NEXT:  .LBB1_2: # %if.then
-; RV32IZFINXZDINX-NEXT:    call abort at plt
+; RV32IZFINXZDINX-NEXT:    call abort
 ;
 ; RV64IZFINXZDINX-LABEL: br_fcmp_oeq:
 ; RV64IZFINXZDINX:       # %bb.0:
@@ -117,7 +117,7 @@ define void @br_fcmp_oeq(double %a, double %b) nounwind {
 ; RV64IZFINXZDINX-NEXT:  .LBB1_2: # %if.then
 ; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINXZDINX-NEXT:    call abort at plt
+; RV64IZFINXZDINX-NEXT:    call abort
   %1 = fcmp oeq double %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -140,7 +140,7 @@ define void @br_fcmp_oeq_alt(double %a, double %b) nounwind {
 ; RV32IFD-NEXT:  .LBB2_2: # %if.then
 ; RV32IFD-NEXT:    addi sp, sp, -16
 ; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    call abort at plt
+; RV32IFD-NEXT:    call abort
 ;
 ; RV64IFD-LABEL: br_fcmp_oeq_alt:
 ; RV64IFD:       # %bb.0:
@@ -151,7 +151,7 @@ define void @br_fcmp_oeq_alt(double %a, double %b) nounwind {
 ; RV64IFD-NEXT:  .LBB2_2: # %if.then
 ; RV64IFD-NEXT:    addi sp, sp, -16
 ; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT:    call abort at plt
+; RV64IFD-NEXT:    call abort
 ;
 ; RV32IZFINXZDINX-LABEL: br_fcmp_oeq_alt:
 ; RV32IZFINXZDINX:       # %bb.0:
@@ -172,7 +172,7 @@ define void @br_fcmp_oeq_alt(double %a, double %b) nounwind {
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINXZDINX-NEXT:    ret
 ; RV32IZFINXZDINX-NEXT:  .LBB2_2: # %if.then
-; RV32IZFINXZDINX-NEXT:    call abort at plt
+; RV32IZFINXZDINX-NEXT:    call abort
 ;
 ; RV64IZFINXZDINX-LABEL: br_fcmp_oeq_alt:
 ; RV64IZFINXZDINX:       # %bb.0:
@@ -183,7 +183,7 @@ define void @br_fcmp_oeq_alt(double %a, double %b) nounwind {
 ; RV64IZFINXZDINX-NEXT:  .LBB2_2: # %if.then
 ; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINXZDINX-NEXT:    call abort at plt
+; RV64IZFINXZDINX-NEXT:    call abort
   %1 = fcmp oeq double %a, %b
   br i1 %1, label %if.then, label %if.else
 if.then:
@@ -203,7 +203,7 @@ define void @br_fcmp_ogt(double %a, double %b) nounwind {
 ; RV32IFD-NEXT:  .LBB3_2: # %if.then
 ; RV32IFD-NEXT:    addi sp, sp, -16
 ; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    call abort at plt
+; RV32IFD-NEXT:    call abort
 ;
 ; RV64IFD-LABEL: br_fcmp_ogt:
 ; RV64IFD:       # %bb.0:
@@ -214,7 +214,7 @@ define void @br_fcmp_ogt(double %a, double %b) nounwind {
 ; RV64IFD-NEXT:  .LBB3_2: # %if.then
 ; RV64IFD-NEXT:    addi sp, sp, -16
 ; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT:    call abort at plt
+; RV64IFD-NEXT:    call abort
 ;
 ; RV32IZFINXZDINX-LABEL: br_fcmp_ogt:
 ; RV32IZFINXZDINX:       # %bb.0:
@@ -235,7 +235,7 @@ define void @br_fcmp_ogt(double %a, double %b) nounwind {
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINXZDINX-NEXT:    ret
 ; RV32IZFINXZDINX-NEXT:  .LBB3_2: # %if.then
-; RV32IZFINXZDINX-NEXT:    call abort at plt
+; RV32IZFINXZDINX-NEXT:    call abort
 ;
 ; RV64IZFINXZDINX-LABEL: br_fcmp_ogt:
 ; RV64IZFINXZDINX:       # %bb.0:
@@ -246,7 +246,7 @@ define void @br_fcmp_ogt(double %a, double %b) nounwind {
 ; RV64IZFINXZDINX-NEXT:  .LBB3_2: # %if.then
 ; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINXZDINX-NEXT:    call abort at plt
+; RV64IZFINXZDINX-NEXT:    call abort
   %1 = fcmp ogt double %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -266,7 +266,7 @@ define void @br_fcmp_oge(double %a, double %b) nounwind {
 ; RV32IFD-NEXT:  .LBB4_2: # %if.then
 ; RV32IFD-NEXT:    addi sp, sp, -16
 ; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    call abort at plt
+; RV32IFD-NEXT:    call abort
 ;
 ; RV64IFD-LABEL: br_fcmp_oge:
 ; RV64IFD:       # %bb.0:
@@ -277,7 +277,7 @@ define void @br_fcmp_oge(double %a, double %b) nounwind {
 ; RV64IFD-NEXT:  .LBB4_2: # %if.then
 ; RV64IFD-NEXT:    addi sp, sp, -16
 ; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT:    call abort at plt
+; RV64IFD-NEXT:    call abort
 ;
 ; RV32IZFINXZDINX-LABEL: br_fcmp_oge:
 ; RV32IZFINXZDINX:       # %bb.0:
@@ -298,7 +298,7 @@ define void @br_fcmp_oge(double %a, double %b) nounwind {
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINXZDINX-NEXT:    ret
 ; RV32IZFINXZDINX-NEXT:  .LBB4_2: # %if.then
-; RV32IZFINXZDINX-NEXT:    call abort at plt
+; RV32IZFINXZDINX-NEXT:    call abort
 ;
 ; RV64IZFINXZDINX-LABEL: br_fcmp_oge:
 ; RV64IZFINXZDINX:       # %bb.0:
@@ -309,7 +309,7 @@ define void @br_fcmp_oge(double %a, double %b) nounwind {
 ; RV64IZFINXZDINX-NEXT:  .LBB4_2: # %if.then
 ; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINXZDINX-NEXT:    call abort at plt
+; RV64IZFINXZDINX-NEXT:    call abort
   %1 = fcmp oge double %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -329,7 +329,7 @@ define void @br_fcmp_olt(double %a, double %b) nounwind {
 ; RV32IFD-NEXT:  .LBB5_2: # %if.then
 ; RV32IFD-NEXT:    addi sp, sp, -16
 ; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    call abort at plt
+; RV32IFD-NEXT:    call abort
 ;
 ; RV64IFD-LABEL: br_fcmp_olt:
 ; RV64IFD:       # %bb.0:
@@ -340,7 +340,7 @@ define void @br_fcmp_olt(double %a, double %b) nounwind {
 ; RV64IFD-NEXT:  .LBB5_2: # %if.then
 ; RV64IFD-NEXT:    addi sp, sp, -16
 ; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT:    call abort at plt
+; RV64IFD-NEXT:    call abort
 ;
 ; RV32IZFINXZDINX-LABEL: br_fcmp_olt:
 ; RV32IZFINXZDINX:       # %bb.0:
@@ -361,7 +361,7 @@ define void @br_fcmp_olt(double %a, double %b) nounwind {
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINXZDINX-NEXT:    ret
 ; RV32IZFINXZDINX-NEXT:  .LBB5_2: # %if.then
-; RV32IZFINXZDINX-NEXT:    call abort at plt
+; RV32IZFINXZDINX-NEXT:    call abort
 ;
 ; RV64IZFINXZDINX-LABEL: br_fcmp_olt:
 ; RV64IZFINXZDINX:       # %bb.0:
@@ -372,7 +372,7 @@ define void @br_fcmp_olt(double %a, double %b) nounwind {
 ; RV64IZFINXZDINX-NEXT:  .LBB5_2: # %if.then
 ; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINXZDINX-NEXT:    call abort at plt
+; RV64IZFINXZDINX-NEXT:    call abort
   %1 = fcmp olt double %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -392,7 +392,7 @@ define void @br_fcmp_ole(double %a, double %b) nounwind {
 ; RV32IFD-NEXT:  .LBB6_2: # %if.then
 ; RV32IFD-NEXT:    addi sp, sp, -16
 ; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    call abort at plt
+; RV32IFD-NEXT:    call abort
 ;
 ; RV64IFD-LABEL: br_fcmp_ole:
 ; RV64IFD:       # %bb.0:
@@ -403,7 +403,7 @@ define void @br_fcmp_ole(double %a, double %b) nounwind {
 ; RV64IFD-NEXT:  .LBB6_2: # %if.then
 ; RV64IFD-NEXT:    addi sp, sp, -16
 ; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT:    call abort at plt
+; RV64IFD-NEXT:    call abort
 ;
 ; RV32IZFINXZDINX-LABEL: br_fcmp_ole:
 ; RV32IZFINXZDINX:       # %bb.0:
@@ -424,7 +424,7 @@ define void @br_fcmp_ole(double %a, double %b) nounwind {
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINXZDINX-NEXT:    ret
 ; RV32IZFINXZDINX-NEXT:  .LBB6_2: # %if.then
-; RV32IZFINXZDINX-NEXT:    call abort at plt
+; RV32IZFINXZDINX-NEXT:    call abort
 ;
 ; RV64IZFINXZDINX-LABEL: br_fcmp_ole:
 ; RV64IZFINXZDINX:       # %bb.0:
@@ -435,7 +435,7 @@ define void @br_fcmp_ole(double %a, double %b) nounwind {
 ; RV64IZFINXZDINX-NEXT:  .LBB6_2: # %if.then
 ; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINXZDINX-NEXT:    call abort at plt
+; RV64IZFINXZDINX-NEXT:    call abort
   %1 = fcmp ole double %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -457,7 +457,7 @@ define void @br_fcmp_one(double %a, double %b) nounwind {
 ; RV32IFD-NEXT:  .LBB7_2: # %if.then
 ; RV32IFD-NEXT:    addi sp, sp, -16
 ; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    call abort at plt
+; RV32IFD-NEXT:    call abort
 ;
 ; RV64IFD-LABEL: br_fcmp_one:
 ; RV64IFD:       # %bb.0:
@@ -470,7 +470,7 @@ define void @br_fcmp_one(double %a, double %b) nounwind {
 ; RV64IFD-NEXT:  .LBB7_2: # %if.then
 ; RV64IFD-NEXT:    addi sp, sp, -16
 ; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT:    call abort at plt
+; RV64IFD-NEXT:    call abort
 ;
 ; RV32IZFINXZDINX-LABEL: br_fcmp_one:
 ; RV32IZFINXZDINX:       # %bb.0:
@@ -493,7 +493,7 @@ define void @br_fcmp_one(double %a, double %b) nounwind {
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINXZDINX-NEXT:    ret
 ; RV32IZFINXZDINX-NEXT:  .LBB7_2: # %if.then
-; RV32IZFINXZDINX-NEXT:    call abort at plt
+; RV32IZFINXZDINX-NEXT:    call abort
 ;
 ; RV64IZFINXZDINX-LABEL: br_fcmp_one:
 ; RV64IZFINXZDINX:       # %bb.0:
@@ -506,7 +506,7 @@ define void @br_fcmp_one(double %a, double %b) nounwind {
 ; RV64IZFINXZDINX-NEXT:  .LBB7_2: # %if.then
 ; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINXZDINX-NEXT:    call abort at plt
+; RV64IZFINXZDINX-NEXT:    call abort
   %1 = fcmp one double %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -528,7 +528,7 @@ define void @br_fcmp_ord(double %a, double %b) nounwind {
 ; RV32IFD-NEXT:  .LBB8_2: # %if.then
 ; RV32IFD-NEXT:    addi sp, sp, -16
 ; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    call abort at plt
+; RV32IFD-NEXT:    call abort
 ;
 ; RV64IFD-LABEL: br_fcmp_ord:
 ; RV64IFD:       # %bb.0:
@@ -541,7 +541,7 @@ define void @br_fcmp_ord(double %a, double %b) nounwind {
 ; RV64IFD-NEXT:  .LBB8_2: # %if.then
 ; RV64IFD-NEXT:    addi sp, sp, -16
 ; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT:    call abort at plt
+; RV64IFD-NEXT:    call abort
 ;
 ; RV32IZFINXZDINX-LABEL: br_fcmp_ord:
 ; RV32IZFINXZDINX:       # %bb.0:
@@ -564,7 +564,7 @@ define void @br_fcmp_ord(double %a, double %b) nounwind {
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINXZDINX-NEXT:    ret
 ; RV32IZFINXZDINX-NEXT:  .LBB8_2: # %if.then
-; RV32IZFINXZDINX-NEXT:    call abort at plt
+; RV32IZFINXZDINX-NEXT:    call abort
 ;
 ; RV64IZFINXZDINX-LABEL: br_fcmp_ord:
 ; RV64IZFINXZDINX:       # %bb.0:
@@ -577,7 +577,7 @@ define void @br_fcmp_ord(double %a, double %b) nounwind {
 ; RV64IZFINXZDINX-NEXT:  .LBB8_2: # %if.then
 ; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINXZDINX-NEXT:    call abort at plt
+; RV64IZFINXZDINX-NEXT:    call abort
   %1 = fcmp ord double %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -599,7 +599,7 @@ define void @br_fcmp_ueq(double %a, double %b) nounwind {
 ; RV32IFD-NEXT:  .LBB9_2: # %if.then
 ; RV32IFD-NEXT:    addi sp, sp, -16
 ; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    call abort at plt
+; RV32IFD-NEXT:    call abort
 ;
 ; RV64IFD-LABEL: br_fcmp_ueq:
 ; RV64IFD:       # %bb.0:
@@ -612,7 +612,7 @@ define void @br_fcmp_ueq(double %a, double %b) nounwind {
 ; RV64IFD-NEXT:  .LBB9_2: # %if.then
 ; RV64IFD-NEXT:    addi sp, sp, -16
 ; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT:    call abort at plt
+; RV64IFD-NEXT:    call abort
 ;
 ; RV32IZFINXZDINX-LABEL: br_fcmp_ueq:
 ; RV32IZFINXZDINX:       # %bb.0:
@@ -635,7 +635,7 @@ define void @br_fcmp_ueq(double %a, double %b) nounwind {
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINXZDINX-NEXT:    ret
 ; RV32IZFINXZDINX-NEXT:  .LBB9_2: # %if.then
-; RV32IZFINXZDINX-NEXT:    call abort at plt
+; RV32IZFINXZDINX-NEXT:    call abort
 ;
 ; RV64IZFINXZDINX-LABEL: br_fcmp_ueq:
 ; RV64IZFINXZDINX:       # %bb.0:
@@ -648,7 +648,7 @@ define void @br_fcmp_ueq(double %a, double %b) nounwind {
 ; RV64IZFINXZDINX-NEXT:  .LBB9_2: # %if.then
 ; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINXZDINX-NEXT:    call abort at plt
+; RV64IZFINXZDINX-NEXT:    call abort
   %1 = fcmp ueq double %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -668,7 +668,7 @@ define void @br_fcmp_ugt(double %a, double %b) nounwind {
 ; RV32IFD-NEXT:  .LBB10_2: # %if.then
 ; RV32IFD-NEXT:    addi sp, sp, -16
 ; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    call abort at plt
+; RV32IFD-NEXT:    call abort
 ;
 ; RV64IFD-LABEL: br_fcmp_ugt:
 ; RV64IFD:       # %bb.0:
@@ -679,7 +679,7 @@ define void @br_fcmp_ugt(double %a, double %b) nounwind {
 ; RV64IFD-NEXT:  .LBB10_2: # %if.then
 ; RV64IFD-NEXT:    addi sp, sp, -16
 ; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT:    call abort at plt
+; RV64IFD-NEXT:    call abort
 ;
 ; RV32IZFINXZDINX-LABEL: br_fcmp_ugt:
 ; RV32IZFINXZDINX:       # %bb.0:
@@ -700,7 +700,7 @@ define void @br_fcmp_ugt(double %a, double %b) nounwind {
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINXZDINX-NEXT:    ret
 ; RV32IZFINXZDINX-NEXT:  .LBB10_2: # %if.then
-; RV32IZFINXZDINX-NEXT:    call abort at plt
+; RV32IZFINXZDINX-NEXT:    call abort
 ;
 ; RV64IZFINXZDINX-LABEL: br_fcmp_ugt:
 ; RV64IZFINXZDINX:       # %bb.0:
@@ -711,7 +711,7 @@ define void @br_fcmp_ugt(double %a, double %b) nounwind {
 ; RV64IZFINXZDINX-NEXT:  .LBB10_2: # %if.then
 ; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINXZDINX-NEXT:    call abort at plt
+; RV64IZFINXZDINX-NEXT:    call abort
   %1 = fcmp ugt double %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -731,7 +731,7 @@ define void @br_fcmp_uge(double %a, double %b) nounwind {
 ; RV32IFD-NEXT:  .LBB11_2: # %if.then
 ; RV32IFD-NEXT:    addi sp, sp, -16
 ; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    call abort at plt
+; RV32IFD-NEXT:    call abort
 ;
 ; RV64IFD-LABEL: br_fcmp_uge:
 ; RV64IFD:       # %bb.0:
@@ -742,7 +742,7 @@ define void @br_fcmp_uge(double %a, double %b) nounwind {
 ; RV64IFD-NEXT:  .LBB11_2: # %if.then
 ; RV64IFD-NEXT:    addi sp, sp, -16
 ; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT:    call abort at plt
+; RV64IFD-NEXT:    call abort
 ;
 ; RV32IZFINXZDINX-LABEL: br_fcmp_uge:
 ; RV32IZFINXZDINX:       # %bb.0:
@@ -763,7 +763,7 @@ define void @br_fcmp_uge(double %a, double %b) nounwind {
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINXZDINX-NEXT:    ret
 ; RV32IZFINXZDINX-NEXT:  .LBB11_2: # %if.then
-; RV32IZFINXZDINX-NEXT:    call abort at plt
+; RV32IZFINXZDINX-NEXT:    call abort
 ;
 ; RV64IZFINXZDINX-LABEL: br_fcmp_uge:
 ; RV64IZFINXZDINX:       # %bb.0:
@@ -774,7 +774,7 @@ define void @br_fcmp_uge(double %a, double %b) nounwind {
 ; RV64IZFINXZDINX-NEXT:  .LBB11_2: # %if.then
 ; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINXZDINX-NEXT:    call abort at plt
+; RV64IZFINXZDINX-NEXT:    call abort
   %1 = fcmp uge double %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -794,7 +794,7 @@ define void @br_fcmp_ult(double %a, double %b) nounwind {
 ; RV32IFD-NEXT:  .LBB12_2: # %if.then
 ; RV32IFD-NEXT:    addi sp, sp, -16
 ; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    call abort at plt
+; RV32IFD-NEXT:    call abort
 ;
 ; RV64IFD-LABEL: br_fcmp_ult:
 ; RV64IFD:       # %bb.0:
@@ -805,7 +805,7 @@ define void @br_fcmp_ult(double %a, double %b) nounwind {
 ; RV64IFD-NEXT:  .LBB12_2: # %if.then
 ; RV64IFD-NEXT:    addi sp, sp, -16
 ; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT:    call abort at plt
+; RV64IFD-NEXT:    call abort
 ;
 ; RV32IZFINXZDINX-LABEL: br_fcmp_ult:
 ; RV32IZFINXZDINX:       # %bb.0:
@@ -826,7 +826,7 @@ define void @br_fcmp_ult(double %a, double %b) nounwind {
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINXZDINX-NEXT:    ret
 ; RV32IZFINXZDINX-NEXT:  .LBB12_2: # %if.then
-; RV32IZFINXZDINX-NEXT:    call abort at plt
+; RV32IZFINXZDINX-NEXT:    call abort
 ;
 ; RV64IZFINXZDINX-LABEL: br_fcmp_ult:
 ; RV64IZFINXZDINX:       # %bb.0:
@@ -837,7 +837,7 @@ define void @br_fcmp_ult(double %a, double %b) nounwind {
 ; RV64IZFINXZDINX-NEXT:  .LBB12_2: # %if.then
 ; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINXZDINX-NEXT:    call abort at plt
+; RV64IZFINXZDINX-NEXT:    call abort
   %1 = fcmp ult double %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -857,7 +857,7 @@ define void @br_fcmp_ule(double %a, double %b) nounwind {
 ; RV32IFD-NEXT:  .LBB13_2: # %if.then
 ; RV32IFD-NEXT:    addi sp, sp, -16
 ; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    call abort at plt
+; RV32IFD-NEXT:    call abort
 ;
 ; RV64IFD-LABEL: br_fcmp_ule:
 ; RV64IFD:       # %bb.0:
@@ -868,7 +868,7 @@ define void @br_fcmp_ule(double %a, double %b) nounwind {
 ; RV64IFD-NEXT:  .LBB13_2: # %if.then
 ; RV64IFD-NEXT:    addi sp, sp, -16
 ; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT:    call abort at plt
+; RV64IFD-NEXT:    call abort
 ;
 ; RV32IZFINXZDINX-LABEL: br_fcmp_ule:
 ; RV32IZFINXZDINX:       # %bb.0:
@@ -889,7 +889,7 @@ define void @br_fcmp_ule(double %a, double %b) nounwind {
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINXZDINX-NEXT:    ret
 ; RV32IZFINXZDINX-NEXT:  .LBB13_2: # %if.then
-; RV32IZFINXZDINX-NEXT:    call abort at plt
+; RV32IZFINXZDINX-NEXT:    call abort
 ;
 ; RV64IZFINXZDINX-LABEL: br_fcmp_ule:
 ; RV64IZFINXZDINX:       # %bb.0:
@@ -900,7 +900,7 @@ define void @br_fcmp_ule(double %a, double %b) nounwind {
 ; RV64IZFINXZDINX-NEXT:  .LBB13_2: # %if.then
 ; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINXZDINX-NEXT:    call abort at plt
+; RV64IZFINXZDINX-NEXT:    call abort
   %1 = fcmp ule double %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -920,7 +920,7 @@ define void @br_fcmp_une(double %a, double %b) nounwind {
 ; RV32IFD-NEXT:  .LBB14_2: # %if.then
 ; RV32IFD-NEXT:    addi sp, sp, -16
 ; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    call abort at plt
+; RV32IFD-NEXT:    call abort
 ;
 ; RV64IFD-LABEL: br_fcmp_une:
 ; RV64IFD:       # %bb.0:
@@ -931,7 +931,7 @@ define void @br_fcmp_une(double %a, double %b) nounwind {
 ; RV64IFD-NEXT:  .LBB14_2: # %if.then
 ; RV64IFD-NEXT:    addi sp, sp, -16
 ; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT:    call abort at plt
+; RV64IFD-NEXT:    call abort
 ;
 ; RV32IZFINXZDINX-LABEL: br_fcmp_une:
 ; RV32IZFINXZDINX:       # %bb.0:
@@ -952,7 +952,7 @@ define void @br_fcmp_une(double %a, double %b) nounwind {
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINXZDINX-NEXT:    ret
 ; RV32IZFINXZDINX-NEXT:  .LBB14_2: # %if.then
-; RV32IZFINXZDINX-NEXT:    call abort at plt
+; RV32IZFINXZDINX-NEXT:    call abort
 ;
 ; RV64IZFINXZDINX-LABEL: br_fcmp_une:
 ; RV64IZFINXZDINX:       # %bb.0:
@@ -963,7 +963,7 @@ define void @br_fcmp_une(double %a, double %b) nounwind {
 ; RV64IZFINXZDINX-NEXT:  .LBB14_2: # %if.then
 ; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINXZDINX-NEXT:    call abort at plt
+; RV64IZFINXZDINX-NEXT:    call abort
   %1 = fcmp une double %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -985,7 +985,7 @@ define void @br_fcmp_uno(double %a, double %b) nounwind {
 ; RV32IFD-NEXT:  .LBB15_2: # %if.then
 ; RV32IFD-NEXT:    addi sp, sp, -16
 ; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    call abort at plt
+; RV32IFD-NEXT:    call abort
 ;
 ; RV64IFD-LABEL: br_fcmp_uno:
 ; RV64IFD:       # %bb.0:
@@ -998,7 +998,7 @@ define void @br_fcmp_uno(double %a, double %b) nounwind {
 ; RV64IFD-NEXT:  .LBB15_2: # %if.then
 ; RV64IFD-NEXT:    addi sp, sp, -16
 ; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT:    call abort at plt
+; RV64IFD-NEXT:    call abort
 ;
 ; RV32IZFINXZDINX-LABEL: br_fcmp_uno:
 ; RV32IZFINXZDINX:       # %bb.0:
@@ -1021,7 +1021,7 @@ define void @br_fcmp_uno(double %a, double %b) nounwind {
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINXZDINX-NEXT:    ret
 ; RV32IZFINXZDINX-NEXT:  .LBB15_2: # %if.then
-; RV32IZFINXZDINX-NEXT:    call abort at plt
+; RV32IZFINXZDINX-NEXT:    call abort
 ;
 ; RV64IZFINXZDINX-LABEL: br_fcmp_uno:
 ; RV64IZFINXZDINX:       # %bb.0:
@@ -1034,7 +1034,7 @@ define void @br_fcmp_uno(double %a, double %b) nounwind {
 ; RV64IZFINXZDINX-NEXT:  .LBB15_2: # %if.then
 ; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINXZDINX-NEXT:    call abort at plt
+; RV64IZFINXZDINX-NEXT:    call abort
   %1 = fcmp uno double %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -1054,7 +1054,7 @@ define void @br_fcmp_true(double %a, double %b) nounwind {
 ; RV32IFD-NEXT:  .LBB16_2: # %if.then
 ; RV32IFD-NEXT:    addi sp, sp, -16
 ; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    call abort at plt
+; RV32IFD-NEXT:    call abort
 ;
 ; RV64IFD-LABEL: br_fcmp_true:
 ; RV64IFD:       # %bb.0:
@@ -1065,7 +1065,7 @@ define void @br_fcmp_true(double %a, double %b) nounwind {
 ; RV64IFD-NEXT:  .LBB16_2: # %if.then
 ; RV64IFD-NEXT:    addi sp, sp, -16
 ; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT:    call abort at plt
+; RV64IFD-NEXT:    call abort
 ;
 ; RV32IZFINXZDINX-LABEL: br_fcmp_true:
 ; RV32IZFINXZDINX:       # %bb.0:
@@ -1076,7 +1076,7 @@ define void @br_fcmp_true(double %a, double %b) nounwind {
 ; RV32IZFINXZDINX-NEXT:  .LBB16_2: # %if.then
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT:    call abort at plt
+; RV32IZFINXZDINX-NEXT:    call abort
 ;
 ; RV64IZFINXZDINX-LABEL: br_fcmp_true:
 ; RV64IZFINXZDINX:       # %bb.0:
@@ -1087,7 +1087,7 @@ define void @br_fcmp_true(double %a, double %b) nounwind {
 ; RV64IZFINXZDINX-NEXT:  .LBB16_2: # %if.then
 ; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINXZDINX-NEXT:    call abort at plt
+; RV64IZFINXZDINX-NEXT:    call abort
   %1 = fcmp true double %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
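
All of the CHECK lines in these br_fcmp tests are autogenerated, so the churn
here is mechanical rather than hand-edited. To refresh a test after an output
change like this one, the usual flow is (a sketch, assuming llc was built
into build/bin):

  $ llvm/utils/update_llc_test_checks.py --llc-binary build/bin/llc \
      llvm/test/CodeGen/RISCV/double-br-fcmp.ll
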
diff --git a/llvm/test/CodeGen/RISCV/double-calling-conv.ll b/llvm/test/CodeGen/RISCV/double-calling-conv.ll
index ab511e8e62482e..d46256b12052e0 100644
--- a/llvm/test/CodeGen/RISCV/double-calling-conv.ll
+++ b/llvm/test/CodeGen/RISCV/double-calling-conv.ll
@@ -62,7 +62,7 @@ define double @caller_double_inreg() nounwind {
 ; RV32IFD-NEXT:    lui a2, 262364
 ; RV32IFD-NEXT:    addi a3, a2, 655
 ; RV32IFD-NEXT:    mv a2, a0
-; RV32IFD-NEXT:    call callee_double_inreg at plt
+; RV32IFD-NEXT:    call callee_double_inreg
 ; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
@@ -78,7 +78,7 @@ define double @caller_double_inreg() nounwind {
 ; RV32IZFINXZDINX-NEXT:    lui a2, 262364
 ; RV32IZFINXZDINX-NEXT:    addi a3, a2, 655
 ; RV32IZFINXZDINX-NEXT:    mv a2, a0
-; RV32IZFINXZDINX-NEXT:    call callee_double_inreg at plt
+; RV32IZFINXZDINX-NEXT:    call callee_double_inreg
 ; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINXZDINX-NEXT:    ret
@@ -145,7 +145,7 @@ define double @caller_double_split_reg_stack() nounwind {
 ; RV32IFD-NEXT:    li a2, 0
 ; RV32IFD-NEXT:    li a4, 0
 ; RV32IFD-NEXT:    mv a7, a5
-; RV32IFD-NEXT:    call callee_double_split_reg_stack at plt
+; RV32IFD-NEXT:    call callee_double_split_reg_stack
 ; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
@@ -167,7 +167,7 @@ define double @caller_double_split_reg_stack() nounwind {
 ; RV32IZFINXZDINX-NEXT:    li a2, 0
 ; RV32IZFINXZDINX-NEXT:    li a4, 0
 ; RV32IZFINXZDINX-NEXT:    mv a7, a5
-; RV32IZFINXZDINX-NEXT:    call callee_double_split_reg_stack at plt
+; RV32IZFINXZDINX-NEXT:    call callee_double_split_reg_stack
 ; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINXZDINX-NEXT:    ret
@@ -229,7 +229,7 @@ define double @caller_double_stack() nounwind {
 ; RV32IFD-NEXT:    li a3, 0
 ; RV32IFD-NEXT:    li a5, 0
 ; RV32IFD-NEXT:    li a7, 0
-; RV32IFD-NEXT:    call callee_double_stack at plt
+; RV32IFD-NEXT:    call callee_double_stack
 ; RV32IFD-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 32
 ; RV32IFD-NEXT:    ret
@@ -256,7 +256,7 @@ define double @caller_double_stack() nounwind {
 ; RV32IZFINXZDINX-NEXT:    li a3, 0
 ; RV32IZFINXZDINX-NEXT:    li a5, 0
 ; RV32IZFINXZDINX-NEXT:    li a7, 0
-; RV32IZFINXZDINX-NEXT:    call callee_double_stack at plt
+; RV32IZFINXZDINX-NEXT:    call callee_double_stack
 ; RV32IZFINXZDINX-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 32
 ; RV32IZFINXZDINX-NEXT:    ret
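
Only the printed operand changes in these calling-convention tests; the
`call` pseudo still expands to the same pc-relative pair, with the one
relocation attached to the auipc. Roughly (a sketch of the expansion, not
verbatim assembler output):

  call callee_double_stack
  # becomes:
  auipc ra, 0        # R_RISCV_CALL_PLT callee_double_stack
  jalr  ra, 0(ra)    # the relocation on the auipc covers the pair
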
diff --git a/llvm/test/CodeGen/RISCV/double-convert-strict.ll b/llvm/test/CodeGen/RISCV/double-convert-strict.ll
index adbe2c9e9754d2..967b119581af1a 100644
--- a/llvm/test/CodeGen/RISCV/double-convert-strict.ll
+++ b/llvm/test/CodeGen/RISCV/double-convert-strict.ll
@@ -46,7 +46,7 @@ define float @fcvt_s_d(double %a) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __truncdfsf2 at plt
+; RV32I-NEXT:    call __truncdfsf2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -55,7 +55,7 @@ define float @fcvt_s_d(double %a) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __truncdfsf2 at plt
+; RV64I-NEXT:    call __truncdfsf2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -90,7 +90,7 @@ define double @fcvt_d_s(float %a) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __extendsfdf2 at plt
+; RV32I-NEXT:    call __extendsfdf2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -99,7 +99,7 @@ define double @fcvt_d_s(float %a) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __extendsfdf2 at plt
+; RV64I-NEXT:    call __extendsfdf2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -134,7 +134,7 @@ define i32 @fcvt_w_d(double %a) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __fixdfsi at plt
+; RV32I-NEXT:    call __fixdfsi
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -143,7 +143,7 @@ define i32 @fcvt_w_d(double %a) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __fixdfsi at plt
+; RV64I-NEXT:    call __fixdfsi
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -180,7 +180,7 @@ define i32 @fcvt_wu_d(double %a) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __fixunsdfsi at plt
+; RV32I-NEXT:    call __fixunsdfsi
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -189,7 +189,7 @@ define i32 @fcvt_wu_d(double %a) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __fixunsdfsi at plt
+; RV64I-NEXT:    call __fixunsdfsi
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -232,7 +232,7 @@ define i32 @fcvt_wu_d_multiple_use(double %x, ptr %y) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __fixunsdfsi at plt
+; RV32I-NEXT:    call __fixunsdfsi
 ; RV32I-NEXT:    seqz a1, a0
 ; RV32I-NEXT:    add a0, a0, a1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -243,7 +243,7 @@ define i32 @fcvt_wu_d_multiple_use(double %x, ptr %y) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __fixunsdfsi at plt
+; RV64I-NEXT:    call __fixunsdfsi
 ; RV64I-NEXT:    seqz a1, a0
 ; RV64I-NEXT:    add a0, a0, a1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -281,7 +281,7 @@ define double @fcvt_d_w(i32 %a) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __floatsidf at plt
+; RV32I-NEXT:    call __floatsidf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -291,7 +291,7 @@ define double @fcvt_d_w(i32 %a) nounwind strictfp {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sext.w a0, a0
-; RV64I-NEXT:    call __floatsidf at plt
+; RV64I-NEXT:    call __floatsidf
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -330,7 +330,7 @@ define double @fcvt_d_w_load(ptr %p) nounwind strictfp {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    lw a0, 0(a0)
-; RV32I-NEXT:    call __floatsidf at plt
+; RV32I-NEXT:    call __floatsidf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -340,7 +340,7 @@ define double @fcvt_d_w_load(ptr %p) nounwind strictfp {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    lw a0, 0(a0)
-; RV64I-NEXT:    call __floatsidf at plt
+; RV64I-NEXT:    call __floatsidf
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -375,7 +375,7 @@ define double @fcvt_d_wu(i32 %a) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __floatunsidf at plt
+; RV32I-NEXT:    call __floatunsidf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -385,7 +385,7 @@ define double @fcvt_d_wu(i32 %a) nounwind strictfp {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sext.w a0, a0
-; RV64I-NEXT:    call __floatunsidf at plt
+; RV64I-NEXT:    call __floatunsidf
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -430,7 +430,7 @@ define double @fcvt_d_wu_load(ptr %p) nounwind strictfp {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    lw a0, 0(a0)
-; RV32I-NEXT:    call __floatunsidf at plt
+; RV32I-NEXT:    call __floatunsidf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -440,7 +440,7 @@ define double @fcvt_d_wu_load(ptr %p) nounwind strictfp {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    lw a0, 0(a0)
-; RV64I-NEXT:    call __floatunsidf at plt
+; RV64I-NEXT:    call __floatunsidf
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -454,7 +454,7 @@ define i64 @fcvt_l_d(double %a) nounwind strictfp {
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
 ; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    call __fixdfdi at plt
+; RV32IFD-NEXT:    call __fixdfdi
 ; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
@@ -468,7 +468,7 @@ define i64 @fcvt_l_d(double %a) nounwind strictfp {
 ; RV32IZFINXZDINX:       # %bb.0:
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT:    call __fixdfdi at plt
+; RV32IZFINXZDINX-NEXT:    call __fixdfdi
 ; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINXZDINX-NEXT:    ret
@@ -482,7 +482,7 @@ define i64 @fcvt_l_d(double %a) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __fixdfdi at plt
+; RV32I-NEXT:    call __fixdfdi
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -491,7 +491,7 @@ define i64 @fcvt_l_d(double %a) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __fixdfdi at plt
+; RV64I-NEXT:    call __fixdfdi
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -505,7 +505,7 @@ define i64 @fcvt_lu_d(double %a) nounwind strictfp {
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
 ; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    call __fixunsdfdi at plt
+; RV32IFD-NEXT:    call __fixunsdfdi
 ; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
@@ -519,7 +519,7 @@ define i64 @fcvt_lu_d(double %a) nounwind strictfp {
 ; RV32IZFINXZDINX:       # %bb.0:
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT:    call __fixunsdfdi at plt
+; RV32IZFINXZDINX-NEXT:    call __fixunsdfdi
 ; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINXZDINX-NEXT:    ret
@@ -533,7 +533,7 @@ define i64 @fcvt_lu_d(double %a) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __fixunsdfdi at plt
+; RV32I-NEXT:    call __fixunsdfdi
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -542,7 +542,7 @@ define i64 @fcvt_lu_d(double %a) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __fixunsdfdi at plt
+; RV64I-NEXT:    call __fixunsdfdi
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -556,7 +556,7 @@ define double @fcvt_d_l(i64 %a) nounwind strictfp {
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
 ; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    call __floatdidf at plt
+; RV32IFD-NEXT:    call __floatdidf
 ; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
@@ -570,7 +570,7 @@ define double @fcvt_d_l(i64 %a) nounwind strictfp {
 ; RV32IZFINXZDINX:       # %bb.0:
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT:    call __floatdidf at plt
+; RV32IZFINXZDINX-NEXT:    call __floatdidf
 ; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINXZDINX-NEXT:    ret
@@ -584,7 +584,7 @@ define double @fcvt_d_l(i64 %a) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __floatdidf at plt
+; RV32I-NEXT:    call __floatdidf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -593,7 +593,7 @@ define double @fcvt_d_l(i64 %a) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __floatdidf at plt
+; RV64I-NEXT:    call __floatdidf
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -607,7 +607,7 @@ define double @fcvt_d_lu(i64 %a) nounwind strictfp {
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
 ; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    call __floatundidf at plt
+; RV32IFD-NEXT:    call __floatundidf
 ; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
@@ -621,7 +621,7 @@ define double @fcvt_d_lu(i64 %a) nounwind strictfp {
 ; RV32IZFINXZDINX:       # %bb.0:
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT:    call __floatundidf at plt
+; RV32IZFINXZDINX-NEXT:    call __floatundidf
 ; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINXZDINX-NEXT:    ret
@@ -635,7 +635,7 @@ define double @fcvt_d_lu(i64 %a) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __floatundidf at plt
+; RV32I-NEXT:    call __floatundidf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -644,7 +644,7 @@ define double @fcvt_d_lu(i64 %a) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __floatundidf at plt
+; RV64I-NEXT:    call __floatundidf
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -679,7 +679,7 @@ define double @fcvt_d_w_i8(i8 signext %a) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __floatsidf at plt
+; RV32I-NEXT:    call __floatsidf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -688,7 +688,7 @@ define double @fcvt_d_w_i8(i8 signext %a) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __floatsidf at plt
+; RV64I-NEXT:    call __floatsidf
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -723,7 +723,7 @@ define double @fcvt_d_wu_i8(i8 zeroext %a) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __floatunsidf at plt
+; RV32I-NEXT:    call __floatunsidf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -732,7 +732,7 @@ define double @fcvt_d_wu_i8(i8 zeroext %a) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __floatunsidf at plt
+; RV64I-NEXT:    call __floatunsidf
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -767,7 +767,7 @@ define double @fcvt_d_w_i16(i16 signext %a) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __floatsidf at plt
+; RV32I-NEXT:    call __floatsidf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -776,7 +776,7 @@ define double @fcvt_d_w_i16(i16 signext %a) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __floatsidf at plt
+; RV64I-NEXT:    call __floatsidf
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -811,7 +811,7 @@ define double @fcvt_d_wu_i16(i16 zeroext %a) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __floatunsidf at plt
+; RV32I-NEXT:    call __floatunsidf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -820,7 +820,7 @@ define double @fcvt_d_wu_i16(i16 zeroext %a) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __floatunsidf at plt
+; RV64I-NEXT:    call __floatunsidf
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -871,7 +871,7 @@ define signext i32 @fcvt_d_w_demanded_bits(i32 signext %0, ptr %1) nounwind stri
 ; RV32I-NEXT:    mv s0, a1
 ; RV32I-NEXT:    addi s1, a0, 1
 ; RV32I-NEXT:    mv a0, s1
-; RV32I-NEXT:    call __floatsidf at plt
+; RV32I-NEXT:    call __floatsidf
 ; RV32I-NEXT:    sw a1, 4(s0)
 ; RV32I-NEXT:    sw a0, 0(s0)
 ; RV32I-NEXT:    mv a0, s1
@@ -890,7 +890,7 @@ define signext i32 @fcvt_d_w_demanded_bits(i32 signext %0, ptr %1) nounwind stri
 ; RV64I-NEXT:    mv s0, a1
 ; RV64I-NEXT:    addiw s1, a0, 1
 ; RV64I-NEXT:    mv a0, s1
-; RV64I-NEXT:    call __floatsidf at plt
+; RV64I-NEXT:    call __floatsidf
 ; RV64I-NEXT:    sd a0, 0(s0)
 ; RV64I-NEXT:    mv a0, s1
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
@@ -944,7 +944,7 @@ define signext i32 @fcvt_d_wu_demanded_bits(i32 signext %0, ptr %1) nounwind str
 ; RV32I-NEXT:    mv s0, a1
 ; RV32I-NEXT:    addi s1, a0, 1
 ; RV32I-NEXT:    mv a0, s1
-; RV32I-NEXT:    call __floatunsidf at plt
+; RV32I-NEXT:    call __floatunsidf
 ; RV32I-NEXT:    sw a1, 4(s0)
 ; RV32I-NEXT:    sw a0, 0(s0)
 ; RV32I-NEXT:    mv a0, s1
@@ -963,7 +963,7 @@ define signext i32 @fcvt_d_wu_demanded_bits(i32 signext %0, ptr %1) nounwind str
 ; RV64I-NEXT:    mv s0, a1
 ; RV64I-NEXT:    addiw s1, a0, 1
 ; RV64I-NEXT:    mv a0, s1
-; RV64I-NEXT:    call __floatunsidf at plt
+; RV64I-NEXT:    call __floatunsidf
 ; RV64I-NEXT:    sd a0, 0(s0)
 ; RV64I-NEXT:    mv a0, s1
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
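
The `__fixdfdi`/`__floatdidf` calls in these conversion tests are compiler-rt
libcalls: even with the D extension, rv32 has no double<->i64 conversion
instructions, so legalization emits a call, as the fcvt_l_d checks above
show. A minimal reproducer (file name assumed):

  ; conv.ll
  define i64 @fcvt_l_d(double %a) nounwind {
    %1 = fptosi double %a to i64
    ret i64 %1
  }

  $ llc -mtriple=riscv32 -mattr=+d conv.ll -o -   # emits 'call __fixdfdi'
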
diff --git a/llvm/test/CodeGen/RISCV/double-convert.ll b/llvm/test/CodeGen/RISCV/double-convert.ll
index 39ac963051b5b0..eb8ffe75ef7697 100644
--- a/llvm/test/CodeGen/RISCV/double-convert.ll
+++ b/llvm/test/CodeGen/RISCV/double-convert.ll
@@ -38,7 +38,7 @@ define float @fcvt_s_d(double %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __truncdfsf2 at plt
+; RV32I-NEXT:    call __truncdfsf2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -47,7 +47,7 @@ define float @fcvt_s_d(double %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __truncdfsf2 at plt
+; RV64I-NEXT:    call __truncdfsf2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -81,7 +81,7 @@ define double @fcvt_d_s(float %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __extendsfdf2 at plt
+; RV32I-NEXT:    call __extendsfdf2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -90,7 +90,7 @@ define double @fcvt_d_s(float %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __extendsfdf2 at plt
+; RV64I-NEXT:    call __extendsfdf2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -124,7 +124,7 @@ define i32 @fcvt_w_d(double %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __fixdfsi at plt
+; RV32I-NEXT:    call __fixdfsi
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -133,7 +133,7 @@ define i32 @fcvt_w_d(double %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __fixdfsi at plt
+; RV64I-NEXT:    call __fixdfsi
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -189,17 +189,17 @@ define i32 @fcvt_w_d_sat(double %a) nounwind {
 ; RV32I-NEXT:    lui a3, 269824
 ; RV32I-NEXT:    addi a3, a3, -1
 ; RV32I-NEXT:    lui a2, 1047552
-; RV32I-NEXT:    call __gtdf2 at plt
+; RV32I-NEXT:    call __gtdf2
 ; RV32I-NEXT:    mv s2, a0
 ; RV32I-NEXT:    lui a3, 794112
 ; RV32I-NEXT:    mv a0, s1
 ; RV32I-NEXT:    mv a1, s0
 ; RV32I-NEXT:    li a2, 0
-; RV32I-NEXT:    call __gedf2 at plt
+; RV32I-NEXT:    call __gedf2
 ; RV32I-NEXT:    mv s4, a0
 ; RV32I-NEXT:    mv a0, s1
 ; RV32I-NEXT:    mv a1, s0
-; RV32I-NEXT:    call __fixdfsi at plt
+; RV32I-NEXT:    call __fixdfsi
 ; RV32I-NEXT:    mv s3, a0
 ; RV32I-NEXT:    lui a0, 524288
 ; RV32I-NEXT:    bgez s4, .LBB3_2
@@ -214,7 +214,7 @@ define i32 @fcvt_w_d_sat(double %a) nounwind {
 ; RV32I-NEXT:    mv a1, s0
 ; RV32I-NEXT:    mv a2, s1
 ; RV32I-NEXT:    mv a3, s0
-; RV32I-NEXT:    call __unorddf2 at plt
+; RV32I-NEXT:    call __unorddf2
 ; RV32I-NEXT:    snez a0, a0
 ; RV32I-NEXT:    addi a0, a0, -1
 ; RV32I-NEXT:    and a0, a0, s3
@@ -238,10 +238,10 @@ define i32 @fcvt_w_d_sat(double %a) nounwind {
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    li a1, -497
 ; RV64I-NEXT:    slli a1, a1, 53
-; RV64I-NEXT:    call __gedf2 at plt
+; RV64I-NEXT:    call __gedf2
 ; RV64I-NEXT:    mv s2, a0
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __fixdfdi at plt
+; RV64I-NEXT:    call __fixdfdi
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    lui s3, 524288
 ; RV64I-NEXT:    bgez s2, .LBB3_2
@@ -253,14 +253,14 @@ define i32 @fcvt_w_d_sat(double %a) nounwind {
 ; RV64I-NEXT:    addi a0, a0, -1
 ; RV64I-NEXT:    slli a1, a0, 22
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __gtdf2 at plt
+; RV64I-NEXT:    call __gtdf2
 ; RV64I-NEXT:    blez a0, .LBB3_4
 ; RV64I-NEXT:  # %bb.3: # %start
 ; RV64I-NEXT:    addiw s1, s3, -1
 ; RV64I-NEXT:  .LBB3_4: # %start
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    mv a1, s0
-; RV64I-NEXT:    call __unorddf2 at plt
+; RV64I-NEXT:    call __unorddf2
 ; RV64I-NEXT:    snez a0, a0
 ; RV64I-NEXT:    addi a0, a0, -1
 ; RV64I-NEXT:    and a0, a0, s1
@@ -305,7 +305,7 @@ define i32 @fcvt_wu_d(double %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __fixunsdfsi at plt
+; RV32I-NEXT:    call __fixunsdfsi
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -314,7 +314,7 @@ define i32 @fcvt_wu_d(double %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __fixunsdfsi at plt
+; RV64I-NEXT:    call __fixunsdfsi
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -356,7 +356,7 @@ define i32 @fcvt_wu_d_multiple_use(double %x, ptr %y) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __fixunsdfsi at plt
+; RV32I-NEXT:    call __fixunsdfsi
 ; RV32I-NEXT:    seqz a1, a0
 ; RV32I-NEXT:    add a0, a0, a1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -367,7 +367,7 @@ define i32 @fcvt_wu_d_multiple_use(double %x, ptr %y) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __fixunsdfsi at plt
+; RV64I-NEXT:    call __fixunsdfsi
 ; RV64I-NEXT:    seqz a1, a0
 ; RV64I-NEXT:    add a0, a0, a1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -439,19 +439,19 @@ define i32 @fcvt_wu_d_sat(double %a) nounwind {
 ; RV32I-NEXT:    lui a3, 270080
 ; RV32I-NEXT:    addi a3, a3, -1
 ; RV32I-NEXT:    lui a2, 1048064
-; RV32I-NEXT:    call __gtdf2 at plt
+; RV32I-NEXT:    call __gtdf2
 ; RV32I-NEXT:    sgtz a0, a0
 ; RV32I-NEXT:    neg s2, a0
 ; RV32I-NEXT:    mv a0, s1
 ; RV32I-NEXT:    mv a1, s0
 ; RV32I-NEXT:    li a2, 0
 ; RV32I-NEXT:    li a3, 0
-; RV32I-NEXT:    call __gedf2 at plt
+; RV32I-NEXT:    call __gedf2
 ; RV32I-NEXT:    slti a0, a0, 0
 ; RV32I-NEXT:    addi s3, a0, -1
 ; RV32I-NEXT:    mv a0, s1
 ; RV32I-NEXT:    mv a1, s0
-; RV32I-NEXT:    call __fixunsdfsi at plt
+; RV32I-NEXT:    call __fixunsdfsi
 ; RV32I-NEXT:    and a0, s3, a0
 ; RV32I-NEXT:    or a0, s2, a0
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
@@ -471,17 +471,17 @@ define i32 @fcvt_wu_d_sat(double %a) nounwind {
 ; RV64I-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s2, a0
 ; RV64I-NEXT:    li a1, 0
-; RV64I-NEXT:    call __gedf2 at plt
+; RV64I-NEXT:    call __gedf2
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    mv a0, s2
-; RV64I-NEXT:    call __fixunsdfdi at plt
+; RV64I-NEXT:    call __fixunsdfdi
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    li a0, 1055
 ; RV64I-NEXT:    slli a0, a0, 31
 ; RV64I-NEXT:    addi a0, a0, -1
 ; RV64I-NEXT:    slli a1, a0, 21
 ; RV64I-NEXT:    mv a0, s2
-; RV64I-NEXT:    call __gtdf2 at plt
+; RV64I-NEXT:    call __gtdf2
 ; RV64I-NEXT:    blez a0, .LBB6_2
 ; RV64I-NEXT:  # %bb.1: # %start
 ; RV64I-NEXT:    li a0, -1
@@ -530,7 +530,7 @@ define double @fcvt_d_w(i32 %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __floatsidf at plt
+; RV32I-NEXT:    call __floatsidf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -540,7 +540,7 @@ define double @fcvt_d_w(i32 %a) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sext.w a0, a0
-; RV64I-NEXT:    call __floatsidf at plt
+; RV64I-NEXT:    call __floatsidf
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -578,7 +578,7 @@ define double @fcvt_d_w_load(ptr %p) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    lw a0, 0(a0)
-; RV32I-NEXT:    call __floatsidf at plt
+; RV32I-NEXT:    call __floatsidf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -588,7 +588,7 @@ define double @fcvt_d_w_load(ptr %p) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    lw a0, 0(a0)
-; RV64I-NEXT:    call __floatsidf at plt
+; RV64I-NEXT:    call __floatsidf
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -623,7 +623,7 @@ define double @fcvt_d_wu(i32 %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __floatunsidf at plt
+; RV32I-NEXT:    call __floatunsidf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -633,7 +633,7 @@ define double @fcvt_d_wu(i32 %a) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sext.w a0, a0
-; RV64I-NEXT:    call __floatunsidf at plt
+; RV64I-NEXT:    call __floatunsidf
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -677,7 +677,7 @@ define double @fcvt_d_wu_load(ptr %p) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    lw a0, 0(a0)
-; RV32I-NEXT:    call __floatunsidf at plt
+; RV32I-NEXT:    call __floatunsidf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -687,7 +687,7 @@ define double @fcvt_d_wu_load(ptr %p) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    lw a0, 0(a0)
-; RV64I-NEXT:    call __floatunsidf at plt
+; RV64I-NEXT:    call __floatunsidf
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -701,7 +701,7 @@ define i64 @fcvt_l_d(double %a) nounwind {
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
 ; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    call __fixdfdi at plt
+; RV32IFD-NEXT:    call __fixdfdi
 ; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
@@ -715,7 +715,7 @@ define i64 @fcvt_l_d(double %a) nounwind {
 ; RV32IZFINXZDINX:       # %bb.0:
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT:    call __fixdfdi@plt
+; RV32IZFINXZDINX-NEXT:    call __fixdfdi
 ; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINXZDINX-NEXT:    ret
@@ -729,7 +729,7 @@ define i64 @fcvt_l_d(double %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __fixdfdi@plt
+; RV32I-NEXT:    call __fixdfdi
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -738,7 +738,7 @@ define i64 @fcvt_l_d(double %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __fixdfdi@plt
+; RV64I-NEXT:    call __fixdfdi
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -757,7 +757,7 @@ define i64 @fcvt_l_d_sat(double %a) nounwind {
 ; RV32IFD-NEXT:    fld fa5, %lo(.LCPI12_0)(a0)
 ; RV32IFD-NEXT:    fmv.d fs0, fa0
 ; RV32IFD-NEXT:    fle.d s0, fa5, fa0
-; RV32IFD-NEXT:    call __fixdfdi@plt
+; RV32IFD-NEXT:    call __fixdfdi
 ; RV32IFD-NEXT:    lui a4, 524288
 ; RV32IFD-NEXT:    lui a2, 524288
 ; RV32IFD-NEXT:    beqz s0, .LBB12_2
@@ -804,7 +804,7 @@ define i64 @fcvt_l_d_sat(double %a) nounwind {
 ; RV32IZFINXZDINX-NEXT:    sw a1, 12(sp)
 ; RV32IZFINXZDINX-NEXT:    lw s0, 8(sp)
 ; RV32IZFINXZDINX-NEXT:    lw s1, 12(sp)
-; RV32IZFINXZDINX-NEXT:    call __fixdfdi@plt
+; RV32IZFINXZDINX-NEXT:    call __fixdfdi
 ; RV32IZFINXZDINX-NEXT:    lui a2, %hi(.LCPI12_0)
 ; RV32IZFINXZDINX-NEXT:    lw a3, %lo(.LCPI12_0+4)(a2)
 ; RV32IZFINXZDINX-NEXT:    lw a2, %lo(.LCPI12_0)(a2)
@@ -861,17 +861,17 @@ define i64 @fcvt_l_d_sat(double %a) nounwind {
 ; RV32I-NEXT:    lui a3, 278016
 ; RV32I-NEXT:    addi a3, a3, -1
 ; RV32I-NEXT:    li a2, -1
-; RV32I-NEXT:    call __gtdf2@plt
+; RV32I-NEXT:    call __gtdf2
 ; RV32I-NEXT:    mv s2, a0
 ; RV32I-NEXT:    lui a3, 802304
 ; RV32I-NEXT:    mv a0, s1
 ; RV32I-NEXT:    mv a1, s0
 ; RV32I-NEXT:    li a2, 0
-; RV32I-NEXT:    call __gedf2@plt
+; RV32I-NEXT:    call __gedf2
 ; RV32I-NEXT:    mv s3, a0
 ; RV32I-NEXT:    mv a0, s1
 ; RV32I-NEXT:    mv a1, s0
-; RV32I-NEXT:    call __fixdfdi@plt
+; RV32I-NEXT:    call __fixdfdi
 ; RV32I-NEXT:    mv s4, a0
 ; RV32I-NEXT:    mv s5, a1
 ; RV32I-NEXT:    lui a0, 524288
@@ -887,7 +887,7 @@ define i64 @fcvt_l_d_sat(double %a) nounwind {
 ; RV32I-NEXT:    mv a1, s0
 ; RV32I-NEXT:    mv a2, s1
 ; RV32I-NEXT:    mv a3, s0
-; RV32I-NEXT:    call __unorddf2@plt
+; RV32I-NEXT:    call __unorddf2
 ; RV32I-NEXT:    snez a0, a0
 ; RV32I-NEXT:    addi a0, a0, -1
 ; RV32I-NEXT:    and a1, a0, s5
@@ -919,10 +919,10 @@ define i64 @fcvt_l_d_sat(double %a) nounwind {
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    li a1, -481
 ; RV64I-NEXT:    slli a1, a1, 53
-; RV64I-NEXT:    call __gedf2@plt
+; RV64I-NEXT:    call __gedf2
 ; RV64I-NEXT:    mv s2, a0
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __fixdfdi@plt
+; RV64I-NEXT:    call __fixdfdi
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    li s3, -1
 ; RV64I-NEXT:    bgez s2, .LBB12_2
@@ -933,14 +933,14 @@ define i64 @fcvt_l_d_sat(double %a) nounwind {
 ; RV64I-NEXT:    slli a0, a0, 53
 ; RV64I-NEXT:    addi a1, a0, -1
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __gtdf2@plt
+; RV64I-NEXT:    call __gtdf2
 ; RV64I-NEXT:    blez a0, .LBB12_4
 ; RV64I-NEXT:  # %bb.3: # %start
 ; RV64I-NEXT:    srli s1, s3, 1
 ; RV64I-NEXT:  .LBB12_4: # %start
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    mv a1, s0
-; RV64I-NEXT:    call __unorddf2@plt
+; RV64I-NEXT:    call __unorddf2
 ; RV64I-NEXT:    snez a0, a0
 ; RV64I-NEXT:    addi a0, a0, -1
 ; RV64I-NEXT:    and a0, a0, s1
@@ -962,7 +962,7 @@ define i64 @fcvt_lu_d(double %a) nounwind {
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
 ; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    call __fixunsdfdi@plt
+; RV32IFD-NEXT:    call __fixunsdfdi
 ; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
@@ -976,7 +976,7 @@ define i64 @fcvt_lu_d(double %a) nounwind {
 ; RV32IZFINXZDINX:       # %bb.0:
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT:    call __fixunsdfdi@plt
+; RV32IZFINXZDINX-NEXT:    call __fixunsdfdi
 ; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINXZDINX-NEXT:    ret
@@ -990,7 +990,7 @@ define i64 @fcvt_lu_d(double %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __fixunsdfdi@plt
+; RV32I-NEXT:    call __fixunsdfdi
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -999,7 +999,7 @@ define i64 @fcvt_lu_d(double %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __fixunsdfdi@plt
+; RV64I-NEXT:    call __fixunsdfdi
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1018,7 +1018,7 @@ define i64 @fcvt_lu_d_sat(double %a) nounwind {
 ; RV32IFD-NEXT:    fcvt.d.w fa5, zero
 ; RV32IFD-NEXT:    fle.d a0, fa5, fa0
 ; RV32IFD-NEXT:    neg s0, a0
-; RV32IFD-NEXT:    call __fixunsdfdi@plt
+; RV32IFD-NEXT:    call __fixunsdfdi
 ; RV32IFD-NEXT:    lui a2, %hi(.LCPI14_0)
 ; RV32IFD-NEXT:    fld fa5, %lo(.LCPI14_0)(a2)
 ; RV32IFD-NEXT:    and a0, s0, a0
@@ -1052,7 +1052,7 @@ define i64 @fcvt_lu_d_sat(double %a) nounwind {
 ; RV32IZFINXZDINX-NEXT:    sw a1, 12(sp)
 ; RV32IZFINXZDINX-NEXT:    lw s0, 8(sp)
 ; RV32IZFINXZDINX-NEXT:    lw s1, 12(sp)
-; RV32IZFINXZDINX-NEXT:    call __fixunsdfdi@plt
+; RV32IZFINXZDINX-NEXT:    call __fixunsdfdi
 ; RV32IZFINXZDINX-NEXT:    fcvt.d.w a2, zero
 ; RV32IZFINXZDINX-NEXT:    lui a4, %hi(.LCPI14_0)
 ; RV32IZFINXZDINX-NEXT:    lw a5, %lo(.LCPI14_0+4)(a4)
@@ -1093,19 +1093,19 @@ define i64 @fcvt_lu_d_sat(double %a) nounwind {
 ; RV32I-NEXT:    lui a3, 278272
 ; RV32I-NEXT:    addi a3, a3, -1
 ; RV32I-NEXT:    li a2, -1
-; RV32I-NEXT:    call __gtdf2@plt
+; RV32I-NEXT:    call __gtdf2
 ; RV32I-NEXT:    sgtz a0, a0
 ; RV32I-NEXT:    neg s2, a0
 ; RV32I-NEXT:    mv a0, s1
 ; RV32I-NEXT:    mv a1, s0
 ; RV32I-NEXT:    li a2, 0
 ; RV32I-NEXT:    li a3, 0
-; RV32I-NEXT:    call __gedf2@plt
+; RV32I-NEXT:    call __gedf2
 ; RV32I-NEXT:    slti a0, a0, 0
 ; RV32I-NEXT:    addi s3, a0, -1
 ; RV32I-NEXT:    mv a0, s1
 ; RV32I-NEXT:    mv a1, s0
-; RV32I-NEXT:    call __fixunsdfdi@plt
+; RV32I-NEXT:    call __fixunsdfdi
 ; RV32I-NEXT:    and a0, s3, a0
 ; RV32I-NEXT:    or a0, s2, a0
 ; RV32I-NEXT:    and a1, s3, a1
@@ -1126,17 +1126,17 @@ define i64 @fcvt_lu_d_sat(double %a) nounwind {
 ; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    li a1, 0
-; RV64I-NEXT:    call __gedf2@plt
+; RV64I-NEXT:    call __gedf2
 ; RV64I-NEXT:    slti a0, a0, 0
 ; RV64I-NEXT:    addi s1, a0, -1
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __fixunsdfdi@plt
+; RV64I-NEXT:    call __fixunsdfdi
 ; RV64I-NEXT:    and s1, s1, a0
 ; RV64I-NEXT:    li a0, 1087
 ; RV64I-NEXT:    slli a0, a0, 52
 ; RV64I-NEXT:    addi a1, a0, -1
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __gtdf2@plt
+; RV64I-NEXT:    call __gtdf2
 ; RV64I-NEXT:    sgtz a0, a0
 ; RV64I-NEXT:    neg a0, a0
 ; RV64I-NEXT:    or a0, a0, s1
@@ -1196,7 +1196,7 @@ define i64 @fmv_x_d(double %a, double %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __adddf3@plt
+; RV32I-NEXT:    call __adddf3
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1205,7 +1205,7 @@ define i64 @fmv_x_d(double %a, double %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __adddf3@plt
+; RV64I-NEXT:    call __adddf3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1219,7 +1219,7 @@ define double @fcvt_d_l(i64 %a) nounwind {
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
 ; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    call __floatdidf@plt
+; RV32IFD-NEXT:    call __floatdidf
 ; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
@@ -1233,7 +1233,7 @@ define double @fcvt_d_l(i64 %a) nounwind {
 ; RV32IZFINXZDINX:       # %bb.0:
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT:    call __floatdidf@plt
+; RV32IZFINXZDINX-NEXT:    call __floatdidf
 ; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINXZDINX-NEXT:    ret
@@ -1247,7 +1247,7 @@ define double @fcvt_d_l(i64 %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __floatdidf@plt
+; RV32I-NEXT:    call __floatdidf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1256,7 +1256,7 @@ define double @fcvt_d_l(i64 %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __floatdidf@plt
+; RV64I-NEXT:    call __floatdidf
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1269,7 +1269,7 @@ define double @fcvt_d_lu(i64 %a) nounwind {
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
 ; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    call __floatundidf@plt
+; RV32IFD-NEXT:    call __floatundidf
 ; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
@@ -1283,7 +1283,7 @@ define double @fcvt_d_lu(i64 %a) nounwind {
 ; RV32IZFINXZDINX:       # %bb.0:
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT:    call __floatundidf@plt
+; RV32IZFINXZDINX-NEXT:    call __floatundidf
 ; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINXZDINX-NEXT:    ret
@@ -1297,7 +1297,7 @@ define double @fcvt_d_lu(i64 %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __floatundidf@plt
+; RV32I-NEXT:    call __floatundidf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1306,7 +1306,7 @@ define double @fcvt_d_lu(i64 %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __floatundidf@plt
+; RV64I-NEXT:    call __floatundidf
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1364,7 +1364,7 @@ define double @fmv_d_x(i64 %a, i64 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __adddf3@plt
+; RV32I-NEXT:    call __adddf3
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1373,7 +1373,7 @@ define double @fmv_d_x(i64 %a, i64 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __adddf3@plt
+; RV64I-NEXT:    call __adddf3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1409,7 +1409,7 @@ define double @fcvt_d_w_i8(i8 signext %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __floatsidf@plt
+; RV32I-NEXT:    call __floatsidf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1418,7 +1418,7 @@ define double @fcvt_d_w_i8(i8 signext %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __floatsidf@plt
+; RV64I-NEXT:    call __floatsidf
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1452,7 +1452,7 @@ define double @fcvt_d_wu_i8(i8 zeroext %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __floatunsidf@plt
+; RV32I-NEXT:    call __floatunsidf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1461,7 +1461,7 @@ define double @fcvt_d_wu_i8(i8 zeroext %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __floatunsidf@plt
+; RV64I-NEXT:    call __floatunsidf
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1495,7 +1495,7 @@ define double @fcvt_d_w_i16(i16 signext %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __floatsidf@plt
+; RV32I-NEXT:    call __floatsidf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1504,7 +1504,7 @@ define double @fcvt_d_w_i16(i16 signext %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __floatsidf@plt
+; RV64I-NEXT:    call __floatsidf
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1538,7 +1538,7 @@ define double @fcvt_d_wu_i16(i16 zeroext %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __floatunsidf@plt
+; RV32I-NEXT:    call __floatunsidf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1547,7 +1547,7 @@ define double @fcvt_d_wu_i16(i16 zeroext %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __floatunsidf@plt
+; RV64I-NEXT:    call __floatunsidf
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1597,7 +1597,7 @@ define signext i32 @fcvt_d_w_demanded_bits(i32 signext %0, ptr %1) nounwind {
 ; RV32I-NEXT:    mv s0, a1
 ; RV32I-NEXT:    addi s1, a0, 1
 ; RV32I-NEXT:    mv a0, s1
-; RV32I-NEXT:    call __floatsidf@plt
+; RV32I-NEXT:    call __floatsidf
 ; RV32I-NEXT:    sw a1, 4(s0)
 ; RV32I-NEXT:    sw a0, 0(s0)
 ; RV32I-NEXT:    mv a0, s1
@@ -1616,7 +1616,7 @@ define signext i32 @fcvt_d_w_demanded_bits(i32 signext %0, ptr %1) nounwind {
 ; RV64I-NEXT:    mv s0, a1
 ; RV64I-NEXT:    addiw s1, a0, 1
 ; RV64I-NEXT:    mv a0, s1
-; RV64I-NEXT:    call __floatsidf@plt
+; RV64I-NEXT:    call __floatsidf
 ; RV64I-NEXT:    sd a0, 0(s0)
 ; RV64I-NEXT:    mv a0, s1
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
@@ -1670,7 +1670,7 @@ define signext i32 @fcvt_d_wu_demanded_bits(i32 signext %0, ptr %1) nounwind {
 ; RV32I-NEXT:    mv s0, a1
 ; RV32I-NEXT:    addi s1, a0, 1
 ; RV32I-NEXT:    mv a0, s1
-; RV32I-NEXT:    call __floatunsidf@plt
+; RV32I-NEXT:    call __floatunsidf
 ; RV32I-NEXT:    sw a1, 4(s0)
 ; RV32I-NEXT:    sw a0, 0(s0)
 ; RV32I-NEXT:    mv a0, s1
@@ -1689,7 +1689,7 @@ define signext i32 @fcvt_d_wu_demanded_bits(i32 signext %0, ptr %1) nounwind {
 ; RV64I-NEXT:    mv s0, a1
 ; RV64I-NEXT:    addiw s1, a0, 1
 ; RV64I-NEXT:    mv a0, s1
-; RV64I-NEXT:    call __floatunsidf@plt
+; RV64I-NEXT:    call __floatunsidf
 ; RV64I-NEXT:    sd a0, 0(s0)
 ; RV64I-NEXT:    mv a0, s1
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
@@ -1734,7 +1734,7 @@ define signext i16 @fcvt_w_s_i16(double %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __fixdfsi@plt
+; RV32I-NEXT:    call __fixdfsi
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1743,7 +1743,7 @@ define signext i16 @fcvt_w_s_i16(double %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __fixdfdi@plt
+; RV64I-NEXT:    call __fixdfdi
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1831,17 +1831,17 @@ define signext i16 @fcvt_w_s_sat_i16(double %a) nounwind {
 ; RV32I-NEXT:    addi a3, a0, -64
 ; RV32I-NEXT:    mv a0, s1
 ; RV32I-NEXT:    li a2, 0
-; RV32I-NEXT:    call __gtdf2@plt
+; RV32I-NEXT:    call __gtdf2
 ; RV32I-NEXT:    mv s2, a0
 ; RV32I-NEXT:    lui a3, 790016
 ; RV32I-NEXT:    mv a0, s1
 ; RV32I-NEXT:    mv a1, s0
 ; RV32I-NEXT:    li a2, 0
-; RV32I-NEXT:    call __gedf2@plt
+; RV32I-NEXT:    call __gedf2
 ; RV32I-NEXT:    mv s4, a0
 ; RV32I-NEXT:    mv a0, s1
 ; RV32I-NEXT:    mv a1, s0
-; RV32I-NEXT:    call __fixdfsi@plt
+; RV32I-NEXT:    call __fixdfsi
 ; RV32I-NEXT:    mv s3, a0
 ; RV32I-NEXT:    bgez s4, .LBB26_2
 ; RV32I-NEXT:  # %bb.1: # %start
@@ -1856,7 +1856,7 @@ define signext i16 @fcvt_w_s_sat_i16(double %a) nounwind {
 ; RV32I-NEXT:    mv a1, s0
 ; RV32I-NEXT:    mv a2, s1
 ; RV32I-NEXT:    mv a3, s0
-; RV32I-NEXT:    call __unorddf2@plt
+; RV32I-NEXT:    call __unorddf2
 ; RV32I-NEXT:    snez a0, a0
 ; RV32I-NEXT:    addi a0, a0, -1
 ; RV32I-NEXT:    and a0, a0, s3
@@ -1881,10 +1881,10 @@ define signext i16 @fcvt_w_s_sat_i16(double %a) nounwind {
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    li a1, -505
 ; RV64I-NEXT:    slli a1, a1, 53
-; RV64I-NEXT:    call __gedf2@plt
+; RV64I-NEXT:    call __gedf2
 ; RV64I-NEXT:    mv s2, a0
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __fixdfdi@plt
+; RV64I-NEXT:    call __fixdfdi
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    bgez s2, .LBB26_2
 ; RV64I-NEXT:  # %bb.1: # %start
@@ -1894,7 +1894,7 @@ define signext i16 @fcvt_w_s_sat_i16(double %a) nounwind {
 ; RV64I-NEXT:    addi a0, a0, -1
 ; RV64I-NEXT:    slli a1, a0, 38
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __gtdf2@plt
+; RV64I-NEXT:    call __gtdf2
 ; RV64I-NEXT:    blez a0, .LBB26_4
 ; RV64I-NEXT:  # %bb.3: # %start
 ; RV64I-NEXT:    lui s1, 8
@@ -1902,7 +1902,7 @@ define signext i16 @fcvt_w_s_sat_i16(double %a) nounwind {
 ; RV64I-NEXT:  .LBB26_4: # %start
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    mv a1, s0
-; RV64I-NEXT:    call __unorddf2@plt
+; RV64I-NEXT:    call __unorddf2
 ; RV64I-NEXT:    snez a0, a0
 ; RV64I-NEXT:    addi a0, a0, -1
 ; RV64I-NEXT:    and a0, a0, s1
@@ -1951,7 +1951,7 @@ define zeroext i16 @fcvt_wu_s_i16(double %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __fixunsdfsi@plt
+; RV32I-NEXT:    call __fixunsdfsi
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1960,7 +1960,7 @@ define zeroext i16 @fcvt_wu_s_i16(double %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __fixunsdfdi@plt
+; RV64I-NEXT:    call __fixunsdfdi
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -2028,17 +2028,17 @@ define zeroext i16 @fcvt_wu_s_sat_i16(double %a) nounwind {
 ; RV32I-NEXT:    lui a3, 265984
 ; RV32I-NEXT:    addi a3, a3, -32
 ; RV32I-NEXT:    li a2, 0
-; RV32I-NEXT:    call __gtdf2@plt
+; RV32I-NEXT:    call __gtdf2
 ; RV32I-NEXT:    mv s3, a0
 ; RV32I-NEXT:    mv a0, s2
 ; RV32I-NEXT:    mv a1, s1
 ; RV32I-NEXT:    li a2, 0
 ; RV32I-NEXT:    li a3, 0
-; RV32I-NEXT:    call __gedf2@plt
+; RV32I-NEXT:    call __gedf2
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    mv a0, s2
 ; RV32I-NEXT:    mv a1, s1
-; RV32I-NEXT:    call __fixunsdfsi@plt
+; RV32I-NEXT:    call __fixunsdfsi
 ; RV32I-NEXT:    lui a1, 16
 ; RV32I-NEXT:    addi a1, a1, -1
 ; RV32I-NEXT:    blez s3, .LBB28_2
@@ -2068,16 +2068,16 @@ define zeroext i16 @fcvt_wu_s_sat_i16(double %a) nounwind {
 ; RV64I-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s2, a0
 ; RV64I-NEXT:    li a1, 0
-; RV64I-NEXT:    call __gedf2@plt
+; RV64I-NEXT:    call __gedf2
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    mv a0, s2
-; RV64I-NEXT:    call __fixunsdfdi@plt
+; RV64I-NEXT:    call __fixunsdfdi
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    lui a0, 8312
 ; RV64I-NEXT:    addi a0, a0, -1
 ; RV64I-NEXT:    slli a1, a0, 37
 ; RV64I-NEXT:    mv a0, s2
-; RV64I-NEXT:    call __gtdf2@plt
+; RV64I-NEXT:    call __gtdf2
 ; RV64I-NEXT:    lui a1, 16
 ; RV64I-NEXT:    addiw a1, a1, -1
 ; RV64I-NEXT:    blez a0, .LBB28_2
@@ -2133,7 +2133,7 @@ define signext i8 @fcvt_w_s_i8(double %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __fixdfsi@plt
+; RV32I-NEXT:    call __fixdfsi
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -2142,7 +2142,7 @@ define signext i8 @fcvt_w_s_i8(double %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __fixdfdi@plt
+; RV64I-NEXT:    call __fixdfdi
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -2228,17 +2228,17 @@ define signext i8 @fcvt_w_s_sat_i8(double %a) nounwind {
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    lui a3, 263676
 ; RV32I-NEXT:    li a2, 0
-; RV32I-NEXT:    call __gtdf2@plt
+; RV32I-NEXT:    call __gtdf2
 ; RV32I-NEXT:    mv s2, a0
 ; RV32I-NEXT:    lui a3, 787968
 ; RV32I-NEXT:    mv a0, s1
 ; RV32I-NEXT:    mv a1, s0
 ; RV32I-NEXT:    li a2, 0
-; RV32I-NEXT:    call __gedf2@plt
+; RV32I-NEXT:    call __gedf2
 ; RV32I-NEXT:    mv s4, a0
 ; RV32I-NEXT:    mv a0, s1
 ; RV32I-NEXT:    mv a1, s0
-; RV32I-NEXT:    call __fixdfsi@plt
+; RV32I-NEXT:    call __fixdfsi
 ; RV32I-NEXT:    mv s3, a0
 ; RV32I-NEXT:    bgez s4, .LBB30_2
 ; RV32I-NEXT:  # %bb.1: # %start
@@ -2252,7 +2252,7 @@ define signext i8 @fcvt_w_s_sat_i8(double %a) nounwind {
 ; RV32I-NEXT:    mv a1, s0
 ; RV32I-NEXT:    mv a2, s1
 ; RV32I-NEXT:    mv a3, s0
-; RV32I-NEXT:    call __unorddf2@plt
+; RV32I-NEXT:    call __unorddf2
 ; RV32I-NEXT:    snez a0, a0
 ; RV32I-NEXT:    addi a0, a0, -1
 ; RV32I-NEXT:    and a0, a0, s3
@@ -2277,10 +2277,10 @@ define signext i8 @fcvt_w_s_sat_i8(double %a) nounwind {
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    li a1, -509
 ; RV64I-NEXT:    slli a1, a1, 53
-; RV64I-NEXT:    call __gedf2@plt
+; RV64I-NEXT:    call __gedf2
 ; RV64I-NEXT:    mv s2, a0
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __fixdfdi@plt
+; RV64I-NEXT:    call __fixdfdi
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    bgez s2, .LBB30_2
 ; RV64I-NEXT:  # %bb.1: # %start
@@ -2289,14 +2289,14 @@ define signext i8 @fcvt_w_s_sat_i8(double %a) nounwind {
 ; RV64I-NEXT:    lui a1, 65919
 ; RV64I-NEXT:    slli a1, a1, 34
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __gtdf2@plt
+; RV64I-NEXT:    call __gtdf2
 ; RV64I-NEXT:    blez a0, .LBB30_4
 ; RV64I-NEXT:  # %bb.3: # %start
 ; RV64I-NEXT:    li s1, 127
 ; RV64I-NEXT:  .LBB30_4: # %start
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    mv a1, s0
-; RV64I-NEXT:    call __unorddf2@plt
+; RV64I-NEXT:    call __unorddf2
 ; RV64I-NEXT:    snez a0, a0
 ; RV64I-NEXT:    addi a0, a0, -1
 ; RV64I-NEXT:    and a0, a0, s1
@@ -2347,7 +2347,7 @@ define zeroext i8 @fcvt_wu_s_i8(double %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __fixunsdfsi@plt
+; RV32I-NEXT:    call __fixunsdfsi
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -2356,7 +2356,7 @@ define zeroext i8 @fcvt_wu_s_i8(double %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __fixunsdfdi@plt
+; RV64I-NEXT:    call __fixunsdfdi
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -2425,17 +2425,17 @@ define zeroext i8 @fcvt_wu_s_sat_i8(double %a) nounwind {
 ; RV32I-NEXT:    mv s2, a0
 ; RV32I-NEXT:    lui a3, 263934
 ; RV32I-NEXT:    li a2, 0
-; RV32I-NEXT:    call __gtdf2@plt
+; RV32I-NEXT:    call __gtdf2
 ; RV32I-NEXT:    mv s3, a0
 ; RV32I-NEXT:    mv a0, s2
 ; RV32I-NEXT:    mv a1, s1
 ; RV32I-NEXT:    li a2, 0
 ; RV32I-NEXT:    li a3, 0
-; RV32I-NEXT:    call __gedf2@plt
+; RV32I-NEXT:    call __gedf2
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    mv a0, s2
 ; RV32I-NEXT:    mv a1, s1
-; RV32I-NEXT:    call __fixunsdfsi@plt
+; RV32I-NEXT:    call __fixunsdfsi
 ; RV32I-NEXT:    blez s3, .LBB32_2
 ; RV32I-NEXT:  # %bb.1: # %start
 ; RV32I-NEXT:    li a0, 255
@@ -2463,15 +2463,15 @@ define zeroext i8 @fcvt_wu_s_sat_i8(double %a) nounwind {
 ; RV64I-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s2, a0
 ; RV64I-NEXT:    li a1, 0
-; RV64I-NEXT:    call __gedf2@plt
+; RV64I-NEXT:    call __gedf2
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    mv a0, s2
-; RV64I-NEXT:    call __fixunsdfdi@plt
+; RV64I-NEXT:    call __fixunsdfdi
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    lui a1, 131967
 ; RV64I-NEXT:    slli a1, a1, 33
 ; RV64I-NEXT:    mv a0, s2
-; RV64I-NEXT:    call __gtdf2@plt
+; RV64I-NEXT:    call __gtdf2
 ; RV64I-NEXT:    blez a0, .LBB32_2
 ; RV64I-NEXT:  # %bb.1: # %start
 ; RV64I-NEXT:    li a0, 255
@@ -2554,19 +2554,19 @@ define zeroext i32 @fcvt_wu_d_sat_zext(double %a) nounwind {
 ; RV32I-NEXT:    lui a3, 270080
 ; RV32I-NEXT:    addi a3, a3, -1
 ; RV32I-NEXT:    lui a2, 1048064
-; RV32I-NEXT:    call __gtdf2@plt
+; RV32I-NEXT:    call __gtdf2
 ; RV32I-NEXT:    sgtz a0, a0
 ; RV32I-NEXT:    neg s2, a0
 ; RV32I-NEXT:    mv a0, s1
 ; RV32I-NEXT:    mv a1, s0
 ; RV32I-NEXT:    li a2, 0
 ; RV32I-NEXT:    li a3, 0
-; RV32I-NEXT:    call __gedf2@plt
+; RV32I-NEXT:    call __gedf2
 ; RV32I-NEXT:    slti a0, a0, 0
 ; RV32I-NEXT:    addi s3, a0, -1
 ; RV32I-NEXT:    mv a0, s1
 ; RV32I-NEXT:    mv a1, s0
-; RV32I-NEXT:    call __fixunsdfsi@plt
+; RV32I-NEXT:    call __fixunsdfsi
 ; RV32I-NEXT:    and a0, s3, a0
 ; RV32I-NEXT:    or a0, s2, a0
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
@@ -2586,17 +2586,17 @@ define zeroext i32 @fcvt_wu_d_sat_zext(double %a) nounwind {
 ; RV64I-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s2, a0
 ; RV64I-NEXT:    li a1, 0
-; RV64I-NEXT:    call __gedf2@plt
+; RV64I-NEXT:    call __gedf2
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    mv a0, s2
-; RV64I-NEXT:    call __fixunsdfdi@plt
+; RV64I-NEXT:    call __fixunsdfdi
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    li a0, 1055
 ; RV64I-NEXT:    slli a0, a0, 31
 ; RV64I-NEXT:    addi a0, a0, -1
 ; RV64I-NEXT:    slli a1, a0, 21
 ; RV64I-NEXT:    mv a0, s2
-; RV64I-NEXT:    call __gtdf2@plt
+; RV64I-NEXT:    call __gtdf2
 ; RV64I-NEXT:    blez a0, .LBB33_2
 ; RV64I-NEXT:  # %bb.1: # %start
 ; RV64I-NEXT:    li a0, -1
@@ -2668,17 +2668,17 @@ define signext i32 @fcvt_w_d_sat_sext(double %a) nounwind {
 ; RV32I-NEXT:    lui a3, 269824
 ; RV32I-NEXT:    addi a3, a3, -1
 ; RV32I-NEXT:    lui a2, 1047552
-; RV32I-NEXT:    call __gtdf2@plt
+; RV32I-NEXT:    call __gtdf2
 ; RV32I-NEXT:    mv s2, a0
 ; RV32I-NEXT:    lui a3, 794112
 ; RV32I-NEXT:    mv a0, s1
 ; RV32I-NEXT:    mv a1, s0
 ; RV32I-NEXT:    li a2, 0
-; RV32I-NEXT:    call __gedf2@plt
+; RV32I-NEXT:    call __gedf2
 ; RV32I-NEXT:    mv s4, a0
 ; RV32I-NEXT:    mv a0, s1
 ; RV32I-NEXT:    mv a1, s0
-; RV32I-NEXT:    call __fixdfsi@plt
+; RV32I-NEXT:    call __fixdfsi
 ; RV32I-NEXT:    mv s3, a0
 ; RV32I-NEXT:    lui a0, 524288
 ; RV32I-NEXT:    bgez s4, .LBB34_2
@@ -2693,7 +2693,7 @@ define signext i32 @fcvt_w_d_sat_sext(double %a) nounwind {
 ; RV32I-NEXT:    mv a1, s0
 ; RV32I-NEXT:    mv a2, s1
 ; RV32I-NEXT:    mv a3, s0
-; RV32I-NEXT:    call __unorddf2@plt
+; RV32I-NEXT:    call __unorddf2
 ; RV32I-NEXT:    snez a0, a0
 ; RV32I-NEXT:    addi a0, a0, -1
 ; RV32I-NEXT:    and a0, a0, s3
@@ -2717,10 +2717,10 @@ define signext i32 @fcvt_w_d_sat_sext(double %a) nounwind {
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    li a1, -497
 ; RV64I-NEXT:    slli a1, a1, 53
-; RV64I-NEXT:    call __gedf2@plt
+; RV64I-NEXT:    call __gedf2
 ; RV64I-NEXT:    mv s2, a0
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __fixdfdi@plt
+; RV64I-NEXT:    call __fixdfdi
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    lui s3, 524288
 ; RV64I-NEXT:    bgez s2, .LBB34_2
@@ -2732,14 +2732,14 @@ define signext i32 @fcvt_w_d_sat_sext(double %a) nounwind {
 ; RV64I-NEXT:    addi a0, a0, -1
 ; RV64I-NEXT:    slli a1, a0, 22
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __gtdf2@plt
+; RV64I-NEXT:    call __gtdf2
 ; RV64I-NEXT:    blez a0, .LBB34_4
 ; RV64I-NEXT:  # %bb.3: # %start
 ; RV64I-NEXT:    addi s1, s3, -1
 ; RV64I-NEXT:  .LBB34_4: # %start
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    mv a1, s0
-; RV64I-NEXT:    call __unorddf2@plt
+; RV64I-NEXT:    call __unorddf2
 ; RV64I-NEXT:    snez a0, a0
 ; RV64I-NEXT:    addi a0, a0, -1
 ; RV64I-NEXT:    and a0, a0, s1
diff --git a/llvm/test/CodeGen/RISCV/double-fcmp-strict.ll b/llvm/test/CodeGen/RISCV/double-fcmp-strict.ll
index 428f63196bfe01..3ae2e997019caa 100644
--- a/llvm/test/CodeGen/RISCV/double-fcmp-strict.ll
+++ b/llvm/test/CodeGen/RISCV/double-fcmp-strict.ll
@@ -46,7 +46,7 @@ define i32 @fcmp_oeq(double %a, double %b) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __eqdf2@plt
+; RV32I-NEXT:    call __eqdf2
 ; RV32I-NEXT:    seqz a0, a0
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -56,7 +56,7 @@ define i32 @fcmp_oeq(double %a, double %b) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __eqdf2@plt
+; RV64I-NEXT:    call __eqdf2
 ; RV64I-NEXT:    seqz a0, a0
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -107,7 +107,7 @@ define i32 @fcmp_ogt(double %a, double %b) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __gtdf2@plt
+; RV32I-NEXT:    call __gtdf2
 ; RV32I-NEXT:    sgtz a0, a0
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -117,7 +117,7 @@ define i32 @fcmp_ogt(double %a, double %b) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __gtdf2@plt
+; RV64I-NEXT:    call __gtdf2
 ; RV64I-NEXT:    sgtz a0, a0
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -167,7 +167,7 @@ define i32 @fcmp_oge(double %a, double %b) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __gedf2@plt
+; RV32I-NEXT:    call __gedf2
 ; RV32I-NEXT:    slti a0, a0, 0
 ; RV32I-NEXT:    xori a0, a0, 1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -178,7 +178,7 @@ define i32 @fcmp_oge(double %a, double %b) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __gedf2@plt
+; RV64I-NEXT:    call __gedf2
 ; RV64I-NEXT:    slti a0, a0, 0
 ; RV64I-NEXT:    xori a0, a0, 1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -229,7 +229,7 @@ define i32 @fcmp_olt(double %a, double %b) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __ltdf2@plt
+; RV32I-NEXT:    call __ltdf2
 ; RV32I-NEXT:    slti a0, a0, 0
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -239,7 +239,7 @@ define i32 @fcmp_olt(double %a, double %b) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __ltdf2@plt
+; RV64I-NEXT:    call __ltdf2
 ; RV64I-NEXT:    slti a0, a0, 0
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -289,7 +289,7 @@ define i32 @fcmp_ole(double %a, double %b) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __ledf2@plt
+; RV32I-NEXT:    call __ledf2
 ; RV32I-NEXT:    slti a0, a0, 1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -299,7 +299,7 @@ define i32 @fcmp_ole(double %a, double %b) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __ledf2@plt
+; RV64I-NEXT:    call __ledf2
 ; RV64I-NEXT:    slti a0, a0, 1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -375,13 +375,13 @@ define i32 @fcmp_one(double %a, double %b) nounwind strictfp {
 ; RV32I-NEXT:    mv s1, a2
 ; RV32I-NEXT:    mv s2, a1
 ; RV32I-NEXT:    mv s3, a0
-; RV32I-NEXT:    call __eqdf2@plt
+; RV32I-NEXT:    call __eqdf2
 ; RV32I-NEXT:    snez s4, a0
 ; RV32I-NEXT:    mv a0, s3
 ; RV32I-NEXT:    mv a1, s2
 ; RV32I-NEXT:    mv a2, s1
 ; RV32I-NEXT:    mv a3, s0
-; RV32I-NEXT:    call __unorddf2@plt
+; RV32I-NEXT:    call __unorddf2
 ; RV32I-NEXT:    seqz a0, a0
 ; RV32I-NEXT:    and a0, a0, s4
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
@@ -402,11 +402,11 @@ define i32 @fcmp_one(double %a, double %b) nounwind strictfp {
 ; RV64I-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a1
 ; RV64I-NEXT:    mv s1, a0
-; RV64I-NEXT:    call __eqdf2@plt
+; RV64I-NEXT:    call __eqdf2
 ; RV64I-NEXT:    snez s2, a0
 ; RV64I-NEXT:    mv a0, s1
 ; RV64I-NEXT:    mv a1, s0
-; RV64I-NEXT:    call __unorddf2@plt
+; RV64I-NEXT:    call __unorddf2
 ; RV64I-NEXT:    seqz a0, a0
 ; RV64I-NEXT:    and a0, a0, s2
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
@@ -456,7 +456,7 @@ define i32 @fcmp_ord(double %a, double %b) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __unorddf2@plt
+; RV32I-NEXT:    call __unorddf2
 ; RV32I-NEXT:    seqz a0, a0
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -466,7 +466,7 @@ define i32 @fcmp_ord(double %a, double %b) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __unorddf2@plt
+; RV64I-NEXT:    call __unorddf2
 ; RV64I-NEXT:    seqz a0, a0
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -545,13 +545,13 @@ define i32 @fcmp_ueq(double %a, double %b) nounwind strictfp {
 ; RV32I-NEXT:    mv s1, a2
 ; RV32I-NEXT:    mv s2, a1
 ; RV32I-NEXT:    mv s3, a0
-; RV32I-NEXT:    call __eqdf2@plt
+; RV32I-NEXT:    call __eqdf2
 ; RV32I-NEXT:    seqz s4, a0
 ; RV32I-NEXT:    mv a0, s3
 ; RV32I-NEXT:    mv a1, s2
 ; RV32I-NEXT:    mv a2, s1
 ; RV32I-NEXT:    mv a3, s0
-; RV32I-NEXT:    call __unorddf2@plt
+; RV32I-NEXT:    call __unorddf2
 ; RV32I-NEXT:    snez a0, a0
 ; RV32I-NEXT:    or a0, a0, s4
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
@@ -572,11 +572,11 @@ define i32 @fcmp_ueq(double %a, double %b) nounwind strictfp {
 ; RV64I-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a1
 ; RV64I-NEXT:    mv s1, a0
-; RV64I-NEXT:    call __eqdf2@plt
+; RV64I-NEXT:    call __eqdf2
 ; RV64I-NEXT:    seqz s2, a0
 ; RV64I-NEXT:    mv a0, s1
 ; RV64I-NEXT:    mv a1, s0
-; RV64I-NEXT:    call __unorddf2@plt
+; RV64I-NEXT:    call __unorddf2
 ; RV64I-NEXT:    snez a0, a0
 ; RV64I-NEXT:    or a0, a0, s2
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
@@ -633,7 +633,7 @@ define i32 @fcmp_ugt(double %a, double %b) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __ledf2@plt
+; RV32I-NEXT:    call __ledf2
 ; RV32I-NEXT:    sgtz a0, a0
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -643,7 +643,7 @@ define i32 @fcmp_ugt(double %a, double %b) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __ledf2@plt
+; RV64I-NEXT:    call __ledf2
 ; RV64I-NEXT:    sgtz a0, a0
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -696,7 +696,7 @@ define i32 @fcmp_uge(double %a, double %b) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __ltdf2@plt
+; RV32I-NEXT:    call __ltdf2
 ; RV32I-NEXT:    slti a0, a0, 0
 ; RV32I-NEXT:    xori a0, a0, 1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -707,7 +707,7 @@ define i32 @fcmp_uge(double %a, double %b) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __ltdf2@plt
+; RV64I-NEXT:    call __ltdf2
 ; RV64I-NEXT:    slti a0, a0, 0
 ; RV64I-NEXT:    xori a0, a0, 1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -761,7 +761,7 @@ define i32 @fcmp_ult(double %a, double %b) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __gedf2@plt
+; RV32I-NEXT:    call __gedf2
 ; RV32I-NEXT:    slti a0, a0, 0
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -771,7 +771,7 @@ define i32 @fcmp_ult(double %a, double %b) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __gedf2@plt
+; RV64I-NEXT:    call __gedf2
 ; RV64I-NEXT:    slti a0, a0, 0
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -824,7 +824,7 @@ define i32 @fcmp_ule(double %a, double %b) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __gtdf2@plt
+; RV32I-NEXT:    call __gtdf2
 ; RV32I-NEXT:    slti a0, a0, 1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -834,7 +834,7 @@ define i32 @fcmp_ule(double %a, double %b) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __gtdf2@plt
+; RV64I-NEXT:    call __gtdf2
 ; RV64I-NEXT:    slti a0, a0, 1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -877,7 +877,7 @@ define i32 @fcmp_une(double %a, double %b) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __nedf2@plt
+; RV32I-NEXT:    call __nedf2
 ; RV32I-NEXT:    snez a0, a0
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -887,7 +887,7 @@ define i32 @fcmp_une(double %a, double %b) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __nedf2@plt
+; RV64I-NEXT:    call __nedf2
 ; RV64I-NEXT:    snez a0, a0
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -936,7 +936,7 @@ define i32 @fcmp_uno(double %a, double %b) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __unorddf2@plt
+; RV32I-NEXT:    call __unorddf2
 ; RV32I-NEXT:    snez a0, a0
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -946,7 +946,7 @@ define i32 @fcmp_uno(double %a, double %b) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __unorddf2@plt
+; RV64I-NEXT:    call __unorddf2
 ; RV64I-NEXT:    snez a0, a0
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -992,7 +992,7 @@ define i32 @fcmps_oeq(double %a, double %b) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __eqdf2@plt
+; RV32I-NEXT:    call __eqdf2
 ; RV32I-NEXT:    seqz a0, a0
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -1002,7 +1002,7 @@ define i32 @fcmps_oeq(double %a, double %b) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __eqdf2@plt
+; RV64I-NEXT:    call __eqdf2
 ; RV64I-NEXT:    seqz a0, a0
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -1043,7 +1043,7 @@ define i32 @fcmps_ogt(double %a, double %b) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __gtdf2@plt
+; RV32I-NEXT:    call __gtdf2
 ; RV32I-NEXT:    sgtz a0, a0
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -1053,7 +1053,7 @@ define i32 @fcmps_ogt(double %a, double %b) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __gtdf2@plt
+; RV64I-NEXT:    call __gtdf2
 ; RV64I-NEXT:    sgtz a0, a0
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -1093,7 +1093,7 @@ define i32 @fcmps_oge(double %a, double %b) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __gedf2@plt
+; RV32I-NEXT:    call __gedf2
 ; RV32I-NEXT:    slti a0, a0, 0
 ; RV32I-NEXT:    xori a0, a0, 1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -1104,7 +1104,7 @@ define i32 @fcmps_oge(double %a, double %b) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __gedf2@plt
+; RV64I-NEXT:    call __gedf2
 ; RV64I-NEXT:    slti a0, a0, 0
 ; RV64I-NEXT:    xori a0, a0, 1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -1145,7 +1145,7 @@ define i32 @fcmps_olt(double %a, double %b) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __ltdf2@plt
+; RV32I-NEXT:    call __ltdf2
 ; RV32I-NEXT:    slti a0, a0, 0
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -1155,7 +1155,7 @@ define i32 @fcmps_olt(double %a, double %b) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __ltdf2@plt
+; RV64I-NEXT:    call __ltdf2
 ; RV64I-NEXT:    slti a0, a0, 0
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -1195,7 +1195,7 @@ define i32 @fcmps_ole(double %a, double %b) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __ledf2@plt
+; RV32I-NEXT:    call __ledf2
 ; RV32I-NEXT:    slti a0, a0, 1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -1205,7 +1205,7 @@ define i32 @fcmps_ole(double %a, double %b) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __ledf2@plt
+; RV64I-NEXT:    call __ledf2
 ; RV64I-NEXT:    slti a0, a0, 1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -1260,13 +1260,13 @@ define i32 @fcmps_one(double %a, double %b) nounwind strictfp {
 ; RV32I-NEXT:    mv s1, a2
 ; RV32I-NEXT:    mv s2, a1
 ; RV32I-NEXT:    mv s3, a0
-; RV32I-NEXT:    call __eqdf2@plt
+; RV32I-NEXT:    call __eqdf2
 ; RV32I-NEXT:    snez s4, a0
 ; RV32I-NEXT:    mv a0, s3
 ; RV32I-NEXT:    mv a1, s2
 ; RV32I-NEXT:    mv a2, s1
 ; RV32I-NEXT:    mv a3, s0
-; RV32I-NEXT:    call __unorddf2@plt
+; RV32I-NEXT:    call __unorddf2
 ; RV32I-NEXT:    seqz a0, a0
 ; RV32I-NEXT:    and a0, a0, s4
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
@@ -1287,11 +1287,11 @@ define i32 @fcmps_one(double %a, double %b) nounwind strictfp {
 ; RV64I-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a1
 ; RV64I-NEXT:    mv s1, a0
-; RV64I-NEXT:    call __eqdf2@plt
+; RV64I-NEXT:    call __eqdf2
 ; RV64I-NEXT:    snez s2, a0
 ; RV64I-NEXT:    mv a0, s1
 ; RV64I-NEXT:    mv a1, s0
-; RV64I-NEXT:    call __unorddf2@plt
+; RV64I-NEXT:    call __unorddf2
 ; RV64I-NEXT:    seqz a0, a0
 ; RV64I-NEXT:    and a0, a0, s2
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
@@ -1341,7 +1341,7 @@ define i32 @fcmps_ord(double %a, double %b) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __unorddf2@plt
+; RV32I-NEXT:    call __unorddf2
 ; RV32I-NEXT:    seqz a0, a0
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -1351,7 +1351,7 @@ define i32 @fcmps_ord(double %a, double %b) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __unorddf2@plt
+; RV64I-NEXT:    call __unorddf2
 ; RV64I-NEXT:    seqz a0, a0
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -1409,13 +1409,13 @@ define i32 @fcmps_ueq(double %a, double %b) nounwind strictfp {
 ; RV32I-NEXT:    mv s1, a2
 ; RV32I-NEXT:    mv s2, a1
 ; RV32I-NEXT:    mv s3, a0
-; RV32I-NEXT:    call __eqdf2@plt
+; RV32I-NEXT:    call __eqdf2
 ; RV32I-NEXT:    seqz s4, a0
 ; RV32I-NEXT:    mv a0, s3
 ; RV32I-NEXT:    mv a1, s2
 ; RV32I-NEXT:    mv a2, s1
 ; RV32I-NEXT:    mv a3, s0
-; RV32I-NEXT:    call __unorddf2@plt
+; RV32I-NEXT:    call __unorddf2
 ; RV32I-NEXT:    snez a0, a0
 ; RV32I-NEXT:    or a0, a0, s4
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
@@ -1436,11 +1436,11 @@ define i32 @fcmps_ueq(double %a, double %b) nounwind strictfp {
 ; RV64I-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a1
 ; RV64I-NEXT:    mv s1, a0
-; RV64I-NEXT:    call __eqdf2@plt
+; RV64I-NEXT:    call __eqdf2
 ; RV64I-NEXT:    seqz s2, a0
 ; RV64I-NEXT:    mv a0, s1
 ; RV64I-NEXT:    mv a1, s0
-; RV64I-NEXT:    call __unorddf2@plt
+; RV64I-NEXT:    call __unorddf2
 ; RV64I-NEXT:    snez a0, a0
 ; RV64I-NEXT:    or a0, a0, s2
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
@@ -1487,7 +1487,7 @@ define i32 @fcmps_ugt(double %a, double %b) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __ledf2@plt
+; RV32I-NEXT:    call __ledf2
 ; RV32I-NEXT:    sgtz a0, a0
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -1497,7 +1497,7 @@ define i32 @fcmps_ugt(double %a, double %b) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __ledf2@plt
+; RV64I-NEXT:    call __ledf2
 ; RV64I-NEXT:    sgtz a0, a0
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -1540,7 +1540,7 @@ define i32 @fcmps_uge(double %a, double %b) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __ltdf2@plt
+; RV32I-NEXT:    call __ltdf2
 ; RV32I-NEXT:    slti a0, a0, 0
 ; RV32I-NEXT:    xori a0, a0, 1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -1551,7 +1551,7 @@ define i32 @fcmps_uge(double %a, double %b) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __ltdf2@plt
+; RV64I-NEXT:    call __ltdf2
 ; RV64I-NEXT:    slti a0, a0, 0
 ; RV64I-NEXT:    xori a0, a0, 1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -1595,7 +1595,7 @@ define i32 @fcmps_ult(double %a, double %b) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __gedf2@plt
+; RV32I-NEXT:    call __gedf2
 ; RV32I-NEXT:    slti a0, a0, 0
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -1605,7 +1605,7 @@ define i32 @fcmps_ult(double %a, double %b) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __gedf2@plt
+; RV64I-NEXT:    call __gedf2
 ; RV64I-NEXT:    slti a0, a0, 0
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -1648,7 +1648,7 @@ define i32 @fcmps_ule(double %a, double %b) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __gtdf2@plt
+; RV32I-NEXT:    call __gtdf2
 ; RV32I-NEXT:    slti a0, a0, 1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -1658,7 +1658,7 @@ define i32 @fcmps_ule(double %a, double %b) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __gtdf2@plt
+; RV64I-NEXT:    call __gtdf2
 ; RV64I-NEXT:    slti a0, a0, 1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -1707,7 +1707,7 @@ define i32 @fcmps_une(double %a, double %b) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __nedf2@plt
+; RV32I-NEXT:    call __nedf2
 ; RV32I-NEXT:    snez a0, a0
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -1717,7 +1717,7 @@ define i32 @fcmps_une(double %a, double %b) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __nedf2@plt
+; RV64I-NEXT:    call __nedf2
 ; RV64I-NEXT:    snez a0, a0
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -1766,7 +1766,7 @@ define i32 @fcmps_uno(double %a, double %b) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __unorddf2@plt
+; RV32I-NEXT:    call __unorddf2
 ; RV32I-NEXT:    snez a0, a0
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -1776,7 +1776,7 @@ define i32 @fcmps_uno(double %a, double %b) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __unorddf2@plt
+; RV64I-NEXT:    call __unorddf2
 ; RV64I-NEXT:    snez a0, a0
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/double-fcmp.ll b/llvm/test/CodeGen/RISCV/double-fcmp.ll
index b1b3c46c4bf51c..64a154f450f183 100644
--- a/llvm/test/CodeGen/RISCV/double-fcmp.ll
+++ b/llvm/test/CodeGen/RISCV/double-fcmp.ll
@@ -67,7 +67,7 @@ define i32 @fcmp_oeq(double %a, double %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __eqdf2@plt
+; RV32I-NEXT:    call __eqdf2
 ; RV32I-NEXT:    seqz a0, a0
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -77,7 +77,7 @@ define i32 @fcmp_oeq(double %a, double %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __eqdf2@plt
+; RV64I-NEXT:    call __eqdf2
 ; RV64I-NEXT:    seqz a0, a0
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -117,7 +117,7 @@ define i32 @fcmp_ogt(double %a, double %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __gtdf2@plt
+; RV32I-NEXT:    call __gtdf2
 ; RV32I-NEXT:    sgtz a0, a0
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -127,7 +127,7 @@ define i32 @fcmp_ogt(double %a, double %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __gtdf2@plt
+; RV64I-NEXT:    call __gtdf2
 ; RV64I-NEXT:    sgtz a0, a0
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -167,7 +167,7 @@ define i32 @fcmp_oge(double %a, double %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __gedf2@plt
+; RV32I-NEXT:    call __gedf2
 ; RV32I-NEXT:    slti a0, a0, 0
 ; RV32I-NEXT:    xori a0, a0, 1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -178,7 +178,7 @@ define i32 @fcmp_oge(double %a, double %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __gedf2@plt
+; RV64I-NEXT:    call __gedf2
 ; RV64I-NEXT:    slti a0, a0, 0
 ; RV64I-NEXT:    xori a0, a0, 1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -219,7 +219,7 @@ define i32 @fcmp_olt(double %a, double %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __ltdf2@plt
+; RV32I-NEXT:    call __ltdf2
 ; RV32I-NEXT:    slti a0, a0, 0
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -229,7 +229,7 @@ define i32 @fcmp_olt(double %a, double %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __ltdf2@plt
+; RV64I-NEXT:    call __ltdf2
 ; RV64I-NEXT:    slti a0, a0, 0
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -269,7 +269,7 @@ define i32 @fcmp_ole(double %a, double %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __ledf2@plt
+; RV32I-NEXT:    call __ledf2
 ; RV32I-NEXT:    slti a0, a0, 1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -279,7 +279,7 @@ define i32 @fcmp_ole(double %a, double %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __ledf2@plt
+; RV64I-NEXT:    call __ledf2
 ; RV64I-NEXT:    slti a0, a0, 1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -334,13 +334,13 @@ define i32 @fcmp_one(double %a, double %b) nounwind {
 ; RV32I-NEXT:    mv s1, a2
 ; RV32I-NEXT:    mv s2, a1
 ; RV32I-NEXT:    mv s3, a0
-; RV32I-NEXT:    call __eqdf2@plt
+; RV32I-NEXT:    call __eqdf2
 ; RV32I-NEXT:    snez s4, a0
 ; RV32I-NEXT:    mv a0, s3
 ; RV32I-NEXT:    mv a1, s2
 ; RV32I-NEXT:    mv a2, s1
 ; RV32I-NEXT:    mv a3, s0
-; RV32I-NEXT:    call __unorddf2@plt
+; RV32I-NEXT:    call __unorddf2
 ; RV32I-NEXT:    seqz a0, a0
 ; RV32I-NEXT:    and a0, a0, s4
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
@@ -361,11 +361,11 @@ define i32 @fcmp_one(double %a, double %b) nounwind {
 ; RV64I-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a1
 ; RV64I-NEXT:    mv s1, a0
-; RV64I-NEXT:    call __eqdf2@plt
+; RV64I-NEXT:    call __eqdf2
 ; RV64I-NEXT:    snez s2, a0
 ; RV64I-NEXT:    mv a0, s1
 ; RV64I-NEXT:    mv a1, s0
-; RV64I-NEXT:    call __unorddf2@plt
+; RV64I-NEXT:    call __unorddf2
 ; RV64I-NEXT:    seqz a0, a0
 ; RV64I-NEXT:    and a0, a0, s2
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
@@ -415,7 +415,7 @@ define i32 @fcmp_ord(double %a, double %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __unorddf2@plt
+; RV32I-NEXT:    call __unorddf2
 ; RV32I-NEXT:    seqz a0, a0
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -425,7 +425,7 @@ define i32 @fcmp_ord(double %a, double %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __unorddf2@plt
+; RV64I-NEXT:    call __unorddf2
 ; RV64I-NEXT:    seqz a0, a0
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -483,13 +483,13 @@ define i32 @fcmp_ueq(double %a, double %b) nounwind {
 ; RV32I-NEXT:    mv s1, a2
 ; RV32I-NEXT:    mv s2, a1
 ; RV32I-NEXT:    mv s3, a0
-; RV32I-NEXT:    call __eqdf2@plt
+; RV32I-NEXT:    call __eqdf2
 ; RV32I-NEXT:    seqz s4, a0
 ; RV32I-NEXT:    mv a0, s3
 ; RV32I-NEXT:    mv a1, s2
 ; RV32I-NEXT:    mv a2, s1
 ; RV32I-NEXT:    mv a3, s0
-; RV32I-NEXT:    call __unorddf2@plt
+; RV32I-NEXT:    call __unorddf2
 ; RV32I-NEXT:    snez a0, a0
 ; RV32I-NEXT:    or a0, a0, s4
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
@@ -510,11 +510,11 @@ define i32 @fcmp_ueq(double %a, double %b) nounwind {
 ; RV64I-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a1
 ; RV64I-NEXT:    mv s1, a0
-; RV64I-NEXT:    call __eqdf2@plt
+; RV64I-NEXT:    call __eqdf2
 ; RV64I-NEXT:    seqz s2, a0
 ; RV64I-NEXT:    mv a0, s1
 ; RV64I-NEXT:    mv a1, s0
-; RV64I-NEXT:    call __unorddf2@plt
+; RV64I-NEXT:    call __unorddf2
 ; RV64I-NEXT:    snez a0, a0
 ; RV64I-NEXT:    or a0, a0, s2
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
@@ -561,7 +561,7 @@ define i32 @fcmp_ugt(double %a, double %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __ledf2@plt
+; RV32I-NEXT:    call __ledf2
 ; RV32I-NEXT:    sgtz a0, a0
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -571,7 +571,7 @@ define i32 @fcmp_ugt(double %a, double %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __ledf2@plt
+; RV64I-NEXT:    call __ledf2
 ; RV64I-NEXT:    sgtz a0, a0
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -614,7 +614,7 @@ define i32 @fcmp_uge(double %a, double %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __ltdf2@plt
+; RV32I-NEXT:    call __ltdf2
 ; RV32I-NEXT:    slti a0, a0, 0
 ; RV32I-NEXT:    xori a0, a0, 1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -625,7 +625,7 @@ define i32 @fcmp_uge(double %a, double %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __ltdf2@plt
+; RV64I-NEXT:    call __ltdf2
 ; RV64I-NEXT:    slti a0, a0, 0
 ; RV64I-NEXT:    xori a0, a0, 1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -669,7 +669,7 @@ define i32 @fcmp_ult(double %a, double %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __gedf2@plt
+; RV32I-NEXT:    call __gedf2
 ; RV32I-NEXT:    slti a0, a0, 0
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -679,7 +679,7 @@ define i32 @fcmp_ult(double %a, double %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __gedf2@plt
+; RV64I-NEXT:    call __gedf2
 ; RV64I-NEXT:    slti a0, a0, 0
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -722,7 +722,7 @@ define i32 @fcmp_ule(double %a, double %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __gtdf2@plt
+; RV32I-NEXT:    call __gtdf2
 ; RV32I-NEXT:    slti a0, a0, 1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -732,7 +732,7 @@ define i32 @fcmp_ule(double %a, double %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __gtdf2@plt
+; RV64I-NEXT:    call __gtdf2
 ; RV64I-NEXT:    slti a0, a0, 1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -775,7 +775,7 @@ define i32 @fcmp_une(double %a, double %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __nedf2@plt
+; RV32I-NEXT:    call __nedf2
 ; RV32I-NEXT:    snez a0, a0
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -785,7 +785,7 @@ define i32 @fcmp_une(double %a, double %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __nedf2@plt
+; RV64I-NEXT:    call __nedf2
 ; RV64I-NEXT:    snez a0, a0
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -834,7 +834,7 @@ define i32 @fcmp_uno(double %a, double %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __unorddf2@plt
+; RV32I-NEXT:    call __unorddf2
 ; RV32I-NEXT:    snez a0, a0
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -844,7 +844,7 @@ define i32 @fcmp_uno(double %a, double %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __unorddf2@plt
+; RV64I-NEXT:    call __unorddf2
 ; RV64I-NEXT:    snez a0, a0
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/double-frem.ll b/llvm/test/CodeGen/RISCV/double-frem.ll
index 118ded45c0fc52..5303e84e5ded37 100644
--- a/llvm/test/CodeGen/RISCV/double-frem.ll
+++ b/llvm/test/CodeGen/RISCV/double-frem.ll
@@ -11,24 +11,24 @@
 define double @frem_f64(double %a, double %b) nounwind {
 ; RV32IFD-LABEL: frem_f64:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    tail fmod@plt
+; RV32IFD-NEXT:    tail fmod
 ;
 ; RV64IFD-LABEL: frem_f64:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    tail fmod@plt
+; RV64IFD-NEXT:    tail fmod
 ;
 ; RV32IZFINXZDINX-LABEL: frem_f64:
 ; RV32IZFINXZDINX:       # %bb.0:
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT:    call fmod@plt
+; RV32IZFINXZDINX-NEXT:    call fmod
 ; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINXZDINX-NEXT:    ret
 ;
 ; RV64IZFINXZDINX-LABEL: frem_f64:
 ; RV64IZFINXZDINX:       # %bb.0:
-; RV64IZFINXZDINX-NEXT:    tail fmod@plt
+; RV64IZFINXZDINX-NEXT:    tail fmod
   %1 = frem double %a, %b
   ret double %1
 }
diff --git a/llvm/test/CodeGen/RISCV/double-intrinsics-strict.ll b/llvm/test/CodeGen/RISCV/double-intrinsics-strict.ll
index da24e4b7b718a9..c574f64150a249 100644
--- a/llvm/test/CodeGen/RISCV/double-intrinsics-strict.ll
+++ b/llvm/test/CodeGen/RISCV/double-intrinsics-strict.ll
@@ -50,7 +50,7 @@ define double @sqrt_f64(double %a) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call sqrt@plt
+; RV32I-NEXT:    call sqrt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -59,7 +59,7 @@ define double @sqrt_f64(double %a) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call sqrt@plt
+; RV64I-NEXT:    call sqrt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -74,7 +74,7 @@ define double @powi_f64(double %a, i32 %b) nounwind strictfp {
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
 ; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    call __powidf2@plt
+; RV32IFD-NEXT:    call __powidf2
 ; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
@@ -84,7 +84,7 @@ define double @powi_f64(double %a, i32 %b) nounwind strictfp {
 ; RV64IFD-NEXT:    addi sp, sp, -16
 ; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IFD-NEXT:    sext.w a0, a0
-; RV64IFD-NEXT:    call __powidf2@plt
+; RV64IFD-NEXT:    call __powidf2
 ; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
@@ -93,7 +93,7 @@ define double @powi_f64(double %a, i32 %b) nounwind strictfp {
 ; RV32IZFINXZDINX:       # %bb.0:
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT:    call __powidf2@plt
+; RV32IZFINXZDINX-NEXT:    call __powidf2
 ; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINXZDINX-NEXT:    ret
@@ -103,7 +103,7 @@ define double @powi_f64(double %a, i32 %b) nounwind strictfp {
 ; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZFINXZDINX-NEXT:    sext.w a1, a1
-; RV64IZFINXZDINX-NEXT:    call __powidf2@plt
+; RV64IZFINXZDINX-NEXT:    call __powidf2
 ; RV64IZFINXZDINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV64IZFINXZDINX-NEXT:    ret
@@ -112,7 +112,7 @@ define double @powi_f64(double %a, i32 %b) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __powidf2@plt
+; RV32I-NEXT:    call __powidf2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -122,7 +122,7 @@ define double @powi_f64(double %a, i32 %b) nounwind strictfp {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sext.w a1, a1
-; RV64I-NEXT:    call __powidf2@plt
+; RV64I-NEXT:    call __powidf2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -137,7 +137,7 @@ define double @sin_f64(double %a) nounwind strictfp {
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
 ; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    call sin@plt
+; RV32IFD-NEXT:    call sin
 ; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
@@ -146,7 +146,7 @@ define double @sin_f64(double %a) nounwind strictfp {
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    addi sp, sp, -16
 ; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT:    call sin@plt
+; RV64IFD-NEXT:    call sin
 ; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
@@ -155,7 +155,7 @@ define double @sin_f64(double %a) nounwind strictfp {
 ; RV32IZFINXZDINX:       # %bb.0:
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT:    call sin@plt
+; RV32IZFINXZDINX-NEXT:    call sin
 ; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINXZDINX-NEXT:    ret
@@ -164,7 +164,7 @@ define double @sin_f64(double %a) nounwind strictfp {
 ; RV64IZFINXZDINX:       # %bb.0:
 ; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINXZDINX-NEXT:    call sin@plt
+; RV64IZFINXZDINX-NEXT:    call sin
 ; RV64IZFINXZDINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV64IZFINXZDINX-NEXT:    ret
@@ -173,7 +173,7 @@ define double @sin_f64(double %a) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call sin@plt
+; RV32I-NEXT:    call sin
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -182,7 +182,7 @@ define double @sin_f64(double %a) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call sin@plt
+; RV64I-NEXT:    call sin
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -197,7 +197,7 @@ define double @cos_f64(double %a) nounwind strictfp {
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
 ; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    call cos@plt
+; RV32IFD-NEXT:    call cos
 ; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
@@ -206,7 +206,7 @@ define double @cos_f64(double %a) nounwind strictfp {
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    addi sp, sp, -16
 ; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT:    call cos@plt
+; RV64IFD-NEXT:    call cos
 ; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
@@ -215,7 +215,7 @@ define double @cos_f64(double %a) nounwind strictfp {
 ; RV32IZFINXZDINX:       # %bb.0:
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT:    call cos@plt
+; RV32IZFINXZDINX-NEXT:    call cos
 ; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINXZDINX-NEXT:    ret
@@ -224,7 +224,7 @@ define double @cos_f64(double %a) nounwind strictfp {
 ; RV64IZFINXZDINX:       # %bb.0:
 ; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINXZDINX-NEXT:    call cos@plt
+; RV64IZFINXZDINX-NEXT:    call cos
 ; RV64IZFINXZDINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV64IZFINXZDINX-NEXT:    ret
@@ -233,7 +233,7 @@ define double @cos_f64(double %a) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call cos@plt
+; RV32I-NEXT:    call cos
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -242,7 +242,7 @@ define double @cos_f64(double %a) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call cos@plt
+; RV64I-NEXT:    call cos
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -259,10 +259,10 @@ define double @sincos_f64(double %a) nounwind strictfp {
 ; RV32IFD-NEXT:    fsd fs0, 16(sp) # 8-byte Folded Spill
 ; RV32IFD-NEXT:    fsd fs1, 8(sp) # 8-byte Folded Spill
 ; RV32IFD-NEXT:    fmv.d fs0, fa0
-; RV32IFD-NEXT:    call sin@plt
+; RV32IFD-NEXT:    call sin
 ; RV32IFD-NEXT:    fmv.d fs1, fa0
 ; RV32IFD-NEXT:    fmv.d fa0, fs0
-; RV32IFD-NEXT:    call cos@plt
+; RV32IFD-NEXT:    call cos
 ; RV32IFD-NEXT:    fadd.d fa0, fs1, fa0
 ; RV32IFD-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    fld fs0, 16(sp) # 8-byte Folded Reload
@@ -277,10 +277,10 @@ define double @sincos_f64(double %a) nounwind strictfp {
 ; RV64IFD-NEXT:    fsd fs0, 16(sp) # 8-byte Folded Spill
 ; RV64IFD-NEXT:    fsd fs1, 8(sp) # 8-byte Folded Spill
 ; RV64IFD-NEXT:    fmv.d fs0, fa0
-; RV64IFD-NEXT:    call sin@plt
+; RV64IFD-NEXT:    call sin
 ; RV64IFD-NEXT:    fmv.d fs1, fa0
 ; RV64IFD-NEXT:    fmv.d fa0, fs0
-; RV64IFD-NEXT:    call cos@plt
+; RV64IFD-NEXT:    call cos
 ; RV64IFD-NEXT:    fadd.d fa0, fs1, fa0
 ; RV64IFD-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    fld fs0, 16(sp) # 8-byte Folded Reload
@@ -298,14 +298,14 @@ define double @sincos_f64(double %a) nounwind strictfp {
 ; RV32IZFINXZDINX-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
 ; RV32IZFINXZDINX-NEXT:    mv s0, a1
 ; RV32IZFINXZDINX-NEXT:    mv s1, a0
-; RV32IZFINXZDINX-NEXT:    call sin@plt
+; RV32IZFINXZDINX-NEXT:    call sin
 ; RV32IZFINXZDINX-NEXT:    sw a0, 0(sp)
 ; RV32IZFINXZDINX-NEXT:    sw a1, 4(sp)
 ; RV32IZFINXZDINX-NEXT:    lw s2, 0(sp)
 ; RV32IZFINXZDINX-NEXT:    lw s3, 4(sp)
 ; RV32IZFINXZDINX-NEXT:    mv a0, s1
 ; RV32IZFINXZDINX-NEXT:    mv a1, s0
-; RV32IZFINXZDINX-NEXT:    call cos@plt
+; RV32IZFINXZDINX-NEXT:    call cos
 ; RV32IZFINXZDINX-NEXT:    sw a0, 0(sp)
 ; RV32IZFINXZDINX-NEXT:    sw a1, 4(sp)
 ; RV32IZFINXZDINX-NEXT:    lw a0, 0(sp)
@@ -330,10 +330,10 @@ define double @sincos_f64(double %a) nounwind strictfp {
 ; RV64IZFINXZDINX-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
 ; RV64IZFINXZDINX-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
 ; RV64IZFINXZDINX-NEXT:    mv s0, a0
-; RV64IZFINXZDINX-NEXT:    call sin@plt
+; RV64IZFINXZDINX-NEXT:    call sin
 ; RV64IZFINXZDINX-NEXT:    mv s1, a0
 ; RV64IZFINXZDINX-NEXT:    mv a0, s0
-; RV64IZFINXZDINX-NEXT:    call cos@plt
+; RV64IZFINXZDINX-NEXT:    call cos
 ; RV64IZFINXZDINX-NEXT:    fadd.d a0, s1, a0
 ; RV64IZFINXZDINX-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64IZFINXZDINX-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
@@ -351,17 +351,17 @@ define double @sincos_f64(double %a) nounwind strictfp {
 ; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a1
 ; RV32I-NEXT:    mv s1, a0
-; RV32I-NEXT:    call sin@plt
+; RV32I-NEXT:    call sin
 ; RV32I-NEXT:    mv s2, a0
 ; RV32I-NEXT:    mv s3, a1
 ; RV32I-NEXT:    mv a0, s1
 ; RV32I-NEXT:    mv a1, s0
-; RV32I-NEXT:    call cos@plt
+; RV32I-NEXT:    call cos
 ; RV32I-NEXT:    mv a2, a0
 ; RV32I-NEXT:    mv a3, a1
 ; RV32I-NEXT:    mv a0, s2
 ; RV32I-NEXT:    mv a1, s3
-; RV32I-NEXT:    call __adddf3@plt
+; RV32I-NEXT:    call __adddf3
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
@@ -377,13 +377,13 @@ define double @sincos_f64(double %a) nounwind strictfp {
 ; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
-; RV64I-NEXT:    call sin@plt
+; RV64I-NEXT:    call sin
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call cos@plt
+; RV64I-NEXT:    call cos
 ; RV64I-NEXT:    mv a1, a0
 ; RV64I-NEXT:    mv a0, s1
-; RV64I-NEXT:    call __adddf3@plt
+; RV64I-NEXT:    call __adddf3
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
@@ -402,7 +402,7 @@ define double @pow_f64(double %a, double %b) nounwind strictfp {
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
 ; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    call pow@plt
+; RV32IFD-NEXT:    call pow
 ; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
@@ -411,7 +411,7 @@ define double @pow_f64(double %a, double %b) nounwind strictfp {
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    addi sp, sp, -16
 ; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT:    call pow@plt
+; RV64IFD-NEXT:    call pow
 ; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
@@ -420,7 +420,7 @@ define double @pow_f64(double %a, double %b) nounwind strictfp {
 ; RV32IZFINXZDINX:       # %bb.0:
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT:    call pow@plt
+; RV32IZFINXZDINX-NEXT:    call pow
 ; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINXZDINX-NEXT:    ret
@@ -429,7 +429,7 @@ define double @pow_f64(double %a, double %b) nounwind strictfp {
 ; RV64IZFINXZDINX:       # %bb.0:
 ; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINXZDINX-NEXT:    call pow@plt
+; RV64IZFINXZDINX-NEXT:    call pow
 ; RV64IZFINXZDINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV64IZFINXZDINX-NEXT:    ret
@@ -438,7 +438,7 @@ define double @pow_f64(double %a, double %b) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call pow@plt
+; RV32I-NEXT:    call pow
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -447,7 +447,7 @@ define double @pow_f64(double %a, double %b) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call pow@plt
+; RV64I-NEXT:    call pow
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -462,7 +462,7 @@ define double @exp_f64(double %a) nounwind strictfp {
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
 ; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    call exp@plt
+; RV32IFD-NEXT:    call exp
 ; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
@@ -471,7 +471,7 @@ define double @exp_f64(double %a) nounwind strictfp {
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    addi sp, sp, -16
 ; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT:    call exp@plt
+; RV64IFD-NEXT:    call exp
 ; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
@@ -480,7 +480,7 @@ define double @exp_f64(double %a) nounwind strictfp {
 ; RV32IZFINXZDINX:       # %bb.0:
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT:    call exp@plt
+; RV32IZFINXZDINX-NEXT:    call exp
 ; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINXZDINX-NEXT:    ret
@@ -489,7 +489,7 @@ define double @exp_f64(double %a) nounwind strictfp {
 ; RV64IZFINXZDINX:       # %bb.0:
 ; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINXZDINX-NEXT:    call exp@plt
+; RV64IZFINXZDINX-NEXT:    call exp
 ; RV64IZFINXZDINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV64IZFINXZDINX-NEXT:    ret
@@ -498,7 +498,7 @@ define double @exp_f64(double %a) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call exp@plt
+; RV32I-NEXT:    call exp
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -507,7 +507,7 @@ define double @exp_f64(double %a) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call exp@plt
+; RV64I-NEXT:    call exp
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -522,7 +522,7 @@ define double @exp2_f64(double %a) nounwind strictfp {
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
 ; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    call exp2@plt
+; RV32IFD-NEXT:    call exp2
 ; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
@@ -531,7 +531,7 @@ define double @exp2_f64(double %a) nounwind strictfp {
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    addi sp, sp, -16
 ; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT:    call exp2@plt
+; RV64IFD-NEXT:    call exp2
 ; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
@@ -540,7 +540,7 @@ define double @exp2_f64(double %a) nounwind strictfp {
 ; RV32IZFINXZDINX:       # %bb.0:
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT:    call exp2@plt
+; RV32IZFINXZDINX-NEXT:    call exp2
 ; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINXZDINX-NEXT:    ret
@@ -549,7 +549,7 @@ define double @exp2_f64(double %a) nounwind strictfp {
 ; RV64IZFINXZDINX:       # %bb.0:
 ; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINXZDINX-NEXT:    call exp2@plt
+; RV64IZFINXZDINX-NEXT:    call exp2
 ; RV64IZFINXZDINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV64IZFINXZDINX-NEXT:    ret
@@ -558,7 +558,7 @@ define double @exp2_f64(double %a) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call exp2@plt
+; RV32I-NEXT:    call exp2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -567,7 +567,7 @@ define double @exp2_f64(double %a) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call exp2@plt
+; RV64I-NEXT:    call exp2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -582,7 +582,7 @@ define double @log_f64(double %a) nounwind strictfp {
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
 ; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    call log@plt
+; RV32IFD-NEXT:    call log
 ; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
@@ -591,7 +591,7 @@ define double @log_f64(double %a) nounwind strictfp {
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    addi sp, sp, -16
 ; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT:    call log@plt
+; RV64IFD-NEXT:    call log
 ; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
@@ -600,7 +600,7 @@ define double @log_f64(double %a) nounwind strictfp {
 ; RV32IZFINXZDINX:       # %bb.0:
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT:    call log@plt
+; RV32IZFINXZDINX-NEXT:    call log
 ; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINXZDINX-NEXT:    ret
@@ -609,7 +609,7 @@ define double @log_f64(double %a) nounwind strictfp {
 ; RV64IZFINXZDINX:       # %bb.0:
 ; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINXZDINX-NEXT:    call log@plt
+; RV64IZFINXZDINX-NEXT:    call log
 ; RV64IZFINXZDINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV64IZFINXZDINX-NEXT:    ret
@@ -618,7 +618,7 @@ define double @log_f64(double %a) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call log@plt
+; RV32I-NEXT:    call log
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -627,7 +627,7 @@ define double @log_f64(double %a) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call log@plt
+; RV64I-NEXT:    call log
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -642,7 +642,7 @@ define double @log10_f64(double %a) nounwind strictfp {
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
 ; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    call log10@plt
+; RV32IFD-NEXT:    call log10
 ; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
@@ -651,7 +651,7 @@ define double @log10_f64(double %a) nounwind strictfp {
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    addi sp, sp, -16
 ; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT:    call log10@plt
+; RV64IFD-NEXT:    call log10
 ; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
@@ -660,7 +660,7 @@ define double @log10_f64(double %a) nounwind strictfp {
 ; RV32IZFINXZDINX:       # %bb.0:
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT:    call log10@plt
+; RV32IZFINXZDINX-NEXT:    call log10
 ; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINXZDINX-NEXT:    ret
@@ -669,7 +669,7 @@ define double @log10_f64(double %a) nounwind strictfp {
 ; RV64IZFINXZDINX:       # %bb.0:
 ; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINXZDINX-NEXT:    call log10@plt
+; RV64IZFINXZDINX-NEXT:    call log10
 ; RV64IZFINXZDINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV64IZFINXZDINX-NEXT:    ret
@@ -678,7 +678,7 @@ define double @log10_f64(double %a) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call log10@plt
+; RV32I-NEXT:    call log10
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -687,7 +687,7 @@ define double @log10_f64(double %a) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call log10@plt
+; RV64I-NEXT:    call log10
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -702,7 +702,7 @@ define double @log2_f64(double %a) nounwind strictfp {
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
 ; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    call log2@plt
+; RV32IFD-NEXT:    call log2
 ; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
@@ -711,7 +711,7 @@ define double @log2_f64(double %a) nounwind strictfp {
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    addi sp, sp, -16
 ; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT:    call log2@plt
+; RV64IFD-NEXT:    call log2
 ; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
@@ -720,7 +720,7 @@ define double @log2_f64(double %a) nounwind strictfp {
 ; RV32IZFINXZDINX:       # %bb.0:
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT:    call log2@plt
+; RV32IZFINXZDINX-NEXT:    call log2
 ; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINXZDINX-NEXT:    ret
@@ -729,7 +729,7 @@ define double @log2_f64(double %a) nounwind strictfp {
 ; RV64IZFINXZDINX:       # %bb.0:
 ; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINXZDINX-NEXT:    call log2@plt
+; RV64IZFINXZDINX-NEXT:    call log2
 ; RV64IZFINXZDINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV64IZFINXZDINX-NEXT:    ret
@@ -738,7 +738,7 @@ define double @log2_f64(double %a) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call log2@plt
+; RV32I-NEXT:    call log2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -747,7 +747,7 @@ define double @log2_f64(double %a) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call log2@plt
+; RV64I-NEXT:    call log2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -795,7 +795,7 @@ define double @fma_f64(double %a, double %b, double %c) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call fma@plt
+; RV32I-NEXT:    call fma
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -804,7 +804,7 @@ define double @fma_f64(double %a, double %b, double %c) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call fma@plt
+; RV64I-NEXT:    call fma
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -856,10 +856,10 @@ define double @fmuladd_f64(double %a, double %b, double %c) nounwind strictfp {
 ; RV32I-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a5
 ; RV32I-NEXT:    mv s1, a4
-; RV32I-NEXT:    call __muldf3@plt
+; RV32I-NEXT:    call __muldf3
 ; RV32I-NEXT:    mv a2, s1
 ; RV32I-NEXT:    mv a3, s0
-; RV32I-NEXT:    call __adddf3@plt
+; RV32I-NEXT:    call __adddf3
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
@@ -872,9 +872,9 @@ define double @fmuladd_f64(double %a, double %b, double %c) nounwind strictfp {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a2
-; RV64I-NEXT:    call __muldf3@plt
+; RV64I-NEXT:    call __muldf3
 ; RV64I-NEXT:    mv a1, s0
-; RV64I-NEXT:    call __adddf3@plt
+; RV64I-NEXT:    call __adddf3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -890,7 +890,7 @@ define double @minnum_f64(double %a, double %b) nounwind strictfp {
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
 ; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    call fmin@plt
+; RV32IFD-NEXT:    call fmin
 ; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
@@ -899,7 +899,7 @@ define double @minnum_f64(double %a, double %b) nounwind strictfp {
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    addi sp, sp, -16
 ; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT:    call fmin@plt
+; RV64IFD-NEXT:    call fmin
 ; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
@@ -908,7 +908,7 @@ define double @minnum_f64(double %a, double %b) nounwind strictfp {
 ; RV32IZFINXZDINX:       # %bb.0:
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT:    call fmin@plt
+; RV32IZFINXZDINX-NEXT:    call fmin
 ; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINXZDINX-NEXT:    ret
@@ -917,7 +917,7 @@ define double @minnum_f64(double %a, double %b) nounwind strictfp {
 ; RV64IZFINXZDINX:       # %bb.0:
 ; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINXZDINX-NEXT:    call fmin@plt
+; RV64IZFINXZDINX-NEXT:    call fmin
 ; RV64IZFINXZDINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV64IZFINXZDINX-NEXT:    ret
@@ -926,7 +926,7 @@ define double @minnum_f64(double %a, double %b) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call fmin@plt
+; RV32I-NEXT:    call fmin
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -935,7 +935,7 @@ define double @minnum_f64(double %a, double %b) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call fmin@plt
+; RV64I-NEXT:    call fmin
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -950,7 +950,7 @@ define double @maxnum_f64(double %a, double %b) nounwind strictfp {
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
 ; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    call fmax@plt
+; RV32IFD-NEXT:    call fmax
 ; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
@@ -959,7 +959,7 @@ define double @maxnum_f64(double %a, double %b) nounwind strictfp {
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    addi sp, sp, -16
 ; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT:    call fmax@plt
+; RV64IFD-NEXT:    call fmax
 ; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
@@ -968,7 +968,7 @@ define double @maxnum_f64(double %a, double %b) nounwind strictfp {
 ; RV32IZFINXZDINX:       # %bb.0:
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT:    call fmax@plt
+; RV32IZFINXZDINX-NEXT:    call fmax
 ; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINXZDINX-NEXT:    ret
@@ -977,7 +977,7 @@ define double @maxnum_f64(double %a, double %b) nounwind strictfp {
 ; RV64IZFINXZDINX:       # %bb.0:
 ; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINXZDINX-NEXT:    call fmax@plt
+; RV64IZFINXZDINX-NEXT:    call fmax
 ; RV64IZFINXZDINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV64IZFINXZDINX-NEXT:    ret
@@ -986,7 +986,7 @@ define double @maxnum_f64(double %a, double %b) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call fmax@plt
+; RV32I-NEXT:    call fmax
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -995,7 +995,7 @@ define double @maxnum_f64(double %a, double %b) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call fmax@plt
+; RV64I-NEXT:    call fmax
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1027,7 +1027,7 @@ define double @floor_f64(double %a) nounwind strictfp {
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
 ; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    call floor@plt
+; RV32IFD-NEXT:    call floor
 ; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
@@ -1036,7 +1036,7 @@ define double @floor_f64(double %a) nounwind strictfp {
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    addi sp, sp, -16
 ; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT:    call floor@plt
+; RV64IFD-NEXT:    call floor
 ; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
@@ -1045,7 +1045,7 @@ define double @floor_f64(double %a) nounwind strictfp {
 ; RV32IZFINXZDINX:       # %bb.0:
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT:    call floor@plt
+; RV32IZFINXZDINX-NEXT:    call floor
 ; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINXZDINX-NEXT:    ret
@@ -1054,7 +1054,7 @@ define double @floor_f64(double %a) nounwind strictfp {
 ; RV64IZFINXZDINX:       # %bb.0:
 ; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINXZDINX-NEXT:    call floor@plt
+; RV64IZFINXZDINX-NEXT:    call floor
 ; RV64IZFINXZDINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV64IZFINXZDINX-NEXT:    ret
@@ -1063,7 +1063,7 @@ define double @floor_f64(double %a) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call floor@plt
+; RV32I-NEXT:    call floor
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1072,7 +1072,7 @@ define double @floor_f64(double %a) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call floor@plt
+; RV64I-NEXT:    call floor
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1087,7 +1087,7 @@ define double @ceil_f64(double %a) nounwind strictfp {
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
 ; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    call ceil@plt
+; RV32IFD-NEXT:    call ceil
 ; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
@@ -1096,7 +1096,7 @@ define double @ceil_f64(double %a) nounwind strictfp {
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    addi sp, sp, -16
 ; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT:    call ceil@plt
+; RV64IFD-NEXT:    call ceil
 ; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
@@ -1105,7 +1105,7 @@ define double @ceil_f64(double %a) nounwind strictfp {
 ; RV32IZFINXZDINX:       # %bb.0:
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT:    call ceil@plt
+; RV32IZFINXZDINX-NEXT:    call ceil
 ; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINXZDINX-NEXT:    ret
@@ -1114,7 +1114,7 @@ define double @ceil_f64(double %a) nounwind strictfp {
 ; RV64IZFINXZDINX:       # %bb.0:
 ; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINXZDINX-NEXT:    call ceil@plt
+; RV64IZFINXZDINX-NEXT:    call ceil
 ; RV64IZFINXZDINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV64IZFINXZDINX-NEXT:    ret
@@ -1123,7 +1123,7 @@ define double @ceil_f64(double %a) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call ceil@plt
+; RV32I-NEXT:    call ceil
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1132,7 +1132,7 @@ define double @ceil_f64(double %a) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call ceil@plt
+; RV64I-NEXT:    call ceil
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1147,7 +1147,7 @@ define double @trunc_f64(double %a) nounwind strictfp {
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
 ; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    call trunc@plt
+; RV32IFD-NEXT:    call trunc
 ; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
@@ -1156,7 +1156,7 @@ define double @trunc_f64(double %a) nounwind strictfp {
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    addi sp, sp, -16
 ; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT:    call trunc@plt
+; RV64IFD-NEXT:    call trunc
 ; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
@@ -1165,7 +1165,7 @@ define double @trunc_f64(double %a) nounwind strictfp {
 ; RV32IZFINXZDINX:       # %bb.0:
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT:    call trunc@plt
+; RV32IZFINXZDINX-NEXT:    call trunc
 ; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINXZDINX-NEXT:    ret
@@ -1174,7 +1174,7 @@ define double @trunc_f64(double %a) nounwind strictfp {
 ; RV64IZFINXZDINX:       # %bb.0:
 ; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINXZDINX-NEXT:    call trunc@plt
+; RV64IZFINXZDINX-NEXT:    call trunc
 ; RV64IZFINXZDINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV64IZFINXZDINX-NEXT:    ret
@@ -1183,7 +1183,7 @@ define double @trunc_f64(double %a) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call trunc@plt
+; RV32I-NEXT:    call trunc
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1192,7 +1192,7 @@ define double @trunc_f64(double %a) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call trunc@plt
+; RV64I-NEXT:    call trunc
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1207,7 +1207,7 @@ define double @rint_f64(double %a) nounwind strictfp {
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
 ; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    call rint@plt
+; RV32IFD-NEXT:    call rint
 ; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
@@ -1216,7 +1216,7 @@ define double @rint_f64(double %a) nounwind strictfp {
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    addi sp, sp, -16
 ; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT:    call rint@plt
+; RV64IFD-NEXT:    call rint
 ; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
@@ -1225,7 +1225,7 @@ define double @rint_f64(double %a) nounwind strictfp {
 ; RV32IZFINXZDINX:       # %bb.0:
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT:    call rint@plt
+; RV32IZFINXZDINX-NEXT:    call rint
 ; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINXZDINX-NEXT:    ret
@@ -1234,7 +1234,7 @@ define double @rint_f64(double %a) nounwind strictfp {
 ; RV64IZFINXZDINX:       # %bb.0:
 ; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINXZDINX-NEXT:    call rint@plt
+; RV64IZFINXZDINX-NEXT:    call rint
 ; RV64IZFINXZDINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV64IZFINXZDINX-NEXT:    ret
@@ -1243,7 +1243,7 @@ define double @rint_f64(double %a) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call rint@plt
+; RV32I-NEXT:    call rint
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1252,7 +1252,7 @@ define double @rint_f64(double %a) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call rint@plt
+; RV64I-NEXT:    call rint
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1267,7 +1267,7 @@ define double @nearbyint_f64(double %a) nounwind strictfp {
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
 ; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    call nearbyint@plt
+; RV32IFD-NEXT:    call nearbyint
 ; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
@@ -1276,7 +1276,7 @@ define double @nearbyint_f64(double %a) nounwind strictfp {
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    addi sp, sp, -16
 ; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT:    call nearbyint@plt
+; RV64IFD-NEXT:    call nearbyint
 ; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
@@ -1285,7 +1285,7 @@ define double @nearbyint_f64(double %a) nounwind strictfp {
 ; RV32IZFINXZDINX:       # %bb.0:
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT:    call nearbyint@plt
+; RV32IZFINXZDINX-NEXT:    call nearbyint
 ; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINXZDINX-NEXT:    ret
@@ -1294,7 +1294,7 @@ define double @nearbyint_f64(double %a) nounwind strictfp {
 ; RV64IZFINXZDINX:       # %bb.0:
 ; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINXZDINX-NEXT:    call nearbyint@plt
+; RV64IZFINXZDINX-NEXT:    call nearbyint
 ; RV64IZFINXZDINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV64IZFINXZDINX-NEXT:    ret
@@ -1303,7 +1303,7 @@ define double @nearbyint_f64(double %a) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call nearbyint@plt
+; RV32I-NEXT:    call nearbyint
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1312,7 +1312,7 @@ define double @nearbyint_f64(double %a) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call nearbyint@plt
+; RV64I-NEXT:    call nearbyint
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1327,7 +1327,7 @@ define double @round_f64(double %a) nounwind strictfp {
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
 ; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    call round@plt
+; RV32IFD-NEXT:    call round
 ; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
@@ -1336,7 +1336,7 @@ define double @round_f64(double %a) nounwind strictfp {
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    addi sp, sp, -16
 ; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT:    call round@plt
+; RV64IFD-NEXT:    call round
 ; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
@@ -1345,7 +1345,7 @@ define double @round_f64(double %a) nounwind strictfp {
 ; RV32IZFINXZDINX:       # %bb.0:
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT:    call round@plt
+; RV32IZFINXZDINX-NEXT:    call round
 ; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINXZDINX-NEXT:    ret
@@ -1354,7 +1354,7 @@ define double @round_f64(double %a) nounwind strictfp {
 ; RV64IZFINXZDINX:       # %bb.0:
 ; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINXZDINX-NEXT:    call round@plt
+; RV64IZFINXZDINX-NEXT:    call round
 ; RV64IZFINXZDINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV64IZFINXZDINX-NEXT:    ret
@@ -1363,7 +1363,7 @@ define double @round_f64(double %a) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call round@plt
+; RV32I-NEXT:    call round
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1372,7 +1372,7 @@ define double @round_f64(double %a) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call round@plt
+; RV64I-NEXT:    call round
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1387,7 +1387,7 @@ define double @roundeven_f64(double %a) nounwind strictfp {
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
 ; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    call roundeven@plt
+; RV32IFD-NEXT:    call roundeven
 ; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
@@ -1396,7 +1396,7 @@ define double @roundeven_f64(double %a) nounwind strictfp {
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    addi sp, sp, -16
 ; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT:    call roundeven@plt
+; RV64IFD-NEXT:    call roundeven
 ; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
@@ -1405,7 +1405,7 @@ define double @roundeven_f64(double %a) nounwind strictfp {
 ; RV32IZFINXZDINX:       # %bb.0:
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT:    call roundeven@plt
+; RV32IZFINXZDINX-NEXT:    call roundeven
 ; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINXZDINX-NEXT:    ret
@@ -1414,7 +1414,7 @@ define double @roundeven_f64(double %a) nounwind strictfp {
 ; RV64IZFINXZDINX:       # %bb.0:
 ; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINXZDINX-NEXT:    call roundeven@plt
+; RV64IZFINXZDINX-NEXT:    call roundeven
 ; RV64IZFINXZDINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV64IZFINXZDINX-NEXT:    ret
@@ -1423,7 +1423,7 @@ define double @roundeven_f64(double %a) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call roundeven@plt
+; RV32I-NEXT:    call roundeven
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1432,7 +1432,7 @@ define double @roundeven_f64(double %a) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call roundeven@plt
+; RV64I-NEXT:    call roundeven
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1473,7 +1473,7 @@ define iXLen @lrint_f64(double %a) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call lrint@plt
+; RV32I-NEXT:    call lrint
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1482,7 +1482,7 @@ define iXLen @lrint_f64(double %a) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call lrint@plt
+; RV64I-NEXT:    call lrint
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1523,7 +1523,7 @@ define iXLen @lround_f64(double %a) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call lround@plt
+; RV32I-NEXT:    call lround
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1532,7 +1532,7 @@ define iXLen @lround_f64(double %a) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call lround@plt
+; RV64I-NEXT:    call lround
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1547,7 +1547,7 @@ define i64 @llrint_f64(double %a) nounwind strictfp {
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
 ; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    call llrint@plt
+; RV32IFD-NEXT:    call llrint
 ; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
@@ -1561,7 +1561,7 @@ define i64 @llrint_f64(double %a) nounwind strictfp {
 ; RV32IZFINXZDINX:       # %bb.0:
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT:    call llrint@plt
+; RV32IZFINXZDINX-NEXT:    call llrint
 ; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINXZDINX-NEXT:    ret
@@ -1575,7 +1575,7 @@ define i64 @llrint_f64(double %a) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call llrint@plt
+; RV32I-NEXT:    call llrint
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1584,7 +1584,7 @@ define i64 @llrint_f64(double %a) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call llrint@plt
+; RV64I-NEXT:    call llrint
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1599,7 +1599,7 @@ define i64 @llround_f64(double %a) nounwind strictfp {
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
 ; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    call llround@plt
+; RV32IFD-NEXT:    call llround
 ; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
@@ -1613,7 +1613,7 @@ define i64 @llround_f64(double %a) nounwind strictfp {
 ; RV32IZFINXZDINX:       # %bb.0:
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT:    call llround@plt
+; RV32IZFINXZDINX-NEXT:    call llround
 ; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINXZDINX-NEXT:    ret
@@ -1627,7 +1627,7 @@ define i64 @llround_f64(double %a) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call llround@plt
+; RV32I-NEXT:    call llround
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1636,7 +1636,7 @@ define i64 @llround_f64(double %a) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call llround@plt
+; RV64I-NEXT:    call llround
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
diff --git a/llvm/test/CodeGen/RISCV/double-intrinsics.ll b/llvm/test/CodeGen/RISCV/double-intrinsics.ll
index 36268accc8fd04..f290cf0f7736e3 100644
--- a/llvm/test/CodeGen/RISCV/double-intrinsics.ll
+++ b/llvm/test/CodeGen/RISCV/double-intrinsics.ll
@@ -48,7 +48,7 @@ define double @sqrt_f64(double %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call sqrt@plt
+; RV32I-NEXT:    call sqrt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -57,7 +57,7 @@ define double @sqrt_f64(double %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call sqrt@plt
+; RV64I-NEXT:    call sqrt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -70,14 +70,14 @@ declare double @llvm.powi.f64.i32(double, i32)
 define double @powi_f64(double %a, i32 %b) nounwind {
 ; RV32IFD-LABEL: powi_f64:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    tail __powidf2@plt
+; RV32IFD-NEXT:    tail __powidf2
 ;
 ; RV64IFD-LABEL: powi_f64:
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    addi sp, sp, -16
 ; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IFD-NEXT:    sext.w a0, a0
-; RV64IFD-NEXT:    call __powidf2@plt
+; RV64IFD-NEXT:    call __powidf2
 ; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
@@ -86,7 +86,7 @@ define double @powi_f64(double %a, i32 %b) nounwind {
 ; RV32IZFINXZDINX:       # %bb.0:
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT:    call __powidf2@plt
+; RV32IZFINXZDINX-NEXT:    call __powidf2
 ; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINXZDINX-NEXT:    ret
@@ -96,7 +96,7 @@ define double @powi_f64(double %a, i32 %b) nounwind {
 ; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZFINXZDINX-NEXT:    sext.w a1, a1
-; RV64IZFINXZDINX-NEXT:    call __powidf2@plt
+; RV64IZFINXZDINX-NEXT:    call __powidf2
 ; RV64IZFINXZDINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV64IZFINXZDINX-NEXT:    ret
@@ -105,7 +105,7 @@ define double @powi_f64(double %a, i32 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __powidf2@plt
+; RV32I-NEXT:    call __powidf2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -115,7 +115,7 @@ define double @powi_f64(double %a, i32 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sext.w a1, a1
-; RV64I-NEXT:    call __powidf2@plt
+; RV64I-NEXT:    call __powidf2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -128,26 +128,26 @@ declare double @llvm.sin.f64(double)
 define double @sin_f64(double %a) nounwind {
 ; CHECKIFD-LABEL: sin_f64:
 ; CHECKIFD:       # %bb.0:
-; CHECKIFD-NEXT:    tail sin@plt
+; CHECKIFD-NEXT:    tail sin
 ;
 ; RV32IZFINXZDINX-LABEL: sin_f64:
 ; RV32IZFINXZDINX:       # %bb.0:
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT:    call sin@plt
+; RV32IZFINXZDINX-NEXT:    call sin
 ; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINXZDINX-NEXT:    ret
 ;
 ; RV64IZFINXZDINX-LABEL: sin_f64:
 ; RV64IZFINXZDINX:       # %bb.0:
-; RV64IZFINXZDINX-NEXT:    tail sin@plt
+; RV64IZFINXZDINX-NEXT:    tail sin
 ;
 ; RV32I-LABEL: sin_f64:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call sin@plt
+; RV32I-NEXT:    call sin
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -156,7 +156,7 @@ define double @sin_f64(double %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call sin@plt
+; RV64I-NEXT:    call sin
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -169,26 +169,26 @@ declare double @llvm.cos.f64(double)
 define double @cos_f64(double %a) nounwind {
 ; CHECKIFD-LABEL: cos_f64:
 ; CHECKIFD:       # %bb.0:
-; CHECKIFD-NEXT:    tail cos@plt
+; CHECKIFD-NEXT:    tail cos
 ;
 ; RV32IZFINXZDINX-LABEL: cos_f64:
 ; RV32IZFINXZDINX:       # %bb.0:
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT:    call cos@plt
+; RV32IZFINXZDINX-NEXT:    call cos
 ; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINXZDINX-NEXT:    ret
 ;
 ; RV64IZFINXZDINX-LABEL: cos_f64:
 ; RV64IZFINXZDINX:       # %bb.0:
-; RV64IZFINXZDINX-NEXT:    tail cos@plt
+; RV64IZFINXZDINX-NEXT:    tail cos
 ;
 ; RV32I-LABEL: cos_f64:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call cos@plt
+; RV32I-NEXT:    call cos
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -197,7 +197,7 @@ define double @cos_f64(double %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call cos@plt
+; RV64I-NEXT:    call cos
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -214,10 +214,10 @@ define double @sincos_f64(double %a) nounwind {
 ; RV32IFD-NEXT:    fsd fs0, 16(sp) # 8-byte Folded Spill
 ; RV32IFD-NEXT:    fsd fs1, 8(sp) # 8-byte Folded Spill
 ; RV32IFD-NEXT:    fmv.d fs0, fa0
-; RV32IFD-NEXT:    call sin@plt
+; RV32IFD-NEXT:    call sin
 ; RV32IFD-NEXT:    fmv.d fs1, fa0
 ; RV32IFD-NEXT:    fmv.d fa0, fs0
-; RV32IFD-NEXT:    call cos@plt
+; RV32IFD-NEXT:    call cos
 ; RV32IFD-NEXT:    fadd.d fa0, fs1, fa0
 ; RV32IFD-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    fld fs0, 16(sp) # 8-byte Folded Reload
@@ -232,10 +232,10 @@ define double @sincos_f64(double %a) nounwind {
 ; RV64IFD-NEXT:    fsd fs0, 16(sp) # 8-byte Folded Spill
 ; RV64IFD-NEXT:    fsd fs1, 8(sp) # 8-byte Folded Spill
 ; RV64IFD-NEXT:    fmv.d fs0, fa0
-; RV64IFD-NEXT:    call sin@plt
+; RV64IFD-NEXT:    call sin
 ; RV64IFD-NEXT:    fmv.d fs1, fa0
 ; RV64IFD-NEXT:    fmv.d fa0, fs0
-; RV64IFD-NEXT:    call cos@plt
+; RV64IFD-NEXT:    call cos
 ; RV64IFD-NEXT:    fadd.d fa0, fs1, fa0
 ; RV64IFD-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    fld fs0, 16(sp) # 8-byte Folded Reload
@@ -253,14 +253,14 @@ define double @sincos_f64(double %a) nounwind {
 ; RV32IZFINXZDINX-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
 ; RV32IZFINXZDINX-NEXT:    mv s0, a1
 ; RV32IZFINXZDINX-NEXT:    mv s1, a0
-; RV32IZFINXZDINX-NEXT:    call sin@plt
+; RV32IZFINXZDINX-NEXT:    call sin
 ; RV32IZFINXZDINX-NEXT:    sw a0, 0(sp)
 ; RV32IZFINXZDINX-NEXT:    sw a1, 4(sp)
 ; RV32IZFINXZDINX-NEXT:    lw s2, 0(sp)
 ; RV32IZFINXZDINX-NEXT:    lw s3, 4(sp)
 ; RV32IZFINXZDINX-NEXT:    mv a0, s1
 ; RV32IZFINXZDINX-NEXT:    mv a1, s0
-; RV32IZFINXZDINX-NEXT:    call cos@plt
+; RV32IZFINXZDINX-NEXT:    call cos
 ; RV32IZFINXZDINX-NEXT:    sw a0, 0(sp)
 ; RV32IZFINXZDINX-NEXT:    sw a1, 4(sp)
 ; RV32IZFINXZDINX-NEXT:    lw a0, 0(sp)
@@ -285,10 +285,10 @@ define double @sincos_f64(double %a) nounwind {
 ; RV64IZFINXZDINX-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
 ; RV64IZFINXZDINX-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
 ; RV64IZFINXZDINX-NEXT:    mv s0, a0
-; RV64IZFINXZDINX-NEXT:    call sin@plt
+; RV64IZFINXZDINX-NEXT:    call sin
 ; RV64IZFINXZDINX-NEXT:    mv s1, a0
 ; RV64IZFINXZDINX-NEXT:    mv a0, s0
-; RV64IZFINXZDINX-NEXT:    call cos@plt
+; RV64IZFINXZDINX-NEXT:    call cos
 ; RV64IZFINXZDINX-NEXT:    fadd.d a0, s1, a0
 ; RV64IZFINXZDINX-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64IZFINXZDINX-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
@@ -306,17 +306,17 @@ define double @sincos_f64(double %a) nounwind {
 ; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a1
 ; RV32I-NEXT:    mv s1, a0
-; RV32I-NEXT:    call sin@plt
+; RV32I-NEXT:    call sin
 ; RV32I-NEXT:    mv s2, a0
 ; RV32I-NEXT:    mv s3, a1
 ; RV32I-NEXT:    mv a0, s1
 ; RV32I-NEXT:    mv a1, s0
-; RV32I-NEXT:    call cos@plt
+; RV32I-NEXT:    call cos
 ; RV32I-NEXT:    mv a2, a0
 ; RV32I-NEXT:    mv a3, a1
 ; RV32I-NEXT:    mv a0, s2
 ; RV32I-NEXT:    mv a1, s3
-; RV32I-NEXT:    call __adddf3@plt
+; RV32I-NEXT:    call __adddf3
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
@@ -332,13 +332,13 @@ define double @sincos_f64(double %a) nounwind {
 ; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
-; RV64I-NEXT:    call sin@plt
+; RV64I-NEXT:    call sin
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call cos@plt
+; RV64I-NEXT:    call cos
 ; RV64I-NEXT:    mv a1, a0
 ; RV64I-NEXT:    mv a0, s1
-; RV64I-NEXT:    call __adddf3@plt
+; RV64I-NEXT:    call __adddf3
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
@@ -355,26 +355,26 @@ declare double @llvm.pow.f64(double, double)
 define double @pow_f64(double %a, double %b) nounwind {
 ; CHECKIFD-LABEL: pow_f64:
 ; CHECKIFD:       # %bb.0:
-; CHECKIFD-NEXT:    tail pow@plt
+; CHECKIFD-NEXT:    tail pow
 ;
 ; RV32IZFINXZDINX-LABEL: pow_f64:
 ; RV32IZFINXZDINX:       # %bb.0:
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT:    call pow@plt
+; RV32IZFINXZDINX-NEXT:    call pow
 ; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINXZDINX-NEXT:    ret
 ;
 ; RV64IZFINXZDINX-LABEL: pow_f64:
 ; RV64IZFINXZDINX:       # %bb.0:
-; RV64IZFINXZDINX-NEXT:    tail pow@plt
+; RV64IZFINXZDINX-NEXT:    tail pow
 ;
 ; RV32I-LABEL: pow_f64:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call pow@plt
+; RV32I-NEXT:    call pow
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -383,7 +383,7 @@ define double @pow_f64(double %a, double %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call pow@plt
+; RV64I-NEXT:    call pow
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -396,26 +396,26 @@ declare double @llvm.exp.f64(double)
 define double @exp_f64(double %a) nounwind {
 ; CHECKIFD-LABEL: exp_f64:
 ; CHECKIFD:       # %bb.0:
-; CHECKIFD-NEXT:    tail exp@plt
+; CHECKIFD-NEXT:    tail exp
 ;
 ; RV32IZFINXZDINX-LABEL: exp_f64:
 ; RV32IZFINXZDINX:       # %bb.0:
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT:    call exp@plt
+; RV32IZFINXZDINX-NEXT:    call exp
 ; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINXZDINX-NEXT:    ret
 ;
 ; RV64IZFINXZDINX-LABEL: exp_f64:
 ; RV64IZFINXZDINX:       # %bb.0:
-; RV64IZFINXZDINX-NEXT:    tail exp@plt
+; RV64IZFINXZDINX-NEXT:    tail exp
 ;
 ; RV32I-LABEL: exp_f64:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call exp@plt
+; RV32I-NEXT:    call exp
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -424,7 +424,7 @@ define double @exp_f64(double %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call exp@plt
+; RV64I-NEXT:    call exp
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -437,26 +437,26 @@ declare double @llvm.exp2.f64(double)
 define double @exp2_f64(double %a) nounwind {
 ; CHECKIFD-LABEL: exp2_f64:
 ; CHECKIFD:       # %bb.0:
-; CHECKIFD-NEXT:    tail exp2@plt
+; CHECKIFD-NEXT:    tail exp2
 ;
 ; RV32IZFINXZDINX-LABEL: exp2_f64:
 ; RV32IZFINXZDINX:       # %bb.0:
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT:    call exp2@plt
+; RV32IZFINXZDINX-NEXT:    call exp2
 ; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINXZDINX-NEXT:    ret
 ;
 ; RV64IZFINXZDINX-LABEL: exp2_f64:
 ; RV64IZFINXZDINX:       # %bb.0:
-; RV64IZFINXZDINX-NEXT:    tail exp2@plt
+; RV64IZFINXZDINX-NEXT:    tail exp2
 ;
 ; RV32I-LABEL: exp2_f64:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call exp2@plt
+; RV32I-NEXT:    call exp2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -465,7 +465,7 @@ define double @exp2_f64(double %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call exp2@plt
+; RV64I-NEXT:    call exp2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -478,26 +478,26 @@ declare double @llvm.log.f64(double)
 define double @log_f64(double %a) nounwind {
 ; CHECKIFD-LABEL: log_f64:
 ; CHECKIFD:       # %bb.0:
-; CHECKIFD-NEXT:    tail log@plt
+; CHECKIFD-NEXT:    tail log
 ;
 ; RV32IZFINXZDINX-LABEL: log_f64:
 ; RV32IZFINXZDINX:       # %bb.0:
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT:    call log@plt
+; RV32IZFINXZDINX-NEXT:    call log
 ; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINXZDINX-NEXT:    ret
 ;
 ; RV64IZFINXZDINX-LABEL: log_f64:
 ; RV64IZFINXZDINX:       # %bb.0:
-; RV64IZFINXZDINX-NEXT:    tail log@plt
+; RV64IZFINXZDINX-NEXT:    tail log
 ;
 ; RV32I-LABEL: log_f64:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call log@plt
+; RV32I-NEXT:    call log
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -506,7 +506,7 @@ define double @log_f64(double %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call log@plt
+; RV64I-NEXT:    call log
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -519,26 +519,26 @@ declare double @llvm.log10.f64(double)
 define double @log10_f64(double %a) nounwind {
 ; CHECKIFD-LABEL: log10_f64:
 ; CHECKIFD:       # %bb.0:
-; CHECKIFD-NEXT:    tail log10@plt
+; CHECKIFD-NEXT:    tail log10
 ;
 ; RV32IZFINXZDINX-LABEL: log10_f64:
 ; RV32IZFINXZDINX:       # %bb.0:
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT:    call log10@plt
+; RV32IZFINXZDINX-NEXT:    call log10
 ; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINXZDINX-NEXT:    ret
 ;
 ; RV64IZFINXZDINX-LABEL: log10_f64:
 ; RV64IZFINXZDINX:       # %bb.0:
-; RV64IZFINXZDINX-NEXT:    tail log10@plt
+; RV64IZFINXZDINX-NEXT:    tail log10
 ;
 ; RV32I-LABEL: log10_f64:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call log10@plt
+; RV32I-NEXT:    call log10
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -547,7 +547,7 @@ define double @log10_f64(double %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call log10@plt
+; RV64I-NEXT:    call log10
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -560,26 +560,26 @@ declare double @llvm.log2.f64(double)
 define double @log2_f64(double %a) nounwind {
 ; CHECKIFD-LABEL: log2_f64:
 ; CHECKIFD:       # %bb.0:
-; CHECKIFD-NEXT:    tail log2@plt
+; CHECKIFD-NEXT:    tail log2
 ;
 ; RV32IZFINXZDINX-LABEL: log2_f64:
 ; RV32IZFINXZDINX:       # %bb.0:
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT:    call log2@plt
+; RV32IZFINXZDINX-NEXT:    call log2
 ; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINXZDINX-NEXT:    ret
 ;
 ; RV64IZFINXZDINX-LABEL: log2_f64:
 ; RV64IZFINXZDINX:       # %bb.0:
-; RV64IZFINXZDINX-NEXT:    tail log2@plt
+; RV64IZFINXZDINX-NEXT:    tail log2
 ;
 ; RV32I-LABEL: log2_f64:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call log2@plt
+; RV32I-NEXT:    call log2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -588,7 +588,7 @@ define double @log2_f64(double %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call log2@plt
+; RV64I-NEXT:    call log2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -636,7 +636,7 @@ define double @fma_f64(double %a, double %b, double %c) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call fma@plt
+; RV32I-NEXT:    call fma
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -645,7 +645,7 @@ define double @fma_f64(double %a, double %b, double %c) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call fma@plt
+; RV64I-NEXT:    call fma
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -697,10 +697,10 @@ define double @fmuladd_f64(double %a, double %b, double %c) nounwind {
 ; RV32I-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a5
 ; RV32I-NEXT:    mv s1, a4
-; RV32I-NEXT:    call __muldf3@plt
+; RV32I-NEXT:    call __muldf3
 ; RV32I-NEXT:    mv a2, s1
 ; RV32I-NEXT:    mv a3, s0
-; RV32I-NEXT:    call __adddf3@plt
+; RV32I-NEXT:    call __adddf3
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
@@ -713,9 +713,9 @@ define double @fmuladd_f64(double %a, double %b, double %c) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a2
-; RV64I-NEXT:    call __muldf3@plt
+; RV64I-NEXT:    call __muldf3
 ; RV64I-NEXT:    mv a1, s0
-; RV64I-NEXT:    call __adddf3@plt
+; RV64I-NEXT:    call __adddf3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -795,7 +795,7 @@ define double @minnum_f64(double %a, double %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call fmin@plt
+; RV32I-NEXT:    call fmin
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -804,7 +804,7 @@ define double @minnum_f64(double %a, double %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call fmin@plt
+; RV64I-NEXT:    call fmin
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -848,7 +848,7 @@ define double @maxnum_f64(double %a, double %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call fmax@plt
+; RV32I-NEXT:    call fmax
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -857,7 +857,7 @@ define double @maxnum_f64(double %a, double %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call fmax@plt
+; RV64I-NEXT:    call fmax
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -940,7 +940,7 @@ declare double @llvm.floor.f64(double)
 define double @floor_f64(double %a) nounwind {
 ; RV32IFD-LABEL: floor_f64:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    tail floor@plt
+; RV32IFD-NEXT:    tail floor
 ;
 ; RV64IFD-LABEL: floor_f64:
 ; RV64IFD:       # %bb.0:
@@ -960,7 +960,7 @@ define double @floor_f64(double %a) nounwind {
 ; RV32IZFINXZDINX:       # %bb.0:
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT:    call floor@plt
+; RV32IZFINXZDINX-NEXT:    call floor
 ; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINXZDINX-NEXT:    ret
@@ -983,7 +983,7 @@ define double @floor_f64(double %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call floor@plt
+; RV32I-NEXT:    call floor
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -992,7 +992,7 @@ define double @floor_f64(double %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call floor@plt
+; RV64I-NEXT:    call floor
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1005,7 +1005,7 @@ declare double @llvm.ceil.f64(double)
 define double @ceil_f64(double %a) nounwind {
 ; RV32IFD-LABEL: ceil_f64:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    tail ceil@plt
+; RV32IFD-NEXT:    tail ceil
 ;
 ; RV64IFD-LABEL: ceil_f64:
 ; RV64IFD:       # %bb.0:
@@ -1025,7 +1025,7 @@ define double @ceil_f64(double %a) nounwind {
 ; RV32IZFINXZDINX:       # %bb.0:
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT:    call ceil@plt
+; RV32IZFINXZDINX-NEXT:    call ceil
 ; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINXZDINX-NEXT:    ret
@@ -1048,7 +1048,7 @@ define double @ceil_f64(double %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call ceil@plt
+; RV32I-NEXT:    call ceil
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1057,7 +1057,7 @@ define double @ceil_f64(double %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call ceil@plt
+; RV64I-NEXT:    call ceil
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1070,7 +1070,7 @@ declare double @llvm.trunc.f64(double)
 define double @trunc_f64(double %a) nounwind {
 ; RV32IFD-LABEL: trunc_f64:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    tail trunc@plt
+; RV32IFD-NEXT:    tail trunc
 ;
 ; RV64IFD-LABEL: trunc_f64:
 ; RV64IFD:       # %bb.0:
@@ -1090,7 +1090,7 @@ define double @trunc_f64(double %a) nounwind {
 ; RV32IZFINXZDINX:       # %bb.0:
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT:    call trunc@plt
+; RV32IZFINXZDINX-NEXT:    call trunc
 ; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINXZDINX-NEXT:    ret
@@ -1113,7 +1113,7 @@ define double @trunc_f64(double %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call trunc@plt
+; RV32I-NEXT:    call trunc
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1122,7 +1122,7 @@ define double @trunc_f64(double %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call trunc@plt
+; RV64I-NEXT:    call trunc
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1135,7 +1135,7 @@ declare double @llvm.rint.f64(double)
 define double @rint_f64(double %a) nounwind {
 ; RV32IFD-LABEL: rint_f64:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    tail rint@plt
+; RV32IFD-NEXT:    tail rint
 ;
 ; RV64IFD-LABEL: rint_f64:
 ; RV64IFD:       # %bb.0:
@@ -1155,7 +1155,7 @@ define double @rint_f64(double %a) nounwind {
 ; RV32IZFINXZDINX:       # %bb.0:
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT:    call rint@plt
+; RV32IZFINXZDINX-NEXT:    call rint
 ; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINXZDINX-NEXT:    ret
@@ -1178,7 +1178,7 @@ define double @rint_f64(double %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call rint@plt
+; RV32I-NEXT:    call rint
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1187,7 +1187,7 @@ define double @rint_f64(double %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call rint@plt
+; RV64I-NEXT:    call rint
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1200,26 +1200,26 @@ declare double @llvm.nearbyint.f64(double)
 define double @nearbyint_f64(double %a) nounwind {
 ; CHECKIFD-LABEL: nearbyint_f64:
 ; CHECKIFD:       # %bb.0:
-; CHECKIFD-NEXT:    tail nearbyint@plt
+; CHECKIFD-NEXT:    tail nearbyint
 ;
 ; RV32IZFINXZDINX-LABEL: nearbyint_f64:
 ; RV32IZFINXZDINX:       # %bb.0:
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT:    call nearbyint@plt
+; RV32IZFINXZDINX-NEXT:    call nearbyint
 ; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINXZDINX-NEXT:    ret
 ;
 ; RV64IZFINXZDINX-LABEL: nearbyint_f64:
 ; RV64IZFINXZDINX:       # %bb.0:
-; RV64IZFINXZDINX-NEXT:    tail nearbyint@plt
+; RV64IZFINXZDINX-NEXT:    tail nearbyint
 ;
 ; RV32I-LABEL: nearbyint_f64:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call nearbyint@plt
+; RV32I-NEXT:    call nearbyint
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1228,7 +1228,7 @@ define double @nearbyint_f64(double %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call nearbyint@plt
+; RV64I-NEXT:    call nearbyint
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1241,7 +1241,7 @@ declare double @llvm.round.f64(double)
 define double @round_f64(double %a) nounwind {
 ; RV32IFD-LABEL: round_f64:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    tail round@plt
+; RV32IFD-NEXT:    tail round
 ;
 ; RV64IFD-LABEL: round_f64:
 ; RV64IFD:       # %bb.0:
@@ -1261,7 +1261,7 @@ define double @round_f64(double %a) nounwind {
 ; RV32IZFINXZDINX:       # %bb.0:
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT:    call round@plt
+; RV32IZFINXZDINX-NEXT:    call round
 ; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINXZDINX-NEXT:    ret
@@ -1284,7 +1284,7 @@ define double @round_f64(double %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call round@plt
+; RV32I-NEXT:    call round
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1293,7 +1293,7 @@ define double @round_f64(double %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call round@plt
+; RV64I-NEXT:    call round
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1306,7 +1306,7 @@ declare double @llvm.roundeven.f64(double)
 define double @roundeven_f64(double %a) nounwind {
 ; RV32IFD-LABEL: roundeven_f64:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    tail roundeven@plt
+; RV32IFD-NEXT:    tail roundeven
 ;
 ; RV64IFD-LABEL: roundeven_f64:
 ; RV64IFD:       # %bb.0:
@@ -1326,7 +1326,7 @@ define double @roundeven_f64(double %a) nounwind {
 ; RV32IZFINXZDINX:       # %bb.0:
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT:    call roundeven@plt
+; RV32IZFINXZDINX-NEXT:    call roundeven
 ; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINXZDINX-NEXT:    ret
@@ -1349,7 +1349,7 @@ define double @roundeven_f64(double %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call roundeven@plt
+; RV32I-NEXT:    call roundeven
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1358,7 +1358,7 @@ define double @roundeven_f64(double %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call roundeven@plt
+; RV64I-NEXT:    call roundeven
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1399,7 +1399,7 @@ define iXLen @lrint_f64(double %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call lrint@plt
+; RV32I-NEXT:    call lrint
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1408,7 +1408,7 @@ define iXLen @lrint_f64(double %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call lrint@plt
+; RV64I-NEXT:    call lrint
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1450,7 +1450,7 @@ define iXLen @lround_f64(double %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call lround@plt
+; RV32I-NEXT:    call lround
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1459,7 +1459,7 @@ define iXLen @lround_f64(double %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call lround@plt
+; RV64I-NEXT:    call lround
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1493,7 +1493,7 @@ define i32 @lround_i32_f64(double %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call lround@plt
+; RV32I-NEXT:    call lround
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1502,7 +1502,7 @@ define i32 @lround_i32_f64(double %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call lround@plt
+; RV64I-NEXT:    call lround
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1517,7 +1517,7 @@ define i64 @llrint_f64(double %a) nounwind {
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
 ; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    call llrint@plt
+; RV32IFD-NEXT:    call llrint
 ; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
@@ -1531,7 +1531,7 @@ define i64 @llrint_f64(double %a) nounwind {
 ; RV32IZFINXZDINX:       # %bb.0:
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT:    call llrint@plt
+; RV32IZFINXZDINX-NEXT:    call llrint
 ; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINXZDINX-NEXT:    ret
@@ -1545,7 +1545,7 @@ define i64 @llrint_f64(double %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call llrint@plt
+; RV32I-NEXT:    call llrint
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1554,7 +1554,7 @@ define i64 @llrint_f64(double %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call llrint@plt
+; RV64I-NEXT:    call llrint
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1569,7 +1569,7 @@ define i64 @llround_f64(double %a) nounwind {
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
 ; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    call llround@plt
+; RV32IFD-NEXT:    call llround
 ; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
@@ -1583,7 +1583,7 @@ define i64 @llround_f64(double %a) nounwind {
 ; RV32IZFINXZDINX:       # %bb.0:
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT:    call llround@plt
+; RV32IZFINXZDINX-NEXT:    call llround
 ; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINXZDINX-NEXT:    ret
@@ -1597,7 +1597,7 @@ define i64 @llround_f64(double %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call llround@plt
+; RV32I-NEXT:    call llround
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1606,7 +1606,7 @@ define i64 @llround_f64(double %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call llround@plt
+; RV64I-NEXT:    call llround
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
diff --git a/llvm/test/CodeGen/RISCV/double-mem.ll b/llvm/test/CodeGen/RISCV/double-mem.ll
index fb043f44beb39a..6c6f70d6e2ed40 100644
--- a/llvm/test/CodeGen/RISCV/double-mem.ll
+++ b/llvm/test/CodeGen/RISCV/double-mem.ll
@@ -217,7 +217,7 @@ define dso_local double @fld_stack(double %a) nounwind {
 ; RV32IFD-NEXT:    fsd fs0, 16(sp) # 8-byte Folded Spill
 ; RV32IFD-NEXT:    fmv.d fs0, fa0
 ; RV32IFD-NEXT:    addi a0, sp, 8
-; RV32IFD-NEXT:    call notdead@plt
+; RV32IFD-NEXT:    call notdead
 ; RV32IFD-NEXT:    fld fa5, 8(sp)
 ; RV32IFD-NEXT:    fadd.d fa0, fa5, fs0
 ; RV32IFD-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
@@ -232,7 +232,7 @@ define dso_local double @fld_stack(double %a) nounwind {
 ; RV64IFD-NEXT:    fsd fs0, 16(sp) # 8-byte Folded Spill
 ; RV64IFD-NEXT:    fmv.d fs0, fa0
 ; RV64IFD-NEXT:    addi a0, sp, 8
-; RV64IFD-NEXT:    call notdead@plt
+; RV64IFD-NEXT:    call notdead
 ; RV64IFD-NEXT:    fld fa5, 8(sp)
 ; RV64IFD-NEXT:    fadd.d fa0, fa5, fs0
 ; RV64IFD-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
@@ -251,7 +251,7 @@ define dso_local double @fld_stack(double %a) nounwind {
 ; RV32IZFINXZDINX-NEXT:    lw s0, 0(sp)
 ; RV32IZFINXZDINX-NEXT:    lw s1, 4(sp)
 ; RV32IZFINXZDINX-NEXT:    addi a0, sp, 8
-; RV32IZFINXZDINX-NEXT:    call notdead@plt
+; RV32IZFINXZDINX-NEXT:    call notdead
 ; RV32IZFINXZDINX-NEXT:    lw a0, 8(sp)
 ; RV32IZFINXZDINX-NEXT:    lw a1, 12(sp)
 ; RV32IZFINXZDINX-NEXT:    fadd.d a0, a0, s0
@@ -272,7 +272,7 @@ define dso_local double @fld_stack(double %a) nounwind {
 ; RV64IZFINXZDINX-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
 ; RV64IZFINXZDINX-NEXT:    mv s0, a0
 ; RV64IZFINXZDINX-NEXT:    addi a0, sp, 8
-; RV64IZFINXZDINX-NEXT:    call notdead@plt
+; RV64IZFINXZDINX-NEXT:    call notdead
 ; RV64IZFINXZDINX-NEXT:    ld a0, 8(sp)
 ; RV64IZFINXZDINX-NEXT:    fadd.d a0, a0, s0
 ; RV64IZFINXZDINX-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
@@ -294,7 +294,7 @@ define dso_local void @fsd_stack(double %a, double %b) nounwind {
 ; RV32IFD-NEXT:    fadd.d fa5, fa0, fa1
 ; RV32IFD-NEXT:    fsd fa5, 0(sp)
 ; RV32IFD-NEXT:    mv a0, sp
-; RV32IFD-NEXT:    call notdead@plt
+; RV32IFD-NEXT:    call notdead
 ; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
@@ -306,7 +306,7 @@ define dso_local void @fsd_stack(double %a, double %b) nounwind {
 ; RV64IFD-NEXT:    fadd.d fa5, fa0, fa1
 ; RV64IFD-NEXT:    fsd fa5, 0(sp)
 ; RV64IFD-NEXT:    mv a0, sp
-; RV64IFD-NEXT:    call notdead@plt
+; RV64IFD-NEXT:    call notdead
 ; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
@@ -327,7 +327,7 @@ define dso_local void @fsd_stack(double %a, double %b) nounwind {
 ; RV32IZFINXZDINX-NEXT:    sw a0, 16(sp)
 ; RV32IZFINXZDINX-NEXT:    sw a1, 20(sp)
 ; RV32IZFINXZDINX-NEXT:    addi a0, sp, 16
-; RV32IZFINXZDINX-NEXT:    call notdead@plt
+; RV32IZFINXZDINX-NEXT:    call notdead
 ; RV32IZFINXZDINX-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 32
 ; RV32IZFINXZDINX-NEXT:    ret
@@ -339,7 +339,7 @@ define dso_local void @fsd_stack(double %a, double %b) nounwind {
 ; RV64IZFINXZDINX-NEXT:    fadd.d a0, a0, a1
 ; RV64IZFINXZDINX-NEXT:    sd a0, 0(sp)
 ; RV64IZFINXZDINX-NEXT:    mv a0, sp
-; RV64IZFINXZDINX-NEXT:    call notdead@plt
+; RV64IZFINXZDINX-NEXT:    call notdead
 ; RV64IZFINXZDINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV64IZFINXZDINX-NEXT:    ret
diff --git a/llvm/test/CodeGen/RISCV/double-previous-failure.ll b/llvm/test/CodeGen/RISCV/double-previous-failure.ll
index aec27b58e1fc49..8b8f538886edbd 100644
--- a/llvm/test/CodeGen/RISCV/double-previous-failure.ll
+++ b/llvm/test/CodeGen/RISCV/double-previous-failure.ll
@@ -25,7 +25,7 @@ define i32 @main() nounwind {
 ; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IFD-NEXT:    lui a1, 262144
 ; RV32IFD-NEXT:    li a0, 0
-; RV32IFD-NEXT:    call test@plt
+; RV32IFD-NEXT:    call test
 ; RV32IFD-NEXT:    sw a0, 0(sp)
 ; RV32IFD-NEXT:    sw a1, 4(sp)
 ; RV32IFD-NEXT:    fld fa5, 0(sp)
@@ -39,9 +39,9 @@ define i32 @main() nounwind {
 ; RV32IFD-NEXT:    flt.d a0, fa4, fa5
 ; RV32IFD-NEXT:    bnez a0, .LBB1_3
 ; RV32IFD-NEXT:  # %bb.2: # %if.end
-; RV32IFD-NEXT:    call exit@plt
+; RV32IFD-NEXT:    call exit
 ; RV32IFD-NEXT:  .LBB1_3: # %if.then
-; RV32IFD-NEXT:    call abort@plt
+; RV32IFD-NEXT:    call abort
 ;
 ; RV32IZFINXZDINX-LABEL: main:
 ; RV32IZFINXZDINX:       # %bb.0: # %entry
@@ -49,7 +49,7 @@ define i32 @main() nounwind {
 ; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFINXZDINX-NEXT:    lui a1, 262144
 ; RV32IZFINXZDINX-NEXT:    li a0, 0
-; RV32IZFINXZDINX-NEXT:    call test@plt
+; RV32IZFINXZDINX-NEXT:    call test
 ; RV32IZFINXZDINX-NEXT:    sw a0, 0(sp)
 ; RV32IZFINXZDINX-NEXT:    sw a1, 4(sp)
 ; RV32IZFINXZDINX-NEXT:    lw a0, 0(sp)
@@ -66,9 +66,9 @@ define i32 @main() nounwind {
 ; RV32IZFINXZDINX-NEXT:    flt.d a0, a2, a0
 ; RV32IZFINXZDINX-NEXT:    bnez a0, .LBB1_3
 ; RV32IZFINXZDINX-NEXT:  # %bb.2: # %if.end
-; RV32IZFINXZDINX-NEXT:    call exit@plt
+; RV32IZFINXZDINX-NEXT:    call exit
 ; RV32IZFINXZDINX-NEXT:  .LBB1_3: # %if.then
-; RV32IZFINXZDINX-NEXT:    call abort@plt
+; RV32IZFINXZDINX-NEXT:    call abort
 entry:
   %call = call double @test(double 2.000000e+00)
   %cmp = fcmp olt double %call, 2.400000e-01
diff --git a/llvm/test/CodeGen/RISCV/double-round-conv-sat.ll b/llvm/test/CodeGen/RISCV/double-round-conv-sat.ll
index 5c5b4bb723b686..b8c6e84502408f 100644
--- a/llvm/test/CodeGen/RISCV/double-round-conv-sat.ll
+++ b/llvm/test/CodeGen/RISCV/double-round-conv-sat.ll
@@ -54,12 +54,12 @@ define i64 @test_floor_si64(double %x) nounwind {
 ; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IFD-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32IFD-NEXT:    fsd fs0, 0(sp) # 8-byte Folded Spill
-; RV32IFD-NEXT:    call floor@plt
+; RV32IFD-NEXT:    call floor
 ; RV32IFD-NEXT:    lui a0, %hi(.LCPI1_0)
 ; RV32IFD-NEXT:    fld fa5, %lo(.LCPI1_0)(a0)
 ; RV32IFD-NEXT:    fmv.d fs0, fa0
 ; RV32IFD-NEXT:    fle.d s0, fa5, fa0
-; RV32IFD-NEXT:    call __fixdfdi@plt
+; RV32IFD-NEXT:    call __fixdfdi
 ; RV32IFD-NEXT:    lui a4, 524288
 ; RV32IFD-NEXT:    lui a2, 524288
 ; RV32IFD-NEXT:    beqz s0, .LBB1_2
@@ -103,7 +103,7 @@ define i64 @test_floor_si64(double %x) nounwind {
 ; RV32IZFINXZDINX-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
 ; RV32IZFINXZDINX-NEXT:    sw s2, 20(sp) # 4-byte Folded Spill
 ; RV32IZFINXZDINX-NEXT:    sw s3, 16(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT:    call floor@plt
+; RV32IZFINXZDINX-NEXT:    call floor
 ; RV32IZFINXZDINX-NEXT:    sw a0, 8(sp)
 ; RV32IZFINXZDINX-NEXT:    sw a1, 12(sp)
 ; RV32IZFINXZDINX-NEXT:    lw s2, 8(sp)
@@ -112,7 +112,7 @@ define i64 @test_floor_si64(double %x) nounwind {
 ; RV32IZFINXZDINX-NEXT:    lw a3, %lo(.LCPI1_0+4)(a2)
 ; RV32IZFINXZDINX-NEXT:    lw a2, %lo(.LCPI1_0)(a2)
 ; RV32IZFINXZDINX-NEXT:    fle.d s0, a2, s2
-; RV32IZFINXZDINX-NEXT:    call __fixdfdi@plt
+; RV32IZFINXZDINX-NEXT:    call __fixdfdi
 ; RV32IZFINXZDINX-NEXT:    lui a4, 524288
 ; RV32IZFINXZDINX-NEXT:    lui a2, 524288
 ; RV32IZFINXZDINX-NEXT:    beqz s0, .LBB1_2
@@ -201,7 +201,7 @@ define i64 @test_floor_ui64(double %x) nounwind {
 ; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IFD-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32IFD-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    call floor@plt
+; RV32IFD-NEXT:    call floor
 ; RV32IFD-NEXT:    lui a0, %hi(.LCPI3_0)
 ; RV32IFD-NEXT:    fld fa5, %lo(.LCPI3_0)(a0)
 ; RV32IFD-NEXT:    flt.d a0, fa5, fa0
@@ -209,7 +209,7 @@ define i64 @test_floor_ui64(double %x) nounwind {
 ; RV32IFD-NEXT:    fcvt.d.w fa5, zero
 ; RV32IFD-NEXT:    fle.d a0, fa5, fa0
 ; RV32IFD-NEXT:    neg s1, a0
-; RV32IFD-NEXT:    call __fixunsdfdi@plt
+; RV32IFD-NEXT:    call __fixunsdfdi
 ; RV32IFD-NEXT:    and a0, s1, a0
 ; RV32IFD-NEXT:    or a0, s0, a0
 ; RV32IFD-NEXT:    and a1, s1, a1
@@ -236,7 +236,7 @@ define i64 @test_floor_ui64(double %x) nounwind {
 ; RV32IZFINXZDINX-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
 ; RV32IZFINXZDINX-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
 ; RV32IZFINXZDINX-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT:    call floor@plt
+; RV32IZFINXZDINX-NEXT:    call floor
 ; RV32IZFINXZDINX-NEXT:    sw a0, 8(sp)
 ; RV32IZFINXZDINX-NEXT:    sw a1, 12(sp)
 ; RV32IZFINXZDINX-NEXT:    lw s0, 8(sp)
@@ -244,7 +244,7 @@ define i64 @test_floor_ui64(double %x) nounwind {
 ; RV32IZFINXZDINX-NEXT:    fcvt.d.w a2, zero
 ; RV32IZFINXZDINX-NEXT:    fle.d a2, a2, s0
 ; RV32IZFINXZDINX-NEXT:    neg s2, a2
-; RV32IZFINXZDINX-NEXT:    call __fixunsdfdi@plt
+; RV32IZFINXZDINX-NEXT:    call __fixunsdfdi
 ; RV32IZFINXZDINX-NEXT:    lui a2, %hi(.LCPI3_0)
 ; RV32IZFINXZDINX-NEXT:    lw a3, %lo(.LCPI3_0+4)(a2)
 ; RV32IZFINXZDINX-NEXT:    lw a2, %lo(.LCPI3_0)(a2)
@@ -320,12 +320,12 @@ define i64 @test_ceil_si64(double %x) nounwind {
 ; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IFD-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32IFD-NEXT:    fsd fs0, 0(sp) # 8-byte Folded Spill
-; RV32IFD-NEXT:    call ceil@plt
+; RV32IFD-NEXT:    call ceil
 ; RV32IFD-NEXT:    lui a0, %hi(.LCPI5_0)
 ; RV32IFD-NEXT:    fld fa5, %lo(.LCPI5_0)(a0)
 ; RV32IFD-NEXT:    fmv.d fs0, fa0
 ; RV32IFD-NEXT:    fle.d s0, fa5, fa0
-; RV32IFD-NEXT:    call __fixdfdi@plt
+; RV32IFD-NEXT:    call __fixdfdi
 ; RV32IFD-NEXT:    lui a4, 524288
 ; RV32IFD-NEXT:    lui a2, 524288
 ; RV32IFD-NEXT:    beqz s0, .LBB5_2
@@ -369,7 +369,7 @@ define i64 @test_ceil_si64(double %x) nounwind {
 ; RV32IZFINXZDINX-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
 ; RV32IZFINXZDINX-NEXT:    sw s2, 20(sp) # 4-byte Folded Spill
 ; RV32IZFINXZDINX-NEXT:    sw s3, 16(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT:    call ceil@plt
+; RV32IZFINXZDINX-NEXT:    call ceil
 ; RV32IZFINXZDINX-NEXT:    sw a0, 8(sp)
 ; RV32IZFINXZDINX-NEXT:    sw a1, 12(sp)
 ; RV32IZFINXZDINX-NEXT:    lw s2, 8(sp)
@@ -378,7 +378,7 @@ define i64 @test_ceil_si64(double %x) nounwind {
 ; RV32IZFINXZDINX-NEXT:    lw a3, %lo(.LCPI5_0+4)(a2)
 ; RV32IZFINXZDINX-NEXT:    lw a2, %lo(.LCPI5_0)(a2)
 ; RV32IZFINXZDINX-NEXT:    fle.d s0, a2, s2
-; RV32IZFINXZDINX-NEXT:    call __fixdfdi@plt
+; RV32IZFINXZDINX-NEXT:    call __fixdfdi
 ; RV32IZFINXZDINX-NEXT:    lui a4, 524288
 ; RV32IZFINXZDINX-NEXT:    lui a2, 524288
 ; RV32IZFINXZDINX-NEXT:    beqz s0, .LBB5_2
@@ -467,7 +467,7 @@ define i64 @test_ceil_ui64(double %x) nounwind {
 ; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IFD-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32IFD-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    call ceil@plt
+; RV32IFD-NEXT:    call ceil
 ; RV32IFD-NEXT:    lui a0, %hi(.LCPI7_0)
 ; RV32IFD-NEXT:    fld fa5, %lo(.LCPI7_0)(a0)
 ; RV32IFD-NEXT:    flt.d a0, fa5, fa0
@@ -475,7 +475,7 @@ define i64 @test_ceil_ui64(double %x) nounwind {
 ; RV32IFD-NEXT:    fcvt.d.w fa5, zero
 ; RV32IFD-NEXT:    fle.d a0, fa5, fa0
 ; RV32IFD-NEXT:    neg s1, a0
-; RV32IFD-NEXT:    call __fixunsdfdi@plt
+; RV32IFD-NEXT:    call __fixunsdfdi
 ; RV32IFD-NEXT:    and a0, s1, a0
 ; RV32IFD-NEXT:    or a0, s0, a0
 ; RV32IFD-NEXT:    and a1, s1, a1
@@ -502,7 +502,7 @@ define i64 @test_ceil_ui64(double %x) nounwind {
 ; RV32IZFINXZDINX-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
 ; RV32IZFINXZDINX-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
 ; RV32IZFINXZDINX-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT:    call ceil@plt
+; RV32IZFINXZDINX-NEXT:    call ceil
 ; RV32IZFINXZDINX-NEXT:    sw a0, 8(sp)
 ; RV32IZFINXZDINX-NEXT:    sw a1, 12(sp)
 ; RV32IZFINXZDINX-NEXT:    lw s0, 8(sp)
@@ -510,7 +510,7 @@ define i64 @test_ceil_ui64(double %x) nounwind {
 ; RV32IZFINXZDINX-NEXT:    fcvt.d.w a2, zero
 ; RV32IZFINXZDINX-NEXT:    fle.d a2, a2, s0
 ; RV32IZFINXZDINX-NEXT:    neg s2, a2
-; RV32IZFINXZDINX-NEXT:    call __fixunsdfdi@plt
+; RV32IZFINXZDINX-NEXT:    call __fixunsdfdi
 ; RV32IZFINXZDINX-NEXT:    lui a2, %hi(.LCPI7_0)
 ; RV32IZFINXZDINX-NEXT:    lw a3, %lo(.LCPI7_0+4)(a2)
 ; RV32IZFINXZDINX-NEXT:    lw a2, %lo(.LCPI7_0)(a2)
@@ -586,12 +586,12 @@ define i64 @test_trunc_si64(double %x) nounwind {
 ; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IFD-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32IFD-NEXT:    fsd fs0, 0(sp) # 8-byte Folded Spill
-; RV32IFD-NEXT:    call trunc@plt
+; RV32IFD-NEXT:    call trunc
 ; RV32IFD-NEXT:    lui a0, %hi(.LCPI9_0)
 ; RV32IFD-NEXT:    fld fa5, %lo(.LCPI9_0)(a0)
 ; RV32IFD-NEXT:    fmv.d fs0, fa0
 ; RV32IFD-NEXT:    fle.d s0, fa5, fa0
-; RV32IFD-NEXT:    call __fixdfdi@plt
+; RV32IFD-NEXT:    call __fixdfdi
 ; RV32IFD-NEXT:    lui a4, 524288
 ; RV32IFD-NEXT:    lui a2, 524288
 ; RV32IFD-NEXT:    beqz s0, .LBB9_2
@@ -635,7 +635,7 @@ define i64 @test_trunc_si64(double %x) nounwind {
 ; RV32IZFINXZDINX-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
 ; RV32IZFINXZDINX-NEXT:    sw s2, 20(sp) # 4-byte Folded Spill
 ; RV32IZFINXZDINX-NEXT:    sw s3, 16(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT:    call trunc@plt
+; RV32IZFINXZDINX-NEXT:    call trunc
 ; RV32IZFINXZDINX-NEXT:    sw a0, 8(sp)
 ; RV32IZFINXZDINX-NEXT:    sw a1, 12(sp)
 ; RV32IZFINXZDINX-NEXT:    lw s2, 8(sp)
@@ -644,7 +644,7 @@ define i64 @test_trunc_si64(double %x) nounwind {
 ; RV32IZFINXZDINX-NEXT:    lw a3, %lo(.LCPI9_0+4)(a2)
 ; RV32IZFINXZDINX-NEXT:    lw a2, %lo(.LCPI9_0)(a2)
 ; RV32IZFINXZDINX-NEXT:    fle.d s0, a2, s2
-; RV32IZFINXZDINX-NEXT:    call __fixdfdi@plt
+; RV32IZFINXZDINX-NEXT:    call __fixdfdi
 ; RV32IZFINXZDINX-NEXT:    lui a4, 524288
 ; RV32IZFINXZDINX-NEXT:    lui a2, 524288
 ; RV32IZFINXZDINX-NEXT:    beqz s0, .LBB9_2
@@ -733,7 +733,7 @@ define i64 @test_trunc_ui64(double %x) nounwind {
 ; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IFD-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32IFD-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    call trunc@plt
+; RV32IFD-NEXT:    call trunc
 ; RV32IFD-NEXT:    lui a0, %hi(.LCPI11_0)
 ; RV32IFD-NEXT:    fld fa5, %lo(.LCPI11_0)(a0)
 ; RV32IFD-NEXT:    flt.d a0, fa5, fa0
@@ -741,7 +741,7 @@ define i64 @test_trunc_ui64(double %x) nounwind {
 ; RV32IFD-NEXT:    fcvt.d.w fa5, zero
 ; RV32IFD-NEXT:    fle.d a0, fa5, fa0
 ; RV32IFD-NEXT:    neg s1, a0
-; RV32IFD-NEXT:    call __fixunsdfdi@plt
+; RV32IFD-NEXT:    call __fixunsdfdi
 ; RV32IFD-NEXT:    and a0, s1, a0
 ; RV32IFD-NEXT:    or a0, s0, a0
 ; RV32IFD-NEXT:    and a1, s1, a1
@@ -768,7 +768,7 @@ define i64 @test_trunc_ui64(double %x) nounwind {
 ; RV32IZFINXZDINX-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
 ; RV32IZFINXZDINX-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
 ; RV32IZFINXZDINX-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT:    call trunc@plt
+; RV32IZFINXZDINX-NEXT:    call trunc
 ; RV32IZFINXZDINX-NEXT:    sw a0, 8(sp)
 ; RV32IZFINXZDINX-NEXT:    sw a1, 12(sp)
 ; RV32IZFINXZDINX-NEXT:    lw s0, 8(sp)
@@ -776,7 +776,7 @@ define i64 @test_trunc_ui64(double %x) nounwind {
 ; RV32IZFINXZDINX-NEXT:    fcvt.d.w a2, zero
 ; RV32IZFINXZDINX-NEXT:    fle.d a2, a2, s0
 ; RV32IZFINXZDINX-NEXT:    neg s2, a2
-; RV32IZFINXZDINX-NEXT:    call __fixunsdfdi@plt
+; RV32IZFINXZDINX-NEXT:    call __fixunsdfdi
 ; RV32IZFINXZDINX-NEXT:    lui a2, %hi(.LCPI11_0)
 ; RV32IZFINXZDINX-NEXT:    lw a3, %lo(.LCPI11_0+4)(a2)
 ; RV32IZFINXZDINX-NEXT:    lw a2, %lo(.LCPI11_0)(a2)
@@ -852,12 +852,12 @@ define i64 @test_round_si64(double %x) nounwind {
 ; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IFD-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32IFD-NEXT:    fsd fs0, 0(sp) # 8-byte Folded Spill
-; RV32IFD-NEXT:    call round@plt
+; RV32IFD-NEXT:    call round
 ; RV32IFD-NEXT:    lui a0, %hi(.LCPI13_0)
 ; RV32IFD-NEXT:    fld fa5, %lo(.LCPI13_0)(a0)
 ; RV32IFD-NEXT:    fmv.d fs0, fa0
 ; RV32IFD-NEXT:    fle.d s0, fa5, fa0
-; RV32IFD-NEXT:    call __fixdfdi@plt
+; RV32IFD-NEXT:    call __fixdfdi
 ; RV32IFD-NEXT:    lui a4, 524288
 ; RV32IFD-NEXT:    lui a2, 524288
 ; RV32IFD-NEXT:    beqz s0, .LBB13_2
@@ -901,7 +901,7 @@ define i64 @test_round_si64(double %x) nounwind {
 ; RV32IZFINXZDINX-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
 ; RV32IZFINXZDINX-NEXT:    sw s2, 20(sp) # 4-byte Folded Spill
 ; RV32IZFINXZDINX-NEXT:    sw s3, 16(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT:    call round@plt
+; RV32IZFINXZDINX-NEXT:    call round
 ; RV32IZFINXZDINX-NEXT:    sw a0, 8(sp)
 ; RV32IZFINXZDINX-NEXT:    sw a1, 12(sp)
 ; RV32IZFINXZDINX-NEXT:    lw s2, 8(sp)
@@ -910,7 +910,7 @@ define i64 @test_round_si64(double %x) nounwind {
 ; RV32IZFINXZDINX-NEXT:    lw a3, %lo(.LCPI13_0+4)(a2)
 ; RV32IZFINXZDINX-NEXT:    lw a2, %lo(.LCPI13_0)(a2)
 ; RV32IZFINXZDINX-NEXT:    fle.d s0, a2, s2
-; RV32IZFINXZDINX-NEXT:    call __fixdfdi@plt
+; RV32IZFINXZDINX-NEXT:    call __fixdfdi
 ; RV32IZFINXZDINX-NEXT:    lui a4, 524288
 ; RV32IZFINXZDINX-NEXT:    lui a2, 524288
 ; RV32IZFINXZDINX-NEXT:    beqz s0, .LBB13_2
@@ -999,7 +999,7 @@ define i64 @test_round_ui64(double %x) nounwind {
 ; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IFD-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32IFD-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    call round@plt
+; RV32IFD-NEXT:    call round
 ; RV32IFD-NEXT:    lui a0, %hi(.LCPI15_0)
 ; RV32IFD-NEXT:    fld fa5, %lo(.LCPI15_0)(a0)
 ; RV32IFD-NEXT:    flt.d a0, fa5, fa0
@@ -1007,7 +1007,7 @@ define i64 @test_round_ui64(double %x) nounwind {
 ; RV32IFD-NEXT:    fcvt.d.w fa5, zero
 ; RV32IFD-NEXT:    fle.d a0, fa5, fa0
 ; RV32IFD-NEXT:    neg s1, a0
-; RV32IFD-NEXT:    call __fixunsdfdi@plt
+; RV32IFD-NEXT:    call __fixunsdfdi
 ; RV32IFD-NEXT:    and a0, s1, a0
 ; RV32IFD-NEXT:    or a0, s0, a0
 ; RV32IFD-NEXT:    and a1, s1, a1
@@ -1034,7 +1034,7 @@ define i64 @test_round_ui64(double %x) nounwind {
 ; RV32IZFINXZDINX-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
 ; RV32IZFINXZDINX-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
 ; RV32IZFINXZDINX-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT:    call round@plt
+; RV32IZFINXZDINX-NEXT:    call round
 ; RV32IZFINXZDINX-NEXT:    sw a0, 8(sp)
 ; RV32IZFINXZDINX-NEXT:    sw a1, 12(sp)
 ; RV32IZFINXZDINX-NEXT:    lw s0, 8(sp)
@@ -1042,7 +1042,7 @@ define i64 @test_round_ui64(double %x) nounwind {
 ; RV32IZFINXZDINX-NEXT:    fcvt.d.w a2, zero
 ; RV32IZFINXZDINX-NEXT:    fle.d a2, a2, s0
 ; RV32IZFINXZDINX-NEXT:    neg s2, a2
-; RV32IZFINXZDINX-NEXT:    call __fixunsdfdi@plt
+; RV32IZFINXZDINX-NEXT:    call __fixunsdfdi
 ; RV32IZFINXZDINX-NEXT:    lui a2, %hi(.LCPI15_0)
 ; RV32IZFINXZDINX-NEXT:    lw a3, %lo(.LCPI15_0+4)(a2)
 ; RV32IZFINXZDINX-NEXT:    lw a2, %lo(.LCPI15_0)(a2)
@@ -1118,12 +1118,12 @@ define i64 @test_roundeven_si64(double %x) nounwind {
 ; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IFD-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32IFD-NEXT:    fsd fs0, 0(sp) # 8-byte Folded Spill
-; RV32IFD-NEXT:    call roundeven@plt
+; RV32IFD-NEXT:    call roundeven
 ; RV32IFD-NEXT:    lui a0, %hi(.LCPI17_0)
 ; RV32IFD-NEXT:    fld fa5, %lo(.LCPI17_0)(a0)
 ; RV32IFD-NEXT:    fmv.d fs0, fa0
 ; RV32IFD-NEXT:    fle.d s0, fa5, fa0
-; RV32IFD-NEXT:    call __fixdfdi@plt
+; RV32IFD-NEXT:    call __fixdfdi
 ; RV32IFD-NEXT:    lui a4, 524288
 ; RV32IFD-NEXT:    lui a2, 524288
 ; RV32IFD-NEXT:    beqz s0, .LBB17_2
@@ -1167,7 +1167,7 @@ define i64 @test_roundeven_si64(double %x) nounwind {
 ; RV32IZFINXZDINX-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
 ; RV32IZFINXZDINX-NEXT:    sw s2, 20(sp) # 4-byte Folded Spill
 ; RV32IZFINXZDINX-NEXT:    sw s3, 16(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT:    call roundeven@plt
+; RV32IZFINXZDINX-NEXT:    call roundeven
 ; RV32IZFINXZDINX-NEXT:    sw a0, 8(sp)
 ; RV32IZFINXZDINX-NEXT:    sw a1, 12(sp)
 ; RV32IZFINXZDINX-NEXT:    lw s2, 8(sp)
@@ -1176,7 +1176,7 @@ define i64 @test_roundeven_si64(double %x) nounwind {
 ; RV32IZFINXZDINX-NEXT:    lw a3, %lo(.LCPI17_0+4)(a2)
 ; RV32IZFINXZDINX-NEXT:    lw a2, %lo(.LCPI17_0)(a2)
 ; RV32IZFINXZDINX-NEXT:    fle.d s0, a2, s2
-; RV32IZFINXZDINX-NEXT:    call __fixdfdi@plt
+; RV32IZFINXZDINX-NEXT:    call __fixdfdi
 ; RV32IZFINXZDINX-NEXT:    lui a4, 524288
 ; RV32IZFINXZDINX-NEXT:    lui a2, 524288
 ; RV32IZFINXZDINX-NEXT:    beqz s0, .LBB17_2
@@ -1265,7 +1265,7 @@ define i64 @test_roundeven_ui64(double %x) nounwind {
 ; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IFD-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32IFD-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    call roundeven@plt
+; RV32IFD-NEXT:    call roundeven
 ; RV32IFD-NEXT:    lui a0, %hi(.LCPI19_0)
 ; RV32IFD-NEXT:    fld fa5, %lo(.LCPI19_0)(a0)
 ; RV32IFD-NEXT:    flt.d a0, fa5, fa0
@@ -1273,7 +1273,7 @@ define i64 @test_roundeven_ui64(double %x) nounwind {
 ; RV32IFD-NEXT:    fcvt.d.w fa5, zero
 ; RV32IFD-NEXT:    fle.d a0, fa5, fa0
 ; RV32IFD-NEXT:    neg s1, a0
-; RV32IFD-NEXT:    call __fixunsdfdi@plt
+; RV32IFD-NEXT:    call __fixunsdfdi
 ; RV32IFD-NEXT:    and a0, s1, a0
 ; RV32IFD-NEXT:    or a0, s0, a0
 ; RV32IFD-NEXT:    and a1, s1, a1
@@ -1300,7 +1300,7 @@ define i64 @test_roundeven_ui64(double %x) nounwind {
 ; RV32IZFINXZDINX-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
 ; RV32IZFINXZDINX-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
 ; RV32IZFINXZDINX-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT:    call roundeven@plt
+; RV32IZFINXZDINX-NEXT:    call roundeven
 ; RV32IZFINXZDINX-NEXT:    sw a0, 8(sp)
 ; RV32IZFINXZDINX-NEXT:    sw a1, 12(sp)
 ; RV32IZFINXZDINX-NEXT:    lw s0, 8(sp)
@@ -1308,7 +1308,7 @@ define i64 @test_roundeven_ui64(double %x) nounwind {
 ; RV32IZFINXZDINX-NEXT:    fcvt.d.w a2, zero
 ; RV32IZFINXZDINX-NEXT:    fle.d a2, a2, s0
 ; RV32IZFINXZDINX-NEXT:    neg s2, a2
-; RV32IZFINXZDINX-NEXT:    call __fixunsdfdi@plt
+; RV32IZFINXZDINX-NEXT:    call __fixunsdfdi
 ; RV32IZFINXZDINX-NEXT:    lui a2, %hi(.LCPI19_0)
 ; RV32IZFINXZDINX-NEXT:    lw a3, %lo(.LCPI19_0+4)(a2)
 ; RV32IZFINXZDINX-NEXT:    lw a2, %lo(.LCPI19_0)(a2)
@@ -1384,12 +1384,12 @@ define i64 @test_rint_si64(double %x) nounwind {
 ; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IFD-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32IFD-NEXT:    fsd fs0, 0(sp) # 8-byte Folded Spill
-; RV32IFD-NEXT:    call rint@plt
+; RV32IFD-NEXT:    call rint
 ; RV32IFD-NEXT:    lui a0, %hi(.LCPI21_0)
 ; RV32IFD-NEXT:    fld fa5, %lo(.LCPI21_0)(a0)
 ; RV32IFD-NEXT:    fmv.d fs0, fa0
 ; RV32IFD-NEXT:    fle.d s0, fa5, fa0
-; RV32IFD-NEXT:    call __fixdfdi@plt
+; RV32IFD-NEXT:    call __fixdfdi
 ; RV32IFD-NEXT:    lui a4, 524288
 ; RV32IFD-NEXT:    lui a2, 524288
 ; RV32IFD-NEXT:    beqz s0, .LBB21_2
@@ -1433,7 +1433,7 @@ define i64 @test_rint_si64(double %x) nounwind {
 ; RV32IZFINXZDINX-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
 ; RV32IZFINXZDINX-NEXT:    sw s2, 20(sp) # 4-byte Folded Spill
 ; RV32IZFINXZDINX-NEXT:    sw s3, 16(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT:    call rint@plt
+; RV32IZFINXZDINX-NEXT:    call rint
 ; RV32IZFINXZDINX-NEXT:    sw a0, 8(sp)
 ; RV32IZFINXZDINX-NEXT:    sw a1, 12(sp)
 ; RV32IZFINXZDINX-NEXT:    lw s2, 8(sp)
@@ -1442,7 +1442,7 @@ define i64 @test_rint_si64(double %x) nounwind {
 ; RV32IZFINXZDINX-NEXT:    lw a3, %lo(.LCPI21_0+4)(a2)
 ; RV32IZFINXZDINX-NEXT:    lw a2, %lo(.LCPI21_0)(a2)
 ; RV32IZFINXZDINX-NEXT:    fle.d s0, a2, s2
-; RV32IZFINXZDINX-NEXT:    call __fixdfdi@plt
+; RV32IZFINXZDINX-NEXT:    call __fixdfdi
 ; RV32IZFINXZDINX-NEXT:    lui a4, 524288
 ; RV32IZFINXZDINX-NEXT:    lui a2, 524288
 ; RV32IZFINXZDINX-NEXT:    beqz s0, .LBB21_2
@@ -1531,7 +1531,7 @@ define i64 @test_rint_ui64(double %x) nounwind {
 ; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IFD-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32IFD-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    call rint@plt
+; RV32IFD-NEXT:    call rint
 ; RV32IFD-NEXT:    lui a0, %hi(.LCPI23_0)
 ; RV32IFD-NEXT:    fld fa5, %lo(.LCPI23_0)(a0)
 ; RV32IFD-NEXT:    flt.d a0, fa5, fa0
@@ -1539,7 +1539,7 @@ define i64 @test_rint_ui64(double %x) nounwind {
 ; RV32IFD-NEXT:    fcvt.d.w fa5, zero
 ; RV32IFD-NEXT:    fle.d a0, fa5, fa0
 ; RV32IFD-NEXT:    neg s1, a0
-; RV32IFD-NEXT:    call __fixunsdfdi@plt
+; RV32IFD-NEXT:    call __fixunsdfdi
 ; RV32IFD-NEXT:    and a0, s1, a0
 ; RV32IFD-NEXT:    or a0, s0, a0
 ; RV32IFD-NEXT:    and a1, s1, a1
@@ -1566,7 +1566,7 @@ define i64 @test_rint_ui64(double %x) nounwind {
 ; RV32IZFINXZDINX-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
 ; RV32IZFINXZDINX-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
 ; RV32IZFINXZDINX-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT:    call rint@plt
+; RV32IZFINXZDINX-NEXT:    call rint
 ; RV32IZFINXZDINX-NEXT:    sw a0, 8(sp)
 ; RV32IZFINXZDINX-NEXT:    sw a1, 12(sp)
 ; RV32IZFINXZDINX-NEXT:    lw s0, 8(sp)
@@ -1574,7 +1574,7 @@ define i64 @test_rint_ui64(double %x) nounwind {
 ; RV32IZFINXZDINX-NEXT:    fcvt.d.w a2, zero
 ; RV32IZFINXZDINX-NEXT:    fle.d a2, a2, s0
 ; RV32IZFINXZDINX-NEXT:    neg s2, a2
-; RV32IZFINXZDINX-NEXT:    call __fixunsdfdi@plt
+; RV32IZFINXZDINX-NEXT:    call __fixunsdfdi
 ; RV32IZFINXZDINX-NEXT:    lui a2, %hi(.LCPI23_0)
 ; RV32IZFINXZDINX-NEXT:    lw a3, %lo(.LCPI23_0+4)(a2)
 ; RV32IZFINXZDINX-NEXT:    lw a2, %lo(.LCPI23_0)(a2)
diff --git a/llvm/test/CodeGen/RISCV/double-round-conv.ll b/llvm/test/CodeGen/RISCV/double-round-conv.ll
index 6327afd881a53e..094a4105de71c3 100644
--- a/llvm/test/CodeGen/RISCV/double-round-conv.ll
+++ b/llvm/test/CodeGen/RISCV/double-round-conv.ll
@@ -106,8 +106,8 @@ define i64 @test_floor_si64(double %x) {
 ; RV32IFD-NEXT:    .cfi_def_cfa_offset 16
 ; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IFD-NEXT:    .cfi_offset ra, -4
-; RV32IFD-NEXT:    call floor@plt
-; RV32IFD-NEXT:    call __fixdfdi@plt
+; RV32IFD-NEXT:    call floor
+; RV32IFD-NEXT:    call __fixdfdi
 ; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
@@ -123,8 +123,8 @@ define i64 @test_floor_si64(double %x) {
 ; RV32IZFINXZDINX-NEXT:    .cfi_def_cfa_offset 16
 ; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFINXZDINX-NEXT:    .cfi_offset ra, -4
-; RV32IZFINXZDINX-NEXT:    call floor@plt
-; RV32IZFINXZDINX-NEXT:    call __fixdfdi@plt
+; RV32IZFINXZDINX-NEXT:    call floor
+; RV32IZFINXZDINX-NEXT:    call __fixdfdi
 ; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINXZDINX-NEXT:    ret
@@ -236,8 +236,8 @@ define i64 @test_floor_ui64(double %x) {
 ; RV32IFD-NEXT:    .cfi_def_cfa_offset 16
 ; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IFD-NEXT:    .cfi_offset ra, -4
-; RV32IFD-NEXT:    call floor@plt
-; RV32IFD-NEXT:    call __fixunsdfdi@plt
+; RV32IFD-NEXT:    call floor
+; RV32IFD-NEXT:    call __fixunsdfdi
 ; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
@@ -253,8 +253,8 @@ define i64 @test_floor_ui64(double %x) {
 ; RV32IZFINXZDINX-NEXT:    .cfi_def_cfa_offset 16
 ; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFINXZDINX-NEXT:    .cfi_offset ra, -4
-; RV32IZFINXZDINX-NEXT:    call floor@plt
-; RV32IZFINXZDINX-NEXT:    call __fixunsdfdi@plt
+; RV32IZFINXZDINX-NEXT:    call floor
+; RV32IZFINXZDINX-NEXT:    call __fixunsdfdi
 ; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINXZDINX-NEXT:    ret
@@ -366,8 +366,8 @@ define i64 @test_ceil_si64(double %x) {
 ; RV32IFD-NEXT:    .cfi_def_cfa_offset 16
 ; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IFD-NEXT:    .cfi_offset ra, -4
-; RV32IFD-NEXT:    call ceil@plt
-; RV32IFD-NEXT:    call __fixdfdi@plt
+; RV32IFD-NEXT:    call ceil
+; RV32IFD-NEXT:    call __fixdfdi
 ; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
@@ -383,8 +383,8 @@ define i64 @test_ceil_si64(double %x) {
 ; RV32IZFINXZDINX-NEXT:    .cfi_def_cfa_offset 16
 ; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFINXZDINX-NEXT:    .cfi_offset ra, -4
-; RV32IZFINXZDINX-NEXT:    call ceil@plt
-; RV32IZFINXZDINX-NEXT:    call __fixdfdi@plt
+; RV32IZFINXZDINX-NEXT:    call ceil
+; RV32IZFINXZDINX-NEXT:    call __fixdfdi
 ; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINXZDINX-NEXT:    ret
@@ -496,8 +496,8 @@ define i64 @test_ceil_ui64(double %x) {
 ; RV32IFD-NEXT:    .cfi_def_cfa_offset 16
 ; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IFD-NEXT:    .cfi_offset ra, -4
-; RV32IFD-NEXT:    call ceil@plt
-; RV32IFD-NEXT:    call __fixunsdfdi@plt
+; RV32IFD-NEXT:    call ceil
+; RV32IFD-NEXT:    call __fixunsdfdi
 ; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
@@ -513,8 +513,8 @@ define i64 @test_ceil_ui64(double %x) {
 ; RV32IZFINXZDINX-NEXT:    .cfi_def_cfa_offset 16
 ; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFINXZDINX-NEXT:    .cfi_offset ra, -4
-; RV32IZFINXZDINX-NEXT:    call ceil@plt
-; RV32IZFINXZDINX-NEXT:    call __fixunsdfdi@plt
+; RV32IZFINXZDINX-NEXT:    call ceil
+; RV32IZFINXZDINX-NEXT:    call __fixunsdfdi
 ; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINXZDINX-NEXT:    ret
@@ -626,8 +626,8 @@ define i64 @test_trunc_si64(double %x) {
 ; RV32IFD-NEXT:    .cfi_def_cfa_offset 16
 ; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IFD-NEXT:    .cfi_offset ra, -4
-; RV32IFD-NEXT:    call trunc@plt
-; RV32IFD-NEXT:    call __fixdfdi@plt
+; RV32IFD-NEXT:    call trunc
+; RV32IFD-NEXT:    call __fixdfdi
 ; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
@@ -643,8 +643,8 @@ define i64 @test_trunc_si64(double %x) {
 ; RV32IZFINXZDINX-NEXT:    .cfi_def_cfa_offset 16
 ; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFINXZDINX-NEXT:    .cfi_offset ra, -4
-; RV32IZFINXZDINX-NEXT:    call trunc@plt
-; RV32IZFINXZDINX-NEXT:    call __fixdfdi@plt
+; RV32IZFINXZDINX-NEXT:    call trunc
+; RV32IZFINXZDINX-NEXT:    call __fixdfdi
 ; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINXZDINX-NEXT:    ret
@@ -756,8 +756,8 @@ define i64 @test_trunc_ui64(double %x) {
 ; RV32IFD-NEXT:    .cfi_def_cfa_offset 16
 ; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IFD-NEXT:    .cfi_offset ra, -4
-; RV32IFD-NEXT:    call trunc@plt
-; RV32IFD-NEXT:    call __fixunsdfdi@plt
+; RV32IFD-NEXT:    call trunc
+; RV32IFD-NEXT:    call __fixunsdfdi
 ; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
@@ -773,8 +773,8 @@ define i64 @test_trunc_ui64(double %x) {
 ; RV32IZFINXZDINX-NEXT:    .cfi_def_cfa_offset 16
 ; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFINXZDINX-NEXT:    .cfi_offset ra, -4
-; RV32IZFINXZDINX-NEXT:    call trunc@plt
-; RV32IZFINXZDINX-NEXT:    call __fixunsdfdi@plt
+; RV32IZFINXZDINX-NEXT:    call trunc
+; RV32IZFINXZDINX-NEXT:    call __fixunsdfdi
 ; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINXZDINX-NEXT:    ret
@@ -886,8 +886,8 @@ define i64 @test_round_si64(double %x) {
 ; RV32IFD-NEXT:    .cfi_def_cfa_offset 16
 ; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IFD-NEXT:    .cfi_offset ra, -4
-; RV32IFD-NEXT:    call round@plt
-; RV32IFD-NEXT:    call __fixdfdi@plt
+; RV32IFD-NEXT:    call round
+; RV32IFD-NEXT:    call __fixdfdi
 ; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
@@ -903,8 +903,8 @@ define i64 @test_round_si64(double %x) {
 ; RV32IZFINXZDINX-NEXT:    .cfi_def_cfa_offset 16
 ; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFINXZDINX-NEXT:    .cfi_offset ra, -4
-; RV32IZFINXZDINX-NEXT:    call round@plt
-; RV32IZFINXZDINX-NEXT:    call __fixdfdi@plt
+; RV32IZFINXZDINX-NEXT:    call round
+; RV32IZFINXZDINX-NEXT:    call __fixdfdi
 ; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINXZDINX-NEXT:    ret
@@ -1016,8 +1016,8 @@ define i64 @test_round_ui64(double %x) {
 ; RV32IFD-NEXT:    .cfi_def_cfa_offset 16
 ; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IFD-NEXT:    .cfi_offset ra, -4
-; RV32IFD-NEXT:    call round@plt
-; RV32IFD-NEXT:    call __fixunsdfdi@plt
+; RV32IFD-NEXT:    call round
+; RV32IFD-NEXT:    call __fixunsdfdi
 ; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
@@ -1033,8 +1033,8 @@ define i64 @test_round_ui64(double %x) {
 ; RV32IZFINXZDINX-NEXT:    .cfi_def_cfa_offset 16
 ; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFINXZDINX-NEXT:    .cfi_offset ra, -4
-; RV32IZFINXZDINX-NEXT:    call round@plt
-; RV32IZFINXZDINX-NEXT:    call __fixunsdfdi@plt
+; RV32IZFINXZDINX-NEXT:    call round
+; RV32IZFINXZDINX-NEXT:    call __fixunsdfdi
 ; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINXZDINX-NEXT:    ret
@@ -1146,8 +1146,8 @@ define i64 @test_roundeven_si64(double %x) {
 ; RV32IFD-NEXT:    .cfi_def_cfa_offset 16
 ; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IFD-NEXT:    .cfi_offset ra, -4
-; RV32IFD-NEXT:    call roundeven@plt
-; RV32IFD-NEXT:    call __fixdfdi@plt
+; RV32IFD-NEXT:    call roundeven
+; RV32IFD-NEXT:    call __fixdfdi
 ; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
@@ -1163,8 +1163,8 @@ define i64 @test_roundeven_si64(double %x) {
 ; RV32IZFINXZDINX-NEXT:    .cfi_def_cfa_offset 16
 ; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFINXZDINX-NEXT:    .cfi_offset ra, -4
-; RV32IZFINXZDINX-NEXT:    call roundeven@plt
-; RV32IZFINXZDINX-NEXT:    call __fixdfdi@plt
+; RV32IZFINXZDINX-NEXT:    call roundeven
+; RV32IZFINXZDINX-NEXT:    call __fixdfdi
 ; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINXZDINX-NEXT:    ret
@@ -1276,8 +1276,8 @@ define i64 @test_roundeven_ui64(double %x) {
 ; RV32IFD-NEXT:    .cfi_def_cfa_offset 16
 ; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IFD-NEXT:    .cfi_offset ra, -4
-; RV32IFD-NEXT:    call roundeven@plt
-; RV32IFD-NEXT:    call __fixunsdfdi@plt
+; RV32IFD-NEXT:    call roundeven
+; RV32IFD-NEXT:    call __fixunsdfdi
 ; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
@@ -1293,8 +1293,8 @@ define i64 @test_roundeven_ui64(double %x) {
 ; RV32IZFINXZDINX-NEXT:    .cfi_def_cfa_offset 16
 ; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFINXZDINX-NEXT:    .cfi_offset ra, -4
-; RV32IZFINXZDINX-NEXT:    call roundeven@plt
-; RV32IZFINXZDINX-NEXT:    call __fixunsdfdi@plt
+; RV32IZFINXZDINX-NEXT:    call roundeven
+; RV32IZFINXZDINX-NEXT:    call __fixunsdfdi
 ; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINXZDINX-NEXT:    ret
@@ -1311,7 +1311,7 @@ define i64 @test_roundeven_ui64(double %x) {
 define double @test_floor_double(double %x) {
 ; RV32IFD-LABEL: test_floor_double:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    tail floor@plt
+; RV32IFD-NEXT:    tail floor
 ;
 ; RV64IFD-LABEL: test_floor_double:
 ; RV64IFD:       # %bb.0:
@@ -1333,7 +1333,7 @@ define double @test_floor_double(double %x) {
 ; RV32IZFINXZDINX-NEXT:    .cfi_def_cfa_offset 16
 ; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFINXZDINX-NEXT:    .cfi_offset ra, -4
-; RV32IZFINXZDINX-NEXT:    call floor@plt
+; RV32IZFINXZDINX-NEXT:    call floor
 ; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINXZDINX-NEXT:    ret
@@ -1358,7 +1358,7 @@ define double @test_floor_double(double %x) {
 define double @test_ceil_double(double %x) {
 ; RV32IFD-LABEL: test_ceil_double:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    tail ceil@plt
+; RV32IFD-NEXT:    tail ceil
 ;
 ; RV64IFD-LABEL: test_ceil_double:
 ; RV64IFD:       # %bb.0:
@@ -1380,7 +1380,7 @@ define double @test_ceil_double(double %x) {
 ; RV32IZFINXZDINX-NEXT:    .cfi_def_cfa_offset 16
 ; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFINXZDINX-NEXT:    .cfi_offset ra, -4
-; RV32IZFINXZDINX-NEXT:    call ceil@plt
+; RV32IZFINXZDINX-NEXT:    call ceil
 ; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINXZDINX-NEXT:    ret
@@ -1405,7 +1405,7 @@ define double @test_ceil_double(double %x) {
 define double @test_trunc_double(double %x) {
 ; RV32IFD-LABEL: test_trunc_double:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    tail trunc@plt
+; RV32IFD-NEXT:    tail trunc
 ;
 ; RV64IFD-LABEL: test_trunc_double:
 ; RV64IFD:       # %bb.0:
@@ -1427,7 +1427,7 @@ define double @test_trunc_double(double %x) {
 ; RV32IZFINXZDINX-NEXT:    .cfi_def_cfa_offset 16
 ; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFINXZDINX-NEXT:    .cfi_offset ra, -4
-; RV32IZFINXZDINX-NEXT:    call trunc@plt
+; RV32IZFINXZDINX-NEXT:    call trunc
 ; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINXZDINX-NEXT:    ret
@@ -1452,7 +1452,7 @@ define double @test_trunc_double(double %x) {
 define double @test_round_double(double %x) {
 ; RV32IFD-LABEL: test_round_double:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    tail round@plt
+; RV32IFD-NEXT:    tail round
 ;
 ; RV64IFD-LABEL: test_round_double:
 ; RV64IFD:       # %bb.0:
@@ -1474,7 +1474,7 @@ define double @test_round_double(double %x) {
 ; RV32IZFINXZDINX-NEXT:    .cfi_def_cfa_offset 16
 ; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFINXZDINX-NEXT:    .cfi_offset ra, -4
-; RV32IZFINXZDINX-NEXT:    call round@plt
+; RV32IZFINXZDINX-NEXT:    call round
 ; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINXZDINX-NEXT:    ret
@@ -1499,7 +1499,7 @@ define double @test_round_double(double %x) {
 define double @test_roundeven_double(double %x) {
 ; RV32IFD-LABEL: test_roundeven_double:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    tail roundeven@plt
+; RV32IFD-NEXT:    tail roundeven
 ;
 ; RV64IFD-LABEL: test_roundeven_double:
 ; RV64IFD:       # %bb.0:
@@ -1521,7 +1521,7 @@ define double @test_roundeven_double(double %x) {
 ; RV32IZFINXZDINX-NEXT:    .cfi_def_cfa_offset 16
 ; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFINXZDINX-NEXT:    .cfi_offset ra, -4
-; RV32IZFINXZDINX-NEXT:    call roundeven@plt
+; RV32IZFINXZDINX-NEXT:    call roundeven
 ; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINXZDINX-NEXT:    ret
diff --git a/llvm/test/CodeGen/RISCV/double-stack-spill-restore.ll b/llvm/test/CodeGen/RISCV/double-stack-spill-restore.ll
index 2fec986f0ba568..aa88a365431a57 100644
--- a/llvm/test/CodeGen/RISCV/double-stack-spill-restore.ll
+++ b/llvm/test/CodeGen/RISCV/double-stack-spill-restore.ll
@@ -23,7 +23,7 @@ define double @func(double %d, i32 %n) nounwind {
 ; RV32IFD-NEXT:    lw a0, 16(sp)
 ; RV32IFD-NEXT:    lw a1, 20(sp)
 ; RV32IFD-NEXT:    fsd fa5, 8(sp) # 8-byte Folded Spill
-; RV32IFD-NEXT:    call func@plt
+; RV32IFD-NEXT:    call func
 ; RV32IFD-NEXT:    sw a0, 16(sp)
 ; RV32IFD-NEXT:    sw a1, 20(sp)
 ; RV32IFD-NEXT:    fld fa5, 16(sp)
@@ -48,7 +48,7 @@ define double @func(double %d, i32 %n) nounwind {
 ; RV64IFD-NEXT:    addiw a1, a1, -1
 ; RV64IFD-NEXT:    fmv.x.d a0, fa5
 ; RV64IFD-NEXT:    fsd fa5, 0(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT:    call func@plt
+; RV64IFD-NEXT:    call func
 ; RV64IFD-NEXT:    fmv.d.x fa5, a0
 ; RV64IFD-NEXT:    fld fa4, 0(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    fadd.d fa5, fa5, fa4
@@ -77,7 +77,7 @@ define double @func(double %d, i32 %n) nounwind {
 ; RV32IZFINXZDINX-NEXT:    sw s1, 12(sp)
 ; RV32IZFINXZDINX-NEXT:    lw a0, 8(sp)
 ; RV32IZFINXZDINX-NEXT:    lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT:    call func@plt
+; RV32IZFINXZDINX-NEXT:    call func
 ; RV32IZFINXZDINX-NEXT:    sw a0, 8(sp)
 ; RV32IZFINXZDINX-NEXT:    sw a1, 12(sp)
 ; RV32IZFINXZDINX-NEXT:    lw a0, 8(sp)
@@ -108,7 +108,7 @@ define double @func(double %d, i32 %n) nounwind {
 ; RV64IZFINXZDINX-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
 ; RV64IZFINXZDINX-NEXT:    addiw a1, a1, -1
 ; RV64IZFINXZDINX-NEXT:    mv s0, a0
-; RV64IZFINXZDINX-NEXT:    call func@plt
+; RV64IZFINXZDINX-NEXT:    call func
 ; RV64IZFINXZDINX-NEXT:    fadd.d a0, a0, s0
 ; RV64IZFINXZDINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFINXZDINX-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/eh-dwarf-cfa.ll b/llvm/test/CodeGen/RISCV/eh-dwarf-cfa.ll
index 12606aad215cd0..c4d932acbcc8b5 100644
--- a/llvm/test/CodeGen/RISCV/eh-dwarf-cfa.ll
+++ b/llvm/test/CodeGen/RISCV/eh-dwarf-cfa.ll
@@ -10,7 +10,7 @@ define void @dwarf() {
 ; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    .cfi_offset ra, -4
 ; RV32-NEXT:    addi a0, sp, 16
-; RV32-NEXT:    call foo@plt
+; RV32-NEXT:    call foo
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
@@ -22,7 +22,7 @@ define void @dwarf() {
 ; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-NEXT:    .cfi_offset ra, -8
 ; RV64-NEXT:    addi a0, sp, 16
-; RV64-NEXT:    call foo@plt
+; RV64-NEXT:    call foo
 ; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    addi sp, sp, 16
 ; RV64-NEXT:    ret
diff --git a/llvm/test/CodeGen/RISCV/emutls.ll b/llvm/test/CodeGen/RISCV/emutls.ll
index 9ce985ba424269..4f6e9935b9aade 100644
--- a/llvm/test/CodeGen/RISCV/emutls.ll
+++ b/llvm/test/CodeGen/RISCV/emutls.ll
@@ -16,7 +16,7 @@ define ptr @get_external_x() nounwind {
 ; RV32-NEXT:  .Lpcrel_hi0:
 ; RV32-NEXT:    auipc a0, %got_pcrel_hi(__emutls_v.external_x)
 ; RV32-NEXT:    lw a0, %pcrel_lo(.Lpcrel_hi0)(a0)
-; RV32-NEXT:    call __emutls_get_address@plt
+; RV32-NEXT:    call __emutls_get_address
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
@@ -28,7 +28,7 @@ define ptr @get_external_x() nounwind {
 ; RV64-NEXT:  .Lpcrel_hi0:
 ; RV64-NEXT:    auipc a0, %got_pcrel_hi(__emutls_v.external_x)
 ; RV64-NEXT:    ld a0, %pcrel_lo(.Lpcrel_hi0)(a0)
-; RV64-NEXT:    call __emutls_get_address@plt
+; RV64-NEXT:    call __emutls_get_address
 ; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    addi sp, sp, 16
 ; RV64-NEXT:    ret
@@ -44,7 +44,7 @@ define ptr @get_y() nounwind {
 ; RV32-NEXT:  .Lpcrel_hi1:
 ; RV32-NEXT:    auipc a0, %got_pcrel_hi(__emutls_v.y)
 ; RV32-NEXT:    lw a0, %pcrel_lo(.Lpcrel_hi1)(a0)
-; RV32-NEXT:    call __emutls_get_address@plt
+; RV32-NEXT:    call __emutls_get_address
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
@@ -56,7 +56,7 @@ define ptr @get_y() nounwind {
 ; RV64-NEXT:  .Lpcrel_hi1:
 ; RV64-NEXT:    auipc a0, %got_pcrel_hi(__emutls_v.y)
 ; RV64-NEXT:    ld a0, %pcrel_lo(.Lpcrel_hi1)(a0)
-; RV64-NEXT:    call __emutls_get_address@plt
+; RV64-NEXT:    call __emutls_get_address
 ; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    addi sp, sp, 16
 ; RV64-NEXT:    ret
@@ -72,7 +72,7 @@ define ptr @get_internal_z() nounwind {
 ; RV32-NEXT:  .Lpcrel_hi2:
 ; RV32-NEXT:    auipc a0, %pcrel_hi(__emutls_v.internal_z)
 ; RV32-NEXT:    addi a0, a0, %pcrel_lo(.Lpcrel_hi2)
-; RV32-NEXT:    call __emutls_get_address@plt
+; RV32-NEXT:    call __emutls_get_address
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
@@ -84,7 +84,7 @@ define ptr @get_internal_z() nounwind {
 ; RV64-NEXT:  .Lpcrel_hi2:
 ; RV64-NEXT:    auipc a0, %pcrel_hi(__emutls_v.internal_z)
 ; RV64-NEXT:    addi a0, a0, %pcrel_lo(.Lpcrel_hi2)
-; RV64-NEXT:    call __emutls_get_address@plt
+; RV64-NEXT:    call __emutls_get_address
 ; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    addi sp, sp, 16
 ; RV64-NEXT:    ret
diff --git a/llvm/test/CodeGen/RISCV/exception-pointer-register.ll b/llvm/test/CodeGen/RISCV/exception-pointer-register.ll
index 6c98525d97659c..067690333fdba7 100644
--- a/llvm/test/CodeGen/RISCV/exception-pointer-register.ll
+++ b/llvm/test/CodeGen/RISCV/exception-pointer-register.ll
@@ -28,13 +28,13 @@ define void @caller(ptr %p) personality ptr @__gxx_personality_v0 {
 ; RV32I-NEXT:  # %bb.1: # %bb2
 ; RV32I-NEXT:  .Ltmp0:
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call bar@plt
+; RV32I-NEXT:    call bar
 ; RV32I-NEXT:  .Ltmp1:
 ; RV32I-NEXT:    j .LBB0_3
 ; RV32I-NEXT:  .LBB0_2: # %bb1
 ; RV32I-NEXT:  .Ltmp2:
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call foo@plt
+; RV32I-NEXT:    call foo
 ; RV32I-NEXT:  .Ltmp3:
 ; RV32I-NEXT:  .LBB0_3: # %end2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -48,7 +48,7 @@ define void @caller(ptr %p) personality ptr @__gxx_personality_v0 {
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call callee
 ; RV32I-NEXT:    mv a0, s1
-; RV32I-NEXT:    call _Unwind_Resume@plt
+; RV32I-NEXT:    call _Unwind_Resume
 ;
 ; RV64I-LABEL: caller:
 ; RV64I:       # %bb.0: # %entry
@@ -65,13 +65,13 @@ define void @caller(ptr %p) personality ptr @__gxx_personality_v0 {
 ; RV64I-NEXT:  # %bb.1: # %bb2
 ; RV64I-NEXT:  .Ltmp0:
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call bar@plt
+; RV64I-NEXT:    call bar
 ; RV64I-NEXT:  .Ltmp1:
 ; RV64I-NEXT:    j .LBB0_3
 ; RV64I-NEXT:  .LBB0_2: # %bb1
 ; RV64I-NEXT:  .Ltmp2:
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call foo@plt
+; RV64I-NEXT:    call foo
 ; RV64I-NEXT:  .Ltmp3:
 ; RV64I-NEXT:  .LBB0_3: # %end2
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
@@ -85,7 +85,7 @@ define void @caller(ptr %p) personality ptr @__gxx_personality_v0 {
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call callee
 ; RV64I-NEXT:    mv a0, s1
-; RV64I-NEXT:    call _Unwind_Resume@plt
+; RV64I-NEXT:    call _Unwind_Resume
 entry:
   %0 = icmp eq ptr %p, null
   br i1 %0, label %bb1, label %bb2
diff --git a/llvm/test/CodeGen/RISCV/fastcc-float.ll b/llvm/test/CodeGen/RISCV/fastcc-float.ll
index c5daa612c671c7..488c97d5a4506c 100644
--- a/llvm/test/CodeGen/RISCV/fastcc-float.ll
+++ b/llvm/test/CodeGen/RISCV/fastcc-float.ll
@@ -62,7 +62,7 @@ define float @caller(<32 x float> %A) nounwind {
 ; CHECK-NEXT:    fsw fs2, 8(sp)
 ; CHECK-NEXT:    fsw fs1, 4(sp)
 ; CHECK-NEXT:    fsw fs0, 0(sp)
-; CHECK-NEXT:    call callee@plt
+; CHECK-NEXT:    call callee
 ; CHECK-NEXT:    lw ra, 60(sp) # 4-byte Folded Reload
 ; CHECK-NEXT:    addi sp, sp, 64
 ; CHECK-NEXT:    ret
diff --git a/llvm/test/CodeGen/RISCV/fastcc-int.ll b/llvm/test/CodeGen/RISCV/fastcc-int.ll
index 0df0ce183c016c..e4c41a1aa890f4 100644
--- a/llvm/test/CodeGen/RISCV/fastcc-int.ll
+++ b/llvm/test/CodeGen/RISCV/fastcc-int.ll
@@ -44,7 +44,7 @@ define i32 @caller(<16 x i32> %A) nounwind {
 ; RV32-NEXT:    sw s0, 4(sp)
 ; RV32-NEXT:    sw t1, 0(sp)
 ; RV32-NEXT:    mv a0, t0
-; RV32-NEXT:    call callee@plt
+; RV32-NEXT:    call callee
 ; RV32-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    addi sp, sp, 32
@@ -75,7 +75,7 @@ define i32 @caller(<16 x i32> %A) nounwind {
 ; RV64-NEXT:    sd s0, 8(sp)
 ; RV64-NEXT:    sd t1, 0(sp)
 ; RV64-NEXT:    mv a0, t0
-; RV64-NEXT:    call callee@plt
+; RV64-NEXT:    call callee
 ; RV64-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    addi sp, sp, 48
diff --git a/llvm/test/CodeGen/RISCV/fastcc-without-f-reg.ll b/llvm/test/CodeGen/RISCV/fastcc-without-f-reg.ll
index e667325db3aa78..fb0b34cf796be0 100644
--- a/llvm/test/CodeGen/RISCV/fastcc-without-f-reg.ll
+++ b/llvm/test/CodeGen/RISCV/fastcc-without-f-reg.ll
@@ -321,7 +321,7 @@ define half @caller_half_32(<32 x half> %A) nounwind {
 ; ZHINX32-NEXT:    lw t3, 52(sp) # 4-byte Folded Reload
 ; ZHINX32-NEXT:    lw t4, 48(sp) # 4-byte Folded Reload
 ; ZHINX32-NEXT:    lw t5, 44(sp) # 4-byte Folded Reload
-; ZHINX32-NEXT:    call callee_half_32@plt
+; ZHINX32-NEXT:    call callee_half_32
 ; ZHINX32-NEXT:    lw ra, 108(sp) # 4-byte Folded Reload
 ; ZHINX32-NEXT:    lw s0, 104(sp) # 4-byte Folded Reload
 ; ZHINX32-NEXT:    lw s1, 100(sp) # 4-byte Folded Reload
@@ -405,7 +405,7 @@ define half @caller_half_32(<32 x half> %A) nounwind {
 ; ZHINX64-NEXT:    ld t3, 56(sp) # 8-byte Folded Reload
 ; ZHINX64-NEXT:    ld t4, 48(sp) # 8-byte Folded Reload
 ; ZHINX64-NEXT:    ld t5, 40(sp) # 8-byte Folded Reload
-; ZHINX64-NEXT:    call callee_half_32@plt
+; ZHINX64-NEXT:    call callee_half_32
 ; ZHINX64-NEXT:    ld ra, 168(sp) # 8-byte Folded Reload
 ; ZHINX64-NEXT:    ld s0, 160(sp) # 8-byte Folded Reload
 ; ZHINX64-NEXT:    ld s1, 152(sp) # 8-byte Folded Reload
@@ -498,7 +498,7 @@ define half @caller_half_32(<32 x half> %A) nounwind {
 ; ZFINX32-NEXT:    lw a2, 84(sp) # 4-byte Folded Reload
 ; ZFINX32-NEXT:    lw a3, 80(sp) # 4-byte Folded Reload
 ; ZFINX32-NEXT:    lw a4, 76(sp) # 4-byte Folded Reload
-; ZFINX32-NEXT:    call callee_half_32@plt
+; ZFINX32-NEXT:    call callee_half_32
 ; ZFINX32-NEXT:    lui a1, 1048560
 ; ZFINX32-NEXT:    or a0, a0, a1
 ; ZFINX32-NEXT:    lw ra, 140(sp) # 4-byte Folded Reload
@@ -593,7 +593,7 @@ define half @caller_half_32(<32 x half> %A) nounwind {
 ; ZFINX64-NEXT:    ld a2, 168(sp) # 8-byte Folded Reload
 ; ZFINX64-NEXT:    ld a3, 160(sp) # 8-byte Folded Reload
 ; ZFINX64-NEXT:    ld a4, 152(sp) # 8-byte Folded Reload
-; ZFINX64-NEXT:    call callee_half_32@plt
+; ZFINX64-NEXT:    call callee_half_32
 ; ZFINX64-NEXT:    lui a1, 1048560
 ; ZFINX64-NEXT:    or a0, a0, a1
 ; ZFINX64-NEXT:    ld ra, 280(sp) # 8-byte Folded Reload
@@ -688,7 +688,7 @@ define half @caller_half_32(<32 x half> %A) nounwind {
 ; ZDINX32-NEXT:    lw a2, 84(sp) # 4-byte Folded Reload
 ; ZDINX32-NEXT:    lw a3, 80(sp) # 4-byte Folded Reload
 ; ZDINX32-NEXT:    lw a4, 76(sp) # 4-byte Folded Reload
-; ZDINX32-NEXT:    call callee_half_32@plt
+; ZDINX32-NEXT:    call callee_half_32
 ; ZDINX32-NEXT:    lui a1, 1048560
 ; ZDINX32-NEXT:    or a0, a0, a1
 ; ZDINX32-NEXT:    lw ra, 140(sp) # 4-byte Folded Reload
@@ -783,7 +783,7 @@ define half @caller_half_32(<32 x half> %A) nounwind {
 ; ZDINX64-NEXT:    ld a2, 168(sp) # 8-byte Folded Reload
 ; ZDINX64-NEXT:    ld a3, 160(sp) # 8-byte Folded Reload
 ; ZDINX64-NEXT:    ld a4, 152(sp) # 8-byte Folded Reload
-; ZDINX64-NEXT:    call callee_half_32@plt
+; ZDINX64-NEXT:    call callee_half_32
 ; ZDINX64-NEXT:    lui a1, 1048560
 ; ZDINX64-NEXT:    or a0, a0, a1
 ; ZDINX64-NEXT:    ld ra, 280(sp) # 8-byte Folded Reload
@@ -901,7 +901,7 @@ define float @caller_float_32(<32 x float> %A) nounwind {
 ; ZHINX32-NEXT:    lw t3, 84(sp) # 4-byte Folded Reload
 ; ZHINX32-NEXT:    lw t4, 80(sp) # 4-byte Folded Reload
 ; ZHINX32-NEXT:    lw t5, 76(sp) # 4-byte Folded Reload
-; ZHINX32-NEXT:    call callee_float_32@plt
+; ZHINX32-NEXT:    call callee_float_32
 ; ZHINX32-NEXT:    lw ra, 140(sp) # 4-byte Folded Reload
 ; ZHINX32-NEXT:    lw s0, 136(sp) # 4-byte Folded Reload
 ; ZHINX32-NEXT:    lw s1, 132(sp) # 4-byte Folded Reload
@@ -985,7 +985,7 @@ define float @caller_float_32(<32 x float> %A) nounwind {
 ; ZHINX64-NEXT:    ld t3, 104(sp) # 8-byte Folded Reload
 ; ZHINX64-NEXT:    ld t4, 96(sp) # 8-byte Folded Reload
 ; ZHINX64-NEXT:    ld t5, 88(sp) # 8-byte Folded Reload
-; ZHINX64-NEXT:    call callee_float_32@plt
+; ZHINX64-NEXT:    call callee_float_32
 ; ZHINX64-NEXT:    ld ra, 216(sp) # 8-byte Folded Reload
 ; ZHINX64-NEXT:    ld s0, 208(sp) # 8-byte Folded Reload
 ; ZHINX64-NEXT:    ld s1, 200(sp) # 8-byte Folded Reload
@@ -1069,7 +1069,7 @@ define float @caller_float_32(<32 x float> %A) nounwind {
 ; ZFINX32-NEXT:    lw t3, 84(sp) # 4-byte Folded Reload
 ; ZFINX32-NEXT:    lw t4, 80(sp) # 4-byte Folded Reload
 ; ZFINX32-NEXT:    lw t5, 76(sp) # 4-byte Folded Reload
-; ZFINX32-NEXT:    call callee_float_32@plt
+; ZFINX32-NEXT:    call callee_float_32
 ; ZFINX32-NEXT:    lw ra, 140(sp) # 4-byte Folded Reload
 ; ZFINX32-NEXT:    lw s0, 136(sp) # 4-byte Folded Reload
 ; ZFINX32-NEXT:    lw s1, 132(sp) # 4-byte Folded Reload
@@ -1153,7 +1153,7 @@ define float @caller_float_32(<32 x float> %A) nounwind {
 ; ZFINX64-NEXT:    ld t3, 104(sp) # 8-byte Folded Reload
 ; ZFINX64-NEXT:    ld t4, 96(sp) # 8-byte Folded Reload
 ; ZFINX64-NEXT:    ld t5, 88(sp) # 8-byte Folded Reload
-; ZFINX64-NEXT:    call callee_float_32@plt
+; ZFINX64-NEXT:    call callee_float_32
 ; ZFINX64-NEXT:    ld ra, 216(sp) # 8-byte Folded Reload
 ; ZFINX64-NEXT:    ld s0, 208(sp) # 8-byte Folded Reload
 ; ZFINX64-NEXT:    ld s1, 200(sp) # 8-byte Folded Reload
@@ -1237,7 +1237,7 @@ define float @caller_float_32(<32 x float> %A) nounwind {
 ; ZDINX32-NEXT:    lw t3, 84(sp) # 4-byte Folded Reload
 ; ZDINX32-NEXT:    lw t4, 80(sp) # 4-byte Folded Reload
 ; ZDINX32-NEXT:    lw t5, 76(sp) # 4-byte Folded Reload
-; ZDINX32-NEXT:    call callee_float_32@plt
+; ZDINX32-NEXT:    call callee_float_32
 ; ZDINX32-NEXT:    lw ra, 140(sp) # 4-byte Folded Reload
 ; ZDINX32-NEXT:    lw s0, 136(sp) # 4-byte Folded Reload
 ; ZDINX32-NEXT:    lw s1, 132(sp) # 4-byte Folded Reload
@@ -1321,7 +1321,7 @@ define float @caller_float_32(<32 x float> %A) nounwind {
 ; ZDINX64-NEXT:    ld t3, 104(sp) # 8-byte Folded Reload
 ; ZDINX64-NEXT:    ld t4, 96(sp) # 8-byte Folded Reload
 ; ZDINX64-NEXT:    ld t5, 88(sp) # 8-byte Folded Reload
-; ZDINX64-NEXT:    call callee_float_32@plt
+; ZDINX64-NEXT:    call callee_float_32
 ; ZDINX64-NEXT:    ld ra, 216(sp) # 8-byte Folded Reload
 ; ZDINX64-NEXT:    ld s0, 208(sp) # 8-byte Folded Reload
 ; ZDINX64-NEXT:    ld s1, 200(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/fli-licm.ll b/llvm/test/CodeGen/RISCV/fli-licm.ll
index 4962a146362d56..ba6b33c4f0a465 100644
--- a/llvm/test/CodeGen/RISCV/fli-licm.ll
+++ b/llvm/test/CodeGen/RISCV/fli-licm.ll
@@ -22,7 +22,7 @@ define void @process_nodes(ptr %0) nounwind {
 ; RV32-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32-NEXT:    fli.s fa0, 1.0
 ; RV32-NEXT:    mv a0, s0
-; RV32-NEXT:    call do_it@plt
+; RV32-NEXT:    call do_it
 ; RV32-NEXT:    lw s0, 0(s0)
 ; RV32-NEXT:    bnez s0, .LBB0_2
 ; RV32-NEXT:  # %bb.3:
@@ -44,7 +44,7 @@ define void @process_nodes(ptr %0) nounwind {
 ; RV64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64-NEXT:    fli.s fa0, 1.0
 ; RV64-NEXT:    mv a0, s0
-; RV64-NEXT:    call do_it@plt
+; RV64-NEXT:    call do_it
 ; RV64-NEXT:    ld s0, 0(s0)
 ; RV64-NEXT:    bnez s0, .LBB0_2
 ; RV64-NEXT:  # %bb.3:
diff --git a/llvm/test/CodeGen/RISCV/float-arith-strict.ll b/llvm/test/CodeGen/RISCV/float-arith-strict.ll
index 0252c8ca0f7278..90ce034eafd3b5 100644
--- a/llvm/test/CodeGen/RISCV/float-arith-strict.ll
+++ b/llvm/test/CodeGen/RISCV/float-arith-strict.ll
@@ -26,7 +26,7 @@ define float @fadd_s(float %a, float %b) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __addsf3@plt
+; RV32I-NEXT:    call __addsf3
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -35,7 +35,7 @@ define float @fadd_s(float %a, float %b) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __addsf3@plt
+; RV64I-NEXT:    call __addsf3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -59,7 +59,7 @@ define float @fsub_s(float %a, float %b) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __subsf3@plt
+; RV32I-NEXT:    call __subsf3
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -68,7 +68,7 @@ define float @fsub_s(float %a, float %b) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __subsf3@plt
+; RV64I-NEXT:    call __subsf3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -92,7 +92,7 @@ define float @fmul_s(float %a, float %b) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __mulsf3@plt
+; RV32I-NEXT:    call __mulsf3
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -101,7 +101,7 @@ define float @fmul_s(float %a, float %b) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __mulsf3@plt
+; RV64I-NEXT:    call __mulsf3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -125,7 +125,7 @@ define float @fdiv_s(float %a, float %b) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __divsf3@plt
+; RV32I-NEXT:    call __divsf3
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -134,7 +134,7 @@ define float @fdiv_s(float %a, float %b) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __divsf3@plt
+; RV64I-NEXT:    call __divsf3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -158,7 +158,7 @@ define float @fsqrt_s(float %a) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call sqrtf@plt
+; RV32I-NEXT:    call sqrtf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -167,7 +167,7 @@ define float @fsqrt_s(float %a) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call sqrtf@plt
+; RV64I-NEXT:    call sqrtf
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -186,7 +186,7 @@ define float @fmin_s(float %a, float %b) nounwind strictfp {
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
 ; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT:    call fminf@plt
+; RV32IF-NEXT:    call fminf
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
@@ -195,7 +195,7 @@ define float @fmin_s(float %a, float %b) nounwind strictfp {
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    addi sp, sp, -16
 ; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT:    call fminf@plt
+; RV64IF-NEXT:    call fminf
 ; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
 ; RV64IF-NEXT:    ret
@@ -204,7 +204,7 @@ define float @fmin_s(float %a, float %b) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call fminf@plt
+; RV32I-NEXT:    call fminf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -213,7 +213,7 @@ define float @fmin_s(float %a, float %b) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call fminf@plt
+; RV64I-NEXT:    call fminf
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -222,7 +222,7 @@ define float @fmin_s(float %a, float %b) nounwind strictfp {
 ; RV32IZFINX:       # %bb.0:
 ; RV32IZFINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT:    call fminf@plt
+; RV32IZFINX-NEXT:    call fminf
 ; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINX-NEXT:    ret
@@ -231,7 +231,7 @@ define float @fmin_s(float %a, float %b) nounwind strictfp {
 ; RV64IZFINX:       # %bb.0:
 ; RV64IZFINX-NEXT:    addi sp, sp, -16
 ; RV64IZFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT:    call fminf@plt
+; RV64IZFINX-NEXT:    call fminf
 ; RV64IZFINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFINX-NEXT:    addi sp, sp, 16
 ; RV64IZFINX-NEXT:    ret
@@ -245,7 +245,7 @@ define float @fmax_s(float %a, float %b) nounwind strictfp {
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
 ; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT:    call fmaxf@plt
+; RV32IF-NEXT:    call fmaxf
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
@@ -254,7 +254,7 @@ define float @fmax_s(float %a, float %b) nounwind strictfp {
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    addi sp, sp, -16
 ; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT:    call fmaxf@plt
+; RV64IF-NEXT:    call fmaxf
 ; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
 ; RV64IF-NEXT:    ret
@@ -263,7 +263,7 @@ define float @fmax_s(float %a, float %b) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call fmaxf@plt
+; RV32I-NEXT:    call fmaxf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -272,7 +272,7 @@ define float @fmax_s(float %a, float %b) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call fmaxf@plt
+; RV64I-NEXT:    call fmaxf
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -281,7 +281,7 @@ define float @fmax_s(float %a, float %b) nounwind strictfp {
 ; RV32IZFINX:       # %bb.0:
 ; RV32IZFINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT:    call fmaxf@plt
+; RV32IZFINX-NEXT:    call fmaxf
 ; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINX-NEXT:    ret
@@ -290,7 +290,7 @@ define float @fmax_s(float %a, float %b) nounwind strictfp {
 ; RV64IZFINX:       # %bb.0:
 ; RV64IZFINX-NEXT:    addi sp, sp, -16
 ; RV64IZFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT:    call fmaxf@plt
+; RV64IZFINX-NEXT:    call fmaxf
 ; RV64IZFINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFINX-NEXT:    addi sp, sp, 16
 ; RV64IZFINX-NEXT:    ret
@@ -309,7 +309,7 @@ define float @fmadd_s(float %a, float %b, float %c) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call fmaf@plt
+; RV32I-NEXT:    call fmaf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -318,7 +318,7 @@ define float @fmadd_s(float %a, float %b, float %c) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call fmaf@plt
+; RV64I-NEXT:    call fmaf
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -350,12 +350,12 @@ define float @fmsub_s(float %a, float %b, float %c) nounwind strictfp {
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    mv a0, a2
 ; RV32I-NEXT:    li a1, 0
-; RV32I-NEXT:    call __addsf3@plt
+; RV32I-NEXT:    call __addsf3
 ; RV32I-NEXT:    lui a2, 524288
 ; RV32I-NEXT:    xor a2, a0, a2
 ; RV32I-NEXT:    mv a0, s1
 ; RV32I-NEXT:    mv a1, s0
-; RV32I-NEXT:    call fmaf@plt
+; RV32I-NEXT:    call fmaf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
@@ -372,12 +372,12 @@ define float @fmsub_s(float %a, float %b, float %c) nounwind strictfp {
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    mv a0, a2
 ; RV64I-NEXT:    li a1, 0
-; RV64I-NEXT:    call __addsf3@plt
+; RV64I-NEXT:    call __addsf3
 ; RV64I-NEXT:    lui a2, 524288
 ; RV64I-NEXT:    xor a2, a0, a2
 ; RV64I-NEXT:    mv a0, s1
 ; RV64I-NEXT:    mv a1, s0
-; RV64I-NEXT:    call fmaf@plt
+; RV64I-NEXT:    call fmaf
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
@@ -414,17 +414,17 @@ define float @fnmadd_s(float %a, float %b, float %c) nounwind strictfp {
 ; RV32I-NEXT:    mv s0, a2
 ; RV32I-NEXT:    mv s1, a1
 ; RV32I-NEXT:    li a1, 0
-; RV32I-NEXT:    call __addsf3@plt
+; RV32I-NEXT:    call __addsf3
 ; RV32I-NEXT:    mv s2, a0
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    li a1, 0
-; RV32I-NEXT:    call __addsf3@plt
+; RV32I-NEXT:    call __addsf3
 ; RV32I-NEXT:    lui a2, 524288
 ; RV32I-NEXT:    xor a1, s2, a2
 ; RV32I-NEXT:    xor a2, a0, a2
 ; RV32I-NEXT:    mv a0, a1
 ; RV32I-NEXT:    mv a1, s1
-; RV32I-NEXT:    call fmaf@plt
+; RV32I-NEXT:    call fmaf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
@@ -442,17 +442,17 @@ define float @fnmadd_s(float %a, float %b, float %c) nounwind strictfp {
 ; RV64I-NEXT:    mv s0, a2
 ; RV64I-NEXT:    mv s1, a1
 ; RV64I-NEXT:    li a1, 0
-; RV64I-NEXT:    call __addsf3@plt
+; RV64I-NEXT:    call __addsf3
 ; RV64I-NEXT:    mv s2, a0
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    li a1, 0
-; RV64I-NEXT:    call __addsf3@plt
+; RV64I-NEXT:    call __addsf3
 ; RV64I-NEXT:    lui a2, 524288
 ; RV64I-NEXT:    xor a1, s2, a2
 ; RV64I-NEXT:    xor a2, a0, a2
 ; RV64I-NEXT:    mv a0, a1
 ; RV64I-NEXT:    mv a1, s1
-; RV64I-NEXT:    call fmaf@plt
+; RV64I-NEXT:    call fmaf
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
@@ -494,16 +494,16 @@ define float @fnmadd_s_2(float %a, float %b, float %c) nounwind strictfp {
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    mv a0, a1
 ; RV32I-NEXT:    li a1, 0
-; RV32I-NEXT:    call __addsf3@plt
+; RV32I-NEXT:    call __addsf3
 ; RV32I-NEXT:    mv s2, a0
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    li a1, 0
-; RV32I-NEXT:    call __addsf3@plt
+; RV32I-NEXT:    call __addsf3
 ; RV32I-NEXT:    lui a2, 524288
 ; RV32I-NEXT:    xor a1, s2, a2
 ; RV32I-NEXT:    xor a2, a0, a2
 ; RV32I-NEXT:    mv a0, s1
-; RV32I-NEXT:    call fmaf@plt
+; RV32I-NEXT:    call fmaf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
@@ -522,16 +522,16 @@ define float @fnmadd_s_2(float %a, float %b, float %c) nounwind strictfp {
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    mv a0, a1
 ; RV64I-NEXT:    li a1, 0
-; RV64I-NEXT:    call __addsf3@plt
+; RV64I-NEXT:    call __addsf3
 ; RV64I-NEXT:    mv s2, a0
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    li a1, 0
-; RV64I-NEXT:    call __addsf3@plt
+; RV64I-NEXT:    call __addsf3
 ; RV64I-NEXT:    lui a2, 524288
 ; RV64I-NEXT:    xor a1, s2, a2
 ; RV64I-NEXT:    xor a2, a0, a2
 ; RV64I-NEXT:    mv a0, s1
-; RV64I-NEXT:    call fmaf@plt
+; RV64I-NEXT:    call fmaf
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
@@ -570,12 +570,12 @@ define float @fnmsub_s(float %a, float %b, float %c) nounwind strictfp {
 ; RV32I-NEXT:    mv s0, a2
 ; RV32I-NEXT:    mv s1, a1
 ; RV32I-NEXT:    li a1, 0
-; RV32I-NEXT:    call __addsf3@plt
+; RV32I-NEXT:    call __addsf3
 ; RV32I-NEXT:    lui a1, 524288
 ; RV32I-NEXT:    xor a0, a0, a1
 ; RV32I-NEXT:    mv a1, s1
 ; RV32I-NEXT:    mv a2, s0
-; RV32I-NEXT:    call fmaf@plt
+; RV32I-NEXT:    call fmaf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
@@ -591,12 +591,12 @@ define float @fnmsub_s(float %a, float %b, float %c) nounwind strictfp {
 ; RV64I-NEXT:    mv s0, a2
 ; RV64I-NEXT:    mv s1, a1
 ; RV64I-NEXT:    li a1, 0
-; RV64I-NEXT:    call __addsf3@plt
+; RV64I-NEXT:    call __addsf3
 ; RV64I-NEXT:    lui a1, 524288
 ; RV64I-NEXT:    xor a0, a0, a1
 ; RV64I-NEXT:    mv a1, s1
 ; RV64I-NEXT:    mv a2, s0
-; RV64I-NEXT:    call fmaf@plt
+; RV64I-NEXT:    call fmaf
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
@@ -632,12 +632,12 @@ define float @fnmsub_s_2(float %a, float %b, float %c) nounwind strictfp {
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    mv a0, a1
 ; RV32I-NEXT:    li a1, 0
-; RV32I-NEXT:    call __addsf3@plt
+; RV32I-NEXT:    call __addsf3
 ; RV32I-NEXT:    lui a1, 524288
 ; RV32I-NEXT:    xor a1, a0, a1
 ; RV32I-NEXT:    mv a0, s1
 ; RV32I-NEXT:    mv a2, s0
-; RV32I-NEXT:    call fmaf@plt
+; RV32I-NEXT:    call fmaf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
@@ -654,12 +654,12 @@ define float @fnmsub_s_2(float %a, float %b, float %c) nounwind strictfp {
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    mv a0, a1
 ; RV64I-NEXT:    li a1, 0
-; RV64I-NEXT:    call __addsf3@plt
+; RV64I-NEXT:    call __addsf3
 ; RV64I-NEXT:    lui a1, 524288
 ; RV64I-NEXT:    xor a1, a0, a1
 ; RV64I-NEXT:    mv a0, s1
 ; RV64I-NEXT:    mv a2, s0
-; RV64I-NEXT:    call fmaf@plt
+; RV64I-NEXT:    call fmaf
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/float-arith.ll b/llvm/test/CodeGen/RISCV/float-arith.ll
index 5497827a3f2fac..7a7ebe651c08e1 100644
--- a/llvm/test/CodeGen/RISCV/float-arith.ll
+++ b/llvm/test/CodeGen/RISCV/float-arith.ll
@@ -32,7 +32,7 @@ define float @fadd_s(float %a, float %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __addsf3@plt
+; RV32I-NEXT:    call __addsf3
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -41,7 +41,7 @@ define float @fadd_s(float %a, float %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __addsf3@plt
+; RV64I-NEXT:    call __addsf3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -64,7 +64,7 @@ define float @fsub_s(float %a, float %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __subsf3@plt
+; RV32I-NEXT:    call __subsf3
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -73,7 +73,7 @@ define float @fsub_s(float %a, float %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __subsf3@plt
+; RV64I-NEXT:    call __subsf3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -96,7 +96,7 @@ define float @fmul_s(float %a, float %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __mulsf3@plt
+; RV32I-NEXT:    call __mulsf3
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -105,7 +105,7 @@ define float @fmul_s(float %a, float %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __mulsf3@plt
+; RV64I-NEXT:    call __mulsf3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -128,7 +128,7 @@ define float @fdiv_s(float %a, float %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __divsf3@plt
+; RV32I-NEXT:    call __divsf3
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -137,7 +137,7 @@ define float @fdiv_s(float %a, float %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __divsf3@plt
+; RV64I-NEXT:    call __divsf3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -162,7 +162,7 @@ define float @fsqrt_s(float %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call sqrtf@plt
+; RV32I-NEXT:    call sqrtf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -171,7 +171,7 @@ define float @fsqrt_s(float %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call sqrtf@plt
+; RV64I-NEXT:    call sqrtf
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -233,10 +233,10 @@ define i32 @fneg_s(float %a, float %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv a1, a0
-; RV32I-NEXT:    call __addsf3@plt
+; RV32I-NEXT:    call __addsf3
 ; RV32I-NEXT:    lui a1, 524288
 ; RV32I-NEXT:    xor a1, a0, a1
-; RV32I-NEXT:    call __eqsf2@plt
+; RV32I-NEXT:    call __eqsf2
 ; RV32I-NEXT:    seqz a0, a0
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -247,10 +247,10 @@ define i32 @fneg_s(float %a, float %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv a1, a0
-; RV64I-NEXT:    call __addsf3@plt
+; RV64I-NEXT:    call __addsf3
 ; RV64I-NEXT:    lui a1, 524288
 ; RV64I-NEXT:    xor a1, a0, a1
-; RV64I-NEXT:    call __eqsf2@plt
+; RV64I-NEXT:    call __eqsf2
 ; RV64I-NEXT:    seqz a0, a0
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -281,7 +281,7 @@ define float @fsgnjn_s(float %a, float %b) nounwind {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
-; RV32I-NEXT:    call __addsf3@plt
+; RV32I-NEXT:    call __addsf3
 ; RV32I-NEXT:    not a0, a0
 ; RV32I-NEXT:    lui a1, 524288
 ; RV32I-NEXT:    and a0, a0, a1
@@ -299,7 +299,7 @@ define float @fsgnjn_s(float %a, float %b) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
-; RV64I-NEXT:    call __addsf3@plt
+; RV64I-NEXT:    call __addsf3
 ; RV64I-NEXT:    not a0, a0
 ; RV64I-NEXT:    lui a1, 524288
 ; RV64I-NEXT:    and a0, a0, a1
@@ -337,11 +337,11 @@ define float @fabs_s(float %a, float %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __addsf3@plt
+; RV32I-NEXT:    call __addsf3
 ; RV32I-NEXT:    mv a1, a0
 ; RV32I-NEXT:    slli a0, a0, 1
 ; RV32I-NEXT:    srli a0, a0, 1
-; RV32I-NEXT:    call __addsf3@plt
+; RV32I-NEXT:    call __addsf3
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -350,11 +350,11 @@ define float @fabs_s(float %a, float %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __addsf3@plt
+; RV64I-NEXT:    call __addsf3
 ; RV64I-NEXT:    mv a1, a0
 ; RV64I-NEXT:    slli a0, a0, 33
 ; RV64I-NEXT:    srli a0, a0, 33
-; RV64I-NEXT:    call __addsf3@plt
+; RV64I-NEXT:    call __addsf3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -381,7 +381,7 @@ define float @fmin_s(float %a, float %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call fminf@plt
+; RV32I-NEXT:    call fminf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -390,7 +390,7 @@ define float @fmin_s(float %a, float %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call fminf@plt
+; RV64I-NEXT:    call fminf
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -415,7 +415,7 @@ define float @fmax_s(float %a, float %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call fmaxf@plt
+; RV32I-NEXT:    call fmaxf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -424,7 +424,7 @@ define float @fmax_s(float %a, float %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call fmaxf@plt
+; RV64I-NEXT:    call fmaxf
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -449,7 +449,7 @@ define float @fmadd_s(float %a, float %b, float %c) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call fmaf@plt
+; RV32I-NEXT:    call fmaf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -458,7 +458,7 @@ define float @fmadd_s(float %a, float %b, float %c) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call fmaf@plt
+; RV64I-NEXT:    call fmaf
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -490,12 +490,12 @@ define float @fmsub_s(float %a, float %b, float %c) nounwind {
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    mv a0, a2
 ; RV32I-NEXT:    li a1, 0
-; RV32I-NEXT:    call __addsf3@plt
+; RV32I-NEXT:    call __addsf3
 ; RV32I-NEXT:    lui a2, 524288
 ; RV32I-NEXT:    xor a2, a0, a2
 ; RV32I-NEXT:    mv a0, s1
 ; RV32I-NEXT:    mv a1, s0
-; RV32I-NEXT:    call fmaf@plt
+; RV32I-NEXT:    call fmaf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
@@ -512,12 +512,12 @@ define float @fmsub_s(float %a, float %b, float %c) nounwind {
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    mv a0, a2
 ; RV64I-NEXT:    li a1, 0
-; RV64I-NEXT:    call __addsf3@plt
+; RV64I-NEXT:    call __addsf3
 ; RV64I-NEXT:    lui a2, 524288
 ; RV64I-NEXT:    xor a2, a0, a2
 ; RV64I-NEXT:    mv a0, s1
 ; RV64I-NEXT:    mv a1, s0
-; RV64I-NEXT:    call fmaf@plt
+; RV64I-NEXT:    call fmaf
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
@@ -555,17 +555,17 @@ define float @fnmadd_s(float %a, float %b, float %c) nounwind {
 ; RV32I-NEXT:    mv s0, a2
 ; RV32I-NEXT:    mv s1, a1
 ; RV32I-NEXT:    li a1, 0
-; RV32I-NEXT:    call __addsf3@plt
+; RV32I-NEXT:    call __addsf3
 ; RV32I-NEXT:    mv s2, a0
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    li a1, 0
-; RV32I-NEXT:    call __addsf3@plt
+; RV32I-NEXT:    call __addsf3
 ; RV32I-NEXT:    lui a2, 524288
 ; RV32I-NEXT:    xor a1, s2, a2
 ; RV32I-NEXT:    xor a2, a0, a2
 ; RV32I-NEXT:    mv a0, a1
 ; RV32I-NEXT:    mv a1, s1
-; RV32I-NEXT:    call fmaf@plt
+; RV32I-NEXT:    call fmaf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
@@ -583,17 +583,17 @@ define float @fnmadd_s(float %a, float %b, float %c) nounwind {
 ; RV64I-NEXT:    mv s0, a2
 ; RV64I-NEXT:    mv s1, a1
 ; RV64I-NEXT:    li a1, 0
-; RV64I-NEXT:    call __addsf3@plt
+; RV64I-NEXT:    call __addsf3
 ; RV64I-NEXT:    mv s2, a0
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    li a1, 0
-; RV64I-NEXT:    call __addsf3@plt
+; RV64I-NEXT:    call __addsf3
 ; RV64I-NEXT:    lui a2, 524288
 ; RV64I-NEXT:    xor a1, s2, a2
 ; RV64I-NEXT:    xor a2, a0, a2
 ; RV64I-NEXT:    mv a0, a1
 ; RV64I-NEXT:    mv a1, s1
-; RV64I-NEXT:    call fmaf@plt
+; RV64I-NEXT:    call fmaf
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
@@ -635,16 +635,16 @@ define float @fnmadd_s_2(float %a, float %b, float %c) nounwind {
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    mv a0, a1
 ; RV32I-NEXT:    li a1, 0
-; RV32I-NEXT:    call __addsf3@plt
+; RV32I-NEXT:    call __addsf3
 ; RV32I-NEXT:    mv s2, a0
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    li a1, 0
-; RV32I-NEXT:    call __addsf3@plt
+; RV32I-NEXT:    call __addsf3
 ; RV32I-NEXT:    lui a2, 524288
 ; RV32I-NEXT:    xor a1, s2, a2
 ; RV32I-NEXT:    xor a2, a0, a2
 ; RV32I-NEXT:    mv a0, s1
-; RV32I-NEXT:    call fmaf@plt
+; RV32I-NEXT:    call fmaf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
@@ -663,16 +663,16 @@ define float @fnmadd_s_2(float %a, float %b, float %c) nounwind {
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    mv a0, a1
 ; RV64I-NEXT:    li a1, 0
-; RV64I-NEXT:    call __addsf3@plt
+; RV64I-NEXT:    call __addsf3
 ; RV64I-NEXT:    mv s2, a0
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    li a1, 0
-; RV64I-NEXT:    call __addsf3@plt
+; RV64I-NEXT:    call __addsf3
 ; RV64I-NEXT:    lui a2, 524288
 ; RV64I-NEXT:    xor a1, s2, a2
 ; RV64I-NEXT:    xor a2, a0, a2
 ; RV64I-NEXT:    mv a0, s1
-; RV64I-NEXT:    call fmaf@plt
+; RV64I-NEXT:    call fmaf
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
@@ -717,7 +717,7 @@ define float @fnmadd_s_3(float %a, float %b, float %c) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call fmaf@plt
+; RV32I-NEXT:    call fmaf
 ; RV32I-NEXT:    lui a1, 524288
 ; RV32I-NEXT:    xor a0, a0, a1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -728,7 +728,7 @@ define float @fnmadd_s_3(float %a, float %b, float %c) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call fmaf@plt
+; RV64I-NEXT:    call fmaf
 ; RV64I-NEXT:    lui a1, 524288
 ; RV64I-NEXT:    xor a0, a0, a1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -766,7 +766,7 @@ define float @fnmadd_nsz(float %a, float %b, float %c) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call fmaf@plt
+; RV32I-NEXT:    call fmaf
 ; RV32I-NEXT:    lui a1, 524288
 ; RV32I-NEXT:    xor a0, a0, a1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -777,7 +777,7 @@ define float @fnmadd_nsz(float %a, float %b, float %c) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call fmaf@plt
+; RV64I-NEXT:    call fmaf
 ; RV64I-NEXT:    lui a1, 524288
 ; RV64I-NEXT:    xor a0, a0, a1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -811,12 +811,12 @@ define float @fnmsub_s(float %a, float %b, float %c) nounwind {
 ; RV32I-NEXT:    mv s0, a2
 ; RV32I-NEXT:    mv s1, a1
 ; RV32I-NEXT:    li a1, 0
-; RV32I-NEXT:    call __addsf3@plt
+; RV32I-NEXT:    call __addsf3
 ; RV32I-NEXT:    lui a1, 524288
 ; RV32I-NEXT:    xor a0, a0, a1
 ; RV32I-NEXT:    mv a1, s1
 ; RV32I-NEXT:    mv a2, s0
-; RV32I-NEXT:    call fmaf@plt
+; RV32I-NEXT:    call fmaf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
@@ -832,12 +832,12 @@ define float @fnmsub_s(float %a, float %b, float %c) nounwind {
 ; RV64I-NEXT:    mv s0, a2
 ; RV64I-NEXT:    mv s1, a1
 ; RV64I-NEXT:    li a1, 0
-; RV64I-NEXT:    call __addsf3@plt
+; RV64I-NEXT:    call __addsf3
 ; RV64I-NEXT:    lui a1, 524288
 ; RV64I-NEXT:    xor a0, a0, a1
 ; RV64I-NEXT:    mv a1, s1
 ; RV64I-NEXT:    mv a2, s0
-; RV64I-NEXT:    call fmaf@plt
+; RV64I-NEXT:    call fmaf
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
@@ -873,12 +873,12 @@ define float @fnmsub_s_2(float %a, float %b, float %c) nounwind {
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    mv a0, a1
 ; RV32I-NEXT:    li a1, 0
-; RV32I-NEXT:    call __addsf3@plt
+; RV32I-NEXT:    call __addsf3
 ; RV32I-NEXT:    lui a1, 524288
 ; RV32I-NEXT:    xor a1, a0, a1
 ; RV32I-NEXT:    mv a0, s1
 ; RV32I-NEXT:    mv a2, s0
-; RV32I-NEXT:    call fmaf@plt
+; RV32I-NEXT:    call fmaf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
@@ -895,12 +895,12 @@ define float @fnmsub_s_2(float %a, float %b, float %c) nounwind {
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    mv a0, a1
 ; RV64I-NEXT:    li a1, 0
-; RV64I-NEXT:    call __addsf3@plt
+; RV64I-NEXT:    call __addsf3
 ; RV64I-NEXT:    lui a1, 524288
 ; RV64I-NEXT:    xor a1, a0, a1
 ; RV64I-NEXT:    mv a0, s1
 ; RV64I-NEXT:    mv a2, s0
-; RV64I-NEXT:    call fmaf@plt
+; RV64I-NEXT:    call fmaf
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
@@ -929,9 +929,9 @@ define float @fmadd_s_contract(float %a, float %b, float %c) nounwind {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a2
-; RV32I-NEXT:    call __mulsf3@plt
+; RV32I-NEXT:    call __mulsf3
 ; RV32I-NEXT:    mv a1, s0
-; RV32I-NEXT:    call __addsf3@plt
+; RV32I-NEXT:    call __addsf3
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -943,9 +943,9 @@ define float @fmadd_s_contract(float %a, float %b, float %c) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a2
-; RV64I-NEXT:    call __mulsf3@plt
+; RV64I-NEXT:    call __mulsf3
 ; RV64I-NEXT:    mv a1, s0
-; RV64I-NEXT:    call __addsf3@plt
+; RV64I-NEXT:    call __addsf3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -980,13 +980,13 @@ define float @fmsub_s_contract(float %a, float %b, float %c) nounwind {
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    mv a0, a2
 ; RV32I-NEXT:    li a1, 0
-; RV32I-NEXT:    call __addsf3@plt
+; RV32I-NEXT:    call __addsf3
 ; RV32I-NEXT:    mv s2, a0
 ; RV32I-NEXT:    mv a0, s1
 ; RV32I-NEXT:    mv a1, s0
-; RV32I-NEXT:    call __mulsf3@plt
+; RV32I-NEXT:    call __mulsf3
 ; RV32I-NEXT:    mv a1, s2
-; RV32I-NEXT:    call __subsf3@plt
+; RV32I-NEXT:    call __subsf3
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
@@ -1005,13 +1005,13 @@ define float @fmsub_s_contract(float %a, float %b, float %c) nounwind {
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    mv a0, a2
 ; RV64I-NEXT:    li a1, 0
-; RV64I-NEXT:    call __addsf3@plt
+; RV64I-NEXT:    call __addsf3
 ; RV64I-NEXT:    mv s2, a0
 ; RV64I-NEXT:    mv a0, s1
 ; RV64I-NEXT:    mv a1, s0
-; RV64I-NEXT:    call __mulsf3@plt
+; RV64I-NEXT:    call __mulsf3
 ; RV64I-NEXT:    mv a1, s2
-; RV64I-NEXT:    call __subsf3@plt
+; RV64I-NEXT:    call __subsf3
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
@@ -1052,23 +1052,23 @@ define float @fnmadd_s_contract(float %a, float %b, float %c) nounwind {
 ; RV32I-NEXT:    mv s0, a2
 ; RV32I-NEXT:    mv s1, a1
 ; RV32I-NEXT:    li a1, 0
-; RV32I-NEXT:    call __addsf3@plt
+; RV32I-NEXT:    call __addsf3
 ; RV32I-NEXT:    mv s2, a0
 ; RV32I-NEXT:    mv a0, s1
 ; RV32I-NEXT:    li a1, 0
-; RV32I-NEXT:    call __addsf3@plt
+; RV32I-NEXT:    call __addsf3
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    li a1, 0
-; RV32I-NEXT:    call __addsf3@plt
+; RV32I-NEXT:    call __addsf3
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    mv a0, s2
 ; RV32I-NEXT:    mv a1, s1
-; RV32I-NEXT:    call __mulsf3@plt
+; RV32I-NEXT:    call __mulsf3
 ; RV32I-NEXT:    lui a1, 524288
 ; RV32I-NEXT:    xor a0, a0, a1
 ; RV32I-NEXT:    mv a1, s0
-; RV32I-NEXT:    call __subsf3@plt
+; RV32I-NEXT:    call __subsf3
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
@@ -1086,23 +1086,23 @@ define float @fnmadd_s_contract(float %a, float %b, float %c) nounwind {
 ; RV64I-NEXT:    mv s0, a2
 ; RV64I-NEXT:    mv s1, a1
 ; RV64I-NEXT:    li a1, 0
-; RV64I-NEXT:    call __addsf3@plt
+; RV64I-NEXT:    call __addsf3
 ; RV64I-NEXT:    mv s2, a0
 ; RV64I-NEXT:    mv a0, s1
 ; RV64I-NEXT:    li a1, 0
-; RV64I-NEXT:    call __addsf3@plt
+; RV64I-NEXT:    call __addsf3
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    li a1, 0
-; RV64I-NEXT:    call __addsf3@plt
+; RV64I-NEXT:    call __addsf3
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    mv a0, s2
 ; RV64I-NEXT:    mv a1, s1
-; RV64I-NEXT:    call __mulsf3@plt
+; RV64I-NEXT:    call __mulsf3
 ; RV64I-NEXT:    lui a1, 524288
 ; RV64I-NEXT:    xor a0, a0, a1
 ; RV64I-NEXT:    mv a1, s0
-; RV64I-NEXT:    call __subsf3@plt
+; RV64I-NEXT:    call __subsf3
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
@@ -1144,17 +1144,17 @@ define float @fnmsub_s_contract(float %a, float %b, float %c) nounwind {
 ; RV32I-NEXT:    mv s0, a2
 ; RV32I-NEXT:    mv s1, a1
 ; RV32I-NEXT:    li a1, 0
-; RV32I-NEXT:    call __addsf3@plt
+; RV32I-NEXT:    call __addsf3
 ; RV32I-NEXT:    mv s2, a0
 ; RV32I-NEXT:    mv a0, s1
 ; RV32I-NEXT:    li a1, 0
-; RV32I-NEXT:    call __addsf3@plt
+; RV32I-NEXT:    call __addsf3
 ; RV32I-NEXT:    mv a1, a0
 ; RV32I-NEXT:    mv a0, s2
-; RV32I-NEXT:    call __mulsf3@plt
+; RV32I-NEXT:    call __mulsf3
 ; RV32I-NEXT:    mv a1, a0
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __subsf3@plt
+; RV32I-NEXT:    call __subsf3
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
@@ -1172,17 +1172,17 @@ define float @fnmsub_s_contract(float %a, float %b, float %c) nounwind {
 ; RV64I-NEXT:    mv s0, a2
 ; RV64I-NEXT:    mv s1, a1
 ; RV64I-NEXT:    li a1, 0
-; RV64I-NEXT:    call __addsf3@plt
+; RV64I-NEXT:    call __addsf3
 ; RV64I-NEXT:    mv s2, a0
 ; RV64I-NEXT:    mv a0, s1
 ; RV64I-NEXT:    li a1, 0
-; RV64I-NEXT:    call __addsf3@plt
+; RV64I-NEXT:    call __addsf3
 ; RV64I-NEXT:    mv a1, a0
 ; RV64I-NEXT:    mv a0, s2
-; RV64I-NEXT:    call __mulsf3@plt
+; RV64I-NEXT:    call __mulsf3
 ; RV64I-NEXT:    mv a1, a0
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __subsf3@plt
+; RV64I-NEXT:    call __subsf3
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/float-bit-preserving-dagcombines.ll b/llvm/test/CodeGen/RISCV/float-bit-preserving-dagcombines.ll
index 5c50381ad17017..6aa6dedba548f7 100644
--- a/llvm/test/CodeGen/RISCV/float-bit-preserving-dagcombines.ll
+++ b/llvm/test/CodeGen/RISCV/float-bit-preserving-dagcombines.ll
@@ -89,13 +89,13 @@ define double @bitcast_double_and(double %a1, double %a2) nounwind {
 ; RV32F-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
 ; RV32F-NEXT:    mv s0, a1
 ; RV32F-NEXT:    mv s1, a0
-; RV32F-NEXT:    call __adddf3@plt
+; RV32F-NEXT:    call __adddf3
 ; RV32F-NEXT:    mv a2, a0
 ; RV32F-NEXT:    slli a1, a1, 1
 ; RV32F-NEXT:    srli a3, a1, 1
 ; RV32F-NEXT:    mv a0, s1
 ; RV32F-NEXT:    mv a1, s0
-; RV32F-NEXT:    call __adddf3@plt
+; RV32F-NEXT:    call __adddf3
 ; RV32F-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32F-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; RV32F-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
@@ -110,13 +110,13 @@ define double @bitcast_double_and(double %a1, double %a2) nounwind {
 ; RV32ZFINX-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
 ; RV32ZFINX-NEXT:    mv s0, a1
 ; RV32ZFINX-NEXT:    mv s1, a0
-; RV32ZFINX-NEXT:    call __adddf3@plt
+; RV32ZFINX-NEXT:    call __adddf3
 ; RV32ZFINX-NEXT:    mv a2, a0
 ; RV32ZFINX-NEXT:    slli a1, a1, 1
 ; RV32ZFINX-NEXT:    srli a3, a1, 1
 ; RV32ZFINX-NEXT:    mv a0, s1
 ; RV32ZFINX-NEXT:    mv a1, s0
-; RV32ZFINX-NEXT:    call __adddf3@plt
+; RV32ZFINX-NEXT:    call __adddf3
 ; RV32ZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32ZFINX-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; RV32ZFINX-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
@@ -147,11 +147,11 @@ define double @bitcast_double_and(double %a1, double %a2) nounwind {
 ; RV64F-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64F-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
 ; RV64F-NEXT:    mv s0, a0
-; RV64F-NEXT:    call __adddf3@plt
+; RV64F-NEXT:    call __adddf3
 ; RV64F-NEXT:    slli a0, a0, 1
 ; RV64F-NEXT:    srli a1, a0, 1
 ; RV64F-NEXT:    mv a0, s0
-; RV64F-NEXT:    call __adddf3@plt
+; RV64F-NEXT:    call __adddf3
 ; RV64F-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64F-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
 ; RV64F-NEXT:    addi sp, sp, 16
@@ -163,11 +163,11 @@ define double @bitcast_double_and(double %a1, double %a2) nounwind {
 ; RV64ZFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64ZFINX-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
 ; RV64ZFINX-NEXT:    mv s0, a0
-; RV64ZFINX-NEXT:    call __adddf3@plt
+; RV64ZFINX-NEXT:    call __adddf3
 ; RV64ZFINX-NEXT:    slli a0, a0, 1
 ; RV64ZFINX-NEXT:    srli a1, a0, 1
 ; RV64ZFINX-NEXT:    mv a0, s0
-; RV64ZFINX-NEXT:    call __adddf3@plt
+; RV64ZFINX-NEXT:    call __adddf3
 ; RV64ZFINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64ZFINX-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
 ; RV64ZFINX-NEXT:    addi sp, sp, 16
@@ -262,13 +262,13 @@ define double @bitcast_double_xor(double %a1, double %a2) nounwind {
 ; RV32F-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
 ; RV32F-NEXT:    mv s0, a1
 ; RV32F-NEXT:    mv s1, a0
-; RV32F-NEXT:    call __muldf3@plt
+; RV32F-NEXT:    call __muldf3
 ; RV32F-NEXT:    mv a2, a0
 ; RV32F-NEXT:    lui a3, 524288
 ; RV32F-NEXT:    xor a3, a1, a3
 ; RV32F-NEXT:    mv a0, s1
 ; RV32F-NEXT:    mv a1, s0
-; RV32F-NEXT:    call __muldf3@plt
+; RV32F-NEXT:    call __muldf3
 ; RV32F-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32F-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; RV32F-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
@@ -283,13 +283,13 @@ define double @bitcast_double_xor(double %a1, double %a2) nounwind {
 ; RV32ZFINX-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
 ; RV32ZFINX-NEXT:    mv s0, a1
 ; RV32ZFINX-NEXT:    mv s1, a0
-; RV32ZFINX-NEXT:    call __muldf3@plt
+; RV32ZFINX-NEXT:    call __muldf3
 ; RV32ZFINX-NEXT:    mv a2, a0
 ; RV32ZFINX-NEXT:    lui a3, 524288
 ; RV32ZFINX-NEXT:    xor a3, a1, a3
 ; RV32ZFINX-NEXT:    mv a0, s1
 ; RV32ZFINX-NEXT:    mv a1, s0
-; RV32ZFINX-NEXT:    call __muldf3@plt
+; RV32ZFINX-NEXT:    call __muldf3
 ; RV32ZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32ZFINX-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; RV32ZFINX-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
@@ -320,12 +320,12 @@ define double @bitcast_double_xor(double %a1, double %a2) nounwind {
 ; RV64F-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64F-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
 ; RV64F-NEXT:    mv s0, a0
-; RV64F-NEXT:    call __muldf3@plt
+; RV64F-NEXT:    call __muldf3
 ; RV64F-NEXT:    li a1, -1
 ; RV64F-NEXT:    slli a1, a1, 63
 ; RV64F-NEXT:    xor a1, a0, a1
 ; RV64F-NEXT:    mv a0, s0
-; RV64F-NEXT:    call __muldf3@plt
+; RV64F-NEXT:    call __muldf3
 ; RV64F-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64F-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
 ; RV64F-NEXT:    addi sp, sp, 16
@@ -337,12 +337,12 @@ define double @bitcast_double_xor(double %a1, double %a2) nounwind {
 ; RV64ZFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64ZFINX-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
 ; RV64ZFINX-NEXT:    mv s0, a0
-; RV64ZFINX-NEXT:    call __muldf3@plt
+; RV64ZFINX-NEXT:    call __muldf3
 ; RV64ZFINX-NEXT:    li a1, -1
 ; RV64ZFINX-NEXT:    slli a1, a1, 63
 ; RV64ZFINX-NEXT:    xor a1, a0, a1
 ; RV64ZFINX-NEXT:    mv a0, s0
-; RV64ZFINX-NEXT:    call __muldf3@plt
+; RV64ZFINX-NEXT:    call __muldf3
 ; RV64ZFINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64ZFINX-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
 ; RV64ZFINX-NEXT:    addi sp, sp, 16
@@ -442,13 +442,13 @@ define double @bitcast_double_or(double %a1, double %a2) nounwind {
 ; RV32F-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
 ; RV32F-NEXT:    mv s0, a1
 ; RV32F-NEXT:    mv s1, a0
-; RV32F-NEXT:    call __muldf3@plt
+; RV32F-NEXT:    call __muldf3
 ; RV32F-NEXT:    mv a2, a0
 ; RV32F-NEXT:    lui a3, 524288
 ; RV32F-NEXT:    or a3, a1, a3
 ; RV32F-NEXT:    mv a0, s1
 ; RV32F-NEXT:    mv a1, s0
-; RV32F-NEXT:    call __muldf3@plt
+; RV32F-NEXT:    call __muldf3
 ; RV32F-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32F-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; RV32F-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
@@ -463,13 +463,13 @@ define double @bitcast_double_or(double %a1, double %a2) nounwind {
 ; RV32ZFINX-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
 ; RV32ZFINX-NEXT:    mv s0, a1
 ; RV32ZFINX-NEXT:    mv s1, a0
-; RV32ZFINX-NEXT:    call __muldf3@plt
+; RV32ZFINX-NEXT:    call __muldf3
 ; RV32ZFINX-NEXT:    mv a2, a0
 ; RV32ZFINX-NEXT:    lui a3, 524288
 ; RV32ZFINX-NEXT:    or a3, a1, a3
 ; RV32ZFINX-NEXT:    mv a0, s1
 ; RV32ZFINX-NEXT:    mv a1, s0
-; RV32ZFINX-NEXT:    call __muldf3@plt
+; RV32ZFINX-NEXT:    call __muldf3
 ; RV32ZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32ZFINX-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; RV32ZFINX-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
@@ -501,12 +501,12 @@ define double @bitcast_double_or(double %a1, double %a2) nounwind {
 ; RV64F-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64F-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
 ; RV64F-NEXT:    mv s0, a0
-; RV64F-NEXT:    call __muldf3@plt
+; RV64F-NEXT:    call __muldf3
 ; RV64F-NEXT:    li a1, -1
 ; RV64F-NEXT:    slli a1, a1, 63
 ; RV64F-NEXT:    or a1, a0, a1
 ; RV64F-NEXT:    mv a0, s0
-; RV64F-NEXT:    call __muldf3@plt
+; RV64F-NEXT:    call __muldf3
 ; RV64F-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64F-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
 ; RV64F-NEXT:    addi sp, sp, 16
@@ -518,12 +518,12 @@ define double @bitcast_double_or(double %a1, double %a2) nounwind {
 ; RV64ZFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64ZFINX-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
 ; RV64ZFINX-NEXT:    mv s0, a0
-; RV64ZFINX-NEXT:    call __muldf3@plt
+; RV64ZFINX-NEXT:    call __muldf3
 ; RV64ZFINX-NEXT:    li a1, -1
 ; RV64ZFINX-NEXT:    slli a1, a1, 63
 ; RV64ZFINX-NEXT:    or a1, a0, a1
 ; RV64ZFINX-NEXT:    mv a0, s0
-; RV64ZFINX-NEXT:    call __muldf3@plt
+; RV64ZFINX-NEXT:    call __muldf3
 ; RV64ZFINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64ZFINX-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
 ; RV64ZFINX-NEXT:    addi sp, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/float-br-fcmp.ll b/llvm/test/CodeGen/RISCV/float-br-fcmp.ll
index 71b0f77015b562..35caa627b57bc3 100644
--- a/llvm/test/CodeGen/RISCV/float-br-fcmp.ll
+++ b/llvm/test/CodeGen/RISCV/float-br-fcmp.ll
@@ -22,7 +22,7 @@ define void @br_fcmp_false(float %a, float %b) nounwind {
 ; RV32IF-NEXT:  .LBB0_2: # %if.else
 ; RV32IF-NEXT:    addi sp, sp, -16
 ; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT:    call abort@plt
+; RV32IF-NEXT:    call abort
 ;
 ; RV64IF-LABEL: br_fcmp_false:
 ; RV64IF:       # %bb.0:
@@ -33,7 +33,7 @@ define void @br_fcmp_false(float %a, float %b) nounwind {
 ; RV64IF-NEXT:  .LBB0_2: # %if.else
 ; RV64IF-NEXT:    addi sp, sp, -16
 ; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT:    call abort@plt
+; RV64IF-NEXT:    call abort
 ;
 ; RV32IZFINX-LABEL: br_fcmp_false:
 ; RV32IZFINX:       # %bb.0:
@@ -44,7 +44,7 @@ define void @br_fcmp_false(float %a, float %b) nounwind {
 ; RV32IZFINX-NEXT:  .LBB0_2: # %if.else
 ; RV32IZFINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT:    call abort@plt
+; RV32IZFINX-NEXT:    call abort
 ;
 ; RV64IZFINX-LABEL: br_fcmp_false:
 ; RV64IZFINX:       # %bb.0:
@@ -55,7 +55,7 @@ define void @br_fcmp_false(float %a, float %b) nounwind {
 ; RV64IZFINX-NEXT:  .LBB0_2: # %if.else
 ; RV64IZFINX-NEXT:    addi sp, sp, -16
 ; RV64IZFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT:    call abort@plt
+; RV64IZFINX-NEXT:    call abort
   %1 = fcmp false float %a, %b
   br i1 %1, label %if.then, label %if.else
 if.then:
@@ -75,7 +75,7 @@ define void @br_fcmp_oeq(float %a, float %b) nounwind {
 ; RV32IF-NEXT:  .LBB1_2: # %if.then
 ; RV32IF-NEXT:    addi sp, sp, -16
 ; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT:    call abort@plt
+; RV32IF-NEXT:    call abort
 ;
 ; RV64IF-LABEL: br_fcmp_oeq:
 ; RV64IF:       # %bb.0:
@@ -86,7 +86,7 @@ define void @br_fcmp_oeq(float %a, float %b) nounwind {
 ; RV64IF-NEXT:  .LBB1_2: # %if.then
 ; RV64IF-NEXT:    addi sp, sp, -16
 ; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT:    call abort@plt
+; RV64IF-NEXT:    call abort
 ;
 ; RV32IZFINX-LABEL: br_fcmp_oeq:
 ; RV32IZFINX:       # %bb.0:
@@ -97,7 +97,7 @@ define void @br_fcmp_oeq(float %a, float %b) nounwind {
 ; RV32IZFINX-NEXT:  .LBB1_2: # %if.then
 ; RV32IZFINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT:    call abort@plt
+; RV32IZFINX-NEXT:    call abort
 ;
 ; RV64IZFINX-LABEL: br_fcmp_oeq:
 ; RV64IZFINX:       # %bb.0:
@@ -108,7 +108,7 @@ define void @br_fcmp_oeq(float %a, float %b) nounwind {
 ; RV64IZFINX-NEXT:  .LBB1_2: # %if.then
 ; RV64IZFINX-NEXT:    addi sp, sp, -16
 ; RV64IZFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT:    call abort@plt
+; RV64IZFINX-NEXT:    call abort
   %1 = fcmp oeq float %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -131,7 +131,7 @@ define void @br_fcmp_oeq_alt(float %a, float %b) nounwind {
 ; RV32IF-NEXT:  .LBB2_2: # %if.then
 ; RV32IF-NEXT:    addi sp, sp, -16
 ; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT:    call abort@plt
+; RV32IF-NEXT:    call abort
 ;
 ; RV64IF-LABEL: br_fcmp_oeq_alt:
 ; RV64IF:       # %bb.0:
@@ -142,7 +142,7 @@ define void @br_fcmp_oeq_alt(float %a, float %b) nounwind {
 ; RV64IF-NEXT:  .LBB2_2: # %if.then
 ; RV64IF-NEXT:    addi sp, sp, -16
 ; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT:    call abort@plt
+; RV64IF-NEXT:    call abort
 ;
 ; RV32IZFINX-LABEL: br_fcmp_oeq_alt:
 ; RV32IZFINX:       # %bb.0:
@@ -153,7 +153,7 @@ define void @br_fcmp_oeq_alt(float %a, float %b) nounwind {
 ; RV32IZFINX-NEXT:  .LBB2_2: # %if.then
 ; RV32IZFINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT:    call abort@plt
+; RV32IZFINX-NEXT:    call abort
 ;
 ; RV64IZFINX-LABEL: br_fcmp_oeq_alt:
 ; RV64IZFINX:       # %bb.0:
@@ -164,7 +164,7 @@ define void @br_fcmp_oeq_alt(float %a, float %b) nounwind {
 ; RV64IZFINX-NEXT:  .LBB2_2: # %if.then
 ; RV64IZFINX-NEXT:    addi sp, sp, -16
 ; RV64IZFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT:    call abort@plt
+; RV64IZFINX-NEXT:    call abort
   %1 = fcmp oeq float %a, %b
   br i1 %1, label %if.then, label %if.else
 if.then:
@@ -184,7 +184,7 @@ define void @br_fcmp_ogt(float %a, float %b) nounwind {
 ; RV32IF-NEXT:  .LBB3_2: # %if.then
 ; RV32IF-NEXT:    addi sp, sp, -16
 ; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT:    call abort@plt
+; RV32IF-NEXT:    call abort
 ;
 ; RV64IF-LABEL: br_fcmp_ogt:
 ; RV64IF:       # %bb.0:
@@ -195,7 +195,7 @@ define void @br_fcmp_ogt(float %a, float %b) nounwind {
 ; RV64IF-NEXT:  .LBB3_2: # %if.then
 ; RV64IF-NEXT:    addi sp, sp, -16
 ; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT:    call abort@plt
+; RV64IF-NEXT:    call abort
 ;
 ; RV32IZFINX-LABEL: br_fcmp_ogt:
 ; RV32IZFINX:       # %bb.0:
@@ -206,7 +206,7 @@ define void @br_fcmp_ogt(float %a, float %b) nounwind {
 ; RV32IZFINX-NEXT:  .LBB3_2: # %if.then
 ; RV32IZFINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT:    call abort@plt
+; RV32IZFINX-NEXT:    call abort
 ;
 ; RV64IZFINX-LABEL: br_fcmp_ogt:
 ; RV64IZFINX:       # %bb.0:
@@ -217,7 +217,7 @@ define void @br_fcmp_ogt(float %a, float %b) nounwind {
 ; RV64IZFINX-NEXT:  .LBB3_2: # %if.then
 ; RV64IZFINX-NEXT:    addi sp, sp, -16
 ; RV64IZFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT:    call abort@plt
+; RV64IZFINX-NEXT:    call abort
   %1 = fcmp ogt float %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -237,7 +237,7 @@ define void @br_fcmp_oge(float %a, float %b) nounwind {
 ; RV32IF-NEXT:  .LBB4_2: # %if.then
 ; RV32IF-NEXT:    addi sp, sp, -16
 ; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT:    call abort@plt
+; RV32IF-NEXT:    call abort
 ;
 ; RV64IF-LABEL: br_fcmp_oge:
 ; RV64IF:       # %bb.0:
@@ -248,7 +248,7 @@ define void @br_fcmp_oge(float %a, float %b) nounwind {
 ; RV64IF-NEXT:  .LBB4_2: # %if.then
 ; RV64IF-NEXT:    addi sp, sp, -16
 ; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT:    call abort@plt
+; RV64IF-NEXT:    call abort
 ;
 ; RV32IZFINX-LABEL: br_fcmp_oge:
 ; RV32IZFINX:       # %bb.0:
@@ -259,7 +259,7 @@ define void @br_fcmp_oge(float %a, float %b) nounwind {
 ; RV32IZFINX-NEXT:  .LBB4_2: # %if.then
 ; RV32IZFINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT:    call abort@plt
+; RV32IZFINX-NEXT:    call abort
 ;
 ; RV64IZFINX-LABEL: br_fcmp_oge:
 ; RV64IZFINX:       # %bb.0:
@@ -270,7 +270,7 @@ define void @br_fcmp_oge(float %a, float %b) nounwind {
 ; RV64IZFINX-NEXT:  .LBB4_2: # %if.then
 ; RV64IZFINX-NEXT:    addi sp, sp, -16
 ; RV64IZFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT:    call abort@plt
+; RV64IZFINX-NEXT:    call abort
   %1 = fcmp oge float %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -290,7 +290,7 @@ define void @br_fcmp_olt(float %a, float %b) nounwind {
 ; RV32IF-NEXT:  .LBB5_2: # %if.then
 ; RV32IF-NEXT:    addi sp, sp, -16
 ; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT:    call abort@plt
+; RV32IF-NEXT:    call abort
 ;
 ; RV64IF-LABEL: br_fcmp_olt:
 ; RV64IF:       # %bb.0:
@@ -301,7 +301,7 @@ define void @br_fcmp_olt(float %a, float %b) nounwind {
 ; RV64IF-NEXT:  .LBB5_2: # %if.then
 ; RV64IF-NEXT:    addi sp, sp, -16
 ; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT:    call abort@plt
+; RV64IF-NEXT:    call abort
 ;
 ; RV32IZFINX-LABEL: br_fcmp_olt:
 ; RV32IZFINX:       # %bb.0:
@@ -312,7 +312,7 @@ define void @br_fcmp_olt(float %a, float %b) nounwind {
 ; RV32IZFINX-NEXT:  .LBB5_2: # %if.then
 ; RV32IZFINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT:    call abort@plt
+; RV32IZFINX-NEXT:    call abort
 ;
 ; RV64IZFINX-LABEL: br_fcmp_olt:
 ; RV64IZFINX:       # %bb.0:
@@ -323,7 +323,7 @@ define void @br_fcmp_olt(float %a, float %b) nounwind {
 ; RV64IZFINX-NEXT:  .LBB5_2: # %if.then
 ; RV64IZFINX-NEXT:    addi sp, sp, -16
 ; RV64IZFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT:    call abort@plt
+; RV64IZFINX-NEXT:    call abort
   %1 = fcmp olt float %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -343,7 +343,7 @@ define void @br_fcmp_ole(float %a, float %b) nounwind {
 ; RV32IF-NEXT:  .LBB6_2: # %if.then
 ; RV32IF-NEXT:    addi sp, sp, -16
 ; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT:    call abort@plt
+; RV32IF-NEXT:    call abort
 ;
 ; RV64IF-LABEL: br_fcmp_ole:
 ; RV64IF:       # %bb.0:
@@ -354,7 +354,7 @@ define void @br_fcmp_ole(float %a, float %b) nounwind {
 ; RV64IF-NEXT:  .LBB6_2: # %if.then
 ; RV64IF-NEXT:    addi sp, sp, -16
 ; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT:    call abort@plt
+; RV64IF-NEXT:    call abort
 ;
 ; RV32IZFINX-LABEL: br_fcmp_ole:
 ; RV32IZFINX:       # %bb.0:
@@ -365,7 +365,7 @@ define void @br_fcmp_ole(float %a, float %b) nounwind {
 ; RV32IZFINX-NEXT:  .LBB6_2: # %if.then
 ; RV32IZFINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT:    call abort@plt
+; RV32IZFINX-NEXT:    call abort
 ;
 ; RV64IZFINX-LABEL: br_fcmp_ole:
 ; RV64IZFINX:       # %bb.0:
@@ -376,7 +376,7 @@ define void @br_fcmp_ole(float %a, float %b) nounwind {
 ; RV64IZFINX-NEXT:  .LBB6_2: # %if.then
 ; RV64IZFINX-NEXT:    addi sp, sp, -16
 ; RV64IZFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT:    call abort@plt
+; RV64IZFINX-NEXT:    call abort
   %1 = fcmp ole float %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -398,7 +398,7 @@ define void @br_fcmp_one(float %a, float %b) nounwind {
 ; RV32IF-NEXT:  .LBB7_2: # %if.then
 ; RV32IF-NEXT:    addi sp, sp, -16
 ; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT:    call abort@plt
+; RV32IF-NEXT:    call abort
 ;
 ; RV64IF-LABEL: br_fcmp_one:
 ; RV64IF:       # %bb.0:
@@ -411,7 +411,7 @@ define void @br_fcmp_one(float %a, float %b) nounwind {
 ; RV64IF-NEXT:  .LBB7_2: # %if.then
 ; RV64IF-NEXT:    addi sp, sp, -16
 ; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT:    call abort@plt
+; RV64IF-NEXT:    call abort
 ;
 ; RV32IZFINX-LABEL: br_fcmp_one:
 ; RV32IZFINX:       # %bb.0:
@@ -424,7 +424,7 @@ define void @br_fcmp_one(float %a, float %b) nounwind {
 ; RV32IZFINX-NEXT:  .LBB7_2: # %if.then
 ; RV32IZFINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT:    call abort@plt
+; RV32IZFINX-NEXT:    call abort
 ;
 ; RV64IZFINX-LABEL: br_fcmp_one:
 ; RV64IZFINX:       # %bb.0:
@@ -437,7 +437,7 @@ define void @br_fcmp_one(float %a, float %b) nounwind {
 ; RV64IZFINX-NEXT:  .LBB7_2: # %if.then
 ; RV64IZFINX-NEXT:    addi sp, sp, -16
 ; RV64IZFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT:    call abort@plt
+; RV64IZFINX-NEXT:    call abort
   %1 = fcmp one float %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -459,7 +459,7 @@ define void @br_fcmp_ord(float %a, float %b) nounwind {
 ; RV32IF-NEXT:  .LBB8_2: # %if.then
 ; RV32IF-NEXT:    addi sp, sp, -16
 ; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT:    call abort@plt
+; RV32IF-NEXT:    call abort
 ;
 ; RV64IF-LABEL: br_fcmp_ord:
 ; RV64IF:       # %bb.0:
@@ -472,7 +472,7 @@ define void @br_fcmp_ord(float %a, float %b) nounwind {
 ; RV64IF-NEXT:  .LBB8_2: # %if.then
 ; RV64IF-NEXT:    addi sp, sp, -16
 ; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT:    call abort@plt
+; RV64IF-NEXT:    call abort
 ;
 ; RV32IZFINX-LABEL: br_fcmp_ord:
 ; RV32IZFINX:       # %bb.0:
@@ -485,7 +485,7 @@ define void @br_fcmp_ord(float %a, float %b) nounwind {
 ; RV32IZFINX-NEXT:  .LBB8_2: # %if.then
 ; RV32IZFINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT:    call abort@plt
+; RV32IZFINX-NEXT:    call abort
 ;
 ; RV64IZFINX-LABEL: br_fcmp_ord:
 ; RV64IZFINX:       # %bb.0:
@@ -498,7 +498,7 @@ define void @br_fcmp_ord(float %a, float %b) nounwind {
 ; RV64IZFINX-NEXT:  .LBB8_2: # %if.then
 ; RV64IZFINX-NEXT:    addi sp, sp, -16
 ; RV64IZFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT:    call abort@plt
+; RV64IZFINX-NEXT:    call abort
   %1 = fcmp ord float %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -520,7 +520,7 @@ define void @br_fcmp_ueq(float %a, float %b) nounwind {
 ; RV32IF-NEXT:  .LBB9_2: # %if.then
 ; RV32IF-NEXT:    addi sp, sp, -16
 ; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT:    call abort@plt
+; RV32IF-NEXT:    call abort
 ;
 ; RV64IF-LABEL: br_fcmp_ueq:
 ; RV64IF:       # %bb.0:
@@ -533,7 +533,7 @@ define void @br_fcmp_ueq(float %a, float %b) nounwind {
 ; RV64IF-NEXT:  .LBB9_2: # %if.then
 ; RV64IF-NEXT:    addi sp, sp, -16
 ; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT:    call abort@plt
+; RV64IF-NEXT:    call abort
 ;
 ; RV32IZFINX-LABEL: br_fcmp_ueq:
 ; RV32IZFINX:       # %bb.0:
@@ -546,7 +546,7 @@ define void @br_fcmp_ueq(float %a, float %b) nounwind {
 ; RV32IZFINX-NEXT:  .LBB9_2: # %if.then
 ; RV32IZFINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT:    call abort@plt
+; RV32IZFINX-NEXT:    call abort
 ;
 ; RV64IZFINX-LABEL: br_fcmp_ueq:
 ; RV64IZFINX:       # %bb.0:
@@ -559,7 +559,7 @@ define void @br_fcmp_ueq(float %a, float %b) nounwind {
 ; RV64IZFINX-NEXT:  .LBB9_2: # %if.then
 ; RV64IZFINX-NEXT:    addi sp, sp, -16
 ; RV64IZFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT:    call abort@plt
+; RV64IZFINX-NEXT:    call abort
   %1 = fcmp ueq float %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -579,7 +579,7 @@ define void @br_fcmp_ugt(float %a, float %b) nounwind {
 ; RV32IF-NEXT:  .LBB10_2: # %if.then
 ; RV32IF-NEXT:    addi sp, sp, -16
 ; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT:    call abort@plt
+; RV32IF-NEXT:    call abort
 ;
 ; RV64IF-LABEL: br_fcmp_ugt:
 ; RV64IF:       # %bb.0:
@@ -590,7 +590,7 @@ define void @br_fcmp_ugt(float %a, float %b) nounwind {
 ; RV64IF-NEXT:  .LBB10_2: # %if.then
 ; RV64IF-NEXT:    addi sp, sp, -16
 ; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT:    call abort@plt
+; RV64IF-NEXT:    call abort
 ;
 ; RV32IZFINX-LABEL: br_fcmp_ugt:
 ; RV32IZFINX:       # %bb.0:
@@ -601,7 +601,7 @@ define void @br_fcmp_ugt(float %a, float %b) nounwind {
 ; RV32IZFINX-NEXT:  .LBB10_2: # %if.then
 ; RV32IZFINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT:    call abort@plt
+; RV32IZFINX-NEXT:    call abort
 ;
 ; RV64IZFINX-LABEL: br_fcmp_ugt:
 ; RV64IZFINX:       # %bb.0:
@@ -612,7 +612,7 @@ define void @br_fcmp_ugt(float %a, float %b) nounwind {
 ; RV64IZFINX-NEXT:  .LBB10_2: # %if.then
 ; RV64IZFINX-NEXT:    addi sp, sp, -16
 ; RV64IZFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT:    call abort@plt
+; RV64IZFINX-NEXT:    call abort
   %1 = fcmp ugt float %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -632,7 +632,7 @@ define void @br_fcmp_uge(float %a, float %b) nounwind {
 ; RV32IF-NEXT:  .LBB11_2: # %if.then
 ; RV32IF-NEXT:    addi sp, sp, -16
 ; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT:    call abort@plt
+; RV32IF-NEXT:    call abort
 ;
 ; RV64IF-LABEL: br_fcmp_uge:
 ; RV64IF:       # %bb.0:
@@ -643,7 +643,7 @@ define void @br_fcmp_uge(float %a, float %b) nounwind {
 ; RV64IF-NEXT:  .LBB11_2: # %if.then
 ; RV64IF-NEXT:    addi sp, sp, -16
 ; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT:    call abort@plt
+; RV64IF-NEXT:    call abort
 ;
 ; RV32IZFINX-LABEL: br_fcmp_uge:
 ; RV32IZFINX:       # %bb.0:
@@ -654,7 +654,7 @@ define void @br_fcmp_uge(float %a, float %b) nounwind {
 ; RV32IZFINX-NEXT:  .LBB11_2: # %if.then
 ; RV32IZFINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT:    call abort@plt
+; RV32IZFINX-NEXT:    call abort
 ;
 ; RV64IZFINX-LABEL: br_fcmp_uge:
 ; RV64IZFINX:       # %bb.0:
@@ -665,7 +665,7 @@ define void @br_fcmp_uge(float %a, float %b) nounwind {
 ; RV64IZFINX-NEXT:  .LBB11_2: # %if.then
 ; RV64IZFINX-NEXT:    addi sp, sp, -16
 ; RV64IZFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT:    call abort@plt
+; RV64IZFINX-NEXT:    call abort
   %1 = fcmp uge float %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -685,7 +685,7 @@ define void @br_fcmp_ult(float %a, float %b) nounwind {
 ; RV32IF-NEXT:  .LBB12_2: # %if.then
 ; RV32IF-NEXT:    addi sp, sp, -16
 ; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT:    call abort@plt
+; RV32IF-NEXT:    call abort
 ;
 ; RV64IF-LABEL: br_fcmp_ult:
 ; RV64IF:       # %bb.0:
@@ -696,7 +696,7 @@ define void @br_fcmp_ult(float %a, float %b) nounwind {
 ; RV64IF-NEXT:  .LBB12_2: # %if.then
 ; RV64IF-NEXT:    addi sp, sp, -16
 ; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT:    call abort@plt
+; RV64IF-NEXT:    call abort
 ;
 ; RV32IZFINX-LABEL: br_fcmp_ult:
 ; RV32IZFINX:       # %bb.0:
@@ -707,7 +707,7 @@ define void @br_fcmp_ult(float %a, float %b) nounwind {
 ; RV32IZFINX-NEXT:  .LBB12_2: # %if.then
 ; RV32IZFINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT:    call abort@plt
+; RV32IZFINX-NEXT:    call abort
 ;
 ; RV64IZFINX-LABEL: br_fcmp_ult:
 ; RV64IZFINX:       # %bb.0:
@@ -718,7 +718,7 @@ define void @br_fcmp_ult(float %a, float %b) nounwind {
 ; RV64IZFINX-NEXT:  .LBB12_2: # %if.then
 ; RV64IZFINX-NEXT:    addi sp, sp, -16
 ; RV64IZFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT:    call abort@plt
+; RV64IZFINX-NEXT:    call abort
   %1 = fcmp ult float %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -738,7 +738,7 @@ define void @br_fcmp_ule(float %a, float %b) nounwind {
 ; RV32IF-NEXT:  .LBB13_2: # %if.then
 ; RV32IF-NEXT:    addi sp, sp, -16
 ; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT:    call abort@plt
+; RV32IF-NEXT:    call abort
 ;
 ; RV64IF-LABEL: br_fcmp_ule:
 ; RV64IF:       # %bb.0:
@@ -749,7 +749,7 @@ define void @br_fcmp_ule(float %a, float %b) nounwind {
 ; RV64IF-NEXT:  .LBB13_2: # %if.then
 ; RV64IF-NEXT:    addi sp, sp, -16
 ; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT:    call abort@plt
+; RV64IF-NEXT:    call abort
 ;
 ; RV32IZFINX-LABEL: br_fcmp_ule:
 ; RV32IZFINX:       # %bb.0:
@@ -760,7 +760,7 @@ define void @br_fcmp_ule(float %a, float %b) nounwind {
 ; RV32IZFINX-NEXT:  .LBB13_2: # %if.then
 ; RV32IZFINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT:    call abort@plt
+; RV32IZFINX-NEXT:    call abort
 ;
 ; RV64IZFINX-LABEL: br_fcmp_ule:
 ; RV64IZFINX:       # %bb.0:
@@ -771,7 +771,7 @@ define void @br_fcmp_ule(float %a, float %b) nounwind {
 ; RV64IZFINX-NEXT:  .LBB13_2: # %if.then
 ; RV64IZFINX-NEXT:    addi sp, sp, -16
 ; RV64IZFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT:    call abort@plt
+; RV64IZFINX-NEXT:    call abort
   %1 = fcmp ule float %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -791,7 +791,7 @@ define void @br_fcmp_une(float %a, float %b) nounwind {
 ; RV32IF-NEXT:  .LBB14_2: # %if.then
 ; RV32IF-NEXT:    addi sp, sp, -16
 ; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT:    call abort@plt
+; RV32IF-NEXT:    call abort
 ;
 ; RV64IF-LABEL: br_fcmp_une:
 ; RV64IF:       # %bb.0:
@@ -802,7 +802,7 @@ define void @br_fcmp_une(float %a, float %b) nounwind {
 ; RV64IF-NEXT:  .LBB14_2: # %if.then
 ; RV64IF-NEXT:    addi sp, sp, -16
 ; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT:    call abort@plt
+; RV64IF-NEXT:    call abort
 ;
 ; RV32IZFINX-LABEL: br_fcmp_une:
 ; RV32IZFINX:       # %bb.0:
@@ -813,7 +813,7 @@ define void @br_fcmp_une(float %a, float %b) nounwind {
 ; RV32IZFINX-NEXT:  .LBB14_2: # %if.then
 ; RV32IZFINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT:    call abort@plt
+; RV32IZFINX-NEXT:    call abort
 ;
 ; RV64IZFINX-LABEL: br_fcmp_une:
 ; RV64IZFINX:       # %bb.0:
@@ -824,7 +824,7 @@ define void @br_fcmp_une(float %a, float %b) nounwind {
 ; RV64IZFINX-NEXT:  .LBB14_2: # %if.then
 ; RV64IZFINX-NEXT:    addi sp, sp, -16
 ; RV64IZFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT:    call abort@plt
+; RV64IZFINX-NEXT:    call abort
   %1 = fcmp une float %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -846,7 +846,7 @@ define void @br_fcmp_uno(float %a, float %b) nounwind {
 ; RV32IF-NEXT:  .LBB15_2: # %if.then
 ; RV32IF-NEXT:    addi sp, sp, -16
 ; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT:    call abort@plt
+; RV32IF-NEXT:    call abort
 ;
 ; RV64IF-LABEL: br_fcmp_uno:
 ; RV64IF:       # %bb.0:
@@ -859,7 +859,7 @@ define void @br_fcmp_uno(float %a, float %b) nounwind {
 ; RV64IF-NEXT:  .LBB15_2: # %if.then
 ; RV64IF-NEXT:    addi sp, sp, -16
 ; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT:    call abort@plt
+; RV64IF-NEXT:    call abort
 ;
 ; RV32IZFINX-LABEL: br_fcmp_uno:
 ; RV32IZFINX:       # %bb.0:
@@ -872,7 +872,7 @@ define void @br_fcmp_uno(float %a, float %b) nounwind {
 ; RV32IZFINX-NEXT:  .LBB15_2: # %if.then
 ; RV32IZFINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT:    call abort@plt
+; RV32IZFINX-NEXT:    call abort
 ;
 ; RV64IZFINX-LABEL: br_fcmp_uno:
 ; RV64IZFINX:       # %bb.0:
@@ -885,7 +885,7 @@ define void @br_fcmp_uno(float %a, float %b) nounwind {
 ; RV64IZFINX-NEXT:  .LBB15_2: # %if.then
 ; RV64IZFINX-NEXT:    addi sp, sp, -16
 ; RV64IZFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT:    call abort@plt
+; RV64IZFINX-NEXT:    call abort
   %1 = fcmp uno float %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -905,7 +905,7 @@ define void @br_fcmp_true(float %a, float %b) nounwind {
 ; RV32IF-NEXT:  .LBB16_2: # %if.then
 ; RV32IF-NEXT:    addi sp, sp, -16
 ; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT:    call abort@plt
+; RV32IF-NEXT:    call abort
 ;
 ; RV64IF-LABEL: br_fcmp_true:
 ; RV64IF:       # %bb.0:
@@ -916,7 +916,7 @@ define void @br_fcmp_true(float %a, float %b) nounwind {
 ; RV64IF-NEXT:  .LBB16_2: # %if.then
 ; RV64IF-NEXT:    addi sp, sp, -16
 ; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT:    call abort@plt
+; RV64IF-NEXT:    call abort
 ;
 ; RV32IZFINX-LABEL: br_fcmp_true:
 ; RV32IZFINX:       # %bb.0:
@@ -927,7 +927,7 @@ define void @br_fcmp_true(float %a, float %b) nounwind {
 ; RV32IZFINX-NEXT:  .LBB16_2: # %if.then
 ; RV32IZFINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT:    call abort@plt
+; RV32IZFINX-NEXT:    call abort
 ;
 ; RV64IZFINX-LABEL: br_fcmp_true:
 ; RV64IZFINX:       # %bb.0:
@@ -938,7 +938,7 @@ define void @br_fcmp_true(float %a, float %b) nounwind {
 ; RV64IZFINX-NEXT:  .LBB16_2: # %if.then
 ; RV64IZFINX-NEXT:    addi sp, sp, -16
 ; RV64IZFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT:    call abort@plt
+; RV64IZFINX-NEXT:    call abort
   %1 = fcmp true float %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -958,12 +958,12 @@ define i32 @br_fcmp_store_load_stack_slot(float %a, float %b) nounwind {
 ; RV32IF-NEXT:    fsw fs0, 8(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    fmv.w.x fs0, zero
 ; RV32IF-NEXT:    fmv.s fa0, fs0
-; RV32IF-NEXT:    call dummy@plt
+; RV32IF-NEXT:    call dummy
 ; RV32IF-NEXT:    feq.s a0, fa0, fs0
 ; RV32IF-NEXT:    beqz a0, .LBB17_3
 ; RV32IF-NEXT:  # %bb.1: # %if.end
 ; RV32IF-NEXT:    fmv.s fa0, fs0
-; RV32IF-NEXT:    call dummy@plt
+; RV32IF-NEXT:    call dummy
 ; RV32IF-NEXT:    feq.s a0, fa0, fs0
 ; RV32IF-NEXT:    beqz a0, .LBB17_3
 ; RV32IF-NEXT:  # %bb.2: # %if.end4
@@ -973,7 +973,7 @@ define i32 @br_fcmp_store_load_stack_slot(float %a, float %b) nounwind {
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
 ; RV32IF-NEXT:  .LBB17_3: # %if.then
-; RV32IF-NEXT:    call abort@plt
+; RV32IF-NEXT:    call abort
 ;
 ; RV64IF-LABEL: br_fcmp_store_load_stack_slot:
 ; RV64IF:       # %bb.0: # %entry
@@ -982,12 +982,12 @@ define i32 @br_fcmp_store_load_stack_slot(float %a, float %b) nounwind {
 ; RV64IF-NEXT:    fsw fs0, 4(sp) # 4-byte Folded Spill
 ; RV64IF-NEXT:    fmv.w.x fs0, zero
 ; RV64IF-NEXT:    fmv.s fa0, fs0
-; RV64IF-NEXT:    call dummy@plt
+; RV64IF-NEXT:    call dummy
 ; RV64IF-NEXT:    feq.s a0, fa0, fs0
 ; RV64IF-NEXT:    beqz a0, .LBB17_3
 ; RV64IF-NEXT:  # %bb.1: # %if.end
 ; RV64IF-NEXT:    fmv.s fa0, fs0
-; RV64IF-NEXT:    call dummy@plt
+; RV64IF-NEXT:    call dummy
 ; RV64IF-NEXT:    feq.s a0, fa0, fs0
 ; RV64IF-NEXT:    beqz a0, .LBB17_3
 ; RV64IF-NEXT:  # %bb.2: # %if.end4
@@ -997,19 +997,19 @@ define i32 @br_fcmp_store_load_stack_slot(float %a, float %b) nounwind {
 ; RV64IF-NEXT:    addi sp, sp, 16
 ; RV64IF-NEXT:    ret
 ; RV64IF-NEXT:  .LBB17_3: # %if.then
-; RV64IF-NEXT:    call abort@plt
+; RV64IF-NEXT:    call abort
 ;
 ; RV32IZFINX-LABEL: br_fcmp_store_load_stack_slot:
 ; RV32IZFINX:       # %bb.0: # %entry
 ; RV32IZFINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFINX-NEXT:    li a0, 0
-; RV32IZFINX-NEXT:    call dummy@plt
+; RV32IZFINX-NEXT:    call dummy
 ; RV32IZFINX-NEXT:    feq.s a0, a0, zero
 ; RV32IZFINX-NEXT:    beqz a0, .LBB17_3
 ; RV32IZFINX-NEXT:  # %bb.1: # %if.end
 ; RV32IZFINX-NEXT:    li a0, 0
-; RV32IZFINX-NEXT:    call dummy@plt
+; RV32IZFINX-NEXT:    call dummy
 ; RV32IZFINX-NEXT:    feq.s a0, a0, zero
 ; RV32IZFINX-NEXT:    beqz a0, .LBB17_3
 ; RV32IZFINX-NEXT:  # %bb.2: # %if.end4
@@ -1018,19 +1018,19 @@ define i32 @br_fcmp_store_load_stack_slot(float %a, float %b) nounwind {
 ; RV32IZFINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINX-NEXT:    ret
 ; RV32IZFINX-NEXT:  .LBB17_3: # %if.then
-; RV32IZFINX-NEXT:    call abort@plt
+; RV32IZFINX-NEXT:    call abort
 ;
 ; RV64IZFINX-LABEL: br_fcmp_store_load_stack_slot:
 ; RV64IZFINX:       # %bb.0: # %entry
 ; RV64IZFINX-NEXT:    addi sp, sp, -16
 ; RV64IZFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZFINX-NEXT:    li a0, 0
-; RV64IZFINX-NEXT:    call dummy@plt
+; RV64IZFINX-NEXT:    call dummy
 ; RV64IZFINX-NEXT:    feq.s a0, a0, zero
 ; RV64IZFINX-NEXT:    beqz a0, .LBB17_3
 ; RV64IZFINX-NEXT:  # %bb.1: # %if.end
 ; RV64IZFINX-NEXT:    li a0, 0
-; RV64IZFINX-NEXT:    call dummy@plt
+; RV64IZFINX-NEXT:    call dummy
 ; RV64IZFINX-NEXT:    feq.s a0, a0, zero
 ; RV64IZFINX-NEXT:    beqz a0, .LBB17_3
 ; RV64IZFINX-NEXT:  # %bb.2: # %if.end4
@@ -1039,7 +1039,7 @@ define i32 @br_fcmp_store_load_stack_slot(float %a, float %b) nounwind {
 ; RV64IZFINX-NEXT:    addi sp, sp, 16
 ; RV64IZFINX-NEXT:    ret
 ; RV64IZFINX-NEXT:  .LBB17_3: # %if.then
-; RV64IZFINX-NEXT:    call abort@plt
+; RV64IZFINX-NEXT:    call abort
 entry:
   %call = call float @dummy(float 0.000000e+00)
   %cmp = fcmp une float %call, 0.000000e+00
diff --git a/llvm/test/CodeGen/RISCV/float-convert-strict.ll b/llvm/test/CodeGen/RISCV/float-convert-strict.ll
index 6168ade0839fb1..402d6f0362e6d3 100644
--- a/llvm/test/CodeGen/RISCV/float-convert-strict.ll
+++ b/llvm/test/CodeGen/RISCV/float-convert-strict.ll
@@ -35,7 +35,7 @@ define i32 @fcvt_w_s(float %a) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __fixsfsi@plt
+; RV32I-NEXT:    call __fixsfsi
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -44,7 +44,7 @@ define i32 @fcvt_w_s(float %a) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __fixsfsi@plt
+; RV64I-NEXT:    call __fixsfsi
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -68,7 +68,7 @@ define i32 @fcvt_wu_s(float %a) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __fixunssfsi@plt
+; RV32I-NEXT:    call __fixunssfsi
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -77,7 +77,7 @@ define i32 @fcvt_wu_s(float %a) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __fixunssfsi@plt
+; RV64I-NEXT:    call __fixunssfsi
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -107,7 +107,7 @@ define i32 @fcvt_wu_s_multiple_use(float %x, ptr %y) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __fixunssfsi@plt
+; RV32I-NEXT:    call __fixunssfsi
 ; RV32I-NEXT:    seqz a1, a0
 ; RV32I-NEXT:    add a0, a0, a1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -118,7 +118,7 @@ define i32 @fcvt_wu_s_multiple_use(float %x, ptr %y) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __fixunssfsi@plt
+; RV64I-NEXT:    call __fixunssfsi
 ; RV64I-NEXT:    seqz a1, a0
 ; RV64I-NEXT:    add a0, a0, a1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -145,7 +145,7 @@ define float @fcvt_s_w(i32 %a) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __floatsisf@plt
+; RV32I-NEXT:    call __floatsisf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -155,7 +155,7 @@ define float @fcvt_s_w(i32 %a) nounwind strictfp {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sext.w a0, a0
-; RV64I-NEXT:    call __floatsisf@plt
+; RV64I-NEXT:    call __floatsisf
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -182,7 +182,7 @@ define float @fcvt_s_w_load(ptr %p) nounwind strictfp {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    lw a0, 0(a0)
-; RV32I-NEXT:    call __floatsisf@plt
+; RV32I-NEXT:    call __floatsisf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -192,7 +192,7 @@ define float @fcvt_s_w_load(ptr %p) nounwind strictfp {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    lw a0, 0(a0)
-; RV64I-NEXT:    call __floatsisf@plt
+; RV64I-NEXT:    call __floatsisf
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -216,7 +216,7 @@ define float @fcvt_s_wu(i32 %a) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __floatunsisf@plt
+; RV32I-NEXT:    call __floatunsisf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -226,7 +226,7 @@ define float @fcvt_s_wu(i32 %a) nounwind strictfp {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sext.w a0, a0
-; RV64I-NEXT:    call __floatunsisf@plt
+; RV64I-NEXT:    call __floatunsisf
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -265,7 +265,7 @@ define float @fcvt_s_wu_load(ptr %p) nounwind strictfp {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    lw a0, 0(a0)
-; RV32I-NEXT:    call __floatunsisf@plt
+; RV32I-NEXT:    call __floatunsisf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -275,7 +275,7 @@ define float @fcvt_s_wu_load(ptr %p) nounwind strictfp {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    lw a0, 0(a0)
-; RV64I-NEXT:    call __floatunsisf@plt
+; RV64I-NEXT:    call __floatunsisf
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -289,7 +289,7 @@ define i64 @fcvt_l_s(float %a) nounwind strictfp {
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
 ; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT:    call __fixsfdi@plt
+; RV32IF-NEXT:    call __fixsfdi
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
@@ -303,7 +303,7 @@ define i64 @fcvt_l_s(float %a) nounwind strictfp {
 ; RV32IZFINX:       # %bb.0:
 ; RV32IZFINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT:    call __fixsfdi@plt
+; RV32IZFINX-NEXT:    call __fixsfdi
 ; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINX-NEXT:    ret
@@ -317,7 +317,7 @@ define i64 @fcvt_l_s(float %a) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __fixsfdi@plt
+; RV32I-NEXT:    call __fixsfdi
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -326,7 +326,7 @@ define i64 @fcvt_l_s(float %a) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __fixsfdi@plt
+; RV64I-NEXT:    call __fixsfdi
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -340,7 +340,7 @@ define i64 @fcvt_lu_s(float %a) nounwind strictfp {
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
 ; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT:    call __fixunssfdi@plt
+; RV32IF-NEXT:    call __fixunssfdi
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
@@ -354,7 +354,7 @@ define i64 @fcvt_lu_s(float %a) nounwind strictfp {
 ; RV32IZFINX:       # %bb.0:
 ; RV32IZFINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT:    call __fixunssfdi@plt
+; RV32IZFINX-NEXT:    call __fixunssfdi
 ; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINX-NEXT:    ret
@@ -368,7 +368,7 @@ define i64 @fcvt_lu_s(float %a) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __fixunssfdi@plt
+; RV32I-NEXT:    call __fixunssfdi
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -377,7 +377,7 @@ define i64 @fcvt_lu_s(float %a) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __fixunssfdi@plt
+; RV64I-NEXT:    call __fixunssfdi
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -391,7 +391,7 @@ define float @fcvt_s_l(i64 %a) nounwind strictfp {
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
 ; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT:    call __floatdisf@plt
+; RV32IF-NEXT:    call __floatdisf
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
@@ -405,7 +405,7 @@ define float @fcvt_s_l(i64 %a) nounwind strictfp {
 ; RV32IZFINX:       # %bb.0:
 ; RV32IZFINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT:    call __floatdisf@plt
+; RV32IZFINX-NEXT:    call __floatdisf
 ; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINX-NEXT:    ret
@@ -419,7 +419,7 @@ define float @fcvt_s_l(i64 %a) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __floatdisf@plt
+; RV32I-NEXT:    call __floatdisf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -428,7 +428,7 @@ define float @fcvt_s_l(i64 %a) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __floatdisf@plt
+; RV64I-NEXT:    call __floatdisf
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -442,7 +442,7 @@ define float @fcvt_s_lu(i64 %a) nounwind strictfp {
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
 ; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT:    call __floatundisf@plt
+; RV32IF-NEXT:    call __floatundisf
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
@@ -456,7 +456,7 @@ define float @fcvt_s_lu(i64 %a) nounwind strictfp {
 ; RV32IZFINX:       # %bb.0:
 ; RV32IZFINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT:    call __floatundisf@plt
+; RV32IZFINX-NEXT:    call __floatundisf
 ; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINX-NEXT:    ret
@@ -470,7 +470,7 @@ define float @fcvt_s_lu(i64 %a) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __floatundisf@plt
+; RV32I-NEXT:    call __floatundisf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -479,7 +479,7 @@ define float @fcvt_s_lu(i64 %a) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __floatundisf@plt
+; RV64I-NEXT:    call __floatundisf
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -503,7 +503,7 @@ define float @fcvt_s_w_i8(i8 signext %a) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __floatsisf@plt
+; RV32I-NEXT:    call __floatsisf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -512,7 +512,7 @@ define float @fcvt_s_w_i8(i8 signext %a) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __floatsisf@plt
+; RV64I-NEXT:    call __floatsisf
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -536,7 +536,7 @@ define float @fcvt_s_wu_i8(i8 zeroext %a) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __floatunsisf@plt
+; RV32I-NEXT:    call __floatunsisf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -545,7 +545,7 @@ define float @fcvt_s_wu_i8(i8 zeroext %a) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __floatunsisf@plt
+; RV64I-NEXT:    call __floatunsisf
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -569,7 +569,7 @@ define float @fcvt_s_w_i16(i16 signext %a) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __floatsisf@plt
+; RV32I-NEXT:    call __floatsisf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -578,7 +578,7 @@ define float @fcvt_s_w_i16(i16 signext %a) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __floatsisf@plt
+; RV64I-NEXT:    call __floatsisf
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -602,7 +602,7 @@ define float @fcvt_s_wu_i16(i16 zeroext %a) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __floatunsisf@plt
+; RV32I-NEXT:    call __floatunsisf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -611,7 +611,7 @@ define float @fcvt_s_wu_i16(i16 zeroext %a) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __floatunsisf@plt
+; RV64I-NEXT:    call __floatunsisf
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -661,7 +661,7 @@ define signext i32 @fcvt_s_w_demanded_bits(i32 signext %0, ptr %1) nounwind stri
 ; RV32I-NEXT:    mv s0, a1
 ; RV32I-NEXT:    addi s1, a0, 1
 ; RV32I-NEXT:    mv a0, s1
-; RV32I-NEXT:    call __floatsisf@plt
+; RV32I-NEXT:    call __floatsisf
 ; RV32I-NEXT:    sw a0, 0(s0)
 ; RV32I-NEXT:    mv a0, s1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -679,7 +679,7 @@ define signext i32 @fcvt_s_w_demanded_bits(i32 signext %0, ptr %1) nounwind stri
 ; RV64I-NEXT:    mv s0, a1
 ; RV64I-NEXT:    addiw s1, a0, 1
 ; RV64I-NEXT:    mv a0, s1
-; RV64I-NEXT:    call __floatsisf@plt
+; RV64I-NEXT:    call __floatsisf
 ; RV64I-NEXT:    sw a0, 0(s0)
 ; RV64I-NEXT:    mv a0, s1
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
@@ -732,7 +732,7 @@ define signext i32 @fcvt_s_wu_demanded_bits(i32 signext %0, ptr %1) nounwind str
 ; RV32I-NEXT:    mv s0, a1
 ; RV32I-NEXT:    addi s1, a0, 1
 ; RV32I-NEXT:    mv a0, s1
-; RV32I-NEXT:    call __floatunsisf@plt
+; RV32I-NEXT:    call __floatunsisf
 ; RV32I-NEXT:    sw a0, 0(s0)
 ; RV32I-NEXT:    mv a0, s1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -750,7 +750,7 @@ define signext i32 @fcvt_s_wu_demanded_bits(i32 signext %0, ptr %1) nounwind str
 ; RV64I-NEXT:    mv s0, a1
 ; RV64I-NEXT:    addiw s1, a0, 1
 ; RV64I-NEXT:    mv a0, s1
-; RV64I-NEXT:    call __floatunsisf@plt
+; RV64I-NEXT:    call __floatunsisf
 ; RV64I-NEXT:    sw a0, 0(s0)
 ; RV64I-NEXT:    mv a0, s1
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/float-convert.ll b/llvm/test/CodeGen/RISCV/float-convert.ll
index 235979b122215a..f1e444b5b624b4 100644
--- a/llvm/test/CodeGen/RISCV/float-convert.ll
+++ b/llvm/test/CodeGen/RISCV/float-convert.ll
@@ -27,7 +27,7 @@ define i32 @fcvt_w_s(float %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __fixsfsi@plt
+; RV32I-NEXT:    call __fixsfsi
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -36,7 +36,7 @@ define i32 @fcvt_w_s(float %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __fixsfsi@plt
+; RV64I-NEXT:    call __fixsfsi
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -73,10 +73,10 @@ define i32 @fcvt_w_s_sat(float %a) nounwind {
 ; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lui a1, 847872
-; RV32I-NEXT:    call __gesf2@plt
+; RV32I-NEXT:    call __gesf2
 ; RV32I-NEXT:    mv s2, a0
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __fixsfsi@plt
+; RV32I-NEXT:    call __fixsfsi
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    lui s3, 524288
 ; RV32I-NEXT:    bgez s2, .LBB1_2
@@ -86,14 +86,14 @@ define i32 @fcvt_w_s_sat(float %a) nounwind {
 ; RV32I-NEXT:    lui a1, 323584
 ; RV32I-NEXT:    addi a1, a1, -1
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __gtsf2@plt
+; RV32I-NEXT:    call __gtsf2
 ; RV32I-NEXT:    blez a0, .LBB1_4
 ; RV32I-NEXT:  # %bb.3: # %start
 ; RV32I-NEXT:    addi s1, s3, -1
 ; RV32I-NEXT:  .LBB1_4: # %start
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    mv a1, s0
-; RV32I-NEXT:    call __unordsf2@plt
+; RV32I-NEXT:    call __unordsf2
 ; RV32I-NEXT:    snez a0, a0
 ; RV32I-NEXT:    addi a0, a0, -1
 ; RV32I-NEXT:    and a0, a0, s1
@@ -115,10 +115,10 @@ define i32 @fcvt_w_s_sat(float %a) nounwind {
 ; RV64I-NEXT:    sd s3, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    lui a1, 847872
-; RV64I-NEXT:    call __gesf2@plt
+; RV64I-NEXT:    call __gesf2
 ; RV64I-NEXT:    mv s2, a0
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __fixsfdi@plt
+; RV64I-NEXT:    call __fixsfdi
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    lui s3, 524288
 ; RV64I-NEXT:    bgez s2, .LBB1_2
@@ -128,14 +128,14 @@ define i32 @fcvt_w_s_sat(float %a) nounwind {
 ; RV64I-NEXT:    lui a1, 323584
 ; RV64I-NEXT:    addiw a1, a1, -1
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __gtsf2@plt
+; RV64I-NEXT:    call __gtsf2
 ; RV64I-NEXT:    blez a0, .LBB1_4
 ; RV64I-NEXT:  # %bb.3: # %start
 ; RV64I-NEXT:    addiw s1, s3, -1
 ; RV64I-NEXT:  .LBB1_4: # %start
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    mv a1, s0
-; RV64I-NEXT:    call __unordsf2@plt
+; RV64I-NEXT:    call __unordsf2
 ; RV64I-NEXT:    snez a0, a0
 ; RV64I-NEXT:    addi a0, a0, -1
 ; RV64I-NEXT:    and a0, a0, s1
@@ -167,7 +167,7 @@ define i32 @fcvt_wu_s(float %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __fixunssfsi@plt
+; RV32I-NEXT:    call __fixunssfsi
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -176,7 +176,7 @@ define i32 @fcvt_wu_s(float %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __fixunssfsi@plt
+; RV64I-NEXT:    call __fixunssfsi
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -205,7 +205,7 @@ define i32 @fcvt_wu_s_multiple_use(float %x, ptr %y) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __fixunssfsi@plt
+; RV32I-NEXT:    call __fixunssfsi
 ; RV32I-NEXT:    seqz a1, a0
 ; RV32I-NEXT:    add a0, a0, a1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -216,7 +216,7 @@ define i32 @fcvt_wu_s_multiple_use(float %x, ptr %y) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __fixunssfsi@plt
+; RV64I-NEXT:    call __fixunssfsi
 ; RV64I-NEXT:    seqz a1, a0
 ; RV64I-NEXT:    add a0, a0, a1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -277,16 +277,16 @@ define i32 @fcvt_wu_s_sat(float %a) nounwind {
 ; RV32I-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    li a1, 0
-; RV32I-NEXT:    call __gesf2@plt
+; RV32I-NEXT:    call __gesf2
 ; RV32I-NEXT:    slti a0, a0, 0
 ; RV32I-NEXT:    addi s1, a0, -1
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __fixunssfsi@plt
+; RV32I-NEXT:    call __fixunssfsi
 ; RV32I-NEXT:    and s1, s1, a0
 ; RV32I-NEXT:    lui a1, 325632
 ; RV32I-NEXT:    addi a1, a1, -1
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __gtsf2@plt
+; RV32I-NEXT:    call __gtsf2
 ; RV32I-NEXT:    sgtz a0, a0
 ; RV32I-NEXT:    neg a0, a0
 ; RV32I-NEXT:    or a0, a0, s1
@@ -305,15 +305,15 @@ define i32 @fcvt_wu_s_sat(float %a) nounwind {
 ; RV64I-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s2, a0
 ; RV64I-NEXT:    li a1, 0
-; RV64I-NEXT:    call __gesf2@plt
+; RV64I-NEXT:    call __gesf2
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    mv a0, s2
-; RV64I-NEXT:    call __fixunssfdi@plt
+; RV64I-NEXT:    call __fixunssfdi
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    lui a1, 325632
 ; RV64I-NEXT:    addiw a1, a1, -1
 ; RV64I-NEXT:    mv a0, s2
-; RV64I-NEXT:    call __gtsf2@plt
+; RV64I-NEXT:    call __gtsf2
 ; RV64I-NEXT:    blez a0, .LBB4_2
 ; RV64I-NEXT:  # %bb.1: # %start
 ; RV64I-NEXT:    li a0, -1
@@ -352,7 +352,7 @@ define i32 @fmv_x_w(float %a, float %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __addsf3@plt
+; RV32I-NEXT:    call __addsf3
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -361,7 +361,7 @@ define i32 @fmv_x_w(float %a, float %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __addsf3@plt
+; RV64I-NEXT:    call __addsf3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -386,7 +386,7 @@ define float @fcvt_s_w(i32 %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __floatsisf@plt
+; RV32I-NEXT:    call __floatsisf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -396,7 +396,7 @@ define float @fcvt_s_w(i32 %a) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sext.w a0, a0
-; RV64I-NEXT:    call __floatsisf@plt
+; RV64I-NEXT:    call __floatsisf
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -422,7 +422,7 @@ define float @fcvt_s_w_load(ptr %p) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    lw a0, 0(a0)
-; RV32I-NEXT:    call __floatsisf@plt
+; RV32I-NEXT:    call __floatsisf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -432,7 +432,7 @@ define float @fcvt_s_w_load(ptr %p) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    lw a0, 0(a0)
-; RV64I-NEXT:    call __floatsisf@plt
+; RV64I-NEXT:    call __floatsisf
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -456,7 +456,7 @@ define float @fcvt_s_wu(i32 %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __floatunsisf@plt
+; RV32I-NEXT:    call __floatunsisf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -466,7 +466,7 @@ define float @fcvt_s_wu(i32 %a) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sext.w a0, a0
-; RV64I-NEXT:    call __floatunsisf@plt
+; RV64I-NEXT:    call __floatunsisf
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -504,7 +504,7 @@ define float @fcvt_s_wu_load(ptr %p) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    lw a0, 0(a0)
-; RV32I-NEXT:    call __floatunsisf@plt
+; RV32I-NEXT:    call __floatunsisf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -514,7 +514,7 @@ define float @fcvt_s_wu_load(ptr %p) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    lw a0, 0(a0)
-; RV64I-NEXT:    call __floatunsisf@plt
+; RV64I-NEXT:    call __floatunsisf
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -540,7 +540,7 @@ define float @fmv_w_x(i32 %a, i32 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __addsf3@plt
+; RV32I-NEXT:    call __addsf3
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -549,7 +549,7 @@ define float @fmv_w_x(i32 %a, i32 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __addsf3@plt
+; RV64I-NEXT:    call __addsf3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -565,7 +565,7 @@ define i64 @fcvt_l_s(float %a) nounwind {
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
 ; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT:    call __fixsfdi@plt
+; RV32IF-NEXT:    call __fixsfdi
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
@@ -579,7 +579,7 @@ define i64 @fcvt_l_s(float %a) nounwind {
 ; RV32IZFINX:       # %bb.0:
 ; RV32IZFINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT:    call __fixsfdi@plt
+; RV32IZFINX-NEXT:    call __fixsfdi
 ; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINX-NEXT:    ret
@@ -593,7 +593,7 @@ define i64 @fcvt_l_s(float %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __fixsfdi@plt
+; RV32I-NEXT:    call __fixsfdi
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -602,7 +602,7 @@ define i64 @fcvt_l_s(float %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __fixsfdi@plt
+; RV64I-NEXT:    call __fixsfdi
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -621,7 +621,7 @@ define i64 @fcvt_l_s_sat(float %a) nounwind {
 ; RV32IF-NEXT:    lui a0, 913408
 ; RV32IF-NEXT:    fmv.w.x fa5, a0
 ; RV32IF-NEXT:    fle.s s0, fa5, fa0
-; RV32IF-NEXT:    call __fixsfdi@plt
+; RV32IF-NEXT:    call __fixsfdi
 ; RV32IF-NEXT:    lui a4, 524288
 ; RV32IF-NEXT:    lui a2, 524288
 ; RV32IF-NEXT:    beqz s0, .LBB12_2
@@ -668,7 +668,7 @@ define i64 @fcvt_l_s_sat(float %a) nounwind {
 ; RV32IZFINX-NEXT:    lui a0, 913408
 ; RV32IZFINX-NEXT:    fle.s s1, a0, s0
 ; RV32IZFINX-NEXT:    mv a0, s0
-; RV32IZFINX-NEXT:    call __fixsfdi@plt
+; RV32IZFINX-NEXT:    call __fixsfdi
 ; RV32IZFINX-NEXT:    lui a4, 524288
 ; RV32IZFINX-NEXT:    lui a2, 524288
 ; RV32IZFINX-NEXT:    beqz s1, .LBB12_2
@@ -717,10 +717,10 @@ define i64 @fcvt_l_s_sat(float %a) nounwind {
 ; RV32I-NEXT:    sw s5, 4(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lui a1, 913408
-; RV32I-NEXT:    call __gesf2@plt
+; RV32I-NEXT:    call __gesf2
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __fixsfdi@plt
+; RV32I-NEXT:    call __fixsfdi
 ; RV32I-NEXT:    mv s2, a0
 ; RV32I-NEXT:    mv s3, a1
 ; RV32I-NEXT:    lui s5, 524288
@@ -731,7 +731,7 @@ define i64 @fcvt_l_s_sat(float %a) nounwind {
 ; RV32I-NEXT:    lui a1, 389120
 ; RV32I-NEXT:    addi a1, a1, -1
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __gtsf2@plt
+; RV32I-NEXT:    call __gtsf2
 ; RV32I-NEXT:    mv s4, a0
 ; RV32I-NEXT:    blez a0, .LBB12_4
 ; RV32I-NEXT:  # %bb.3: # %start
@@ -739,7 +739,7 @@ define i64 @fcvt_l_s_sat(float %a) nounwind {
 ; RV32I-NEXT:  .LBB12_4: # %start
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    mv a1, s0
-; RV32I-NEXT:    call __unordsf2@plt
+; RV32I-NEXT:    call __unordsf2
 ; RV32I-NEXT:    snez a0, a0
 ; RV32I-NEXT:    addi a0, a0, -1
 ; RV32I-NEXT:    and a1, a0, s3
@@ -770,10 +770,10 @@ define i64 @fcvt_l_s_sat(float %a) nounwind {
 ; RV64I-NEXT:    sd s3, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    lui a1, 913408
-; RV64I-NEXT:    call __gesf2@plt
+; RV64I-NEXT:    call __gesf2
 ; RV64I-NEXT:    mv s2, a0
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __fixsfdi@plt
+; RV64I-NEXT:    call __fixsfdi
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    li s3, -1
 ; RV64I-NEXT:    bgez s2, .LBB12_2
@@ -783,14 +783,14 @@ define i64 @fcvt_l_s_sat(float %a) nounwind {
 ; RV64I-NEXT:    lui a1, 389120
 ; RV64I-NEXT:    addiw a1, a1, -1
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __gtsf2@plt
+; RV64I-NEXT:    call __gtsf2
 ; RV64I-NEXT:    blez a0, .LBB12_4
 ; RV64I-NEXT:  # %bb.3: # %start
 ; RV64I-NEXT:    srli s1, s3, 1
 ; RV64I-NEXT:  .LBB12_4: # %start
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    mv a1, s0
-; RV64I-NEXT:    call __unordsf2@plt
+; RV64I-NEXT:    call __unordsf2
 ; RV64I-NEXT:    snez a0, a0
 ; RV64I-NEXT:    addi a0, a0, -1
 ; RV64I-NEXT:    and a0, a0, s1
@@ -812,7 +812,7 @@ define i64 @fcvt_lu_s(float %a) nounwind {
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
 ; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT:    call __fixunssfdi@plt
+; RV32IF-NEXT:    call __fixunssfdi
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
@@ -826,7 +826,7 @@ define i64 @fcvt_lu_s(float %a) nounwind {
 ; RV32IZFINX:       # %bb.0:
 ; RV32IZFINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT:    call __fixunssfdi@plt
+; RV32IZFINX-NEXT:    call __fixunssfdi
 ; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINX-NEXT:    ret
@@ -840,7 +840,7 @@ define i64 @fcvt_lu_s(float %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __fixunssfdi@plt
+; RV32I-NEXT:    call __fixunssfdi
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -849,7 +849,7 @@ define i64 @fcvt_lu_s(float %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __fixunssfdi@plt
+; RV64I-NEXT:    call __fixunssfdi
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -868,7 +868,7 @@ define i64 @fcvt_lu_s_sat(float %a) nounwind {
 ; RV32IF-NEXT:    fmv.w.x fa5, zero
 ; RV32IF-NEXT:    fle.s a0, fa5, fa0
 ; RV32IF-NEXT:    neg s0, a0
-; RV32IF-NEXT:    call __fixunssfdi@plt
+; RV32IF-NEXT:    call __fixunssfdi
 ; RV32IF-NEXT:    lui a2, %hi(.LCPI14_0)
 ; RV32IF-NEXT:    flw fa5, %lo(.LCPI14_0)(a2)
 ; RV32IF-NEXT:    and a0, s0, a0
@@ -902,7 +902,7 @@ define i64 @fcvt_lu_s_sat(float %a) nounwind {
 ; RV32IZFINX-NEXT:    fle.s a0, zero, a0
 ; RV32IZFINX-NEXT:    neg s1, a0
 ; RV32IZFINX-NEXT:    mv a0, s0
-; RV32IZFINX-NEXT:    call __fixunssfdi@plt
+; RV32IZFINX-NEXT:    call __fixunssfdi
 ; RV32IZFINX-NEXT:    lui a2, %hi(.LCPI14_0)
 ; RV32IZFINX-NEXT:    lw a2, %lo(.LCPI14_0)(a2)
 ; RV32IZFINX-NEXT:    and a0, s1, a0
@@ -936,17 +936,17 @@ define i64 @fcvt_lu_s_sat(float %a) nounwind {
 ; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    li a1, 0
-; RV32I-NEXT:    call __gesf2@plt
+; RV32I-NEXT:    call __gesf2
 ; RV32I-NEXT:    slti a0, a0, 0
 ; RV32I-NEXT:    addi s2, a0, -1
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __fixunssfdi@plt
+; RV32I-NEXT:    call __fixunssfdi
 ; RV32I-NEXT:    mv s1, a1
 ; RV32I-NEXT:    and s3, s2, a0
 ; RV32I-NEXT:    lui a1, 391168
 ; RV32I-NEXT:    addi a1, a1, -1
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __gtsf2@plt
+; RV32I-NEXT:    call __gtsf2
 ; RV32I-NEXT:    sgtz a0, a0
 ; RV32I-NEXT:    neg a1, a0
 ; RV32I-NEXT:    or a0, a1, s3
@@ -968,16 +968,16 @@ define i64 @fcvt_lu_s_sat(float %a) nounwind {
 ; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    li a1, 0
-; RV64I-NEXT:    call __gesf2@plt
+; RV64I-NEXT:    call __gesf2
 ; RV64I-NEXT:    slti a0, a0, 0
 ; RV64I-NEXT:    addi s1, a0, -1
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __fixunssfdi@plt
+; RV64I-NEXT:    call __fixunssfdi
 ; RV64I-NEXT:    and s1, s1, a0
 ; RV64I-NEXT:    lui a1, 391168
 ; RV64I-NEXT:    addiw a1, a1, -1
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __gtsf2@plt
+; RV64I-NEXT:    call __gtsf2
 ; RV64I-NEXT:    sgtz a0, a0
 ; RV64I-NEXT:    neg a0, a0
 ; RV64I-NEXT:    or a0, a0, s1
@@ -997,7 +997,7 @@ define float @fcvt_s_l(i64 %a) nounwind {
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
 ; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT:    call __floatdisf@plt
+; RV32IF-NEXT:    call __floatdisf
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
@@ -1011,7 +1011,7 @@ define float @fcvt_s_l(i64 %a) nounwind {
 ; RV32IZFINX:       # %bb.0:
 ; RV32IZFINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT:    call __floatdisf@plt
+; RV32IZFINX-NEXT:    call __floatdisf
 ; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINX-NEXT:    ret
@@ -1025,7 +1025,7 @@ define float @fcvt_s_l(i64 %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __floatdisf@plt
+; RV32I-NEXT:    call __floatdisf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1034,7 +1034,7 @@ define float @fcvt_s_l(i64 %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __floatdisf@plt
+; RV64I-NEXT:    call __floatdisf
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1047,7 +1047,7 @@ define float @fcvt_s_lu(i64 %a) nounwind {
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
 ; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT:    call __floatundisf@plt
+; RV32IF-NEXT:    call __floatundisf
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
@@ -1061,7 +1061,7 @@ define float @fcvt_s_lu(i64 %a) nounwind {
 ; RV32IZFINX:       # %bb.0:
 ; RV32IZFINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT:    call __floatundisf@plt
+; RV32IZFINX-NEXT:    call __floatundisf
 ; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINX-NEXT:    ret
@@ -1075,7 +1075,7 @@ define float @fcvt_s_lu(i64 %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __floatundisf@plt
+; RV32I-NEXT:    call __floatundisf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1084,7 +1084,7 @@ define float @fcvt_s_lu(i64 %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __floatundisf@plt
+; RV64I-NEXT:    call __floatundisf
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1107,7 +1107,7 @@ define float @fcvt_s_w_i8(i8 signext %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __floatsisf@plt
+; RV32I-NEXT:    call __floatsisf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1116,7 +1116,7 @@ define float @fcvt_s_w_i8(i8 signext %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __floatsisf@plt
+; RV64I-NEXT:    call __floatsisf
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1139,7 +1139,7 @@ define float @fcvt_s_wu_i8(i8 zeroext %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __floatunsisf@plt
+; RV32I-NEXT:    call __floatunsisf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1148,7 +1148,7 @@ define float @fcvt_s_wu_i8(i8 zeroext %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __floatunsisf@plt
+; RV64I-NEXT:    call __floatunsisf
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1171,7 +1171,7 @@ define float @fcvt_s_w_i16(i16 signext %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __floatsisf@plt
+; RV32I-NEXT:    call __floatsisf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1180,7 +1180,7 @@ define float @fcvt_s_w_i16(i16 signext %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __floatsisf@plt
+; RV64I-NEXT:    call __floatsisf
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1203,7 +1203,7 @@ define float @fcvt_s_wu_i16(i16 zeroext %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __floatunsisf@plt
+; RV32I-NEXT:    call __floatunsisf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1212,7 +1212,7 @@ define float @fcvt_s_wu_i16(i16 zeroext %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __floatunsisf@plt
+; RV64I-NEXT:    call __floatunsisf
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1261,7 +1261,7 @@ define signext i32 @fcvt_s_w_demanded_bits(i32 signext %0, ptr %1) nounwind {
 ; RV32I-NEXT:    mv s0, a1
 ; RV32I-NEXT:    addi s1, a0, 1
 ; RV32I-NEXT:    mv a0, s1
-; RV32I-NEXT:    call __floatsisf@plt
+; RV32I-NEXT:    call __floatsisf
 ; RV32I-NEXT:    sw a0, 0(s0)
 ; RV32I-NEXT:    mv a0, s1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -1279,7 +1279,7 @@ define signext i32 @fcvt_s_w_demanded_bits(i32 signext %0, ptr %1) nounwind {
 ; RV64I-NEXT:    mv s0, a1
 ; RV64I-NEXT:    addiw s1, a0, 1
 ; RV64I-NEXT:    mv a0, s1
-; RV64I-NEXT:    call __floatsisf@plt
+; RV64I-NEXT:    call __floatsisf
 ; RV64I-NEXT:    sw a0, 0(s0)
 ; RV64I-NEXT:    mv a0, s1
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
@@ -1332,7 +1332,7 @@ define signext i32 @fcvt_s_wu_demanded_bits(i32 signext %0, ptr %1) nounwind {
 ; RV32I-NEXT:    mv s0, a1
 ; RV32I-NEXT:    addi s1, a0, 1
 ; RV32I-NEXT:    mv a0, s1
-; RV32I-NEXT:    call __floatunsisf@plt
+; RV32I-NEXT:    call __floatunsisf
 ; RV32I-NEXT:    sw a0, 0(s0)
 ; RV32I-NEXT:    mv a0, s1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -1350,7 +1350,7 @@ define signext i32 @fcvt_s_wu_demanded_bits(i32 signext %0, ptr %1) nounwind {
 ; RV64I-NEXT:    mv s0, a1
 ; RV64I-NEXT:    addiw s1, a0, 1
 ; RV64I-NEXT:    mv a0, s1
-; RV64I-NEXT:    call __floatunsisf@plt
+; RV64I-NEXT:    call __floatunsisf
 ; RV64I-NEXT:    sw a0, 0(s0)
 ; RV64I-NEXT:    mv a0, s1
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
@@ -1389,7 +1389,7 @@ define signext i16 @fcvt_w_s_i16(float %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __fixsfsi@plt
+; RV32I-NEXT:    call __fixsfsi
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1398,7 +1398,7 @@ define signext i16 @fcvt_w_s_i16(float %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __fixsfdi@plt
+; RV64I-NEXT:    call __fixsfdi
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1470,10 +1470,10 @@ define signext i16 @fcvt_w_s_sat_i16(float %a) nounwind {
 ; RV32I-NEXT:    sw s2, 0(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lui a1, 815104
-; RV32I-NEXT:    call __gesf2@plt
+; RV32I-NEXT:    call __gesf2
 ; RV32I-NEXT:    mv s2, a0
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __fixsfsi@plt
+; RV32I-NEXT:    call __fixsfsi
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    bgez s2, .LBB24_2
 ; RV32I-NEXT:  # %bb.1: # %start
@@ -1482,7 +1482,7 @@ define signext i16 @fcvt_w_s_sat_i16(float %a) nounwind {
 ; RV32I-NEXT:    lui a0, 290816
 ; RV32I-NEXT:    addi a1, a0, -512
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __gtsf2@plt
+; RV32I-NEXT:    call __gtsf2
 ; RV32I-NEXT:    blez a0, .LBB24_4
 ; RV32I-NEXT:  # %bb.3: # %start
 ; RV32I-NEXT:    lui s1, 8
@@ -1490,7 +1490,7 @@ define signext i16 @fcvt_w_s_sat_i16(float %a) nounwind {
 ; RV32I-NEXT:  .LBB24_4: # %start
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    mv a1, s0
-; RV32I-NEXT:    call __unordsf2@plt
+; RV32I-NEXT:    call __unordsf2
 ; RV32I-NEXT:    snez a0, a0
 ; RV32I-NEXT:    addi a0, a0, -1
 ; RV32I-NEXT:    and a0, a0, s1
@@ -1512,10 +1512,10 @@ define signext i16 @fcvt_w_s_sat_i16(float %a) nounwind {
 ; RV64I-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    lui a1, 815104
-; RV64I-NEXT:    call __gesf2@plt
+; RV64I-NEXT:    call __gesf2
 ; RV64I-NEXT:    mv s2, a0
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __fixsfdi@plt
+; RV64I-NEXT:    call __fixsfdi
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    bgez s2, .LBB24_2
 ; RV64I-NEXT:  # %bb.1: # %start
@@ -1524,7 +1524,7 @@ define signext i16 @fcvt_w_s_sat_i16(float %a) nounwind {
 ; RV64I-NEXT:    lui a0, 290816
 ; RV64I-NEXT:    addiw a1, a0, -512
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __gtsf2@plt
+; RV64I-NEXT:    call __gtsf2
 ; RV64I-NEXT:    blez a0, .LBB24_4
 ; RV64I-NEXT:  # %bb.3: # %start
 ; RV64I-NEXT:    lui s1, 8
@@ -1532,7 +1532,7 @@ define signext i16 @fcvt_w_s_sat_i16(float %a) nounwind {
 ; RV64I-NEXT:  .LBB24_4: # %start
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    mv a1, s0
-; RV64I-NEXT:    call __unordsf2@plt
+; RV64I-NEXT:    call __unordsf2
 ; RV64I-NEXT:    snez a0, a0
 ; RV64I-NEXT:    addi a0, a0, -1
 ; RV64I-NEXT:    and a0, a0, s1
@@ -1575,7 +1575,7 @@ define zeroext i16 @fcvt_wu_s_i16(float %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __fixunssfsi@plt
+; RV32I-NEXT:    call __fixunssfsi
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1584,7 +1584,7 @@ define zeroext i16 @fcvt_wu_s_i16(float %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __fixunssfdi@plt
+; RV64I-NEXT:    call __fixunssfdi
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1640,15 +1640,15 @@ define zeroext i16 @fcvt_wu_s_sat_i16(float %a) nounwind {
 ; RV32I-NEXT:    sw s2, 0(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s2, a0
 ; RV32I-NEXT:    li a1, 0
-; RV32I-NEXT:    call __gesf2@plt
+; RV32I-NEXT:    call __gesf2
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    mv a0, s2
-; RV32I-NEXT:    call __fixunssfsi@plt
+; RV32I-NEXT:    call __fixunssfsi
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    lui a0, 292864
 ; RV32I-NEXT:    addi a1, a0, -256
 ; RV32I-NEXT:    mv a0, s2
-; RV32I-NEXT:    call __gtsf2@plt
+; RV32I-NEXT:    call __gtsf2
 ; RV32I-NEXT:    lui a1, 16
 ; RV32I-NEXT:    addi a1, a1, -1
 ; RV32I-NEXT:    blez a0, .LBB26_2
@@ -1677,15 +1677,15 @@ define zeroext i16 @fcvt_wu_s_sat_i16(float %a) nounwind {
 ; RV64I-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s2, a0
 ; RV64I-NEXT:    li a1, 0
-; RV64I-NEXT:    call __gesf2@plt
+; RV64I-NEXT:    call __gesf2
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    mv a0, s2
-; RV64I-NEXT:    call __fixunssfdi@plt
+; RV64I-NEXT:    call __fixunssfdi
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    lui a0, 292864
 ; RV64I-NEXT:    addiw a1, a0, -256
 ; RV64I-NEXT:    mv a0, s2
-; RV64I-NEXT:    call __gtsf2@plt
+; RV64I-NEXT:    call __gtsf2
 ; RV64I-NEXT:    lui a1, 16
 ; RV64I-NEXT:    addiw a1, a1, -1
 ; RV64I-NEXT:    blez a0, .LBB26_2
@@ -1735,7 +1735,7 @@ define signext i8 @fcvt_w_s_i8(float %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __fixsfsi@plt
+; RV32I-NEXT:    call __fixsfsi
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1744,7 +1744,7 @@ define signext i8 @fcvt_w_s_i8(float %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __fixsfdi@plt
+; RV64I-NEXT:    call __fixsfdi
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1814,10 +1814,10 @@ define signext i8 @fcvt_w_s_sat_i8(float %a) nounwind {
 ; RV32I-NEXT:    sw s2, 0(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lui a1, 798720
-; RV32I-NEXT:    call __gesf2@plt
+; RV32I-NEXT:    call __gesf2
 ; RV32I-NEXT:    mv s2, a0
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __fixsfsi@plt
+; RV32I-NEXT:    call __fixsfsi
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    bgez s2, .LBB28_2
 ; RV32I-NEXT:  # %bb.1: # %start
@@ -1825,14 +1825,14 @@ define signext i8 @fcvt_w_s_sat_i8(float %a) nounwind {
 ; RV32I-NEXT:  .LBB28_2: # %start
 ; RV32I-NEXT:    lui a1, 274400
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __gtsf2@plt
+; RV32I-NEXT:    call __gtsf2
 ; RV32I-NEXT:    blez a0, .LBB28_4
 ; RV32I-NEXT:  # %bb.3: # %start
 ; RV32I-NEXT:    li s1, 127
 ; RV32I-NEXT:  .LBB28_4: # %start
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    mv a1, s0
-; RV32I-NEXT:    call __unordsf2@plt
+; RV32I-NEXT:    call __unordsf2
 ; RV32I-NEXT:    snez a0, a0
 ; RV32I-NEXT:    addi a0, a0, -1
 ; RV32I-NEXT:    and a0, a0, s1
@@ -1854,10 +1854,10 @@ define signext i8 @fcvt_w_s_sat_i8(float %a) nounwind {
 ; RV64I-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    lui a1, 798720
-; RV64I-NEXT:    call __gesf2@plt
+; RV64I-NEXT:    call __gesf2
 ; RV64I-NEXT:    mv s2, a0
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __fixsfdi@plt
+; RV64I-NEXT:    call __fixsfdi
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    bgez s2, .LBB28_2
 ; RV64I-NEXT:  # %bb.1: # %start
@@ -1865,14 +1865,14 @@ define signext i8 @fcvt_w_s_sat_i8(float %a) nounwind {
 ; RV64I-NEXT:  .LBB28_2: # %start
 ; RV64I-NEXT:    lui a1, 274400
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __gtsf2@plt
+; RV64I-NEXT:    call __gtsf2
 ; RV64I-NEXT:    blez a0, .LBB28_4
 ; RV64I-NEXT:  # %bb.3: # %start
 ; RV64I-NEXT:    li s1, 127
 ; RV64I-NEXT:  .LBB28_4: # %start
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    mv a1, s0
-; RV64I-NEXT:    call __unordsf2@plt
+; RV64I-NEXT:    call __unordsf2
 ; RV64I-NEXT:    snez a0, a0
 ; RV64I-NEXT:    addi a0, a0, -1
 ; RV64I-NEXT:    and a0, a0, s1
@@ -1915,7 +1915,7 @@ define zeroext i8 @fcvt_wu_s_i8(float %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __fixunssfsi@plt
+; RV32I-NEXT:    call __fixunssfsi
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1924,7 +1924,7 @@ define zeroext i8 @fcvt_wu_s_i8(float %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __fixunssfdi@plt
+; RV64I-NEXT:    call __fixunssfdi
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1978,14 +1978,14 @@ define zeroext i8 @fcvt_wu_s_sat_i8(float %a) nounwind {
 ; RV32I-NEXT:    sw s2, 0(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s2, a0
 ; RV32I-NEXT:    li a1, 0
-; RV32I-NEXT:    call __gesf2@plt
+; RV32I-NEXT:    call __gesf2
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    mv a0, s2
-; RV32I-NEXT:    call __fixunssfsi@plt
+; RV32I-NEXT:    call __fixunssfsi
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    lui a1, 276464
 ; RV32I-NEXT:    mv a0, s2
-; RV32I-NEXT:    call __gtsf2@plt
+; RV32I-NEXT:    call __gtsf2
 ; RV32I-NEXT:    blez a0, .LBB30_2
 ; RV32I-NEXT:  # %bb.1: # %start
 ; RV32I-NEXT:    li a0, 255
@@ -2012,14 +2012,14 @@ define zeroext i8 @fcvt_wu_s_sat_i8(float %a) nounwind {
 ; RV64I-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s2, a0
 ; RV64I-NEXT:    li a1, 0
-; RV64I-NEXT:    call __gesf2@plt
+; RV64I-NEXT:    call __gesf2
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    mv a0, s2
-; RV64I-NEXT:    call __fixunssfdi@plt
+; RV64I-NEXT:    call __fixunssfdi
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    lui a1, 276464
 ; RV64I-NEXT:    mv a0, s2
-; RV64I-NEXT:    call __gtsf2@plt
+; RV64I-NEXT:    call __gtsf2
 ; RV64I-NEXT:    blez a0, .LBB30_2
 ; RV64I-NEXT:  # %bb.1: # %start
 ; RV64I-NEXT:    li a0, 255
@@ -2091,16 +2091,16 @@ define zeroext i32 @fcvt_wu_s_sat_zext(float %a) nounwind {
 ; RV32I-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    li a1, 0
-; RV32I-NEXT:    call __gesf2@plt
+; RV32I-NEXT:    call __gesf2
 ; RV32I-NEXT:    slti a0, a0, 0
 ; RV32I-NEXT:    addi s1, a0, -1
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __fixunssfsi@plt
+; RV32I-NEXT:    call __fixunssfsi
 ; RV32I-NEXT:    and s1, s1, a0
 ; RV32I-NEXT:    lui a1, 325632
 ; RV32I-NEXT:    addi a1, a1, -1
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __gtsf2@plt
+; RV32I-NEXT:    call __gtsf2
 ; RV32I-NEXT:    sgtz a0, a0
 ; RV32I-NEXT:    neg a0, a0
 ; RV32I-NEXT:    or a0, a0, s1
@@ -2119,15 +2119,15 @@ define zeroext i32 @fcvt_wu_s_sat_zext(float %a) nounwind {
 ; RV64I-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s2, a0
 ; RV64I-NEXT:    li a1, 0
-; RV64I-NEXT:    call __gesf2@plt
+; RV64I-NEXT:    call __gesf2
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    mv a0, s2
-; RV64I-NEXT:    call __fixunssfdi@plt
+; RV64I-NEXT:    call __fixunssfdi
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    lui a1, 325632
 ; RV64I-NEXT:    addiw a1, a1, -1
 ; RV64I-NEXT:    mv a0, s2
-; RV64I-NEXT:    call __gtsf2@plt
+; RV64I-NEXT:    call __gtsf2
 ; RV64I-NEXT:    blez a0, .LBB31_2
 ; RV64I-NEXT:  # %bb.1: # %start
 ; RV64I-NEXT:    li a0, -1
@@ -2180,10 +2180,10 @@ define signext i32 @fcvt_w_s_sat_sext(float %a) nounwind {
 ; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lui a1, 847872
-; RV32I-NEXT:    call __gesf2@plt
+; RV32I-NEXT:    call __gesf2
 ; RV32I-NEXT:    mv s2, a0
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __fixsfsi@plt
+; RV32I-NEXT:    call __fixsfsi
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    lui s3, 524288
 ; RV32I-NEXT:    bgez s2, .LBB32_2
@@ -2193,14 +2193,14 @@ define signext i32 @fcvt_w_s_sat_sext(float %a) nounwind {
 ; RV32I-NEXT:    lui a1, 323584
 ; RV32I-NEXT:    addi a1, a1, -1
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __gtsf2@plt
+; RV32I-NEXT:    call __gtsf2
 ; RV32I-NEXT:    blez a0, .LBB32_4
 ; RV32I-NEXT:  # %bb.3: # %start
 ; RV32I-NEXT:    addi s1, s3, -1
 ; RV32I-NEXT:  .LBB32_4: # %start
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    mv a1, s0
-; RV32I-NEXT:    call __unordsf2@plt
+; RV32I-NEXT:    call __unordsf2
 ; RV32I-NEXT:    snez a0, a0
 ; RV32I-NEXT:    addi a0, a0, -1
 ; RV32I-NEXT:    and a0, a0, s1
@@ -2222,10 +2222,10 @@ define signext i32 @fcvt_w_s_sat_sext(float %a) nounwind {
 ; RV64I-NEXT:    sd s3, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    lui a1, 847872
-; RV64I-NEXT:    call __gesf2@plt
+; RV64I-NEXT:    call __gesf2
 ; RV64I-NEXT:    mv s2, a0
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __fixsfdi@plt
+; RV64I-NEXT:    call __fixsfdi
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    lui s3, 524288
 ; RV64I-NEXT:    bgez s2, .LBB32_2
@@ -2235,14 +2235,14 @@ define signext i32 @fcvt_w_s_sat_sext(float %a) nounwind {
 ; RV64I-NEXT:    lui a1, 323584
 ; RV64I-NEXT:    addiw a1, a1, -1
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __gtsf2@plt
+; RV64I-NEXT:    call __gtsf2
 ; RV64I-NEXT:    blez a0, .LBB32_4
 ; RV64I-NEXT:  # %bb.3: # %start
 ; RV64I-NEXT:    addi s1, s3, -1
 ; RV64I-NEXT:  .LBB32_4: # %start
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    mv a1, s0
-; RV64I-NEXT:    call __unordsf2@plt
+; RV64I-NEXT:    call __unordsf2
 ; RV64I-NEXT:    snez a0, a0
 ; RV64I-NEXT:    addi a0, a0, -1
 ; RV64I-NEXT:    and a0, a0, s1
diff --git a/llvm/test/CodeGen/RISCV/float-fcmp-strict.ll b/llvm/test/CodeGen/RISCV/float-fcmp-strict.ll
index 36eb58fe74542c..dae9f3e089cf4a 100644
--- a/llvm/test/CodeGen/RISCV/float-fcmp-strict.ll
+++ b/llvm/test/CodeGen/RISCV/float-fcmp-strict.ll
@@ -31,7 +31,7 @@ define i32 @fcmp_oeq(float %a, float %b) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __eqsf2@plt
+; RV32I-NEXT:    call __eqsf2
 ; RV32I-NEXT:    seqz a0, a0
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -41,7 +41,7 @@ define i32 @fcmp_oeq(float %a, float %b) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __eqsf2@plt
+; RV64I-NEXT:    call __eqsf2
 ; RV64I-NEXT:    seqz a0, a0
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -74,7 +74,7 @@ define i32 @fcmp_ogt(float %a, float %b) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __gtsf2@plt
+; RV32I-NEXT:    call __gtsf2
 ; RV32I-NEXT:    sgtz a0, a0
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -84,7 +84,7 @@ define i32 @fcmp_ogt(float %a, float %b) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __gtsf2@plt
+; RV64I-NEXT:    call __gtsf2
 ; RV64I-NEXT:    sgtz a0, a0
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -116,7 +116,7 @@ define i32 @fcmp_oge(float %a, float %b) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __gesf2@plt
+; RV32I-NEXT:    call __gesf2
 ; RV32I-NEXT:    slti a0, a0, 0
 ; RV32I-NEXT:    xori a0, a0, 1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -127,7 +127,7 @@ define i32 @fcmp_oge(float %a, float %b) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __gesf2@plt
+; RV64I-NEXT:    call __gesf2
 ; RV64I-NEXT:    slti a0, a0, 0
 ; RV64I-NEXT:    xori a0, a0, 1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -160,7 +160,7 @@ define i32 @fcmp_olt(float %a, float %b) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __ltsf2@plt
+; RV32I-NEXT:    call __ltsf2
 ; RV32I-NEXT:    slti a0, a0, 0
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -170,7 +170,7 @@ define i32 @fcmp_olt(float %a, float %b) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __ltsf2@plt
+; RV64I-NEXT:    call __ltsf2
 ; RV64I-NEXT:    slti a0, a0, 0
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -202,7 +202,7 @@ define i32 @fcmp_ole(float %a, float %b) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __lesf2@plt
+; RV32I-NEXT:    call __lesf2
 ; RV32I-NEXT:    slti a0, a0, 1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -212,7 +212,7 @@ define i32 @fcmp_ole(float %a, float %b) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __lesf2@plt
+; RV64I-NEXT:    call __lesf2
 ; RV64I-NEXT:    slti a0, a0, 1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -261,11 +261,11 @@ define i32 @fcmp_one(float %a, float %b) nounwind strictfp {
 ; RV32I-NEXT:    sw s2, 0(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a1
 ; RV32I-NEXT:    mv s1, a0
-; RV32I-NEXT:    call __eqsf2@plt
+; RV32I-NEXT:    call __eqsf2
 ; RV32I-NEXT:    snez s2, a0
 ; RV32I-NEXT:    mv a0, s1
 ; RV32I-NEXT:    mv a1, s0
-; RV32I-NEXT:    call __unordsf2@plt
+; RV32I-NEXT:    call __unordsf2
 ; RV32I-NEXT:    seqz a0, a0
 ; RV32I-NEXT:    and a0, a0, s2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -284,11 +284,11 @@ define i32 @fcmp_one(float %a, float %b) nounwind strictfp {
 ; RV64I-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a1
 ; RV64I-NEXT:    mv s1, a0
-; RV64I-NEXT:    call __eqsf2@plt
+; RV64I-NEXT:    call __eqsf2
 ; RV64I-NEXT:    snez s2, a0
 ; RV64I-NEXT:    mv a0, s1
 ; RV64I-NEXT:    mv a1, s0
-; RV64I-NEXT:    call __unordsf2@plt
+; RV64I-NEXT:    call __unordsf2
 ; RV64I-NEXT:    seqz a0, a0
 ; RV64I-NEXT:    and a0, a0, s2
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
@@ -321,7 +321,7 @@ define i32 @fcmp_ord(float %a, float %b) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __unordsf2@plt
+; RV32I-NEXT:    call __unordsf2
 ; RV32I-NEXT:    seqz a0, a0
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -331,7 +331,7 @@ define i32 @fcmp_ord(float %a, float %b) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __unordsf2@plt
+; RV64I-NEXT:    call __unordsf2
 ; RV64I-NEXT:    seqz a0, a0
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -382,11 +382,11 @@ define i32 @fcmp_ueq(float %a, float %b) nounwind strictfp {
 ; RV32I-NEXT:    sw s2, 0(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a1
 ; RV32I-NEXT:    mv s1, a0
-; RV32I-NEXT:    call __eqsf2@plt
+; RV32I-NEXT:    call __eqsf2
 ; RV32I-NEXT:    seqz s2, a0
 ; RV32I-NEXT:    mv a0, s1
 ; RV32I-NEXT:    mv a1, s0
-; RV32I-NEXT:    call __unordsf2@plt
+; RV32I-NEXT:    call __unordsf2
 ; RV32I-NEXT:    snez a0, a0
 ; RV32I-NEXT:    or a0, a0, s2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -405,11 +405,11 @@ define i32 @fcmp_ueq(float %a, float %b) nounwind strictfp {
 ; RV64I-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a1
 ; RV64I-NEXT:    mv s1, a0
-; RV64I-NEXT:    call __eqsf2@plt
+; RV64I-NEXT:    call __eqsf2
 ; RV64I-NEXT:    seqz s2, a0
 ; RV64I-NEXT:    mv a0, s1
 ; RV64I-NEXT:    mv a1, s0
-; RV64I-NEXT:    call __unordsf2@plt
+; RV64I-NEXT:    call __unordsf2
 ; RV64I-NEXT:    snez a0, a0
 ; RV64I-NEXT:    or a0, a0, s2
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
@@ -447,7 +447,7 @@ define i32 @fcmp_ugt(float %a, float %b) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __lesf2@plt
+; RV32I-NEXT:    call __lesf2
 ; RV32I-NEXT:    sgtz a0, a0
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -457,7 +457,7 @@ define i32 @fcmp_ugt(float %a, float %b) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __lesf2@plt
+; RV64I-NEXT:    call __lesf2
 ; RV64I-NEXT:    sgtz a0, a0
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -491,7 +491,7 @@ define i32 @fcmp_uge(float %a, float %b) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __ltsf2@plt
+; RV32I-NEXT:    call __ltsf2
 ; RV32I-NEXT:    slti a0, a0, 0
 ; RV32I-NEXT:    xori a0, a0, 1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -502,7 +502,7 @@ define i32 @fcmp_uge(float %a, float %b) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __ltsf2@plt
+; RV64I-NEXT:    call __ltsf2
 ; RV64I-NEXT:    slti a0, a0, 0
 ; RV64I-NEXT:    xori a0, a0, 1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -537,7 +537,7 @@ define i32 @fcmp_ult(float %a, float %b) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __gesf2@plt
+; RV32I-NEXT:    call __gesf2
 ; RV32I-NEXT:    slti a0, a0, 0
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -547,7 +547,7 @@ define i32 @fcmp_ult(float %a, float %b) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __gesf2@plt
+; RV64I-NEXT:    call __gesf2
 ; RV64I-NEXT:    slti a0, a0, 0
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -581,7 +581,7 @@ define i32 @fcmp_ule(float %a, float %b) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __gtsf2@plt
+; RV32I-NEXT:    call __gtsf2
 ; RV32I-NEXT:    slti a0, a0, 1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -591,7 +591,7 @@ define i32 @fcmp_ule(float %a, float %b) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __gtsf2@plt
+; RV64I-NEXT:    call __gtsf2
 ; RV64I-NEXT:    slti a0, a0, 1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -618,7 +618,7 @@ define i32 @fcmp_une(float %a, float %b) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __nesf2@plt
+; RV32I-NEXT:    call __nesf2
 ; RV32I-NEXT:    snez a0, a0
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -628,7 +628,7 @@ define i32 @fcmp_une(float %a, float %b) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __nesf2@plt
+; RV64I-NEXT:    call __nesf2
 ; RV64I-NEXT:    snez a0, a0
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -659,7 +659,7 @@ define i32 @fcmp_uno(float %a, float %b) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __unordsf2@plt
+; RV32I-NEXT:    call __unordsf2
 ; RV32I-NEXT:    snez a0, a0
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -669,7 +669,7 @@ define i32 @fcmp_uno(float %a, float %b) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __unordsf2@plt
+; RV64I-NEXT:    call __unordsf2
 ; RV64I-NEXT:    snez a0, a0
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -698,7 +698,7 @@ define i32 @fcmps_oeq(float %a, float %b) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __eqsf2@plt
+; RV32I-NEXT:    call __eqsf2
 ; RV32I-NEXT:    seqz a0, a0
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -708,7 +708,7 @@ define i32 @fcmps_oeq(float %a, float %b) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __eqsf2@plt
+; RV64I-NEXT:    call __eqsf2
 ; RV64I-NEXT:    seqz a0, a0
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -734,7 +734,7 @@ define i32 @fcmps_ogt(float %a, float %b) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __gtsf2@plt
+; RV32I-NEXT:    call __gtsf2
 ; RV32I-NEXT:    sgtz a0, a0
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -744,7 +744,7 @@ define i32 @fcmps_ogt(float %a, float %b) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __gtsf2@plt
+; RV64I-NEXT:    call __gtsf2
 ; RV64I-NEXT:    sgtz a0, a0
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -769,7 +769,7 @@ define i32 @fcmps_oge(float %a, float %b) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __gesf2@plt
+; RV32I-NEXT:    call __gesf2
 ; RV32I-NEXT:    slti a0, a0, 0
 ; RV32I-NEXT:    xori a0, a0, 1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -780,7 +780,7 @@ define i32 @fcmps_oge(float %a, float %b) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __gesf2@plt
+; RV64I-NEXT:    call __gesf2
 ; RV64I-NEXT:    slti a0, a0, 0
 ; RV64I-NEXT:    xori a0, a0, 1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -806,7 +806,7 @@ define i32 @fcmps_olt(float %a, float %b) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __ltsf2@plt
+; RV32I-NEXT:    call __ltsf2
 ; RV32I-NEXT:    slti a0, a0, 0
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -816,7 +816,7 @@ define i32 @fcmps_olt(float %a, float %b) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __ltsf2@plt
+; RV64I-NEXT:    call __ltsf2
 ; RV64I-NEXT:    slti a0, a0, 0
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -841,7 +841,7 @@ define i32 @fcmps_ole(float %a, float %b) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __lesf2@plt
+; RV32I-NEXT:    call __lesf2
 ; RV32I-NEXT:    slti a0, a0, 1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -851,7 +851,7 @@ define i32 @fcmps_ole(float %a, float %b) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __lesf2@plt
+; RV64I-NEXT:    call __lesf2
 ; RV64I-NEXT:    slti a0, a0, 1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -885,11 +885,11 @@ define i32 @fcmps_one(float %a, float %b) nounwind strictfp {
 ; RV32I-NEXT:    sw s2, 0(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a1
 ; RV32I-NEXT:    mv s1, a0
-; RV32I-NEXT:    call __eqsf2@plt
+; RV32I-NEXT:    call __eqsf2
 ; RV32I-NEXT:    snez s2, a0
 ; RV32I-NEXT:    mv a0, s1
 ; RV32I-NEXT:    mv a1, s0
-; RV32I-NEXT:    call __unordsf2@plt
+; RV32I-NEXT:    call __unordsf2
 ; RV32I-NEXT:    seqz a0, a0
 ; RV32I-NEXT:    and a0, a0, s2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -908,11 +908,11 @@ define i32 @fcmps_one(float %a, float %b) nounwind strictfp {
 ; RV64I-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a1
 ; RV64I-NEXT:    mv s1, a0
-; RV64I-NEXT:    call __eqsf2@plt
+; RV64I-NEXT:    call __eqsf2
 ; RV64I-NEXT:    snez s2, a0
 ; RV64I-NEXT:    mv a0, s1
 ; RV64I-NEXT:    mv a1, s0
-; RV64I-NEXT:    call __unordsf2@plt
+; RV64I-NEXT:    call __unordsf2
 ; RV64I-NEXT:    seqz a0, a0
 ; RV64I-NEXT:    and a0, a0, s2
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
@@ -945,7 +945,7 @@ define i32 @fcmps_ord(float %a, float %b) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __unordsf2@plt
+; RV32I-NEXT:    call __unordsf2
 ; RV32I-NEXT:    seqz a0, a0
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -955,7 +955,7 @@ define i32 @fcmps_ord(float %a, float %b) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __unordsf2@plt
+; RV64I-NEXT:    call __unordsf2
 ; RV64I-NEXT:    seqz a0, a0
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -991,11 +991,11 @@ define i32 @fcmps_ueq(float %a, float %b) nounwind strictfp {
 ; RV32I-NEXT:    sw s2, 0(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a1
 ; RV32I-NEXT:    mv s1, a0
-; RV32I-NEXT:    call __eqsf2@plt
+; RV32I-NEXT:    call __eqsf2
 ; RV32I-NEXT:    seqz s2, a0
 ; RV32I-NEXT:    mv a0, s1
 ; RV32I-NEXT:    mv a1, s0
-; RV32I-NEXT:    call __unordsf2@plt
+; RV32I-NEXT:    call __unordsf2
 ; RV32I-NEXT:    snez a0, a0
 ; RV32I-NEXT:    or a0, a0, s2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -1014,11 +1014,11 @@ define i32 @fcmps_ueq(float %a, float %b) nounwind strictfp {
 ; RV64I-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a1
 ; RV64I-NEXT:    mv s1, a0
-; RV64I-NEXT:    call __eqsf2@plt
+; RV64I-NEXT:    call __eqsf2
 ; RV64I-NEXT:    seqz s2, a0
 ; RV64I-NEXT:    mv a0, s1
 ; RV64I-NEXT:    mv a1, s0
-; RV64I-NEXT:    call __unordsf2@plt
+; RV64I-NEXT:    call __unordsf2
 ; RV64I-NEXT:    snez a0, a0
 ; RV64I-NEXT:    or a0, a0, s2
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
@@ -1049,7 +1049,7 @@ define i32 @fcmps_ugt(float %a, float %b) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __lesf2@plt
+; RV32I-NEXT:    call __lesf2
 ; RV32I-NEXT:    sgtz a0, a0
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -1059,7 +1059,7 @@ define i32 @fcmps_ugt(float %a, float %b) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __lesf2@plt
+; RV64I-NEXT:    call __lesf2
 ; RV64I-NEXT:    sgtz a0, a0
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -1086,7 +1086,7 @@ define i32 @fcmps_uge(float %a, float %b) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __ltsf2@plt
+; RV32I-NEXT:    call __ltsf2
 ; RV32I-NEXT:    slti a0, a0, 0
 ; RV32I-NEXT:    xori a0, a0, 1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -1097,7 +1097,7 @@ define i32 @fcmps_uge(float %a, float %b) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __ltsf2@plt
+; RV64I-NEXT:    call __ltsf2
 ; RV64I-NEXT:    slti a0, a0, 0
 ; RV64I-NEXT:    xori a0, a0, 1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -1125,7 +1125,7 @@ define i32 @fcmps_ult(float %a, float %b) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __gesf2@plt
+; RV32I-NEXT:    call __gesf2
 ; RV32I-NEXT:    slti a0, a0, 0
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -1135,7 +1135,7 @@ define i32 @fcmps_ult(float %a, float %b) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __gesf2@plt
+; RV64I-NEXT:    call __gesf2
 ; RV64I-NEXT:    slti a0, a0, 0
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -1162,7 +1162,7 @@ define i32 @fcmps_ule(float %a, float %b) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __gtsf2@plt
+; RV32I-NEXT:    call __gtsf2
 ; RV32I-NEXT:    slti a0, a0, 1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -1172,7 +1172,7 @@ define i32 @fcmps_ule(float %a, float %b) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __gtsf2@plt
+; RV64I-NEXT:    call __gtsf2
 ; RV64I-NEXT:    slti a0, a0, 1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -1203,7 +1203,7 @@ define i32 @fcmps_une(float %a, float %b) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __nesf2@plt
+; RV32I-NEXT:    call __nesf2
 ; RV32I-NEXT:    snez a0, a0
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -1213,7 +1213,7 @@ define i32 @fcmps_une(float %a, float %b) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __nesf2@plt
+; RV64I-NEXT:    call __nesf2
 ; RV64I-NEXT:    snez a0, a0
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -1244,7 +1244,7 @@ define i32 @fcmps_uno(float %a, float %b) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __unordsf2@plt
+; RV32I-NEXT:    call __unordsf2
 ; RV32I-NEXT:    snez a0, a0
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -1254,7 +1254,7 @@ define i32 @fcmps_uno(float %a, float %b) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __unordsf2@plt
+; RV64I-NEXT:    call __unordsf2
 ; RV64I-NEXT:    snez a0, a0
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/float-fcmp.ll b/llvm/test/CodeGen/RISCV/float-fcmp.ll
index b4fbed1321e27e..265d553a3e5d91 100644
--- a/llvm/test/CodeGen/RISCV/float-fcmp.ll
+++ b/llvm/test/CodeGen/RISCV/float-fcmp.ll
@@ -52,7 +52,7 @@ define i32 @fcmp_oeq(float %a, float %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __eqsf2@plt
+; RV32I-NEXT:    call __eqsf2
 ; RV32I-NEXT:    seqz a0, a0
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -62,7 +62,7 @@ define i32 @fcmp_oeq(float %a, float %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __eqsf2@plt
+; RV64I-NEXT:    call __eqsf2
 ; RV64I-NEXT:    seqz a0, a0
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -87,7 +87,7 @@ define i32 @fcmp_ogt(float %a, float %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __gtsf2@plt
+; RV32I-NEXT:    call __gtsf2
 ; RV32I-NEXT:    sgtz a0, a0
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -97,7 +97,7 @@ define i32 @fcmp_ogt(float %a, float %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __gtsf2@plt
+; RV64I-NEXT:    call __gtsf2
 ; RV64I-NEXT:    sgtz a0, a0
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -122,7 +122,7 @@ define i32 @fcmp_oge(float %a, float %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __gesf2@plt
+; RV32I-NEXT:    call __gesf2
 ; RV32I-NEXT:    slti a0, a0, 0
 ; RV32I-NEXT:    xori a0, a0, 1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -133,7 +133,7 @@ define i32 @fcmp_oge(float %a, float %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __gesf2@plt
+; RV64I-NEXT:    call __gesf2
 ; RV64I-NEXT:    slti a0, a0, 0
 ; RV64I-NEXT:    xori a0, a0, 1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -159,7 +159,7 @@ define i32 @fcmp_olt(float %a, float %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __ltsf2@plt
+; RV32I-NEXT:    call __ltsf2
 ; RV32I-NEXT:    slti a0, a0, 0
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -169,7 +169,7 @@ define i32 @fcmp_olt(float %a, float %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __ltsf2@plt
+; RV64I-NEXT:    call __ltsf2
 ; RV64I-NEXT:    slti a0, a0, 0
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -194,7 +194,7 @@ define i32 @fcmp_ole(float %a, float %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __lesf2@plt
+; RV32I-NEXT:    call __lesf2
 ; RV32I-NEXT:    slti a0, a0, 1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -204,7 +204,7 @@ define i32 @fcmp_ole(float %a, float %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __lesf2@plt
+; RV64I-NEXT:    call __lesf2
 ; RV64I-NEXT:    slti a0, a0, 1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -238,11 +238,11 @@ define i32 @fcmp_one(float %a, float %b) nounwind {
 ; RV32I-NEXT:    sw s2, 0(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a1
 ; RV32I-NEXT:    mv s1, a0
-; RV32I-NEXT:    call __eqsf2@plt
+; RV32I-NEXT:    call __eqsf2
 ; RV32I-NEXT:    snez s2, a0
 ; RV32I-NEXT:    mv a0, s1
 ; RV32I-NEXT:    mv a1, s0
-; RV32I-NEXT:    call __unordsf2@plt
+; RV32I-NEXT:    call __unordsf2
 ; RV32I-NEXT:    seqz a0, a0
 ; RV32I-NEXT:    and a0, a0, s2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -261,11 +261,11 @@ define i32 @fcmp_one(float %a, float %b) nounwind {
 ; RV64I-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a1
 ; RV64I-NEXT:    mv s1, a0
-; RV64I-NEXT:    call __eqsf2@plt
+; RV64I-NEXT:    call __eqsf2
 ; RV64I-NEXT:    snez s2, a0
 ; RV64I-NEXT:    mv a0, s1
 ; RV64I-NEXT:    mv a1, s0
-; RV64I-NEXT:    call __unordsf2@plt
+; RV64I-NEXT:    call __unordsf2
 ; RV64I-NEXT:    seqz a0, a0
 ; RV64I-NEXT:    and a0, a0, s2
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
@@ -298,7 +298,7 @@ define i32 @fcmp_ord(float %a, float %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __unordsf2@plt
+; RV32I-NEXT:    call __unordsf2
 ; RV32I-NEXT:    seqz a0, a0
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -308,7 +308,7 @@ define i32 @fcmp_ord(float %a, float %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __unordsf2@plt
+; RV64I-NEXT:    call __unordsf2
 ; RV64I-NEXT:    seqz a0, a0
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -344,11 +344,11 @@ define i32 @fcmp_ueq(float %a, float %b) nounwind {
 ; RV32I-NEXT:    sw s2, 0(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a1
 ; RV32I-NEXT:    mv s1, a0
-; RV32I-NEXT:    call __eqsf2@plt
+; RV32I-NEXT:    call __eqsf2
 ; RV32I-NEXT:    seqz s2, a0
 ; RV32I-NEXT:    mv a0, s1
 ; RV32I-NEXT:    mv a1, s0
-; RV32I-NEXT:    call __unordsf2@plt
+; RV32I-NEXT:    call __unordsf2
 ; RV32I-NEXT:    snez a0, a0
 ; RV32I-NEXT:    or a0, a0, s2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -367,11 +367,11 @@ define i32 @fcmp_ueq(float %a, float %b) nounwind {
 ; RV64I-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a1
 ; RV64I-NEXT:    mv s1, a0
-; RV64I-NEXT:    call __eqsf2@plt
+; RV64I-NEXT:    call __eqsf2
 ; RV64I-NEXT:    seqz s2, a0
 ; RV64I-NEXT:    mv a0, s1
 ; RV64I-NEXT:    mv a1, s0
-; RV64I-NEXT:    call __unordsf2@plt
+; RV64I-NEXT:    call __unordsf2
 ; RV64I-NEXT:    snez a0, a0
 ; RV64I-NEXT:    or a0, a0, s2
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
@@ -402,7 +402,7 @@ define i32 @fcmp_ugt(float %a, float %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __lesf2@plt
+; RV32I-NEXT:    call __lesf2
 ; RV32I-NEXT:    sgtz a0, a0
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -412,7 +412,7 @@ define i32 @fcmp_ugt(float %a, float %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __lesf2@plt
+; RV64I-NEXT:    call __lesf2
 ; RV64I-NEXT:    sgtz a0, a0
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -439,7 +439,7 @@ define i32 @fcmp_uge(float %a, float %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __ltsf2@plt
+; RV32I-NEXT:    call __ltsf2
 ; RV32I-NEXT:    slti a0, a0, 0
 ; RV32I-NEXT:    xori a0, a0, 1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -450,7 +450,7 @@ define i32 @fcmp_uge(float %a, float %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __ltsf2@plt
+; RV64I-NEXT:    call __ltsf2
 ; RV64I-NEXT:    slti a0, a0, 0
 ; RV64I-NEXT:    xori a0, a0, 1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -478,7 +478,7 @@ define i32 @fcmp_ult(float %a, float %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __gesf2@plt
+; RV32I-NEXT:    call __gesf2
 ; RV32I-NEXT:    slti a0, a0, 0
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -488,7 +488,7 @@ define i32 @fcmp_ult(float %a, float %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __gesf2@plt
+; RV64I-NEXT:    call __gesf2
 ; RV64I-NEXT:    slti a0, a0, 0
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -515,7 +515,7 @@ define i32 @fcmp_ule(float %a, float %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __gtsf2@plt
+; RV32I-NEXT:    call __gtsf2
 ; RV32I-NEXT:    slti a0, a0, 1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -525,7 +525,7 @@ define i32 @fcmp_ule(float %a, float %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __gtsf2@plt
+; RV64I-NEXT:    call __gtsf2
 ; RV64I-NEXT:    slti a0, a0, 1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -552,7 +552,7 @@ define i32 @fcmp_une(float %a, float %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __nesf2@plt
+; RV32I-NEXT:    call __nesf2
 ; RV32I-NEXT:    snez a0, a0
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -562,7 +562,7 @@ define i32 @fcmp_une(float %a, float %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __nesf2@plt
+; RV64I-NEXT:    call __nesf2
 ; RV64I-NEXT:    snez a0, a0
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -593,7 +593,7 @@ define i32 @fcmp_uno(float %a, float %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __unordsf2@plt
+; RV32I-NEXT:    call __unordsf2
 ; RV32I-NEXT:    snez a0, a0
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -603,7 +603,7 @@ define i32 @fcmp_uno(float %a, float %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __unordsf2@plt
+; RV64I-NEXT:    call __unordsf2
 ; RV64I-NEXT:    snez a0, a0
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/float-frem.ll b/llvm/test/CodeGen/RISCV/float-frem.ll
index 6c15da0cca7bf7..651b1b116adc76 100644
--- a/llvm/test/CodeGen/RISCV/float-frem.ll
+++ b/llvm/test/CodeGen/RISCV/float-frem.ll
@@ -15,21 +15,21 @@
 define float @frem_f32(float %a, float %b) nounwind {
 ; RV32IF-LABEL: frem_f32:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    tail fmodf@plt
+; RV32IF-NEXT:    tail fmodf
 ;
 ; RV64IF-LABEL: frem_f32:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    tail fmodf@plt
+; RV64IF-NEXT:    tail fmodf
 ;
 ; RV32IZFINX-LABEL: frem_f32:
 ; RV32IZFINX:       # %bb.0:
-; RV32IZFINX-NEXT:    tail fmodf@plt
+; RV32IZFINX-NEXT:    tail fmodf
 ;
 ; RV64IZFINX-LABEL: frem_f32:
 ; RV64IZFINX:       # %bb.0:
 ; RV64IZFINX-NEXT:    addi sp, sp, -16
 ; RV64IZFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT:    call fmodf@plt
+; RV64IZFINX-NEXT:    call fmodf
 ; RV64IZFINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFINX-NEXT:    addi sp, sp, 16
 ; RV64IZFINX-NEXT:    ret
@@ -38,7 +38,7 @@ define float @frem_f32(float %a, float %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call fmodf@plt
+; RV32I-NEXT:    call fmodf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -47,7 +47,7 @@ define float @frem_f32(float %a, float %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call fmodf@plt
+; RV64I-NEXT:    call fmodf
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
diff --git a/llvm/test/CodeGen/RISCV/float-intrinsics-strict.ll b/llvm/test/CodeGen/RISCV/float-intrinsics-strict.ll
index d149b35f61c8e7..626db1985bfc7e 100644
--- a/llvm/test/CodeGen/RISCV/float-intrinsics-strict.ll
+++ b/llvm/test/CodeGen/RISCV/float-intrinsics-strict.ll
@@ -35,7 +35,7 @@ define float @sqrt_f32(float %a) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call sqrtf@plt
+; RV32I-NEXT:    call sqrtf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -44,7 +44,7 @@ define float @sqrt_f32(float %a) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call sqrtf@plt
+; RV64I-NEXT:    call sqrtf
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -59,7 +59,7 @@ define float @powi_f32(float %a, i32 %b) nounwind strictfp {
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
 ; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT:    call __powisf2@plt
+; RV32IF-NEXT:    call __powisf2
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
@@ -69,7 +69,7 @@ define float @powi_f32(float %a, i32 %b) nounwind strictfp {
 ; RV64IF-NEXT:    addi sp, sp, -16
 ; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IF-NEXT:    sext.w a0, a0
-; RV64IF-NEXT:    call __powisf2@plt
+; RV64IF-NEXT:    call __powisf2
 ; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
 ; RV64IF-NEXT:    ret
@@ -78,7 +78,7 @@ define float @powi_f32(float %a, i32 %b) nounwind strictfp {
 ; RV32IZFINX:       # %bb.0:
 ; RV32IZFINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT:    call __powisf2@plt
+; RV32IZFINX-NEXT:    call __powisf2
 ; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINX-NEXT:    ret
@@ -88,7 +88,7 @@ define float @powi_f32(float %a, i32 %b) nounwind strictfp {
 ; RV64IZFINX-NEXT:    addi sp, sp, -16
 ; RV64IZFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZFINX-NEXT:    sext.w a1, a1
-; RV64IZFINX-NEXT:    call __powisf2@plt
+; RV64IZFINX-NEXT:    call __powisf2
 ; RV64IZFINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFINX-NEXT:    addi sp, sp, 16
 ; RV64IZFINX-NEXT:    ret
@@ -97,7 +97,7 @@ define float @powi_f32(float %a, i32 %b) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __powisf2@plt
+; RV32I-NEXT:    call __powisf2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -107,7 +107,7 @@ define float @powi_f32(float %a, i32 %b) nounwind strictfp {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sext.w a1, a1
-; RV64I-NEXT:    call __powisf2@plt
+; RV64I-NEXT:    call __powisf2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -122,7 +122,7 @@ define float @sin_f32(float %a) nounwind strictfp {
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
 ; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT:    call sinf@plt
+; RV32IF-NEXT:    call sinf
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
@@ -131,7 +131,7 @@ define float @sin_f32(float %a) nounwind strictfp {
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    addi sp, sp, -16
 ; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT:    call sinf@plt
+; RV64IF-NEXT:    call sinf
 ; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
 ; RV64IF-NEXT:    ret
@@ -140,7 +140,7 @@ define float @sin_f32(float %a) nounwind strictfp {
 ; RV32IZFINX:       # %bb.0:
 ; RV32IZFINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT:    call sinf@plt
+; RV32IZFINX-NEXT:    call sinf
 ; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINX-NEXT:    ret
@@ -149,7 +149,7 @@ define float @sin_f32(float %a) nounwind strictfp {
 ; RV64IZFINX:       # %bb.0:
 ; RV64IZFINX-NEXT:    addi sp, sp, -16
 ; RV64IZFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT:    call sinf@plt
+; RV64IZFINX-NEXT:    call sinf
 ; RV64IZFINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFINX-NEXT:    addi sp, sp, 16
 ; RV64IZFINX-NEXT:    ret
@@ -158,7 +158,7 @@ define float @sin_f32(float %a) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call sinf@plt
+; RV32I-NEXT:    call sinf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -167,7 +167,7 @@ define float @sin_f32(float %a) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call sinf@plt
+; RV64I-NEXT:    call sinf
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -182,7 +182,7 @@ define float @cos_f32(float %a) nounwind strictfp {
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
 ; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT:    call cosf@plt
+; RV32IF-NEXT:    call cosf
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
@@ -191,7 +191,7 @@ define float @cos_f32(float %a) nounwind strictfp {
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    addi sp, sp, -16
 ; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT:    call cosf@plt
+; RV64IF-NEXT:    call cosf
 ; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
 ; RV64IF-NEXT:    ret
@@ -200,7 +200,7 @@ define float @cos_f32(float %a) nounwind strictfp {
 ; RV32IZFINX:       # %bb.0:
 ; RV32IZFINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT:    call cosf@plt
+; RV32IZFINX-NEXT:    call cosf
 ; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINX-NEXT:    ret
@@ -209,7 +209,7 @@ define float @cos_f32(float %a) nounwind strictfp {
 ; RV64IZFINX:       # %bb.0:
 ; RV64IZFINX-NEXT:    addi sp, sp, -16
 ; RV64IZFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT:    call cosf@plt
+; RV64IZFINX-NEXT:    call cosf
 ; RV64IZFINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFINX-NEXT:    addi sp, sp, 16
 ; RV64IZFINX-NEXT:    ret
@@ -218,7 +218,7 @@ define float @cos_f32(float %a) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call cosf@plt
+; RV32I-NEXT:    call cosf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -227,7 +227,7 @@ define float @cos_f32(float %a) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call cosf@plt
+; RV64I-NEXT:    call cosf
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -244,10 +244,10 @@ define float @sincos_f32(float %a) nounwind strictfp {
 ; RV32IF-NEXT:    fsw fs0, 8(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    fsw fs1, 4(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    fmv.s fs0, fa0
-; RV32IF-NEXT:    call sinf@plt
+; RV32IF-NEXT:    call sinf
 ; RV32IF-NEXT:    fmv.s fs1, fa0
 ; RV32IF-NEXT:    fmv.s fa0, fs0
-; RV32IF-NEXT:    call cosf@plt
+; RV32IF-NEXT:    call cosf
 ; RV32IF-NEXT:    fadd.s fa0, fs1, fa0
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    flw fs0, 8(sp) # 4-byte Folded Reload
@@ -262,10 +262,10 @@ define float @sincos_f32(float %a) nounwind strictfp {
 ; RV64IF-NEXT:    fsw fs0, 4(sp) # 4-byte Folded Spill
 ; RV64IF-NEXT:    fsw fs1, 0(sp) # 4-byte Folded Spill
 ; RV64IF-NEXT:    fmv.s fs0, fa0
-; RV64IF-NEXT:    call sinf@plt
+; RV64IF-NEXT:    call sinf
 ; RV64IF-NEXT:    fmv.s fs1, fa0
 ; RV64IF-NEXT:    fmv.s fa0, fs0
-; RV64IF-NEXT:    call cosf@plt
+; RV64IF-NEXT:    call cosf
 ; RV64IF-NEXT:    fadd.s fa0, fs1, fa0
 ; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IF-NEXT:    flw fs0, 4(sp) # 4-byte Folded Reload
@@ -280,10 +280,10 @@ define float @sincos_f32(float %a) nounwind strictfp {
 ; RV32IZFINX-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32IZFINX-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
 ; RV32IZFINX-NEXT:    mv s0, a0
-; RV32IZFINX-NEXT:    call sinf@plt
+; RV32IZFINX-NEXT:    call sinf
 ; RV32IZFINX-NEXT:    mv s1, a0
 ; RV32IZFINX-NEXT:    mv a0, s0
-; RV32IZFINX-NEXT:    call cosf@plt
+; RV32IZFINX-NEXT:    call cosf
 ; RV32IZFINX-NEXT:    fadd.s a0, s1, a0
 ; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINX-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
@@ -298,10 +298,10 @@ define float @sincos_f32(float %a) nounwind strictfp {
 ; RV64IZFINX-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
 ; RV64IZFINX-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
 ; RV64IZFINX-NEXT:    mv s0, a0
-; RV64IZFINX-NEXT:    call sinf@plt
+; RV64IZFINX-NEXT:    call sinf
 ; RV64IZFINX-NEXT:    mv s1, a0
 ; RV64IZFINX-NEXT:    mv a0, s0
-; RV64IZFINX-NEXT:    call cosf@plt
+; RV64IZFINX-NEXT:    call cosf
 ; RV64IZFINX-NEXT:    fadd.s a0, s1, a0
 ; RV64IZFINX-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64IZFINX-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
@@ -316,13 +316,13 @@ define float @sincos_f32(float %a) nounwind strictfp {
 ; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
-; RV32I-NEXT:    call sinf@plt
+; RV32I-NEXT:    call sinf
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call cosf@plt
+; RV32I-NEXT:    call cosf
 ; RV32I-NEXT:    mv a1, a0
 ; RV32I-NEXT:    mv a0, s1
-; RV32I-NEXT:    call __addsf3@plt
+; RV32I-NEXT:    call __addsf3
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
@@ -336,13 +336,13 @@ define float @sincos_f32(float %a) nounwind strictfp {
 ; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
-; RV64I-NEXT:    call sinf@plt
+; RV64I-NEXT:    call sinf
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call cosf@plt
+; RV64I-NEXT:    call cosf
 ; RV64I-NEXT:    mv a1, a0
 ; RV64I-NEXT:    mv a0, s1
-; RV64I-NEXT:    call __addsf3@plt
+; RV64I-NEXT:    call __addsf3
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
@@ -361,7 +361,7 @@ define float @pow_f32(float %a, float %b) nounwind strictfp {
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
 ; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT:    call powf@plt
+; RV32IF-NEXT:    call powf
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
@@ -370,7 +370,7 @@ define float @pow_f32(float %a, float %b) nounwind strictfp {
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    addi sp, sp, -16
 ; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT:    call powf@plt
+; RV64IF-NEXT:    call powf
 ; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
 ; RV64IF-NEXT:    ret
@@ -379,7 +379,7 @@ define float @pow_f32(float %a, float %b) nounwind strictfp {
 ; RV32IZFINX:       # %bb.0:
 ; RV32IZFINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT:    call powf@plt
+; RV32IZFINX-NEXT:    call powf
 ; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINX-NEXT:    ret
@@ -388,7 +388,7 @@ define float @pow_f32(float %a, float %b) nounwind strictfp {
 ; RV64IZFINX:       # %bb.0:
 ; RV64IZFINX-NEXT:    addi sp, sp, -16
 ; RV64IZFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT:    call powf@plt
+; RV64IZFINX-NEXT:    call powf
 ; RV64IZFINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFINX-NEXT:    addi sp, sp, 16
 ; RV64IZFINX-NEXT:    ret
@@ -397,7 +397,7 @@ define float @pow_f32(float %a, float %b) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call powf@plt
+; RV32I-NEXT:    call powf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -406,7 +406,7 @@ define float @pow_f32(float %a, float %b) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call powf@plt
+; RV64I-NEXT:    call powf
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -421,7 +421,7 @@ define float @exp_f32(float %a) nounwind strictfp {
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
 ; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT:    call expf@plt
+; RV32IF-NEXT:    call expf
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
@@ -430,7 +430,7 @@ define float @exp_f32(float %a) nounwind strictfp {
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    addi sp, sp, -16
 ; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT:    call expf@plt
+; RV64IF-NEXT:    call expf
 ; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
 ; RV64IF-NEXT:    ret
@@ -439,7 +439,7 @@ define float @exp_f32(float %a) nounwind strictfp {
 ; RV32IZFINX:       # %bb.0:
 ; RV32IZFINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT:    call expf@plt
+; RV32IZFINX-NEXT:    call expf
 ; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINX-NEXT:    ret
@@ -448,7 +448,7 @@ define float @exp_f32(float %a) nounwind strictfp {
 ; RV64IZFINX:       # %bb.0:
 ; RV64IZFINX-NEXT:    addi sp, sp, -16
 ; RV64IZFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT:    call expf@plt
+; RV64IZFINX-NEXT:    call expf
 ; RV64IZFINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFINX-NEXT:    addi sp, sp, 16
 ; RV64IZFINX-NEXT:    ret
@@ -457,7 +457,7 @@ define float @exp_f32(float %a) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call expf@plt
+; RV32I-NEXT:    call expf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -466,7 +466,7 @@ define float @exp_f32(float %a) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call expf@plt
+; RV64I-NEXT:    call expf
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -481,7 +481,7 @@ define float @exp2_f32(float %a) nounwind strictfp {
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
 ; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT:    call exp2f@plt
+; RV32IF-NEXT:    call exp2f
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
@@ -490,7 +490,7 @@ define float @exp2_f32(float %a) nounwind strictfp {
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    addi sp, sp, -16
 ; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT:    call exp2f@plt
+; RV64IF-NEXT:    call exp2f
 ; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
 ; RV64IF-NEXT:    ret
@@ -499,7 +499,7 @@ define float @exp2_f32(float %a) nounwind strictfp {
 ; RV32IZFINX:       # %bb.0:
 ; RV32IZFINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT:    call exp2f@plt
+; RV32IZFINX-NEXT:    call exp2f
 ; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINX-NEXT:    ret
@@ -508,7 +508,7 @@ define float @exp2_f32(float %a) nounwind strictfp {
 ; RV64IZFINX:       # %bb.0:
 ; RV64IZFINX-NEXT:    addi sp, sp, -16
 ; RV64IZFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT:    call exp2f@plt
+; RV64IZFINX-NEXT:    call exp2f
 ; RV64IZFINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFINX-NEXT:    addi sp, sp, 16
 ; RV64IZFINX-NEXT:    ret
@@ -517,7 +517,7 @@ define float @exp2_f32(float %a) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call exp2f@plt
+; RV32I-NEXT:    call exp2f
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -526,7 +526,7 @@ define float @exp2_f32(float %a) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call exp2f@plt
+; RV64I-NEXT:    call exp2f
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -541,7 +541,7 @@ define float @log_f32(float %a) nounwind strictfp {
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
 ; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT:    call logf@plt
+; RV32IF-NEXT:    call logf
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
@@ -550,7 +550,7 @@ define float @log_f32(float %a) nounwind strictfp {
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    addi sp, sp, -16
 ; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT:    call logf@plt
+; RV64IF-NEXT:    call logf
 ; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
 ; RV64IF-NEXT:    ret
@@ -559,7 +559,7 @@ define float @log_f32(float %a) nounwind strictfp {
 ; RV32IZFINX:       # %bb.0:
 ; RV32IZFINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT:    call logf@plt
+; RV32IZFINX-NEXT:    call logf
 ; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINX-NEXT:    ret
@@ -568,7 +568,7 @@ define float @log_f32(float %a) nounwind strictfp {
 ; RV64IZFINX:       # %bb.0:
 ; RV64IZFINX-NEXT:    addi sp, sp, -16
 ; RV64IZFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT:    call logf@plt
+; RV64IZFINX-NEXT:    call logf
 ; RV64IZFINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFINX-NEXT:    addi sp, sp, 16
 ; RV64IZFINX-NEXT:    ret
@@ -577,7 +577,7 @@ define float @log_f32(float %a) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call logf@plt
+; RV32I-NEXT:    call logf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -586,7 +586,7 @@ define float @log_f32(float %a) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call logf@plt
+; RV64I-NEXT:    call logf
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -601,7 +601,7 @@ define float @log10_f32(float %a) nounwind strictfp {
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
 ; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT:    call log10f@plt
+; RV32IF-NEXT:    call log10f
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
@@ -610,7 +610,7 @@ define float @log10_f32(float %a) nounwind strictfp {
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    addi sp, sp, -16
 ; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT:    call log10f@plt
+; RV64IF-NEXT:    call log10f
 ; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
 ; RV64IF-NEXT:    ret
@@ -619,7 +619,7 @@ define float @log10_f32(float %a) nounwind strictfp {
 ; RV32IZFINX:       # %bb.0:
 ; RV32IZFINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT:    call log10f@plt
+; RV32IZFINX-NEXT:    call log10f
 ; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINX-NEXT:    ret
@@ -628,7 +628,7 @@ define float @log10_f32(float %a) nounwind strictfp {
 ; RV64IZFINX:       # %bb.0:
 ; RV64IZFINX-NEXT:    addi sp, sp, -16
 ; RV64IZFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT:    call log10f@plt
+; RV64IZFINX-NEXT:    call log10f
 ; RV64IZFINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFINX-NEXT:    addi sp, sp, 16
 ; RV64IZFINX-NEXT:    ret
@@ -637,7 +637,7 @@ define float @log10_f32(float %a) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call log10f@plt
+; RV32I-NEXT:    call log10f
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -646,7 +646,7 @@ define float @log10_f32(float %a) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call log10f@plt
+; RV64I-NEXT:    call log10f
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -661,7 +661,7 @@ define float @log2_f32(float %a) nounwind strictfp {
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
 ; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT:    call log2f@plt
+; RV32IF-NEXT:    call log2f
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
@@ -670,7 +670,7 @@ define float @log2_f32(float %a) nounwind strictfp {
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    addi sp, sp, -16
 ; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT:    call log2f@plt
+; RV64IF-NEXT:    call log2f
 ; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
 ; RV64IF-NEXT:    ret
@@ -679,7 +679,7 @@ define float @log2_f32(float %a) nounwind strictfp {
 ; RV32IZFINX:       # %bb.0:
 ; RV32IZFINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT:    call log2f@plt
+; RV32IZFINX-NEXT:    call log2f
 ; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINX-NEXT:    ret
@@ -688,7 +688,7 @@ define float @log2_f32(float %a) nounwind strictfp {
 ; RV64IZFINX:       # %bb.0:
 ; RV64IZFINX-NEXT:    addi sp, sp, -16
 ; RV64IZFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT:    call log2f@plt
+; RV64IZFINX-NEXT:    call log2f
 ; RV64IZFINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFINX-NEXT:    addi sp, sp, 16
 ; RV64IZFINX-NEXT:    ret
@@ -697,7 +697,7 @@ define float @log2_f32(float %a) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call log2f@plt
+; RV32I-NEXT:    call log2f
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -706,7 +706,7 @@ define float @log2_f32(float %a) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call log2f@plt
+; RV64I-NEXT:    call log2f
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -731,7 +731,7 @@ define float @fma_f32(float %a, float %b, float %c) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call fmaf@plt
+; RV32I-NEXT:    call fmaf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -740,7 +740,7 @@ define float @fma_f32(float %a, float %b, float %c) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call fmaf@plt
+; RV64I-NEXT:    call fmaf
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -767,9 +767,9 @@ define float @fmuladd_f32(float %a, float %b, float %c) nounwind strictfp {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a2
-; RV32I-NEXT:    call __mulsf3@plt
+; RV32I-NEXT:    call __mulsf3
 ; RV32I-NEXT:    mv a1, s0
-; RV32I-NEXT:    call __addsf3@plt
+; RV32I-NEXT:    call __addsf3
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -781,9 +781,9 @@ define float @fmuladd_f32(float %a, float %b, float %c) nounwind strictfp {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a2
-; RV64I-NEXT:    call __mulsf3@plt
+; RV64I-NEXT:    call __mulsf3
 ; RV64I-NEXT:    mv a1, s0
-; RV64I-NEXT:    call __addsf3@plt
+; RV64I-NEXT:    call __addsf3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -799,7 +799,7 @@ define float @minnum_f32(float %a, float %b) nounwind strictfp {
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
 ; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT:    call fminf@plt
+; RV32IF-NEXT:    call fminf
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
@@ -808,7 +808,7 @@ define float @minnum_f32(float %a, float %b) nounwind strictfp {
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    addi sp, sp, -16
 ; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT:    call fminf@plt
+; RV64IF-NEXT:    call fminf
 ; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
 ; RV64IF-NEXT:    ret
@@ -817,7 +817,7 @@ define float @minnum_f32(float %a, float %b) nounwind strictfp {
 ; RV32IZFINX:       # %bb.0:
 ; RV32IZFINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT:    call fminf@plt
+; RV32IZFINX-NEXT:    call fminf
 ; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINX-NEXT:    ret
@@ -826,7 +826,7 @@ define float @minnum_f32(float %a, float %b) nounwind strictfp {
 ; RV64IZFINX:       # %bb.0:
 ; RV64IZFINX-NEXT:    addi sp, sp, -16
 ; RV64IZFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT:    call fminf@plt
+; RV64IZFINX-NEXT:    call fminf
 ; RV64IZFINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFINX-NEXT:    addi sp, sp, 16
 ; RV64IZFINX-NEXT:    ret
@@ -835,7 +835,7 @@ define float @minnum_f32(float %a, float %b) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call fminf@plt
+; RV32I-NEXT:    call fminf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -844,7 +844,7 @@ define float @minnum_f32(float %a, float %b) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call fminf@plt
+; RV64I-NEXT:    call fminf
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -859,7 +859,7 @@ define float @maxnum_f32(float %a, float %b) nounwind strictfp {
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
 ; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT:    call fmaxf@plt
+; RV32IF-NEXT:    call fmaxf
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
@@ -868,7 +868,7 @@ define float @maxnum_f32(float %a, float %b) nounwind strictfp {
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    addi sp, sp, -16
 ; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT:    call fmaxf@plt
+; RV64IF-NEXT:    call fmaxf
 ; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
 ; RV64IF-NEXT:    ret
@@ -877,7 +877,7 @@ define float @maxnum_f32(float %a, float %b) nounwind strictfp {
 ; RV32IZFINX:       # %bb.0:
 ; RV32IZFINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT:    call fmaxf@plt
+; RV32IZFINX-NEXT:    call fmaxf
 ; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINX-NEXT:    ret
@@ -886,7 +886,7 @@ define float @maxnum_f32(float %a, float %b) nounwind strictfp {
 ; RV64IZFINX:       # %bb.0:
 ; RV64IZFINX-NEXT:    addi sp, sp, -16
 ; RV64IZFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT:    call fmaxf@plt
+; RV64IZFINX-NEXT:    call fmaxf
 ; RV64IZFINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFINX-NEXT:    addi sp, sp, 16
 ; RV64IZFINX-NEXT:    ret
@@ -895,7 +895,7 @@ define float @maxnum_f32(float %a, float %b) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call fmaxf@plt
+; RV32I-NEXT:    call fmaxf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -904,7 +904,7 @@ define float @maxnum_f32(float %a, float %b) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call fmaxf@plt
+; RV64I-NEXT:    call fmaxf
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -936,7 +936,7 @@ define float @floor_f32(float %a) nounwind strictfp {
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
 ; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT:    call floorf@plt
+; RV32IF-NEXT:    call floorf
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
@@ -945,7 +945,7 @@ define float @floor_f32(float %a) nounwind strictfp {
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    addi sp, sp, -16
 ; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT:    call floorf@plt
+; RV64IF-NEXT:    call floorf
 ; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
 ; RV64IF-NEXT:    ret
@@ -954,7 +954,7 @@ define float @floor_f32(float %a) nounwind strictfp {
 ; RV32IZFINX:       # %bb.0:
 ; RV32IZFINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT:    call floorf@plt
+; RV32IZFINX-NEXT:    call floorf
 ; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINX-NEXT:    ret
@@ -963,7 +963,7 @@ define float @floor_f32(float %a) nounwind strictfp {
 ; RV64IZFINX:       # %bb.0:
 ; RV64IZFINX-NEXT:    addi sp, sp, -16
 ; RV64IZFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT:    call floorf@plt
+; RV64IZFINX-NEXT:    call floorf
 ; RV64IZFINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFINX-NEXT:    addi sp, sp, 16
 ; RV64IZFINX-NEXT:    ret
@@ -972,7 +972,7 @@ define float @floor_f32(float %a) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call floorf@plt
+; RV32I-NEXT:    call floorf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -981,7 +981,7 @@ define float @floor_f32(float %a) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call floorf@plt
+; RV64I-NEXT:    call floorf
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -996,7 +996,7 @@ define float @ceil_f32(float %a) nounwind strictfp {
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
 ; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT:    call ceilf@plt
+; RV32IF-NEXT:    call ceilf
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
@@ -1005,7 +1005,7 @@ define float @ceil_f32(float %a) nounwind strictfp {
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    addi sp, sp, -16
 ; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT:    call ceilf@plt
+; RV64IF-NEXT:    call ceilf
 ; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
 ; RV64IF-NEXT:    ret
@@ -1014,7 +1014,7 @@ define float @ceil_f32(float %a) nounwind strictfp {
 ; RV32IZFINX:       # %bb.0:
 ; RV32IZFINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT:    call ceilf@plt
+; RV32IZFINX-NEXT:    call ceilf
 ; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINX-NEXT:    ret
@@ -1023,7 +1023,7 @@ define float @ceil_f32(float %a) nounwind strictfp {
 ; RV64IZFINX:       # %bb.0:
 ; RV64IZFINX-NEXT:    addi sp, sp, -16
 ; RV64IZFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT:    call ceilf@plt
+; RV64IZFINX-NEXT:    call ceilf
 ; RV64IZFINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFINX-NEXT:    addi sp, sp, 16
 ; RV64IZFINX-NEXT:    ret
@@ -1032,7 +1032,7 @@ define float @ceil_f32(float %a) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call ceilf@plt
+; RV32I-NEXT:    call ceilf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1041,7 +1041,7 @@ define float @ceil_f32(float %a) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call ceilf@plt
+; RV64I-NEXT:    call ceilf
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1056,7 +1056,7 @@ define float @trunc_f32(float %a) nounwind strictfp {
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
 ; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT:    call truncf@plt
+; RV32IF-NEXT:    call truncf
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
@@ -1065,7 +1065,7 @@ define float @trunc_f32(float %a) nounwind strictfp {
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    addi sp, sp, -16
 ; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT:    call truncf@plt
+; RV64IF-NEXT:    call truncf
 ; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
 ; RV64IF-NEXT:    ret
@@ -1074,7 +1074,7 @@ define float @trunc_f32(float %a) nounwind strictfp {
 ; RV32IZFINX:       # %bb.0:
 ; RV32IZFINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT:    call truncf@plt
+; RV32IZFINX-NEXT:    call truncf
 ; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINX-NEXT:    ret
@@ -1083,7 +1083,7 @@ define float @trunc_f32(float %a) nounwind strictfp {
 ; RV64IZFINX:       # %bb.0:
 ; RV64IZFINX-NEXT:    addi sp, sp, -16
 ; RV64IZFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT:    call truncf@plt
+; RV64IZFINX-NEXT:    call truncf
 ; RV64IZFINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFINX-NEXT:    addi sp, sp, 16
 ; RV64IZFINX-NEXT:    ret
@@ -1092,7 +1092,7 @@ define float @trunc_f32(float %a) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call truncf@plt
+; RV32I-NEXT:    call truncf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1101,7 +1101,7 @@ define float @trunc_f32(float %a) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call truncf@plt
+; RV64I-NEXT:    call truncf
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1116,7 +1116,7 @@ define float @rint_f32(float %a) nounwind strictfp {
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
 ; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT:    call rintf@plt
+; RV32IF-NEXT:    call rintf
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
@@ -1125,7 +1125,7 @@ define float @rint_f32(float %a) nounwind strictfp {
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    addi sp, sp, -16
 ; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT:    call rintf@plt
+; RV64IF-NEXT:    call rintf
 ; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
 ; RV64IF-NEXT:    ret
@@ -1134,7 +1134,7 @@ define float @rint_f32(float %a) nounwind strictfp {
 ; RV32IZFINX:       # %bb.0:
 ; RV32IZFINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT:    call rintf@plt
+; RV32IZFINX-NEXT:    call rintf
 ; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINX-NEXT:    ret
@@ -1143,7 +1143,7 @@ define float @rint_f32(float %a) nounwind strictfp {
 ; RV64IZFINX:       # %bb.0:
 ; RV64IZFINX-NEXT:    addi sp, sp, -16
 ; RV64IZFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT:    call rintf@plt
+; RV64IZFINX-NEXT:    call rintf
 ; RV64IZFINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFINX-NEXT:    addi sp, sp, 16
 ; RV64IZFINX-NEXT:    ret
@@ -1152,7 +1152,7 @@ define float @rint_f32(float %a) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call rintf@plt
+; RV32I-NEXT:    call rintf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1161,7 +1161,7 @@ define float @rint_f32(float %a) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call rintf@plt
+; RV64I-NEXT:    call rintf
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1176,7 +1176,7 @@ define float @nearbyint_f32(float %a) nounwind strictfp {
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
 ; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT:    call nearbyintf@plt
+; RV32IF-NEXT:    call nearbyintf
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
@@ -1185,7 +1185,7 @@ define float @nearbyint_f32(float %a) nounwind strictfp {
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    addi sp, sp, -16
 ; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT:    call nearbyintf@plt
+; RV64IF-NEXT:    call nearbyintf
 ; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
 ; RV64IF-NEXT:    ret
@@ -1194,7 +1194,7 @@ define float @nearbyint_f32(float %a) nounwind strictfp {
 ; RV32IZFINX:       # %bb.0:
 ; RV32IZFINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT:    call nearbyintf@plt
+; RV32IZFINX-NEXT:    call nearbyintf
 ; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINX-NEXT:    ret
@@ -1203,7 +1203,7 @@ define float @nearbyint_f32(float %a) nounwind strictfp {
 ; RV64IZFINX:       # %bb.0:
 ; RV64IZFINX-NEXT:    addi sp, sp, -16
 ; RV64IZFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT:    call nearbyintf@plt
+; RV64IZFINX-NEXT:    call nearbyintf
 ; RV64IZFINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFINX-NEXT:    addi sp, sp, 16
 ; RV64IZFINX-NEXT:    ret
@@ -1212,7 +1212,7 @@ define float @nearbyint_f32(float %a) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call nearbyintf@plt
+; RV32I-NEXT:    call nearbyintf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1221,7 +1221,7 @@ define float @nearbyint_f32(float %a) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call nearbyintf@plt
+; RV64I-NEXT:    call nearbyintf
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1236,7 +1236,7 @@ define float @round_f32(float %a) nounwind strictfp {
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
 ; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT:    call roundf@plt
+; RV32IF-NEXT:    call roundf
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
@@ -1245,7 +1245,7 @@ define float @round_f32(float %a) nounwind strictfp {
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    addi sp, sp, -16
 ; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT:    call roundf@plt
+; RV64IF-NEXT:    call roundf
 ; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
 ; RV64IF-NEXT:    ret
@@ -1254,7 +1254,7 @@ define float @round_f32(float %a) nounwind strictfp {
 ; RV32IZFINX:       # %bb.0:
 ; RV32IZFINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT:    call roundf@plt
+; RV32IZFINX-NEXT:    call roundf
 ; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINX-NEXT:    ret
@@ -1263,7 +1263,7 @@ define float @round_f32(float %a) nounwind strictfp {
 ; RV64IZFINX:       # %bb.0:
 ; RV64IZFINX-NEXT:    addi sp, sp, -16
 ; RV64IZFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT:    call roundf@plt
+; RV64IZFINX-NEXT:    call roundf
 ; RV64IZFINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFINX-NEXT:    addi sp, sp, 16
 ; RV64IZFINX-NEXT:    ret
@@ -1272,7 +1272,7 @@ define float @round_f32(float %a) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call roundf@plt
+; RV32I-NEXT:    call roundf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1281,7 +1281,7 @@ define float @round_f32(float %a) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call roundf@plt
+; RV64I-NEXT:    call roundf
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1296,7 +1296,7 @@ define float @roundeven_f32(float %a) nounwind strictfp {
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
 ; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT:    call roundevenf@plt
+; RV32IF-NEXT:    call roundevenf
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
@@ -1305,7 +1305,7 @@ define float @roundeven_f32(float %a) nounwind strictfp {
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    addi sp, sp, -16
 ; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT:    call roundevenf@plt
+; RV64IF-NEXT:    call roundevenf
 ; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
 ; RV64IF-NEXT:    ret
@@ -1314,7 +1314,7 @@ define float @roundeven_f32(float %a) nounwind strictfp {
 ; RV32IZFINX:       # %bb.0:
 ; RV32IZFINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT:    call roundevenf@plt
+; RV32IZFINX-NEXT:    call roundevenf
 ; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINX-NEXT:    ret
@@ -1323,7 +1323,7 @@ define float @roundeven_f32(float %a) nounwind strictfp {
 ; RV64IZFINX:       # %bb.0:
 ; RV64IZFINX-NEXT:    addi sp, sp, -16
 ; RV64IZFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT:    call roundevenf@plt
+; RV64IZFINX-NEXT:    call roundevenf
 ; RV64IZFINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFINX-NEXT:    addi sp, sp, 16
 ; RV64IZFINX-NEXT:    ret
@@ -1332,7 +1332,7 @@ define float @roundeven_f32(float %a) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call roundevenf@plt
+; RV32I-NEXT:    call roundevenf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1341,7 +1341,7 @@ define float @roundeven_f32(float %a) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call roundevenf@plt
+; RV64I-NEXT:    call roundevenf
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1376,7 +1376,7 @@ define iXLen @lrint_f32(float %a) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call lrintf@plt
+; RV32I-NEXT:    call lrintf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1385,7 +1385,7 @@ define iXLen @lrint_f32(float %a) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call lrintf@plt
+; RV64I-NEXT:    call lrintf
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1420,7 +1420,7 @@ define iXLen @lround_f32(float %a) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call lroundf@plt
+; RV32I-NEXT:    call lroundf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1429,7 +1429,7 @@ define iXLen @lround_f32(float %a) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call lroundf@plt
+; RV64I-NEXT:    call lroundf
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1444,7 +1444,7 @@ define i64 @llrint_f32(float %a) nounwind strictfp {
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
 ; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT:    call llrintf@plt
+; RV32IF-NEXT:    call llrintf
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
@@ -1458,7 +1458,7 @@ define i64 @llrint_f32(float %a) nounwind strictfp {
 ; RV32IZFINX:       # %bb.0:
 ; RV32IZFINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT:    call llrintf@plt
+; RV32IZFINX-NEXT:    call llrintf
 ; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINX-NEXT:    ret
@@ -1472,7 +1472,7 @@ define i64 @llrint_f32(float %a) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call llrintf@plt
+; RV32I-NEXT:    call llrintf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1481,7 +1481,7 @@ define i64 @llrint_f32(float %a) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call llrintf@plt
+; RV64I-NEXT:    call llrintf
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1496,7 +1496,7 @@ define i64 @llround_f32(float %a) nounwind strictfp {
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
 ; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT:    call llroundf@plt
+; RV32IF-NEXT:    call llroundf
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
@@ -1510,7 +1510,7 @@ define i64 @llround_f32(float %a) nounwind strictfp {
 ; RV32IZFINX:       # %bb.0:
 ; RV32IZFINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT:    call llroundf@plt
+; RV32IZFINX-NEXT:    call llroundf
 ; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINX-NEXT:    ret
@@ -1524,7 +1524,7 @@ define i64 @llround_f32(float %a) nounwind strictfp {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call llroundf@plt
+; RV32I-NEXT:    call llroundf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1533,7 +1533,7 @@ define i64 @llround_f32(float %a) nounwind strictfp {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call llroundf@plt
+; RV64I-NEXT:    call llroundf
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
diff --git a/llvm/test/CodeGen/RISCV/float-intrinsics.ll b/llvm/test/CodeGen/RISCV/float-intrinsics.ll
index e7ec2fdaf93fab..a00d82942cabe8 100644
--- a/llvm/test/CodeGen/RISCV/float-intrinsics.ll
+++ b/llvm/test/CodeGen/RISCV/float-intrinsics.ll
@@ -49,7 +49,7 @@ define float @sqrt_f32(float %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call sqrtf@plt
+; RV32I-NEXT:    call sqrtf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -58,7 +58,7 @@ define float @sqrt_f32(float %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call sqrtf@plt
+; RV64I-NEXT:    call sqrtf
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -71,18 +71,18 @@ declare float @llvm.powi.f32.i32(float, i32)
 define float @powi_f32(float %a, i32 %b) nounwind {
 ; RV32IF-LABEL: powi_f32:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    tail __powisf2@plt
+; RV32IF-NEXT:    tail __powisf2
 ;
 ; RV32IZFINX-LABEL: powi_f32:
 ; RV32IZFINX:       # %bb.0:
-; RV32IZFINX-NEXT:    tail __powisf2@plt
+; RV32IZFINX-NEXT:    tail __powisf2
 ;
 ; RV64IF-LABEL: powi_f32:
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    addi sp, sp, -16
 ; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IF-NEXT:    sext.w a0, a0
-; RV64IF-NEXT:    call __powisf2@plt
+; RV64IF-NEXT:    call __powisf2
 ; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
 ; RV64IF-NEXT:    ret
@@ -92,7 +92,7 @@ define float @powi_f32(float %a, i32 %b) nounwind {
 ; RV64IZFINX-NEXT:    addi sp, sp, -16
 ; RV64IZFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZFINX-NEXT:    sext.w a1, a1
-; RV64IZFINX-NEXT:    call __powisf2@plt
+; RV64IZFINX-NEXT:    call __powisf2
 ; RV64IZFINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFINX-NEXT:    addi sp, sp, 16
 ; RV64IZFINX-NEXT:    ret
@@ -101,7 +101,7 @@ define float @powi_f32(float %a, i32 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __powisf2@plt
+; RV32I-NEXT:    call __powisf2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -111,7 +111,7 @@ define float @powi_f32(float %a, i32 %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sext.w a1, a1
-; RV64I-NEXT:    call __powisf2@plt
+; RV64I-NEXT:    call __powisf2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -124,21 +124,21 @@ declare float @llvm.sin.f32(float)
 define float @sin_f32(float %a) nounwind {
 ; RV32IF-LABEL: sin_f32:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    tail sinf@plt
+; RV32IF-NEXT:    tail sinf
 ;
 ; RV32IZFINX-LABEL: sin_f32:
 ; RV32IZFINX:       # %bb.0:
-; RV32IZFINX-NEXT:    tail sinf@plt
+; RV32IZFINX-NEXT:    tail sinf
 ;
 ; RV64IF-LABEL: sin_f32:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    tail sinf@plt
+; RV64IF-NEXT:    tail sinf
 ;
 ; RV64IZFINX-LABEL: sin_f32:
 ; RV64IZFINX:       # %bb.0:
 ; RV64IZFINX-NEXT:    addi sp, sp, -16
 ; RV64IZFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT:    call sinf@plt
+; RV64IZFINX-NEXT:    call sinf
 ; RV64IZFINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFINX-NEXT:    addi sp, sp, 16
 ; RV64IZFINX-NEXT:    ret
@@ -147,7 +147,7 @@ define float @sin_f32(float %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call sinf@plt
+; RV32I-NEXT:    call sinf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -156,7 +156,7 @@ define float @sin_f32(float %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call sinf@plt
+; RV64I-NEXT:    call sinf
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -169,21 +169,21 @@ declare float @llvm.cos.f32(float)
 define float @cos_f32(float %a) nounwind {
 ; RV32IF-LABEL: cos_f32:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    tail cosf@plt
+; RV32IF-NEXT:    tail cosf
 ;
 ; RV32IZFINX-LABEL: cos_f32:
 ; RV32IZFINX:       # %bb.0:
-; RV32IZFINX-NEXT:    tail cosf@plt
+; RV32IZFINX-NEXT:    tail cosf
 ;
 ; RV64IF-LABEL: cos_f32:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    tail cosf@plt
+; RV64IF-NEXT:    tail cosf
 ;
 ; RV64IZFINX-LABEL: cos_f32:
 ; RV64IZFINX:       # %bb.0:
 ; RV64IZFINX-NEXT:    addi sp, sp, -16
 ; RV64IZFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT:    call cosf@plt
+; RV64IZFINX-NEXT:    call cosf
 ; RV64IZFINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFINX-NEXT:    addi sp, sp, 16
 ; RV64IZFINX-NEXT:    ret
@@ -192,7 +192,7 @@ define float @cos_f32(float %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call cosf@plt
+; RV32I-NEXT:    call cosf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -201,7 +201,7 @@ define float @cos_f32(float %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call cosf@plt
+; RV64I-NEXT:    call cosf
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -218,10 +218,10 @@ define float @sincos_f32(float %a) nounwind {
 ; RV32IF-NEXT:    fsw fs0, 8(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    fsw fs1, 4(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    fmv.s fs0, fa0
-; RV32IF-NEXT:    call sinf@plt
+; RV32IF-NEXT:    call sinf
 ; RV32IF-NEXT:    fmv.s fs1, fa0
 ; RV32IF-NEXT:    fmv.s fa0, fs0
-; RV32IF-NEXT:    call cosf@plt
+; RV32IF-NEXT:    call cosf
 ; RV32IF-NEXT:    fadd.s fa0, fs1, fa0
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    flw fs0, 8(sp) # 4-byte Folded Reload
@@ -236,10 +236,10 @@ define float @sincos_f32(float %a) nounwind {
 ; RV32IZFINX-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32IZFINX-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
 ; RV32IZFINX-NEXT:    mv s0, a0
-; RV32IZFINX-NEXT:    call sinf@plt
+; RV32IZFINX-NEXT:    call sinf
 ; RV32IZFINX-NEXT:    mv s1, a0
 ; RV32IZFINX-NEXT:    mv a0, s0
-; RV32IZFINX-NEXT:    call cosf@plt
+; RV32IZFINX-NEXT:    call cosf
 ; RV32IZFINX-NEXT:    fadd.s a0, s1, a0
 ; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINX-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
@@ -254,10 +254,10 @@ define float @sincos_f32(float %a) nounwind {
 ; RV64IZFINX-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
 ; RV64IZFINX-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
 ; RV64IZFINX-NEXT:    mv s0, a0
-; RV64IZFINX-NEXT:    call sinf@plt
+; RV64IZFINX-NEXT:    call sinf
 ; RV64IZFINX-NEXT:    mv s1, a0
 ; RV64IZFINX-NEXT:    mv a0, s0
-; RV64IZFINX-NEXT:    call cosf@plt
+; RV64IZFINX-NEXT:    call cosf
 ; RV64IZFINX-NEXT:    fadd.s a0, s1, a0
 ; RV64IZFINX-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64IZFINX-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
@@ -272,13 +272,13 @@ define float @sincos_f32(float %a) nounwind {
 ; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
-; RV32I-NEXT:    call sinf@plt
+; RV32I-NEXT:    call sinf
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call cosf@plt
+; RV32I-NEXT:    call cosf
 ; RV32I-NEXT:    mv a1, a0
 ; RV32I-NEXT:    mv a0, s1
-; RV32I-NEXT:    call __addsf3@plt
+; RV32I-NEXT:    call __addsf3
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
@@ -292,13 +292,13 @@ define float @sincos_f32(float %a) nounwind {
 ; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
-; RV64I-NEXT:    call sinf@plt
+; RV64I-NEXT:    call sinf
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call cosf@plt
+; RV64I-NEXT:    call cosf
 ; RV64I-NEXT:    mv a1, a0
 ; RV64I-NEXT:    mv a0, s1
-; RV64I-NEXT:    call __addsf3@plt
+; RV64I-NEXT:    call __addsf3
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
@@ -315,21 +315,21 @@ declare float @llvm.pow.f32(float, float)
 define float @pow_f32(float %a, float %b) nounwind {
 ; RV32IF-LABEL: pow_f32:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    tail powf@plt
+; RV32IF-NEXT:    tail powf
 ;
 ; RV32IZFINX-LABEL: pow_f32:
 ; RV32IZFINX:       # %bb.0:
-; RV32IZFINX-NEXT:    tail powf@plt
+; RV32IZFINX-NEXT:    tail powf
 ;
 ; RV64IF-LABEL: pow_f32:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    tail powf@plt
+; RV64IF-NEXT:    tail powf
 ;
 ; RV64IZFINX-LABEL: pow_f32:
 ; RV64IZFINX:       # %bb.0:
 ; RV64IZFINX-NEXT:    addi sp, sp, -16
 ; RV64IZFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT:    call powf@plt
+; RV64IZFINX-NEXT:    call powf
 ; RV64IZFINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFINX-NEXT:    addi sp, sp, 16
 ; RV64IZFINX-NEXT:    ret
@@ -338,7 +338,7 @@ define float @pow_f32(float %a, float %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call powf@plt
+; RV32I-NEXT:    call powf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -347,7 +347,7 @@ define float @pow_f32(float %a, float %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call powf@plt
+; RV64I-NEXT:    call powf
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -360,21 +360,21 @@ declare float @llvm.exp.f32(float)
 define float @exp_f32(float %a) nounwind {
 ; RV32IF-LABEL: exp_f32:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    tail expf@plt
+; RV32IF-NEXT:    tail expf
 ;
 ; RV32IZFINX-LABEL: exp_f32:
 ; RV32IZFINX:       # %bb.0:
-; RV32IZFINX-NEXT:    tail expf@plt
+; RV32IZFINX-NEXT:    tail expf
 ;
 ; RV64IF-LABEL: exp_f32:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    tail expf@plt
+; RV64IF-NEXT:    tail expf
 ;
 ; RV64IZFINX-LABEL: exp_f32:
 ; RV64IZFINX:       # %bb.0:
 ; RV64IZFINX-NEXT:    addi sp, sp, -16
 ; RV64IZFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT:    call expf@plt
+; RV64IZFINX-NEXT:    call expf
 ; RV64IZFINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFINX-NEXT:    addi sp, sp, 16
 ; RV64IZFINX-NEXT:    ret
@@ -383,7 +383,7 @@ define float @exp_f32(float %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call expf@plt
+; RV32I-NEXT:    call expf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -392,7 +392,7 @@ define float @exp_f32(float %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call expf@plt
+; RV64I-NEXT:    call expf
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -405,21 +405,21 @@ declare float @llvm.exp2.f32(float)
 define float @exp2_f32(float %a) nounwind {
 ; RV32IF-LABEL: exp2_f32:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    tail exp2f@plt
+; RV32IF-NEXT:    tail exp2f
 ;
 ; RV32IZFINX-LABEL: exp2_f32:
 ; RV32IZFINX:       # %bb.0:
-; RV32IZFINX-NEXT:    tail exp2f@plt
+; RV32IZFINX-NEXT:    tail exp2f
 ;
 ; RV64IF-LABEL: exp2_f32:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    tail exp2f@plt
+; RV64IF-NEXT:    tail exp2f
 ;
 ; RV64IZFINX-LABEL: exp2_f32:
 ; RV64IZFINX:       # %bb.0:
 ; RV64IZFINX-NEXT:    addi sp, sp, -16
 ; RV64IZFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT:    call exp2f@plt
+; RV64IZFINX-NEXT:    call exp2f
 ; RV64IZFINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFINX-NEXT:    addi sp, sp, 16
 ; RV64IZFINX-NEXT:    ret
@@ -428,7 +428,7 @@ define float @exp2_f32(float %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call exp2f@plt
+; RV32I-NEXT:    call exp2f
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -437,7 +437,7 @@ define float @exp2_f32(float %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call exp2f@plt
+; RV64I-NEXT:    call exp2f
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -450,21 +450,21 @@ declare float @llvm.log.f32(float)
 define float @log_f32(float %a) nounwind {
 ; RV32IF-LABEL: log_f32:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    tail logf@plt
+; RV32IF-NEXT:    tail logf
 ;
 ; RV32IZFINX-LABEL: log_f32:
 ; RV32IZFINX:       # %bb.0:
-; RV32IZFINX-NEXT:    tail logf@plt
+; RV32IZFINX-NEXT:    tail logf
 ;
 ; RV64IF-LABEL: log_f32:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    tail logf@plt
+; RV64IF-NEXT:    tail logf
 ;
 ; RV64IZFINX-LABEL: log_f32:
 ; RV64IZFINX:       # %bb.0:
 ; RV64IZFINX-NEXT:    addi sp, sp, -16
 ; RV64IZFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT:    call logf@plt
+; RV64IZFINX-NEXT:    call logf
 ; RV64IZFINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFINX-NEXT:    addi sp, sp, 16
 ; RV64IZFINX-NEXT:    ret
@@ -473,7 +473,7 @@ define float @log_f32(float %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call logf@plt
+; RV32I-NEXT:    call logf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -482,7 +482,7 @@ define float @log_f32(float %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call logf@plt
+; RV64I-NEXT:    call logf
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -495,21 +495,21 @@ declare float @llvm.log10.f32(float)
 define float @log10_f32(float %a) nounwind {
 ; RV32IF-LABEL: log10_f32:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    tail log10f@plt
+; RV32IF-NEXT:    tail log10f
 ;
 ; RV32IZFINX-LABEL: log10_f32:
 ; RV32IZFINX:       # %bb.0:
-; RV32IZFINX-NEXT:    tail log10f@plt
+; RV32IZFINX-NEXT:    tail log10f
 ;
 ; RV64IF-LABEL: log10_f32:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    tail log10f@plt
+; RV64IF-NEXT:    tail log10f
 ;
 ; RV64IZFINX-LABEL: log10_f32:
 ; RV64IZFINX:       # %bb.0:
 ; RV64IZFINX-NEXT:    addi sp, sp, -16
 ; RV64IZFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT:    call log10f@plt
+; RV64IZFINX-NEXT:    call log10f
 ; RV64IZFINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFINX-NEXT:    addi sp, sp, 16
 ; RV64IZFINX-NEXT:    ret
@@ -518,7 +518,7 @@ define float @log10_f32(float %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call log10f@plt
+; RV32I-NEXT:    call log10f
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -527,7 +527,7 @@ define float @log10_f32(float %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call log10f@plt
+; RV64I-NEXT:    call log10f
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -540,21 +540,21 @@ declare float @llvm.log2.f32(float)
 define float @log2_f32(float %a) nounwind {
 ; RV32IF-LABEL: log2_f32:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    tail log2f@plt
+; RV32IF-NEXT:    tail log2f
 ;
 ; RV32IZFINX-LABEL: log2_f32:
 ; RV32IZFINX:       # %bb.0:
-; RV32IZFINX-NEXT:    tail log2f@plt
+; RV32IZFINX-NEXT:    tail log2f
 ;
 ; RV64IF-LABEL: log2_f32:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    tail log2f@plt
+; RV64IF-NEXT:    tail log2f
 ;
 ; RV64IZFINX-LABEL: log2_f32:
 ; RV64IZFINX:       # %bb.0:
 ; RV64IZFINX-NEXT:    addi sp, sp, -16
 ; RV64IZFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT:    call log2f@plt
+; RV64IZFINX-NEXT:    call log2f
 ; RV64IZFINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFINX-NEXT:    addi sp, sp, 16
 ; RV64IZFINX-NEXT:    ret
@@ -563,7 +563,7 @@ define float @log2_f32(float %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call log2f@plt
+; RV32I-NEXT:    call log2f
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -572,7 +572,7 @@ define float @log2_f32(float %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call log2f@plt
+; RV64I-NEXT:    call log2f
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -607,7 +607,7 @@ define float @fma_f32(float %a, float %b, float %c) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call fmaf@plt
+; RV32I-NEXT:    call fmaf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -616,7 +616,7 @@ define float @fma_f32(float %a, float %b, float %c) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call fmaf@plt
+; RV64I-NEXT:    call fmaf
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -653,9 +653,9 @@ define float @fmuladd_f32(float %a, float %b, float %c) nounwind {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a2
-; RV32I-NEXT:    call __mulsf3@plt
+; RV32I-NEXT:    call __mulsf3
 ; RV32I-NEXT:    mv a1, s0
-; RV32I-NEXT:    call __addsf3@plt
+; RV32I-NEXT:    call __addsf3
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -667,9 +667,9 @@ define float @fmuladd_f32(float %a, float %b, float %c) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a2
-; RV64I-NEXT:    call __mulsf3@plt
+; RV64I-NEXT:    call __mulsf3
 ; RV64I-NEXT:    mv a1, s0
-; RV64I-NEXT:    call __addsf3@plt
+; RV64I-NEXT:    call __addsf3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -745,7 +745,7 @@ define float @minnum_f32(float %a, float %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call fminf@plt
+; RV32I-NEXT:    call fminf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -754,7 +754,7 @@ define float @minnum_f32(float %a, float %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call fminf@plt
+; RV64I-NEXT:    call fminf
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -789,7 +789,7 @@ define float @maxnum_f32(float %a, float %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call fmaxf@plt
+; RV32I-NEXT:    call fmaxf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -798,7 +798,7 @@ define float @maxnum_f32(float %a, float %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call fmaxf@plt
+; RV64I-NEXT:    call fmaxf
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -928,7 +928,7 @@ define float @floor_f32(float %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call floorf@plt
+; RV32I-NEXT:    call floorf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -937,7 +937,7 @@ define float @floor_f32(float %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call floorf@plt
+; RV64I-NEXT:    call floorf
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1006,7 +1006,7 @@ define float @ceil_f32(float %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call ceilf@plt
+; RV32I-NEXT:    call ceilf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1015,7 +1015,7 @@ define float @ceil_f32(float %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call ceilf@plt
+; RV64I-NEXT:    call ceilf
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1084,7 +1084,7 @@ define float @trunc_f32(float %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call truncf@plt
+; RV32I-NEXT:    call truncf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1093,7 +1093,7 @@ define float @trunc_f32(float %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call truncf@plt
+; RV64I-NEXT:    call truncf
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1162,7 +1162,7 @@ define float @rint_f32(float %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call rintf@plt
+; RV32I-NEXT:    call rintf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1171,7 +1171,7 @@ define float @rint_f32(float %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call rintf@plt
+; RV64I-NEXT:    call rintf
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1184,21 +1184,21 @@ declare float @llvm.nearbyint.f32(float)
 define float @nearbyint_f32(float %a) nounwind {
 ; RV32IF-LABEL: nearbyint_f32:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    tail nearbyintf@plt
+; RV32IF-NEXT:    tail nearbyintf
 ;
 ; RV32IZFINX-LABEL: nearbyint_f32:
 ; RV32IZFINX:       # %bb.0:
-; RV32IZFINX-NEXT:    tail nearbyintf@plt
+; RV32IZFINX-NEXT:    tail nearbyintf
 ;
 ; RV64IF-LABEL: nearbyint_f32:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    tail nearbyintf@plt
+; RV64IF-NEXT:    tail nearbyintf
 ;
 ; RV64IZFINX-LABEL: nearbyint_f32:
 ; RV64IZFINX:       # %bb.0:
 ; RV64IZFINX-NEXT:    addi sp, sp, -16
 ; RV64IZFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT:    call nearbyintf@plt
+; RV64IZFINX-NEXT:    call nearbyintf
 ; RV64IZFINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFINX-NEXT:    addi sp, sp, 16
 ; RV64IZFINX-NEXT:    ret
@@ -1207,7 +1207,7 @@ define float @nearbyint_f32(float %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call nearbyintf@plt
+; RV32I-NEXT:    call nearbyintf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1216,7 +1216,7 @@ define float @nearbyint_f32(float %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call nearbyintf@plt
+; RV64I-NEXT:    call nearbyintf
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1285,7 +1285,7 @@ define float @round_f32(float %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call roundf@plt
+; RV32I-NEXT:    call roundf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1294,7 +1294,7 @@ define float @round_f32(float %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call roundf@plt
+; RV64I-NEXT:    call roundf
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1363,7 +1363,7 @@ define float @roundeven_f32(float %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call roundevenf@plt
+; RV32I-NEXT:    call roundevenf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1372,7 +1372,7 @@ define float @roundeven_f32(float %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call roundevenf@plt
+; RV64I-NEXT:    call roundevenf
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1407,7 +1407,7 @@ define iXLen @lrint_f32(float %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call lrintf@plt
+; RV32I-NEXT:    call lrintf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1416,7 +1416,7 @@ define iXLen @lrint_f32(float %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call lrintf@plt
+; RV64I-NEXT:    call lrintf
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1452,7 +1452,7 @@ define iXLen @lround_f32(float %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call lroundf@plt
+; RV32I-NEXT:    call lroundf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1461,7 +1461,7 @@ define iXLen @lround_f32(float %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call lroundf@plt
+; RV64I-NEXT:    call lroundf
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1496,7 +1496,7 @@ define i32 @lround_i32_f32(float %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call lroundf@plt
+; RV32I-NEXT:    call lroundf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1505,7 +1505,7 @@ define i32 @lround_i32_f32(float %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call lroundf@plt
+; RV64I-NEXT:    call lroundf
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1520,7 +1520,7 @@ define i64 @llrint_f32(float %a) nounwind {
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
 ; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT:    call llrintf@plt
+; RV32IF-NEXT:    call llrintf
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
@@ -1529,7 +1529,7 @@ define i64 @llrint_f32(float %a) nounwind {
 ; RV32IZFINX:       # %bb.0:
 ; RV32IZFINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT:    call llrintf@plt
+; RV32IZFINX-NEXT:    call llrintf
 ; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINX-NEXT:    ret
@@ -1548,7 +1548,7 @@ define i64 @llrint_f32(float %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call llrintf@plt
+; RV32I-NEXT:    call llrintf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1557,7 +1557,7 @@ define i64 @llrint_f32(float %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call llrintf@plt
+; RV64I-NEXT:    call llrintf
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1572,7 +1572,7 @@ define i64 @llround_f32(float %a) nounwind {
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
 ; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT:    call llroundf@plt
+; RV32IF-NEXT:    call llroundf
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
@@ -1581,7 +1581,7 @@ define i64 @llround_f32(float %a) nounwind {
 ; RV32IZFINX:       # %bb.0:
 ; RV32IZFINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT:    call llroundf@plt
+; RV32IZFINX-NEXT:    call llroundf
 ; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINX-NEXT:    ret
@@ -1600,7 +1600,7 @@ define i64 @llround_f32(float %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call llroundf@plt
+; RV32I-NEXT:    call llroundf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1609,7 +1609,7 @@ define i64 @llround_f32(float %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call llroundf@plt
+; RV64I-NEXT:    call llroundf
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
diff --git a/llvm/test/CodeGen/RISCV/float-mem.ll b/llvm/test/CodeGen/RISCV/float-mem.ll
index b5d5f8e7c7e693..3779d39a753e1e 100644
--- a/llvm/test/CodeGen/RISCV/float-mem.ll
+++ b/llvm/test/CodeGen/RISCV/float-mem.ll
@@ -142,7 +142,7 @@ define dso_local float @flw_stack(float %a) nounwind {
 ; RV32IF-NEXT:    fsw fs0, 8(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    fmv.s fs0, fa0
 ; RV32IF-NEXT:    addi a0, sp, 4
-; RV32IF-NEXT:    call notdead@plt
+; RV32IF-NEXT:    call notdead
 ; RV32IF-NEXT:    flw fa5, 4(sp)
 ; RV32IF-NEXT:    fadd.s fa0, fa5, fs0
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -157,7 +157,7 @@ define dso_local float @flw_stack(float %a) nounwind {
 ; RV64IF-NEXT:    fsw fs0, 4(sp) # 4-byte Folded Spill
 ; RV64IF-NEXT:    fmv.s fs0, fa0
 ; RV64IF-NEXT:    mv a0, sp
-; RV64IF-NEXT:    call notdead@plt
+; RV64IF-NEXT:    call notdead
 ; RV64IF-NEXT:    flw fa5, 0(sp)
 ; RV64IF-NEXT:    fadd.s fa0, fa5, fs0
 ; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -172,7 +172,7 @@ define dso_local float @flw_stack(float %a) nounwind {
 ; RV32IZFINX-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32IZFINX-NEXT:    mv s0, a0
 ; RV32IZFINX-NEXT:    addi a0, sp, 4
-; RV32IZFINX-NEXT:    call notdead@plt
+; RV32IZFINX-NEXT:    call notdead
 ; RV32IZFINX-NEXT:    lw a0, 4(sp)
 ; RV32IZFINX-NEXT:    fadd.s a0, a0, s0
 ; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -187,7 +187,7 @@ define dso_local float @flw_stack(float %a) nounwind {
 ; RV64IZFINX-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
 ; RV64IZFINX-NEXT:    mv s0, a0
 ; RV64IZFINX-NEXT:    addi a0, sp, 12
-; RV64IZFINX-NEXT:    call notdead@plt
+; RV64IZFINX-NEXT:    call notdead
 ; RV64IZFINX-NEXT:    lw a0, 12(sp)
 ; RV64IZFINX-NEXT:    fadd.s a0, a0, s0
 ; RV64IZFINX-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
@@ -209,7 +209,7 @@ define dso_local void @fsw_stack(float %a, float %b) nounwind {
 ; RV32IF-NEXT:    fadd.s fa5, fa0, fa1
 ; RV32IF-NEXT:    fsw fa5, 8(sp)
 ; RV32IF-NEXT:    addi a0, sp, 8
-; RV32IF-NEXT:    call notdead@plt
+; RV32IF-NEXT:    call notdead
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
@@ -221,7 +221,7 @@ define dso_local void @fsw_stack(float %a, float %b) nounwind {
 ; RV64IF-NEXT:    fadd.s fa5, fa0, fa1
 ; RV64IF-NEXT:    fsw fa5, 4(sp)
 ; RV64IF-NEXT:    addi a0, sp, 4
-; RV64IF-NEXT:    call notdead@plt
+; RV64IF-NEXT:    call notdead
 ; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
 ; RV64IF-NEXT:    ret
@@ -233,7 +233,7 @@ define dso_local void @fsw_stack(float %a, float %b) nounwind {
 ; RV32IZFINX-NEXT:    fadd.s a0, a0, a1
 ; RV32IZFINX-NEXT:    sw a0, 8(sp)
 ; RV32IZFINX-NEXT:    addi a0, sp, 8
-; RV32IZFINX-NEXT:    call notdead@plt
+; RV32IZFINX-NEXT:    call notdead
 ; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINX-NEXT:    ret
@@ -245,7 +245,7 @@ define dso_local void @fsw_stack(float %a, float %b) nounwind {
 ; RV64IZFINX-NEXT:    fadd.s a0, a0, a1
 ; RV64IZFINX-NEXT:    sw a0, 4(sp)
 ; RV64IZFINX-NEXT:    addi a0, sp, 4
-; RV64IZFINX-NEXT:    call notdead@plt
+; RV64IZFINX-NEXT:    call notdead
 ; RV64IZFINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFINX-NEXT:    addi sp, sp, 16
 ; RV64IZFINX-NEXT:    ret
diff --git a/llvm/test/CodeGen/RISCV/float-round-conv-sat.ll b/llvm/test/CodeGen/RISCV/float-round-conv-sat.ll
index d947d0f25cdd8f..5e99c7eb905628 100644
--- a/llvm/test/CodeGen/RISCV/float-round-conv-sat.ll
+++ b/llvm/test/CodeGen/RISCV/float-round-conv-sat.ll
@@ -53,7 +53,7 @@ define i64 @test_floor_si64(float %x) nounwind {
 ; RV32IF-NEXT:    fmv.w.x fa5, a0
 ; RV32IF-NEXT:    fle.s s0, fa5, fs0
 ; RV32IF-NEXT:    fmv.s fa0, fs0
-; RV32IF-NEXT:    call __fixsfdi@plt
+; RV32IF-NEXT:    call __fixsfdi
 ; RV32IF-NEXT:    lui a4, 524288
 ; RV32IF-NEXT:    lui a2, 524288
 ; RV32IF-NEXT:    beqz s0, .LBB1_4
@@ -111,7 +111,7 @@ define i64 @test_floor_si64(float %x) nounwind {
 ; RV32IZFINX-NEXT:    fle.s s1, a0, s0
 ; RV32IZFINX-NEXT:    neg s2, s1
 ; RV32IZFINX-NEXT:    mv a0, s0
-; RV32IZFINX-NEXT:    call __fixsfdi@plt
+; RV32IZFINX-NEXT:    call __fixsfdi
 ; RV32IZFINX-NEXT:    lui a2, %hi(.LCPI1_0)
 ; RV32IZFINX-NEXT:    lw a2, %lo(.LCPI1_0)(a2)
 ; RV32IZFINX-NEXT:    and a0, s2, a0
@@ -197,7 +197,7 @@ define i64 @test_floor_ui64(float %x) nounwind {
 ; RV32IF-NEXT:    fle.s a0, fa5, fs0
 ; RV32IF-NEXT:    neg s0, a0
 ; RV32IF-NEXT:    fmv.s fa0, fs0
-; RV32IF-NEXT:    call __fixunssfdi@plt
+; RV32IF-NEXT:    call __fixunssfdi
 ; RV32IF-NEXT:    lui a2, %hi(.LCPI3_0)
 ; RV32IF-NEXT:    flw fa5, %lo(.LCPI3_0)(a2)
 ; RV32IF-NEXT:    and a0, s0, a0
@@ -240,7 +240,7 @@ define i64 @test_floor_ui64(float %x) nounwind {
 ; RV32IZFINX-NEXT:    fle.s a0, zero, s0
 ; RV32IZFINX-NEXT:    neg s1, a0
 ; RV32IZFINX-NEXT:    mv a0, s0
-; RV32IZFINX-NEXT:    call __fixunssfdi@plt
+; RV32IZFINX-NEXT:    call __fixunssfdi
 ; RV32IZFINX-NEXT:    lui a2, %hi(.LCPI3_0)
 ; RV32IZFINX-NEXT:    lw a2, %lo(.LCPI3_0)(a2)
 ; RV32IZFINX-NEXT:    and a0, s1, a0
@@ -313,7 +313,7 @@ define i64 @test_ceil_si64(float %x) nounwind {
 ; RV32IF-NEXT:    fmv.w.x fa5, a0
 ; RV32IF-NEXT:    fle.s s0, fa5, fs0
 ; RV32IF-NEXT:    fmv.s fa0, fs0
-; RV32IF-NEXT:    call __fixsfdi@plt
+; RV32IF-NEXT:    call __fixsfdi
 ; RV32IF-NEXT:    lui a4, 524288
 ; RV32IF-NEXT:    lui a2, 524288
 ; RV32IF-NEXT:    beqz s0, .LBB5_4
@@ -371,7 +371,7 @@ define i64 @test_ceil_si64(float %x) nounwind {
 ; RV32IZFINX-NEXT:    fle.s s1, a0, s0
 ; RV32IZFINX-NEXT:    neg s2, s1
 ; RV32IZFINX-NEXT:    mv a0, s0
-; RV32IZFINX-NEXT:    call __fixsfdi@plt
+; RV32IZFINX-NEXT:    call __fixsfdi
 ; RV32IZFINX-NEXT:    lui a2, %hi(.LCPI5_0)
 ; RV32IZFINX-NEXT:    lw a2, %lo(.LCPI5_0)(a2)
 ; RV32IZFINX-NEXT:    and a0, s2, a0
@@ -457,7 +457,7 @@ define i64 @test_ceil_ui64(float %x) nounwind {
 ; RV32IF-NEXT:    fle.s a0, fa5, fs0
 ; RV32IF-NEXT:    neg s0, a0
 ; RV32IF-NEXT:    fmv.s fa0, fs0
-; RV32IF-NEXT:    call __fixunssfdi@plt
+; RV32IF-NEXT:    call __fixunssfdi
 ; RV32IF-NEXT:    lui a2, %hi(.LCPI7_0)
 ; RV32IF-NEXT:    flw fa5, %lo(.LCPI7_0)(a2)
 ; RV32IF-NEXT:    and a0, s0, a0
@@ -500,7 +500,7 @@ define i64 @test_ceil_ui64(float %x) nounwind {
 ; RV32IZFINX-NEXT:    fle.s a0, zero, s0
 ; RV32IZFINX-NEXT:    neg s1, a0
 ; RV32IZFINX-NEXT:    mv a0, s0
-; RV32IZFINX-NEXT:    call __fixunssfdi@plt
+; RV32IZFINX-NEXT:    call __fixunssfdi
 ; RV32IZFINX-NEXT:    lui a2, %hi(.LCPI7_0)
 ; RV32IZFINX-NEXT:    lw a2, %lo(.LCPI7_0)(a2)
 ; RV32IZFINX-NEXT:    and a0, s1, a0
@@ -573,7 +573,7 @@ define i64 @test_trunc_si64(float %x) nounwind {
 ; RV32IF-NEXT:    fmv.w.x fa5, a0
 ; RV32IF-NEXT:    fle.s s0, fa5, fs0
 ; RV32IF-NEXT:    fmv.s fa0, fs0
-; RV32IF-NEXT:    call __fixsfdi@plt
+; RV32IF-NEXT:    call __fixsfdi
 ; RV32IF-NEXT:    lui a4, 524288
 ; RV32IF-NEXT:    lui a2, 524288
 ; RV32IF-NEXT:    beqz s0, .LBB9_4
@@ -631,7 +631,7 @@ define i64 @test_trunc_si64(float %x) nounwind {
 ; RV32IZFINX-NEXT:    fle.s s1, a0, s0
 ; RV32IZFINX-NEXT:    neg s2, s1
 ; RV32IZFINX-NEXT:    mv a0, s0
-; RV32IZFINX-NEXT:    call __fixsfdi@plt
+; RV32IZFINX-NEXT:    call __fixsfdi
 ; RV32IZFINX-NEXT:    lui a2, %hi(.LCPI9_0)
 ; RV32IZFINX-NEXT:    lw a2, %lo(.LCPI9_0)(a2)
 ; RV32IZFINX-NEXT:    and a0, s2, a0
@@ -717,7 +717,7 @@ define i64 @test_trunc_ui64(float %x) nounwind {
 ; RV32IF-NEXT:    fle.s a0, fa5, fs0
 ; RV32IF-NEXT:    neg s0, a0
 ; RV32IF-NEXT:    fmv.s fa0, fs0
-; RV32IF-NEXT:    call __fixunssfdi@plt
+; RV32IF-NEXT:    call __fixunssfdi
 ; RV32IF-NEXT:    lui a2, %hi(.LCPI11_0)
 ; RV32IF-NEXT:    flw fa5, %lo(.LCPI11_0)(a2)
 ; RV32IF-NEXT:    and a0, s0, a0
@@ -760,7 +760,7 @@ define i64 @test_trunc_ui64(float %x) nounwind {
 ; RV32IZFINX-NEXT:    fle.s a0, zero, s0
 ; RV32IZFINX-NEXT:    neg s1, a0
 ; RV32IZFINX-NEXT:    mv a0, s0
-; RV32IZFINX-NEXT:    call __fixunssfdi@plt
+; RV32IZFINX-NEXT:    call __fixunssfdi
 ; RV32IZFINX-NEXT:    lui a2, %hi(.LCPI11_0)
 ; RV32IZFINX-NEXT:    lw a2, %lo(.LCPI11_0)(a2)
 ; RV32IZFINX-NEXT:    and a0, s1, a0
@@ -833,7 +833,7 @@ define i64 @test_round_si64(float %x) nounwind {
 ; RV32IF-NEXT:    fmv.w.x fa5, a0
 ; RV32IF-NEXT:    fle.s s0, fa5, fs0
 ; RV32IF-NEXT:    fmv.s fa0, fs0
-; RV32IF-NEXT:    call __fixsfdi@plt
+; RV32IF-NEXT:    call __fixsfdi
 ; RV32IF-NEXT:    lui a4, 524288
 ; RV32IF-NEXT:    lui a2, 524288
 ; RV32IF-NEXT:    beqz s0, .LBB13_4
@@ -891,7 +891,7 @@ define i64 @test_round_si64(float %x) nounwind {
 ; RV32IZFINX-NEXT:    fle.s s1, a0, s0
 ; RV32IZFINX-NEXT:    neg s2, s1
 ; RV32IZFINX-NEXT:    mv a0, s0
-; RV32IZFINX-NEXT:    call __fixsfdi@plt
+; RV32IZFINX-NEXT:    call __fixsfdi
 ; RV32IZFINX-NEXT:    lui a2, %hi(.LCPI13_0)
 ; RV32IZFINX-NEXT:    lw a2, %lo(.LCPI13_0)(a2)
 ; RV32IZFINX-NEXT:    and a0, s2, a0
@@ -977,7 +977,7 @@ define i64 @test_round_ui64(float %x) nounwind {
 ; RV32IF-NEXT:    fle.s a0, fa5, fs0
 ; RV32IF-NEXT:    neg s0, a0
 ; RV32IF-NEXT:    fmv.s fa0, fs0
-; RV32IF-NEXT:    call __fixunssfdi@plt
+; RV32IF-NEXT:    call __fixunssfdi
 ; RV32IF-NEXT:    lui a2, %hi(.LCPI15_0)
 ; RV32IF-NEXT:    flw fa5, %lo(.LCPI15_0)(a2)
 ; RV32IF-NEXT:    and a0, s0, a0
@@ -1020,7 +1020,7 @@ define i64 @test_round_ui64(float %x) nounwind {
 ; RV32IZFINX-NEXT:    fle.s a0, zero, s0
 ; RV32IZFINX-NEXT:    neg s1, a0
 ; RV32IZFINX-NEXT:    mv a0, s0
-; RV32IZFINX-NEXT:    call __fixunssfdi@plt
+; RV32IZFINX-NEXT:    call __fixunssfdi
 ; RV32IZFINX-NEXT:    lui a2, %hi(.LCPI15_0)
 ; RV32IZFINX-NEXT:    lw a2, %lo(.LCPI15_0)(a2)
 ; RV32IZFINX-NEXT:    and a0, s1, a0
@@ -1093,7 +1093,7 @@ define i64 @test_roundeven_si64(float %x) nounwind {
 ; RV32IF-NEXT:    fmv.w.x fa5, a0
 ; RV32IF-NEXT:    fle.s s0, fa5, fs0
 ; RV32IF-NEXT:    fmv.s fa0, fs0
-; RV32IF-NEXT:    call __fixsfdi@plt
+; RV32IF-NEXT:    call __fixsfdi
 ; RV32IF-NEXT:    lui a4, 524288
 ; RV32IF-NEXT:    lui a2, 524288
 ; RV32IF-NEXT:    beqz s0, .LBB17_4
@@ -1151,7 +1151,7 @@ define i64 @test_roundeven_si64(float %x) nounwind {
 ; RV32IZFINX-NEXT:    fle.s s1, a0, s0
 ; RV32IZFINX-NEXT:    neg s2, s1
 ; RV32IZFINX-NEXT:    mv a0, s0
-; RV32IZFINX-NEXT:    call __fixsfdi@plt
+; RV32IZFINX-NEXT:    call __fixsfdi
 ; RV32IZFINX-NEXT:    lui a2, %hi(.LCPI17_0)
 ; RV32IZFINX-NEXT:    lw a2, %lo(.LCPI17_0)(a2)
 ; RV32IZFINX-NEXT:    and a0, s2, a0
@@ -1237,7 +1237,7 @@ define i64 @test_roundeven_ui64(float %x) nounwind {
 ; RV32IF-NEXT:    fle.s a0, fa5, fs0
 ; RV32IF-NEXT:    neg s0, a0
 ; RV32IF-NEXT:    fmv.s fa0, fs0
-; RV32IF-NEXT:    call __fixunssfdi@plt
+; RV32IF-NEXT:    call __fixunssfdi
 ; RV32IF-NEXT:    lui a2, %hi(.LCPI19_0)
 ; RV32IF-NEXT:    flw fa5, %lo(.LCPI19_0)(a2)
 ; RV32IF-NEXT:    and a0, s0, a0
@@ -1280,7 +1280,7 @@ define i64 @test_roundeven_ui64(float %x) nounwind {
 ; RV32IZFINX-NEXT:    fle.s a0, zero, s0
 ; RV32IZFINX-NEXT:    neg s1, a0
 ; RV32IZFINX-NEXT:    mv a0, s0
-; RV32IZFINX-NEXT:    call __fixunssfdi@plt
+; RV32IZFINX-NEXT:    call __fixunssfdi
 ; RV32IZFINX-NEXT:    lui a2, %hi(.LCPI19_0)
 ; RV32IZFINX-NEXT:    lw a2, %lo(.LCPI19_0)(a2)
 ; RV32IZFINX-NEXT:    and a0, s1, a0
@@ -1353,7 +1353,7 @@ define i64 @test_rint_si64(float %x) nounwind {
 ; RV32IF-NEXT:    fmv.w.x fa5, a0
 ; RV32IF-NEXT:    fle.s s0, fa5, fs0
 ; RV32IF-NEXT:    fmv.s fa0, fs0
-; RV32IF-NEXT:    call __fixsfdi@plt
+; RV32IF-NEXT:    call __fixsfdi
 ; RV32IF-NEXT:    lui a4, 524288
 ; RV32IF-NEXT:    lui a2, 524288
 ; RV32IF-NEXT:    beqz s0, .LBB21_4
@@ -1411,7 +1411,7 @@ define i64 @test_rint_si64(float %x) nounwind {
 ; RV32IZFINX-NEXT:    fle.s s1, a0, s0
 ; RV32IZFINX-NEXT:    neg s2, s1
 ; RV32IZFINX-NEXT:    mv a0, s0
-; RV32IZFINX-NEXT:    call __fixsfdi@plt
+; RV32IZFINX-NEXT:    call __fixsfdi
 ; RV32IZFINX-NEXT:    lui a2, %hi(.LCPI21_0)
 ; RV32IZFINX-NEXT:    lw a2, %lo(.LCPI21_0)(a2)
 ; RV32IZFINX-NEXT:    and a0, s2, a0
@@ -1497,7 +1497,7 @@ define i64 @test_rint_ui64(float %x) nounwind {
 ; RV32IF-NEXT:    fle.s a0, fa5, fs0
 ; RV32IF-NEXT:    neg s0, a0
 ; RV32IF-NEXT:    fmv.s fa0, fs0
-; RV32IF-NEXT:    call __fixunssfdi@plt
+; RV32IF-NEXT:    call __fixunssfdi
 ; RV32IF-NEXT:    lui a2, %hi(.LCPI23_0)
 ; RV32IF-NEXT:    flw fa5, %lo(.LCPI23_0)(a2)
 ; RV32IF-NEXT:    and a0, s0, a0
@@ -1540,7 +1540,7 @@ define i64 @test_rint_ui64(float %x) nounwind {
 ; RV32IZFINX-NEXT:    fle.s a0, zero, s0
 ; RV32IZFINX-NEXT:    neg s1, a0
 ; RV32IZFINX-NEXT:    mv a0, s0
-; RV32IZFINX-NEXT:    call __fixunssfdi@plt
+; RV32IZFINX-NEXT:    call __fixunssfdi
 ; RV32IZFINX-NEXT:    lui a2, %hi(.LCPI23_0)
 ; RV32IZFINX-NEXT:    lw a2, %lo(.LCPI23_0)(a2)
 ; RV32IZFINX-NEXT:    and a0, s1, a0
diff --git a/llvm/test/CodeGen/RISCV/float-round-conv.ll b/llvm/test/CodeGen/RISCV/float-round-conv.ll
index ed50f867cdb8bb..1b134484397528 100644
--- a/llvm/test/CodeGen/RISCV/float-round-conv.ll
+++ b/llvm/test/CodeGen/RISCV/float-round-conv.ll
@@ -100,7 +100,7 @@ define i64 @test_floor_si64(float %x) {
 ; RV32IF-NEXT:    .cfi_def_cfa_offset 16
 ; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    .cfi_offset ra, -4
-; RV32IF-NEXT:    call __fixsfdi@plt
+; RV32IF-NEXT:    call __fixsfdi
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
@@ -125,7 +125,7 @@ define i64 @test_floor_si64(float %x) {
 ; RV32IZFINX-NEXT:    .cfi_def_cfa_offset 16
 ; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFINX-NEXT:    .cfi_offset ra, -4
-; RV32IZFINX-NEXT:    call __fixsfdi@plt
+; RV32IZFINX-NEXT:    call __fixsfdi
 ; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINX-NEXT:    ret
@@ -231,7 +231,7 @@ define i64 @test_floor_ui64(float %x) {
 ; RV32IF-NEXT:    .cfi_def_cfa_offset 16
 ; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    .cfi_offset ra, -4
-; RV32IF-NEXT:    call __fixunssfdi@plt
+; RV32IF-NEXT:    call __fixunssfdi
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
@@ -256,7 +256,7 @@ define i64 @test_floor_ui64(float %x) {
 ; RV32IZFINX-NEXT:    .cfi_def_cfa_offset 16
 ; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFINX-NEXT:    .cfi_offset ra, -4
-; RV32IZFINX-NEXT:    call __fixunssfdi@plt
+; RV32IZFINX-NEXT:    call __fixunssfdi
 ; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINX-NEXT:    ret
@@ -362,7 +362,7 @@ define i64 @test_ceil_si64(float %x) {
 ; RV32IF-NEXT:    .cfi_def_cfa_offset 16
 ; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    .cfi_offset ra, -4
-; RV32IF-NEXT:    call __fixsfdi@plt
+; RV32IF-NEXT:    call __fixsfdi
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
@@ -387,7 +387,7 @@ define i64 @test_ceil_si64(float %x) {
 ; RV32IZFINX-NEXT:    .cfi_def_cfa_offset 16
 ; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFINX-NEXT:    .cfi_offset ra, -4
-; RV32IZFINX-NEXT:    call __fixsfdi@plt
+; RV32IZFINX-NEXT:    call __fixsfdi
 ; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINX-NEXT:    ret
@@ -493,7 +493,7 @@ define i64 @test_ceil_ui64(float %x) {
 ; RV32IF-NEXT:    .cfi_def_cfa_offset 16
 ; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    .cfi_offset ra, -4
-; RV32IF-NEXT:    call __fixunssfdi@plt
+; RV32IF-NEXT:    call __fixunssfdi
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
@@ -518,7 +518,7 @@ define i64 @test_ceil_ui64(float %x) {
 ; RV32IZFINX-NEXT:    .cfi_def_cfa_offset 16
 ; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFINX-NEXT:    .cfi_offset ra, -4
-; RV32IZFINX-NEXT:    call __fixunssfdi@plt
+; RV32IZFINX-NEXT:    call __fixunssfdi
 ; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINX-NEXT:    ret
@@ -624,7 +624,7 @@ define i64 @test_trunc_si64(float %x) {
 ; RV32IF-NEXT:    .cfi_def_cfa_offset 16
 ; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    .cfi_offset ra, -4
-; RV32IF-NEXT:    call __fixsfdi@plt
+; RV32IF-NEXT:    call __fixsfdi
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
@@ -649,7 +649,7 @@ define i64 @test_trunc_si64(float %x) {
 ; RV32IZFINX-NEXT:    .cfi_def_cfa_offset 16
 ; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFINX-NEXT:    .cfi_offset ra, -4
-; RV32IZFINX-NEXT:    call __fixsfdi@plt
+; RV32IZFINX-NEXT:    call __fixsfdi
 ; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINX-NEXT:    ret
@@ -755,7 +755,7 @@ define i64 @test_trunc_ui64(float %x) {
 ; RV32IF-NEXT:    .cfi_def_cfa_offset 16
 ; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    .cfi_offset ra, -4
-; RV32IF-NEXT:    call __fixunssfdi@plt
+; RV32IF-NEXT:    call __fixunssfdi
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
@@ -780,7 +780,7 @@ define i64 @test_trunc_ui64(float %x) {
 ; RV32IZFINX-NEXT:    .cfi_def_cfa_offset 16
 ; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFINX-NEXT:    .cfi_offset ra, -4
-; RV32IZFINX-NEXT:    call __fixunssfdi@plt
+; RV32IZFINX-NEXT:    call __fixunssfdi
 ; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINX-NEXT:    ret
@@ -886,7 +886,7 @@ define i64 @test_round_si64(float %x) {
 ; RV32IF-NEXT:    .cfi_def_cfa_offset 16
 ; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    .cfi_offset ra, -4
-; RV32IF-NEXT:    call __fixsfdi@plt
+; RV32IF-NEXT:    call __fixsfdi
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
@@ -911,7 +911,7 @@ define i64 @test_round_si64(float %x) {
 ; RV32IZFINX-NEXT:    .cfi_def_cfa_offset 16
 ; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFINX-NEXT:    .cfi_offset ra, -4
-; RV32IZFINX-NEXT:    call __fixsfdi@plt
+; RV32IZFINX-NEXT:    call __fixsfdi
 ; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINX-NEXT:    ret
@@ -1017,7 +1017,7 @@ define i64 @test_round_ui64(float %x) {
 ; RV32IF-NEXT:    .cfi_def_cfa_offset 16
 ; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    .cfi_offset ra, -4
-; RV32IF-NEXT:    call __fixunssfdi@plt
+; RV32IF-NEXT:    call __fixunssfdi
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
@@ -1042,7 +1042,7 @@ define i64 @test_round_ui64(float %x) {
 ; RV32IZFINX-NEXT:    .cfi_def_cfa_offset 16
 ; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFINX-NEXT:    .cfi_offset ra, -4
-; RV32IZFINX-NEXT:    call __fixunssfdi@plt
+; RV32IZFINX-NEXT:    call __fixunssfdi
 ; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINX-NEXT:    ret
@@ -1148,7 +1148,7 @@ define i64 @test_roundeven_si64(float %x) {
 ; RV32IF-NEXT:    .cfi_def_cfa_offset 16
 ; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    .cfi_offset ra, -4
-; RV32IF-NEXT:    call __fixsfdi@plt
+; RV32IF-NEXT:    call __fixsfdi
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
@@ -1173,7 +1173,7 @@ define i64 @test_roundeven_si64(float %x) {
 ; RV32IZFINX-NEXT:    .cfi_def_cfa_offset 16
 ; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFINX-NEXT:    .cfi_offset ra, -4
-; RV32IZFINX-NEXT:    call __fixsfdi@plt
+; RV32IZFINX-NEXT:    call __fixsfdi
 ; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINX-NEXT:    ret
@@ -1279,7 +1279,7 @@ define i64 @test_roundeven_ui64(float %x) {
 ; RV32IF-NEXT:    .cfi_def_cfa_offset 16
 ; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    .cfi_offset ra, -4
-; RV32IF-NEXT:    call __fixunssfdi@plt
+; RV32IF-NEXT:    call __fixunssfdi
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
@@ -1304,7 +1304,7 @@ define i64 @test_roundeven_ui64(float %x) {
 ; RV32IZFINX-NEXT:    .cfi_def_cfa_offset 16
 ; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFINX-NEXT:    .cfi_offset ra, -4
-; RV32IZFINX-NEXT:    call __fixunssfdi@plt
+; RV32IZFINX-NEXT:    call __fixunssfdi
 ; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINX-NEXT:    ret
diff --git a/llvm/test/CodeGen/RISCV/float-zfa.ll b/llvm/test/CodeGen/RISCV/float-zfa.ll
index 52c9ac7333fc51..e5196ead1f8819 100644
--- a/llvm/test/CodeGen/RISCV/float-zfa.ll
+++ b/llvm/test/CodeGen/RISCV/float-zfa.ll
@@ -265,7 +265,7 @@ define void @fli_remat() {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    fli.s fa0, 1.0
 ; CHECK-NEXT:    fli.s fa1, 1.0
-; CHECK-NEXT:    tail foo@plt
+; CHECK-NEXT:    tail foo
   tail call void @foo(float 1.000000e+00, float 1.000000e+00)
   ret void
 }
diff --git a/llvm/test/CodeGen/RISCV/fmax-fmin.ll b/llvm/test/CodeGen/RISCV/fmax-fmin.ll
index b67093d72439c6..9d5729802a0ff7 100644
--- a/llvm/test/CodeGen/RISCV/fmax-fmin.ll
+++ b/llvm/test/CodeGen/RISCV/fmax-fmin.ll
@@ -7,7 +7,7 @@ define float @maxnum_f32(float %x, float %y) nounwind {
 ; R32:       # %bb.0:
 ; R32-NEXT:    addi sp, sp, -16
 ; R32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; R32-NEXT:    call fmaxf@plt
+; R32-NEXT:    call fmaxf
 ; R32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; R32-NEXT:    addi sp, sp, 16
 ; R32-NEXT:    ret
@@ -16,7 +16,7 @@ define float @maxnum_f32(float %x, float %y) nounwind {
 ; R64:       # %bb.0:
 ; R64-NEXT:    addi sp, sp, -16
 ; R64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; R64-NEXT:    call fmaxf@plt
+; R64-NEXT:    call fmaxf
 ; R64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; R64-NEXT:    addi sp, sp, 16
 ; R64-NEXT:    ret
@@ -33,7 +33,7 @@ define float @maxnum_f32_fast(float %x, float %y) nounwind {
 ; R32-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
 ; R32-NEXT:    mv s1, a1
 ; R32-NEXT:    mv s0, a0
-; R32-NEXT:    call __gtsf2@plt
+; R32-NEXT:    call __gtsf2
 ; R32-NEXT:    bgtz a0, .LBB1_2
 ; R32-NEXT:  # %bb.1:
 ; R32-NEXT:    mv s0, s1
@@ -53,7 +53,7 @@ define float @maxnum_f32_fast(float %x, float %y) nounwind {
 ; R64-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
 ; R64-NEXT:    mv s1, a1
 ; R64-NEXT:    mv s0, a0
-; R64-NEXT:    call __gtsf2@plt
+; R64-NEXT:    call __gtsf2
 ; R64-NEXT:    bgtz a0, .LBB1_2
 ; R64-NEXT:  # %bb.1:
 ; R64-NEXT:    mv s0, s1
@@ -73,7 +73,7 @@ define double @maxnum_f64(double %x, double %y) nounwind {
 ; R32:       # %bb.0:
 ; R32-NEXT:    addi sp, sp, -16
 ; R32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; R32-NEXT:    call fmax@plt
+; R32-NEXT:    call fmax
 ; R32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; R32-NEXT:    addi sp, sp, 16
 ; R32-NEXT:    ret
@@ -82,7 +82,7 @@ define double @maxnum_f64(double %x, double %y) nounwind {
 ; R64:       # %bb.0:
 ; R64-NEXT:    addi sp, sp, -16
 ; R64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; R64-NEXT:    call fmax@plt
+; R64-NEXT:    call fmax
 ; R64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; R64-NEXT:    addi sp, sp, 16
 ; R64-NEXT:    ret
@@ -103,7 +103,7 @@ define double @maxnum_f64_nnan(double %x, double %y) nounwind {
 ; R32-NEXT:    mv s2, a2
 ; R32-NEXT:    mv s0, a1
 ; R32-NEXT:    mv s3, a0
-; R32-NEXT:    call __gtdf2@plt
+; R32-NEXT:    call __gtdf2
 ; R32-NEXT:    mv a1, a0
 ; R32-NEXT:    mv a0, s3
 ; R32-NEXT:    bgtz a1, .LBB3_2
@@ -113,7 +113,7 @@ define double @maxnum_f64_nnan(double %x, double %y) nounwind {
 ; R32-NEXT:    mv a1, s0
 ; R32-NEXT:    mv a2, s2
 ; R32-NEXT:    mv a3, s1
-; R32-NEXT:    call __gtdf2@plt
+; R32-NEXT:    call __gtdf2
 ; R32-NEXT:    bgtz a0, .LBB3_4
 ; R32-NEXT:  # %bb.3:
 ; R32-NEXT:    mv s0, s1
@@ -136,7 +136,7 @@ define double @maxnum_f64_nnan(double %x, double %y) nounwind {
 ; R64-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
 ; R64-NEXT:    mv s1, a1
 ; R64-NEXT:    mv s0, a0
-; R64-NEXT:    call __gtdf2@plt
+; R64-NEXT:    call __gtdf2
 ; R64-NEXT:    bgtz a0, .LBB3_2
 ; R64-NEXT:  # %bb.1:
 ; R64-NEXT:    mv s0, s1
@@ -156,7 +156,7 @@ define float @minnum_f32(float %x, float %y) nounwind {
 ; R32:       # %bb.0:
 ; R32-NEXT:    addi sp, sp, -16
 ; R32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; R32-NEXT:    call fminf@plt
+; R32-NEXT:    call fminf
 ; R32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; R32-NEXT:    addi sp, sp, 16
 ; R32-NEXT:    ret
@@ -165,7 +165,7 @@ define float @minnum_f32(float %x, float %y) nounwind {
 ; R64:       # %bb.0:
 ; R64-NEXT:    addi sp, sp, -16
 ; R64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; R64-NEXT:    call fminf@plt
+; R64-NEXT:    call fminf
 ; R64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; R64-NEXT:    addi sp, sp, 16
 ; R64-NEXT:    ret
@@ -182,7 +182,7 @@ define float @minnum_f32_nnan(float %x, float %y) nounwind {
 ; R32-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
 ; R32-NEXT:    mv s1, a1
 ; R32-NEXT:    mv s0, a0
-; R32-NEXT:    call __ltsf2@plt
+; R32-NEXT:    call __ltsf2
 ; R32-NEXT:    bltz a0, .LBB5_2
 ; R32-NEXT:  # %bb.1:
 ; R32-NEXT:    mv s0, s1
@@ -202,7 +202,7 @@ define float @minnum_f32_nnan(float %x, float %y) nounwind {
 ; R64-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
 ; R64-NEXT:    mv s1, a1
 ; R64-NEXT:    mv s0, a0
-; R64-NEXT:    call __ltsf2@plt
+; R64-NEXT:    call __ltsf2
 ; R64-NEXT:    bltz a0, .LBB5_2
 ; R64-NEXT:  # %bb.1:
 ; R64-NEXT:    mv s0, s1
@@ -222,7 +222,7 @@ define double @minnum_f64(double %x, double %y) nounwind {
 ; R32:       # %bb.0:
 ; R32-NEXT:    addi sp, sp, -16
 ; R32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; R32-NEXT:    call fmin@plt
+; R32-NEXT:    call fmin
 ; R32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; R32-NEXT:    addi sp, sp, 16
 ; R32-NEXT:    ret
@@ -231,7 +231,7 @@ define double @minnum_f64(double %x, double %y) nounwind {
 ; R64:       # %bb.0:
 ; R64-NEXT:    addi sp, sp, -16
 ; R64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; R64-NEXT:    call fmin@plt
+; R64-NEXT:    call fmin
 ; R64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; R64-NEXT:    addi sp, sp, 16
 ; R64-NEXT:    ret
@@ -252,7 +252,7 @@ define double @minnum_f64_fast(double %x, double %y) nounwind {
 ; R32-NEXT:    mv s2, a2
 ; R32-NEXT:    mv s0, a1
 ; R32-NEXT:    mv s3, a0
-; R32-NEXT:    call __ltdf2@plt
+; R32-NEXT:    call __ltdf2
 ; R32-NEXT:    mv a1, a0
 ; R32-NEXT:    mv a0, s3
 ; R32-NEXT:    bltz a1, .LBB7_2
@@ -262,7 +262,7 @@ define double @minnum_f64_fast(double %x, double %y) nounwind {
 ; R32-NEXT:    mv a1, s0
 ; R32-NEXT:    mv a2, s2
 ; R32-NEXT:    mv a3, s1
-; R32-NEXT:    call __ltdf2@plt
+; R32-NEXT:    call __ltdf2
 ; R32-NEXT:    bltz a0, .LBB7_4
 ; R32-NEXT:  # %bb.3:
 ; R32-NEXT:    mv s0, s1
@@ -285,7 +285,7 @@ define double @minnum_f64_fast(double %x, double %y) nounwind {
 ; R64-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
 ; R64-NEXT:    mv s1, a1
 ; R64-NEXT:    mv s0, a0
-; R64-NEXT:    call __ltdf2@plt
+; R64-NEXT:    call __ltdf2
 ; R64-NEXT:    bltz a0, .LBB7_2
 ; R64-NEXT:  # %bb.1:
 ; R64-NEXT:    mv s0, s1
diff --git a/llvm/test/CodeGen/RISCV/fold-addi-loadstore.ll b/llvm/test/CodeGen/RISCV/fold-addi-loadstore.ll
index 321857b2104eb5..7c2f775bca14e6 100644
--- a/llvm/test/CodeGen/RISCV/fold-addi-loadstore.ll
+++ b/llvm/test/CodeGen/RISCV/fold-addi-loadstore.ll
@@ -773,7 +773,7 @@ define i64 @fold_addi_from_different_bb(i64 %k, i64 %n, ptr %a) nounwind {
 ; RV32I-NEXT:  .LBB20_5: # %for.body
 ; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call f@plt
+; RV32I-NEXT:    call f
 ; RV32I-NEXT:    lw a0, 12(s7)
 ; RV32I-NEXT:    lw a1, 8(s7)
 ; RV32I-NEXT:    add a0, a0, s4
@@ -838,7 +838,7 @@ define i64 @fold_addi_from_different_bb(i64 %k, i64 %n, ptr %a) nounwind {
 ; RV32I-MEDIUM-NEXT:  .LBB20_5: # %for.body
 ; RV32I-MEDIUM-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-MEDIUM-NEXT:    mv a0, s0
-; RV32I-MEDIUM-NEXT:    call f@plt
+; RV32I-MEDIUM-NEXT:    call f
 ; RV32I-MEDIUM-NEXT:    lw a0, 12(s7)
 ; RV32I-MEDIUM-NEXT:    lw a1, 8(s7)
 ; RV32I-MEDIUM-NEXT:    add a0, a0, s4
@@ -885,7 +885,7 @@ define i64 @fold_addi_from_different_bb(i64 %k, i64 %n, ptr %a) nounwind {
 ; RV64I-NEXT:  .LBB20_2: # %for.body
 ; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call f@plt
+; RV64I-NEXT:    call f
 ; RV64I-NEXT:    ld a0, 8(s3)
 ; RV64I-NEXT:    addi s1, s1, -1
 ; RV64I-NEXT:    add s2, a0, s2
@@ -921,7 +921,7 @@ define i64 @fold_addi_from_different_bb(i64 %k, i64 %n, ptr %a) nounwind {
 ; RV64I-MEDIUM-NEXT:  .LBB20_2: # %for.body
 ; RV64I-MEDIUM-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-MEDIUM-NEXT:    mv a0, s0
-; RV64I-MEDIUM-NEXT:    call f@plt
+; RV64I-MEDIUM-NEXT:    call f
 ; RV64I-MEDIUM-NEXT:    ld a0, 8(s3)
 ; RV64I-MEDIUM-NEXT:    addi s1, s1, -1
 ; RV64I-MEDIUM-NEXT:    add s2, a0, s2
diff --git a/llvm/test/CodeGen/RISCV/forced-atomics.ll b/llvm/test/CodeGen/RISCV/forced-atomics.ll
index f2079e314d51c1..f6a53a9d76dd35 100644
--- a/llvm/test/CodeGen/RISCV/forced-atomics.ll
+++ b/llvm/test/CodeGen/RISCV/forced-atomics.ll
@@ -14,7 +14,7 @@ define i8 @load8(ptr %p) nounwind {
 ; RV32-NO-ATOMIC-NEXT:    addi sp, sp, -16
 ; RV32-NO-ATOMIC-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-NO-ATOMIC-NEXT:    li a1, 5
-; RV32-NO-ATOMIC-NEXT:    call __atomic_load_1@plt
+; RV32-NO-ATOMIC-NEXT:    call __atomic_load_1
 ; RV32-NO-ATOMIC-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-NO-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV32-NO-ATOMIC-NEXT:    ret
@@ -38,7 +38,7 @@ define i8 @load8(ptr %p) nounwind {
 ; RV64-NO-ATOMIC-NEXT:    addi sp, sp, -16
 ; RV64-NO-ATOMIC-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-NO-ATOMIC-NEXT:    li a1, 5
-; RV64-NO-ATOMIC-NEXT:    call __atomic_load_1@plt
+; RV64-NO-ATOMIC-NEXT:    call __atomic_load_1
 ; RV64-NO-ATOMIC-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-NO-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV64-NO-ATOMIC-NEXT:    ret
@@ -67,7 +67,7 @@ define void @store8(ptr %p) nounwind {
 ; RV32-NO-ATOMIC-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-NO-ATOMIC-NEXT:    li a2, 5
 ; RV32-NO-ATOMIC-NEXT:    li a1, 0
-; RV32-NO-ATOMIC-NEXT:    call __atomic_store_1@plt
+; RV32-NO-ATOMIC-NEXT:    call __atomic_store_1
 ; RV32-NO-ATOMIC-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-NO-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV32-NO-ATOMIC-NEXT:    ret
@@ -91,7 +91,7 @@ define void @store8(ptr %p) nounwind {
 ; RV64-NO-ATOMIC-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-NO-ATOMIC-NEXT:    li a2, 5
 ; RV64-NO-ATOMIC-NEXT:    li a1, 0
-; RV64-NO-ATOMIC-NEXT:    call __atomic_store_1@plt
+; RV64-NO-ATOMIC-NEXT:    call __atomic_store_1
 ; RV64-NO-ATOMIC-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-NO-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV64-NO-ATOMIC-NEXT:    ret
@@ -119,7 +119,7 @@ define i8 @rmw8(ptr %p) nounwind {
 ; RV32-NO-ATOMIC-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-NO-ATOMIC-NEXT:    li a1, 1
 ; RV32-NO-ATOMIC-NEXT:    li a2, 5
-; RV32-NO-ATOMIC-NEXT:    call __atomic_fetch_add_1@plt
+; RV32-NO-ATOMIC-NEXT:    call __atomic_fetch_add_1
 ; RV32-NO-ATOMIC-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-NO-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV32-NO-ATOMIC-NEXT:    ret
@@ -129,7 +129,7 @@ define i8 @rmw8(ptr %p) nounwind {
 ; RV32-ATOMIC-NEXT:    addi sp, sp, -16
 ; RV32-ATOMIC-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-ATOMIC-NEXT:    li a1, 1
-; RV32-ATOMIC-NEXT:    call __sync_fetch_and_add_1@plt
+; RV32-ATOMIC-NEXT:    call __sync_fetch_and_add_1
 ; RV32-ATOMIC-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV32-ATOMIC-NEXT:    ret
@@ -139,7 +139,7 @@ define i8 @rmw8(ptr %p) nounwind {
 ; RV32-ATOMIC-TRAILING-NEXT:    addi sp, sp, -16
 ; RV32-ATOMIC-TRAILING-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-ATOMIC-TRAILING-NEXT:    li a1, 1
-; RV32-ATOMIC-TRAILING-NEXT:    call __sync_fetch_and_add_1@plt
+; RV32-ATOMIC-TRAILING-NEXT:    call __sync_fetch_and_add_1
 ; RV32-ATOMIC-TRAILING-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-ATOMIC-TRAILING-NEXT:    addi sp, sp, 16
 ; RV32-ATOMIC-TRAILING-NEXT:    ret
@@ -150,7 +150,7 @@ define i8 @rmw8(ptr %p) nounwind {
 ; RV64-NO-ATOMIC-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-NO-ATOMIC-NEXT:    li a1, 1
 ; RV64-NO-ATOMIC-NEXT:    li a2, 5
-; RV64-NO-ATOMIC-NEXT:    call __atomic_fetch_add_1@plt
+; RV64-NO-ATOMIC-NEXT:    call __atomic_fetch_add_1
 ; RV64-NO-ATOMIC-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-NO-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV64-NO-ATOMIC-NEXT:    ret
@@ -160,7 +160,7 @@ define i8 @rmw8(ptr %p) nounwind {
 ; RV64-ATOMIC-NEXT:    addi sp, sp, -16
 ; RV64-ATOMIC-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-ATOMIC-NEXT:    li a1, 1
-; RV64-ATOMIC-NEXT:    call __sync_fetch_and_add_1@plt
+; RV64-ATOMIC-NEXT:    call __sync_fetch_and_add_1
 ; RV64-ATOMIC-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV64-ATOMIC-NEXT:    ret
@@ -170,7 +170,7 @@ define i8 @rmw8(ptr %p) nounwind {
 ; RV64-ATOMIC-TRAILING-NEXT:    addi sp, sp, -16
 ; RV64-ATOMIC-TRAILING-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-ATOMIC-TRAILING-NEXT:    li a1, 1
-; RV64-ATOMIC-TRAILING-NEXT:    call __sync_fetch_and_add_1@plt
+; RV64-ATOMIC-TRAILING-NEXT:    call __sync_fetch_and_add_1
 ; RV64-ATOMIC-TRAILING-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-ATOMIC-TRAILING-NEXT:    addi sp, sp, 16
 ; RV64-ATOMIC-TRAILING-NEXT:    ret
@@ -188,7 +188,7 @@ define i8 @cmpxchg8(ptr %p) nounwind {
 ; RV32-NO-ATOMIC-NEXT:    li a2, 1
 ; RV32-NO-ATOMIC-NEXT:    li a3, 5
 ; RV32-NO-ATOMIC-NEXT:    li a4, 5
-; RV32-NO-ATOMIC-NEXT:    call __atomic_compare_exchange_1@plt
+; RV32-NO-ATOMIC-NEXT:    call __atomic_compare_exchange_1
 ; RV32-NO-ATOMIC-NEXT:    lbu a0, 11(sp)
 ; RV32-NO-ATOMIC-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-NO-ATOMIC-NEXT:    addi sp, sp, 16
@@ -200,7 +200,7 @@ define i8 @cmpxchg8(ptr %p) nounwind {
 ; RV32-ATOMIC-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-ATOMIC-NEXT:    li a2, 1
 ; RV32-ATOMIC-NEXT:    li a1, 0
-; RV32-ATOMIC-NEXT:    call __sync_val_compare_and_swap_1@plt
+; RV32-ATOMIC-NEXT:    call __sync_val_compare_and_swap_1
 ; RV32-ATOMIC-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV32-ATOMIC-NEXT:    ret
@@ -211,7 +211,7 @@ define i8 @cmpxchg8(ptr %p) nounwind {
 ; RV32-ATOMIC-TRAILING-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-ATOMIC-TRAILING-NEXT:    li a2, 1
 ; RV32-ATOMIC-TRAILING-NEXT:    li a1, 0
-; RV32-ATOMIC-TRAILING-NEXT:    call __sync_val_compare_and_swap_1@plt
+; RV32-ATOMIC-TRAILING-NEXT:    call __sync_val_compare_and_swap_1
 ; RV32-ATOMIC-TRAILING-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-ATOMIC-TRAILING-NEXT:    addi sp, sp, 16
 ; RV32-ATOMIC-TRAILING-NEXT:    ret
@@ -225,7 +225,7 @@ define i8 @cmpxchg8(ptr %p) nounwind {
 ; RV64-NO-ATOMIC-NEXT:    li a2, 1
 ; RV64-NO-ATOMIC-NEXT:    li a3, 5
 ; RV64-NO-ATOMIC-NEXT:    li a4, 5
-; RV64-NO-ATOMIC-NEXT:    call __atomic_compare_exchange_1@plt
+; RV64-NO-ATOMIC-NEXT:    call __atomic_compare_exchange_1
 ; RV64-NO-ATOMIC-NEXT:    lbu a0, 7(sp)
 ; RV64-NO-ATOMIC-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-NO-ATOMIC-NEXT:    addi sp, sp, 16
@@ -237,7 +237,7 @@ define i8 @cmpxchg8(ptr %p) nounwind {
 ; RV64-ATOMIC-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-ATOMIC-NEXT:    li a2, 1
 ; RV64-ATOMIC-NEXT:    li a1, 0
-; RV64-ATOMIC-NEXT:    call __sync_val_compare_and_swap_1@plt
+; RV64-ATOMIC-NEXT:    call __sync_val_compare_and_swap_1
 ; RV64-ATOMIC-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV64-ATOMIC-NEXT:    ret
@@ -248,7 +248,7 @@ define i8 @cmpxchg8(ptr %p) nounwind {
 ; RV64-ATOMIC-TRAILING-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-ATOMIC-TRAILING-NEXT:    li a2, 1
 ; RV64-ATOMIC-TRAILING-NEXT:    li a1, 0
-; RV64-ATOMIC-TRAILING-NEXT:    call __sync_val_compare_and_swap_1@plt
+; RV64-ATOMIC-TRAILING-NEXT:    call __sync_val_compare_and_swap_1
 ; RV64-ATOMIC-TRAILING-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-ATOMIC-TRAILING-NEXT:    addi sp, sp, 16
 ; RV64-ATOMIC-TRAILING-NEXT:    ret
@@ -263,7 +263,7 @@ define i16 @load16(ptr %p) nounwind {
 ; RV32-NO-ATOMIC-NEXT:    addi sp, sp, -16
 ; RV32-NO-ATOMIC-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-NO-ATOMIC-NEXT:    li a1, 5
-; RV32-NO-ATOMIC-NEXT:    call __atomic_load_2@plt
+; RV32-NO-ATOMIC-NEXT:    call __atomic_load_2
 ; RV32-NO-ATOMIC-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-NO-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV32-NO-ATOMIC-NEXT:    ret
@@ -287,7 +287,7 @@ define i16 @load16(ptr %p) nounwind {
 ; RV64-NO-ATOMIC-NEXT:    addi sp, sp, -16
 ; RV64-NO-ATOMIC-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-NO-ATOMIC-NEXT:    li a1, 5
-; RV64-NO-ATOMIC-NEXT:    call __atomic_load_2@plt
+; RV64-NO-ATOMIC-NEXT:    call __atomic_load_2
 ; RV64-NO-ATOMIC-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-NO-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV64-NO-ATOMIC-NEXT:    ret
@@ -316,7 +316,7 @@ define void @store16(ptr %p) nounwind {
 ; RV32-NO-ATOMIC-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-NO-ATOMIC-NEXT:    li a2, 5
 ; RV32-NO-ATOMIC-NEXT:    li a1, 0
-; RV32-NO-ATOMIC-NEXT:    call __atomic_store_2@plt
+; RV32-NO-ATOMIC-NEXT:    call __atomic_store_2
 ; RV32-NO-ATOMIC-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-NO-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV32-NO-ATOMIC-NEXT:    ret
@@ -340,7 +340,7 @@ define void @store16(ptr %p) nounwind {
 ; RV64-NO-ATOMIC-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-NO-ATOMIC-NEXT:    li a2, 5
 ; RV64-NO-ATOMIC-NEXT:    li a1, 0
-; RV64-NO-ATOMIC-NEXT:    call __atomic_store_2@plt
+; RV64-NO-ATOMIC-NEXT:    call __atomic_store_2
 ; RV64-NO-ATOMIC-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-NO-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV64-NO-ATOMIC-NEXT:    ret
@@ -368,7 +368,7 @@ define i16 @rmw16(ptr %p) nounwind {
 ; RV32-NO-ATOMIC-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-NO-ATOMIC-NEXT:    li a1, 1
 ; RV32-NO-ATOMIC-NEXT:    li a2, 5
-; RV32-NO-ATOMIC-NEXT:    call __atomic_fetch_add_2@plt
+; RV32-NO-ATOMIC-NEXT:    call __atomic_fetch_add_2
 ; RV32-NO-ATOMIC-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-NO-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV32-NO-ATOMIC-NEXT:    ret
@@ -378,7 +378,7 @@ define i16 @rmw16(ptr %p) nounwind {
 ; RV32-ATOMIC-NEXT:    addi sp, sp, -16
 ; RV32-ATOMIC-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-ATOMIC-NEXT:    li a1, 1
-; RV32-ATOMIC-NEXT:    call __sync_fetch_and_add_2@plt
+; RV32-ATOMIC-NEXT:    call __sync_fetch_and_add_2
 ; RV32-ATOMIC-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV32-ATOMIC-NEXT:    ret
@@ -388,7 +388,7 @@ define i16 @rmw16(ptr %p) nounwind {
 ; RV32-ATOMIC-TRAILING-NEXT:    addi sp, sp, -16
 ; RV32-ATOMIC-TRAILING-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-ATOMIC-TRAILING-NEXT:    li a1, 1
-; RV32-ATOMIC-TRAILING-NEXT:    call __sync_fetch_and_add_2@plt
+; RV32-ATOMIC-TRAILING-NEXT:    call __sync_fetch_and_add_2
 ; RV32-ATOMIC-TRAILING-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-ATOMIC-TRAILING-NEXT:    addi sp, sp, 16
 ; RV32-ATOMIC-TRAILING-NEXT:    ret
@@ -399,7 +399,7 @@ define i16 @rmw16(ptr %p) nounwind {
 ; RV64-NO-ATOMIC-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-NO-ATOMIC-NEXT:    li a1, 1
 ; RV64-NO-ATOMIC-NEXT:    li a2, 5
-; RV64-NO-ATOMIC-NEXT:    call __atomic_fetch_add_2@plt
+; RV64-NO-ATOMIC-NEXT:    call __atomic_fetch_add_2
 ; RV64-NO-ATOMIC-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-NO-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV64-NO-ATOMIC-NEXT:    ret
@@ -409,7 +409,7 @@ define i16 @rmw16(ptr %p) nounwind {
 ; RV64-ATOMIC-NEXT:    addi sp, sp, -16
 ; RV64-ATOMIC-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-ATOMIC-NEXT:    li a1, 1
-; RV64-ATOMIC-NEXT:    call __sync_fetch_and_add_2@plt
+; RV64-ATOMIC-NEXT:    call __sync_fetch_and_add_2
 ; RV64-ATOMIC-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV64-ATOMIC-NEXT:    ret
@@ -419,7 +419,7 @@ define i16 @rmw16(ptr %p) nounwind {
 ; RV64-ATOMIC-TRAILING-NEXT:    addi sp, sp, -16
 ; RV64-ATOMIC-TRAILING-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-ATOMIC-TRAILING-NEXT:    li a1, 1
-; RV64-ATOMIC-TRAILING-NEXT:    call __sync_fetch_and_add_2@plt
+; RV64-ATOMIC-TRAILING-NEXT:    call __sync_fetch_and_add_2
 ; RV64-ATOMIC-TRAILING-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-ATOMIC-TRAILING-NEXT:    addi sp, sp, 16
 ; RV64-ATOMIC-TRAILING-NEXT:    ret
@@ -437,7 +437,7 @@ define i16 @cmpxchg16(ptr %p) nounwind {
 ; RV32-NO-ATOMIC-NEXT:    li a2, 1
 ; RV32-NO-ATOMIC-NEXT:    li a3, 5
 ; RV32-NO-ATOMIC-NEXT:    li a4, 5
-; RV32-NO-ATOMIC-NEXT:    call __atomic_compare_exchange_2@plt
+; RV32-NO-ATOMIC-NEXT:    call __atomic_compare_exchange_2
 ; RV32-NO-ATOMIC-NEXT:    lh a0, 10(sp)
 ; RV32-NO-ATOMIC-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-NO-ATOMIC-NEXT:    addi sp, sp, 16
@@ -449,7 +449,7 @@ define i16 @cmpxchg16(ptr %p) nounwind {
 ; RV32-ATOMIC-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-ATOMIC-NEXT:    li a2, 1
 ; RV32-ATOMIC-NEXT:    li a1, 0
-; RV32-ATOMIC-NEXT:    call __sync_val_compare_and_swap_2@plt
+; RV32-ATOMIC-NEXT:    call __sync_val_compare_and_swap_2
 ; RV32-ATOMIC-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV32-ATOMIC-NEXT:    ret
@@ -460,7 +460,7 @@ define i16 @cmpxchg16(ptr %p) nounwind {
 ; RV32-ATOMIC-TRAILING-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-ATOMIC-TRAILING-NEXT:    li a2, 1
 ; RV32-ATOMIC-TRAILING-NEXT:    li a1, 0
-; RV32-ATOMIC-TRAILING-NEXT:    call __sync_val_compare_and_swap_2@plt
+; RV32-ATOMIC-TRAILING-NEXT:    call __sync_val_compare_and_swap_2
 ; RV32-ATOMIC-TRAILING-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-ATOMIC-TRAILING-NEXT:    addi sp, sp, 16
 ; RV32-ATOMIC-TRAILING-NEXT:    ret
@@ -474,7 +474,7 @@ define i16 @cmpxchg16(ptr %p) nounwind {
 ; RV64-NO-ATOMIC-NEXT:    li a2, 1
 ; RV64-NO-ATOMIC-NEXT:    li a3, 5
 ; RV64-NO-ATOMIC-NEXT:    li a4, 5
-; RV64-NO-ATOMIC-NEXT:    call __atomic_compare_exchange_2@plt
+; RV64-NO-ATOMIC-NEXT:    call __atomic_compare_exchange_2
 ; RV64-NO-ATOMIC-NEXT:    lh a0, 6(sp)
 ; RV64-NO-ATOMIC-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-NO-ATOMIC-NEXT:    addi sp, sp, 16
@@ -486,7 +486,7 @@ define i16 @cmpxchg16(ptr %p) nounwind {
 ; RV64-ATOMIC-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-ATOMIC-NEXT:    li a2, 1
 ; RV64-ATOMIC-NEXT:    li a1, 0
-; RV64-ATOMIC-NEXT:    call __sync_val_compare_and_swap_2@plt
+; RV64-ATOMIC-NEXT:    call __sync_val_compare_and_swap_2
 ; RV64-ATOMIC-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV64-ATOMIC-NEXT:    ret
@@ -497,7 +497,7 @@ define i16 @cmpxchg16(ptr %p) nounwind {
 ; RV64-ATOMIC-TRAILING-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-ATOMIC-TRAILING-NEXT:    li a2, 1
 ; RV64-ATOMIC-TRAILING-NEXT:    li a1, 0
-; RV64-ATOMIC-TRAILING-NEXT:    call __sync_val_compare_and_swap_2@plt
+; RV64-ATOMIC-TRAILING-NEXT:    call __sync_val_compare_and_swap_2
 ; RV64-ATOMIC-TRAILING-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-ATOMIC-TRAILING-NEXT:    addi sp, sp, 16
 ; RV64-ATOMIC-TRAILING-NEXT:    ret
@@ -512,7 +512,7 @@ define i32 @load32_unordered(ptr %p) nounwind {
 ; RV32-NO-ATOMIC-NEXT:    addi sp, sp, -16
 ; RV32-NO-ATOMIC-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-NO-ATOMIC-NEXT:    li a1, 0
-; RV32-NO-ATOMIC-NEXT:    call __atomic_load_4@plt
+; RV32-NO-ATOMIC-NEXT:    call __atomic_load_4
 ; RV32-NO-ATOMIC-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-NO-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV32-NO-ATOMIC-NEXT:    ret
@@ -532,7 +532,7 @@ define i32 @load32_unordered(ptr %p) nounwind {
 ; RV64-NO-ATOMIC-NEXT:    addi sp, sp, -16
 ; RV64-NO-ATOMIC-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-NO-ATOMIC-NEXT:    li a1, 0
-; RV64-NO-ATOMIC-NEXT:    call __atomic_load_4@plt
+; RV64-NO-ATOMIC-NEXT:    call __atomic_load_4
 ; RV64-NO-ATOMIC-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-NO-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV64-NO-ATOMIC-NEXT:    ret
@@ -556,7 +556,7 @@ define i32 @load32_monotonic(ptr %p) nounwind {
 ; RV32-NO-ATOMIC-NEXT:    addi sp, sp, -16
 ; RV32-NO-ATOMIC-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-NO-ATOMIC-NEXT:    li a1, 0
-; RV32-NO-ATOMIC-NEXT:    call __atomic_load_4@plt
+; RV32-NO-ATOMIC-NEXT:    call __atomic_load_4
 ; RV32-NO-ATOMIC-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-NO-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV32-NO-ATOMIC-NEXT:    ret
@@ -576,7 +576,7 @@ define i32 @load32_monotonic(ptr %p) nounwind {
 ; RV64-NO-ATOMIC-NEXT:    addi sp, sp, -16
 ; RV64-NO-ATOMIC-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-NO-ATOMIC-NEXT:    li a1, 0
-; RV64-NO-ATOMIC-NEXT:    call __atomic_load_4@plt
+; RV64-NO-ATOMIC-NEXT:    call __atomic_load_4
 ; RV64-NO-ATOMIC-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-NO-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV64-NO-ATOMIC-NEXT:    ret
@@ -600,7 +600,7 @@ define i32 @load32_acquire(ptr %p) nounwind {
 ; RV32-NO-ATOMIC-NEXT:    addi sp, sp, -16
 ; RV32-NO-ATOMIC-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-NO-ATOMIC-NEXT:    li a1, 2
-; RV32-NO-ATOMIC-NEXT:    call __atomic_load_4@plt
+; RV32-NO-ATOMIC-NEXT:    call __atomic_load_4
 ; RV32-NO-ATOMIC-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-NO-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV32-NO-ATOMIC-NEXT:    ret
@@ -622,7 +622,7 @@ define i32 @load32_acquire(ptr %p) nounwind {
 ; RV64-NO-ATOMIC-NEXT:    addi sp, sp, -16
 ; RV64-NO-ATOMIC-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-NO-ATOMIC-NEXT:    li a1, 2
-; RV64-NO-ATOMIC-NEXT:    call __atomic_load_4@plt
+; RV64-NO-ATOMIC-NEXT:    call __atomic_load_4
 ; RV64-NO-ATOMIC-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-NO-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV64-NO-ATOMIC-NEXT:    ret
@@ -648,7 +648,7 @@ define i32 @load32_seq_cst(ptr %p) nounwind {
 ; RV32-NO-ATOMIC-NEXT:    addi sp, sp, -16
 ; RV32-NO-ATOMIC-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-NO-ATOMIC-NEXT:    li a1, 5
-; RV32-NO-ATOMIC-NEXT:    call __atomic_load_4@plt
+; RV32-NO-ATOMIC-NEXT:    call __atomic_load_4
 ; RV32-NO-ATOMIC-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-NO-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV32-NO-ATOMIC-NEXT:    ret
@@ -672,7 +672,7 @@ define i32 @load32_seq_cst(ptr %p) nounwind {
 ; RV64-NO-ATOMIC-NEXT:    addi sp, sp, -16
 ; RV64-NO-ATOMIC-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-NO-ATOMIC-NEXT:    li a1, 5
-; RV64-NO-ATOMIC-NEXT:    call __atomic_load_4@plt
+; RV64-NO-ATOMIC-NEXT:    call __atomic_load_4
 ; RV64-NO-ATOMIC-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-NO-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV64-NO-ATOMIC-NEXT:    ret
@@ -701,7 +701,7 @@ define void @store32_unordered(ptr %p) nounwind {
 ; RV32-NO-ATOMIC-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-NO-ATOMIC-NEXT:    li a1, 0
 ; RV32-NO-ATOMIC-NEXT:    li a2, 0
-; RV32-NO-ATOMIC-NEXT:    call __atomic_store_4@plt
+; RV32-NO-ATOMIC-NEXT:    call __atomic_store_4
 ; RV32-NO-ATOMIC-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-NO-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV32-NO-ATOMIC-NEXT:    ret
@@ -722,7 +722,7 @@ define void @store32_unordered(ptr %p) nounwind {
 ; RV64-NO-ATOMIC-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-NO-ATOMIC-NEXT:    li a1, 0
 ; RV64-NO-ATOMIC-NEXT:    li a2, 0
-; RV64-NO-ATOMIC-NEXT:    call __atomic_store_4@plt
+; RV64-NO-ATOMIC-NEXT:    call __atomic_store_4
 ; RV64-NO-ATOMIC-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-NO-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV64-NO-ATOMIC-NEXT:    ret
@@ -747,7 +747,7 @@ define void @store32_monotonic(ptr %p) nounwind {
 ; RV32-NO-ATOMIC-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-NO-ATOMIC-NEXT:    li a1, 0
 ; RV32-NO-ATOMIC-NEXT:    li a2, 0
-; RV32-NO-ATOMIC-NEXT:    call __atomic_store_4@plt
+; RV32-NO-ATOMIC-NEXT:    call __atomic_store_4
 ; RV32-NO-ATOMIC-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-NO-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV32-NO-ATOMIC-NEXT:    ret
@@ -768,7 +768,7 @@ define void @store32_monotonic(ptr %p) nounwind {
 ; RV64-NO-ATOMIC-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-NO-ATOMIC-NEXT:    li a1, 0
 ; RV64-NO-ATOMIC-NEXT:    li a2, 0
-; RV64-NO-ATOMIC-NEXT:    call __atomic_store_4@plt
+; RV64-NO-ATOMIC-NEXT:    call __atomic_store_4
 ; RV64-NO-ATOMIC-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-NO-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV64-NO-ATOMIC-NEXT:    ret
@@ -793,7 +793,7 @@ define void @store32_release(ptr %p) nounwind {
 ; RV32-NO-ATOMIC-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-NO-ATOMIC-NEXT:    li a2, 3
 ; RV32-NO-ATOMIC-NEXT:    li a1, 0
-; RV32-NO-ATOMIC-NEXT:    call __atomic_store_4@plt
+; RV32-NO-ATOMIC-NEXT:    call __atomic_store_4
 ; RV32-NO-ATOMIC-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-NO-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV32-NO-ATOMIC-NEXT:    ret
@@ -816,7 +816,7 @@ define void @store32_release(ptr %p) nounwind {
 ; RV64-NO-ATOMIC-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-NO-ATOMIC-NEXT:    li a2, 3
 ; RV64-NO-ATOMIC-NEXT:    li a1, 0
-; RV64-NO-ATOMIC-NEXT:    call __atomic_store_4@plt
+; RV64-NO-ATOMIC-NEXT:    call __atomic_store_4
 ; RV64-NO-ATOMIC-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-NO-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV64-NO-ATOMIC-NEXT:    ret
@@ -843,7 +843,7 @@ define void @store32_seq_cst(ptr %p) nounwind {
 ; RV32-NO-ATOMIC-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-NO-ATOMIC-NEXT:    li a2, 5
 ; RV32-NO-ATOMIC-NEXT:    li a1, 0
-; RV32-NO-ATOMIC-NEXT:    call __atomic_store_4@plt
+; RV32-NO-ATOMIC-NEXT:    call __atomic_store_4
 ; RV32-NO-ATOMIC-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-NO-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV32-NO-ATOMIC-NEXT:    ret
@@ -867,7 +867,7 @@ define void @store32_seq_cst(ptr %p) nounwind {
 ; RV64-NO-ATOMIC-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-NO-ATOMIC-NEXT:    li a2, 5
 ; RV64-NO-ATOMIC-NEXT:    li a1, 0
-; RV64-NO-ATOMIC-NEXT:    call __atomic_store_4@plt
+; RV64-NO-ATOMIC-NEXT:    call __atomic_store_4
 ; RV64-NO-ATOMIC-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-NO-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV64-NO-ATOMIC-NEXT:    ret
@@ -895,7 +895,7 @@ define i32 @rmw32_add_monotonic(ptr %p) nounwind {
 ; RV32-NO-ATOMIC-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-NO-ATOMIC-NEXT:    li a1, 1
 ; RV32-NO-ATOMIC-NEXT:    li a2, 0
-; RV32-NO-ATOMIC-NEXT:    call __atomic_fetch_add_4@plt
+; RV32-NO-ATOMIC-NEXT:    call __atomic_fetch_add_4
 ; RV32-NO-ATOMIC-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-NO-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV32-NO-ATOMIC-NEXT:    ret
@@ -905,7 +905,7 @@ define i32 @rmw32_add_monotonic(ptr %p) nounwind {
 ; RV32-ATOMIC-NEXT:    addi sp, sp, -16
 ; RV32-ATOMIC-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-ATOMIC-NEXT:    li a1, 1
-; RV32-ATOMIC-NEXT:    call __sync_fetch_and_add_4@plt
+; RV32-ATOMIC-NEXT:    call __sync_fetch_and_add_4
 ; RV32-ATOMIC-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV32-ATOMIC-NEXT:    ret
@@ -915,7 +915,7 @@ define i32 @rmw32_add_monotonic(ptr %p) nounwind {
 ; RV32-ATOMIC-TRAILING-NEXT:    addi sp, sp, -16
 ; RV32-ATOMIC-TRAILING-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-ATOMIC-TRAILING-NEXT:    li a1, 1
-; RV32-ATOMIC-TRAILING-NEXT:    call __sync_fetch_and_add_4@plt
+; RV32-ATOMIC-TRAILING-NEXT:    call __sync_fetch_and_add_4
 ; RV32-ATOMIC-TRAILING-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-ATOMIC-TRAILING-NEXT:    addi sp, sp, 16
 ; RV32-ATOMIC-TRAILING-NEXT:    ret
@@ -926,7 +926,7 @@ define i32 @rmw32_add_monotonic(ptr %p) nounwind {
 ; RV64-NO-ATOMIC-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-NO-ATOMIC-NEXT:    li a1, 1
 ; RV64-NO-ATOMIC-NEXT:    li a2, 0
-; RV64-NO-ATOMIC-NEXT:    call __atomic_fetch_add_4@plt
+; RV64-NO-ATOMIC-NEXT:    call __atomic_fetch_add_4
 ; RV64-NO-ATOMIC-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-NO-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV64-NO-ATOMIC-NEXT:    ret
@@ -936,7 +936,7 @@ define i32 @rmw32_add_monotonic(ptr %p) nounwind {
 ; RV64-ATOMIC-NEXT:    addi sp, sp, -16
 ; RV64-ATOMIC-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-ATOMIC-NEXT:    li a1, 1
-; RV64-ATOMIC-NEXT:    call __sync_fetch_and_add_4@plt
+; RV64-ATOMIC-NEXT:    call __sync_fetch_and_add_4
 ; RV64-ATOMIC-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV64-ATOMIC-NEXT:    ret
@@ -946,7 +946,7 @@ define i32 @rmw32_add_monotonic(ptr %p) nounwind {
 ; RV64-ATOMIC-TRAILING-NEXT:    addi sp, sp, -16
 ; RV64-ATOMIC-TRAILING-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-ATOMIC-TRAILING-NEXT:    li a1, 1
-; RV64-ATOMIC-TRAILING-NEXT:    call __sync_fetch_and_add_4@plt
+; RV64-ATOMIC-TRAILING-NEXT:    call __sync_fetch_and_add_4
 ; RV64-ATOMIC-TRAILING-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-ATOMIC-TRAILING-NEXT:    addi sp, sp, 16
 ; RV64-ATOMIC-TRAILING-NEXT:    ret
@@ -961,7 +961,7 @@ define i32 @rmw32_add_seq_cst(ptr %p) nounwind {
 ; RV32-NO-ATOMIC-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-NO-ATOMIC-NEXT:    li a1, 1
 ; RV32-NO-ATOMIC-NEXT:    li a2, 5
-; RV32-NO-ATOMIC-NEXT:    call __atomic_fetch_add_4@plt
+; RV32-NO-ATOMIC-NEXT:    call __atomic_fetch_add_4
 ; RV32-NO-ATOMIC-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-NO-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV32-NO-ATOMIC-NEXT:    ret
@@ -971,7 +971,7 @@ define i32 @rmw32_add_seq_cst(ptr %p) nounwind {
 ; RV32-ATOMIC-NEXT:    addi sp, sp, -16
 ; RV32-ATOMIC-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-ATOMIC-NEXT:    li a1, 1
-; RV32-ATOMIC-NEXT:    call __sync_fetch_and_add_4@plt
+; RV32-ATOMIC-NEXT:    call __sync_fetch_and_add_4
 ; RV32-ATOMIC-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV32-ATOMIC-NEXT:    ret
@@ -981,7 +981,7 @@ define i32 @rmw32_add_seq_cst(ptr %p) nounwind {
 ; RV32-ATOMIC-TRAILING-NEXT:    addi sp, sp, -16
 ; RV32-ATOMIC-TRAILING-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-ATOMIC-TRAILING-NEXT:    li a1, 1
-; RV32-ATOMIC-TRAILING-NEXT:    call __sync_fetch_and_add_4@plt
+; RV32-ATOMIC-TRAILING-NEXT:    call __sync_fetch_and_add_4
 ; RV32-ATOMIC-TRAILING-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-ATOMIC-TRAILING-NEXT:    addi sp, sp, 16
 ; RV32-ATOMIC-TRAILING-NEXT:    ret
@@ -992,7 +992,7 @@ define i32 @rmw32_add_seq_cst(ptr %p) nounwind {
 ; RV64-NO-ATOMIC-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-NO-ATOMIC-NEXT:    li a1, 1
 ; RV64-NO-ATOMIC-NEXT:    li a2, 5
-; RV64-NO-ATOMIC-NEXT:    call __atomic_fetch_add_4@plt
+; RV64-NO-ATOMIC-NEXT:    call __atomic_fetch_add_4
 ; RV64-NO-ATOMIC-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-NO-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV64-NO-ATOMIC-NEXT:    ret
@@ -1002,7 +1002,7 @@ define i32 @rmw32_add_seq_cst(ptr %p) nounwind {
 ; RV64-ATOMIC-NEXT:    addi sp, sp, -16
 ; RV64-ATOMIC-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-ATOMIC-NEXT:    li a1, 1
-; RV64-ATOMIC-NEXT:    call __sync_fetch_and_add_4@plt
+; RV64-ATOMIC-NEXT:    call __sync_fetch_and_add_4
 ; RV64-ATOMIC-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV64-ATOMIC-NEXT:    ret
@@ -1012,7 +1012,7 @@ define i32 @rmw32_add_seq_cst(ptr %p) nounwind {
 ; RV64-ATOMIC-TRAILING-NEXT:    addi sp, sp, -16
 ; RV64-ATOMIC-TRAILING-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-ATOMIC-TRAILING-NEXT:    li a1, 1
-; RV64-ATOMIC-TRAILING-NEXT:    call __sync_fetch_and_add_4@plt
+; RV64-ATOMIC-TRAILING-NEXT:    call __sync_fetch_and_add_4
 ; RV64-ATOMIC-TRAILING-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-ATOMIC-TRAILING-NEXT:    addi sp, sp, 16
 ; RV64-ATOMIC-TRAILING-NEXT:    ret
@@ -1027,7 +1027,7 @@ define i32 @rmw32_sub_seq_cst(ptr %p) nounwind {
 ; RV32-NO-ATOMIC-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-NO-ATOMIC-NEXT:    li a1, 1
 ; RV32-NO-ATOMIC-NEXT:    li a2, 5
-; RV32-NO-ATOMIC-NEXT:    call __atomic_fetch_sub_4@plt
+; RV32-NO-ATOMIC-NEXT:    call __atomic_fetch_sub_4
 ; RV32-NO-ATOMIC-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-NO-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV32-NO-ATOMIC-NEXT:    ret
@@ -1037,7 +1037,7 @@ define i32 @rmw32_sub_seq_cst(ptr %p) nounwind {
 ; RV32-ATOMIC-NEXT:    addi sp, sp, -16
 ; RV32-ATOMIC-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-ATOMIC-NEXT:    li a1, 1
-; RV32-ATOMIC-NEXT:    call __sync_fetch_and_sub_4@plt
+; RV32-ATOMIC-NEXT:    call __sync_fetch_and_sub_4
 ; RV32-ATOMIC-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV32-ATOMIC-NEXT:    ret
@@ -1047,7 +1047,7 @@ define i32 @rmw32_sub_seq_cst(ptr %p) nounwind {
 ; RV32-ATOMIC-TRAILING-NEXT:    addi sp, sp, -16
 ; RV32-ATOMIC-TRAILING-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-ATOMIC-TRAILING-NEXT:    li a1, 1
-; RV32-ATOMIC-TRAILING-NEXT:    call __sync_fetch_and_sub_4@plt
+; RV32-ATOMIC-TRAILING-NEXT:    call __sync_fetch_and_sub_4
 ; RV32-ATOMIC-TRAILING-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-ATOMIC-TRAILING-NEXT:    addi sp, sp, 16
 ; RV32-ATOMIC-TRAILING-NEXT:    ret
@@ -1058,7 +1058,7 @@ define i32 @rmw32_sub_seq_cst(ptr %p) nounwind {
 ; RV64-NO-ATOMIC-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-NO-ATOMIC-NEXT:    li a1, 1
 ; RV64-NO-ATOMIC-NEXT:    li a2, 5
-; RV64-NO-ATOMIC-NEXT:    call __atomic_fetch_sub_4@plt
+; RV64-NO-ATOMIC-NEXT:    call __atomic_fetch_sub_4
 ; RV64-NO-ATOMIC-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-NO-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV64-NO-ATOMIC-NEXT:    ret
@@ -1068,7 +1068,7 @@ define i32 @rmw32_sub_seq_cst(ptr %p) nounwind {
 ; RV64-ATOMIC-NEXT:    addi sp, sp, -16
 ; RV64-ATOMIC-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-ATOMIC-NEXT:    li a1, 1
-; RV64-ATOMIC-NEXT:    call __sync_fetch_and_sub_4@plt
+; RV64-ATOMIC-NEXT:    call __sync_fetch_and_sub_4
 ; RV64-ATOMIC-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV64-ATOMIC-NEXT:    ret
@@ -1078,7 +1078,7 @@ define i32 @rmw32_sub_seq_cst(ptr %p) nounwind {
 ; RV64-ATOMIC-TRAILING-NEXT:    addi sp, sp, -16
 ; RV64-ATOMIC-TRAILING-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-ATOMIC-TRAILING-NEXT:    li a1, 1
-; RV64-ATOMIC-TRAILING-NEXT:    call __sync_fetch_and_sub_4@plt
+; RV64-ATOMIC-TRAILING-NEXT:    call __sync_fetch_and_sub_4
 ; RV64-ATOMIC-TRAILING-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-ATOMIC-TRAILING-NEXT:    addi sp, sp, 16
 ; RV64-ATOMIC-TRAILING-NEXT:    ret
@@ -1093,7 +1093,7 @@ define i32 @rmw32_and_seq_cst(ptr %p) nounwind {
 ; RV32-NO-ATOMIC-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-NO-ATOMIC-NEXT:    li a1, 1
 ; RV32-NO-ATOMIC-NEXT:    li a2, 5
-; RV32-NO-ATOMIC-NEXT:    call __atomic_fetch_and_4@plt
+; RV32-NO-ATOMIC-NEXT:    call __atomic_fetch_and_4
 ; RV32-NO-ATOMIC-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-NO-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV32-NO-ATOMIC-NEXT:    ret
@@ -1103,7 +1103,7 @@ define i32 @rmw32_and_seq_cst(ptr %p) nounwind {
 ; RV32-ATOMIC-NEXT:    addi sp, sp, -16
 ; RV32-ATOMIC-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-ATOMIC-NEXT:    li a1, 1
-; RV32-ATOMIC-NEXT:    call __sync_fetch_and_and_4@plt
+; RV32-ATOMIC-NEXT:    call __sync_fetch_and_and_4
 ; RV32-ATOMIC-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV32-ATOMIC-NEXT:    ret
@@ -1113,7 +1113,7 @@ define i32 @rmw32_and_seq_cst(ptr %p) nounwind {
 ; RV32-ATOMIC-TRAILING-NEXT:    addi sp, sp, -16
 ; RV32-ATOMIC-TRAILING-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-ATOMIC-TRAILING-NEXT:    li a1, 1
-; RV32-ATOMIC-TRAILING-NEXT:    call __sync_fetch_and_and_4@plt
+; RV32-ATOMIC-TRAILING-NEXT:    call __sync_fetch_and_and_4
 ; RV32-ATOMIC-TRAILING-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-ATOMIC-TRAILING-NEXT:    addi sp, sp, 16
 ; RV32-ATOMIC-TRAILING-NEXT:    ret
@@ -1124,7 +1124,7 @@ define i32 @rmw32_and_seq_cst(ptr %p) nounwind {
 ; RV64-NO-ATOMIC-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-NO-ATOMIC-NEXT:    li a1, 1
 ; RV64-NO-ATOMIC-NEXT:    li a2, 5
-; RV64-NO-ATOMIC-NEXT:    call __atomic_fetch_and_4@plt
+; RV64-NO-ATOMIC-NEXT:    call __atomic_fetch_and_4
 ; RV64-NO-ATOMIC-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-NO-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV64-NO-ATOMIC-NEXT:    ret
@@ -1134,7 +1134,7 @@ define i32 @rmw32_and_seq_cst(ptr %p) nounwind {
 ; RV64-ATOMIC-NEXT:    addi sp, sp, -16
 ; RV64-ATOMIC-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-ATOMIC-NEXT:    li a1, 1
-; RV64-ATOMIC-NEXT:    call __sync_fetch_and_and_4@plt
+; RV64-ATOMIC-NEXT:    call __sync_fetch_and_and_4
 ; RV64-ATOMIC-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV64-ATOMIC-NEXT:    ret
@@ -1144,7 +1144,7 @@ define i32 @rmw32_and_seq_cst(ptr %p) nounwind {
 ; RV64-ATOMIC-TRAILING-NEXT:    addi sp, sp, -16
 ; RV64-ATOMIC-TRAILING-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-ATOMIC-TRAILING-NEXT:    li a1, 1
-; RV64-ATOMIC-TRAILING-NEXT:    call __sync_fetch_and_and_4@plt
+; RV64-ATOMIC-TRAILING-NEXT:    call __sync_fetch_and_and_4
 ; RV64-ATOMIC-TRAILING-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-ATOMIC-TRAILING-NEXT:    addi sp, sp, 16
 ; RV64-ATOMIC-TRAILING-NEXT:    ret
@@ -1159,7 +1159,7 @@ define i32 @rmw32_nand_seq_cst(ptr %p) nounwind {
 ; RV32-NO-ATOMIC-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-NO-ATOMIC-NEXT:    li a1, 1
 ; RV32-NO-ATOMIC-NEXT:    li a2, 5
-; RV32-NO-ATOMIC-NEXT:    call __atomic_fetch_nand_4@plt
+; RV32-NO-ATOMIC-NEXT:    call __atomic_fetch_nand_4
 ; RV32-NO-ATOMIC-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-NO-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV32-NO-ATOMIC-NEXT:    ret
@@ -1169,7 +1169,7 @@ define i32 @rmw32_nand_seq_cst(ptr %p) nounwind {
 ; RV32-ATOMIC-NEXT:    addi sp, sp, -16
 ; RV32-ATOMIC-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-ATOMIC-NEXT:    li a1, 1
-; RV32-ATOMIC-NEXT:    call __sync_fetch_and_nand_4@plt
+; RV32-ATOMIC-NEXT:    call __sync_fetch_and_nand_4
 ; RV32-ATOMIC-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV32-ATOMIC-NEXT:    ret
@@ -1179,7 +1179,7 @@ define i32 @rmw32_nand_seq_cst(ptr %p) nounwind {
 ; RV32-ATOMIC-TRAILING-NEXT:    addi sp, sp, -16
 ; RV32-ATOMIC-TRAILING-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-ATOMIC-TRAILING-NEXT:    li a1, 1
-; RV32-ATOMIC-TRAILING-NEXT:    call __sync_fetch_and_nand_4@plt
+; RV32-ATOMIC-TRAILING-NEXT:    call __sync_fetch_and_nand_4
 ; RV32-ATOMIC-TRAILING-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-ATOMIC-TRAILING-NEXT:    addi sp, sp, 16
 ; RV32-ATOMIC-TRAILING-NEXT:    ret
@@ -1190,7 +1190,7 @@ define i32 @rmw32_nand_seq_cst(ptr %p) nounwind {
 ; RV64-NO-ATOMIC-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-NO-ATOMIC-NEXT:    li a1, 1
 ; RV64-NO-ATOMIC-NEXT:    li a2, 5
-; RV64-NO-ATOMIC-NEXT:    call __atomic_fetch_nand_4@plt
+; RV64-NO-ATOMIC-NEXT:    call __atomic_fetch_nand_4
 ; RV64-NO-ATOMIC-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-NO-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV64-NO-ATOMIC-NEXT:    ret
@@ -1200,7 +1200,7 @@ define i32 @rmw32_nand_seq_cst(ptr %p) nounwind {
 ; RV64-ATOMIC-NEXT:    addi sp, sp, -16
 ; RV64-ATOMIC-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-ATOMIC-NEXT:    li a1, 1
-; RV64-ATOMIC-NEXT:    call __sync_fetch_and_nand_4@plt
+; RV64-ATOMIC-NEXT:    call __sync_fetch_and_nand_4
 ; RV64-ATOMIC-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV64-ATOMIC-NEXT:    ret
@@ -1210,7 +1210,7 @@ define i32 @rmw32_nand_seq_cst(ptr %p) nounwind {
 ; RV64-ATOMIC-TRAILING-NEXT:    addi sp, sp, -16
 ; RV64-ATOMIC-TRAILING-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-ATOMIC-TRAILING-NEXT:    li a1, 1
-; RV64-ATOMIC-TRAILING-NEXT:    call __sync_fetch_and_nand_4@plt
+; RV64-ATOMIC-TRAILING-NEXT:    call __sync_fetch_and_nand_4
 ; RV64-ATOMIC-TRAILING-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-ATOMIC-TRAILING-NEXT:    addi sp, sp, 16
 ; RV64-ATOMIC-TRAILING-NEXT:    ret
@@ -1225,7 +1225,7 @@ define i32 @rmw32_or_seq_cst(ptr %p) nounwind {
 ; RV32-NO-ATOMIC-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-NO-ATOMIC-NEXT:    li a1, 1
 ; RV32-NO-ATOMIC-NEXT:    li a2, 5
-; RV32-NO-ATOMIC-NEXT:    call __atomic_fetch_or_4@plt
+; RV32-NO-ATOMIC-NEXT:    call __atomic_fetch_or_4
 ; RV32-NO-ATOMIC-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-NO-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV32-NO-ATOMIC-NEXT:    ret
@@ -1235,7 +1235,7 @@ define i32 @rmw32_or_seq_cst(ptr %p) nounwind {
 ; RV32-ATOMIC-NEXT:    addi sp, sp, -16
 ; RV32-ATOMIC-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-ATOMIC-NEXT:    li a1, 1
-; RV32-ATOMIC-NEXT:    call __sync_fetch_and_or_4@plt
+; RV32-ATOMIC-NEXT:    call __sync_fetch_and_or_4
 ; RV32-ATOMIC-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV32-ATOMIC-NEXT:    ret
@@ -1245,7 +1245,7 @@ define i32 @rmw32_or_seq_cst(ptr %p) nounwind {
 ; RV32-ATOMIC-TRAILING-NEXT:    addi sp, sp, -16
 ; RV32-ATOMIC-TRAILING-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-ATOMIC-TRAILING-NEXT:    li a1, 1
-; RV32-ATOMIC-TRAILING-NEXT:    call __sync_fetch_and_or_4@plt
+; RV32-ATOMIC-TRAILING-NEXT:    call __sync_fetch_and_or_4
 ; RV32-ATOMIC-TRAILING-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-ATOMIC-TRAILING-NEXT:    addi sp, sp, 16
 ; RV32-ATOMIC-TRAILING-NEXT:    ret
@@ -1256,7 +1256,7 @@ define i32 @rmw32_or_seq_cst(ptr %p) nounwind {
 ; RV64-NO-ATOMIC-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-NO-ATOMIC-NEXT:    li a1, 1
 ; RV64-NO-ATOMIC-NEXT:    li a2, 5
-; RV64-NO-ATOMIC-NEXT:    call __atomic_fetch_or_4@plt
+; RV64-NO-ATOMIC-NEXT:    call __atomic_fetch_or_4
 ; RV64-NO-ATOMIC-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-NO-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV64-NO-ATOMIC-NEXT:    ret
@@ -1266,7 +1266,7 @@ define i32 @rmw32_or_seq_cst(ptr %p) nounwind {
 ; RV64-ATOMIC-NEXT:    addi sp, sp, -16
 ; RV64-ATOMIC-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-ATOMIC-NEXT:    li a1, 1
-; RV64-ATOMIC-NEXT:    call __sync_fetch_and_or_4@plt
+; RV64-ATOMIC-NEXT:    call __sync_fetch_and_or_4
 ; RV64-ATOMIC-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV64-ATOMIC-NEXT:    ret
@@ -1276,7 +1276,7 @@ define i32 @rmw32_or_seq_cst(ptr %p) nounwind {
 ; RV64-ATOMIC-TRAILING-NEXT:    addi sp, sp, -16
 ; RV64-ATOMIC-TRAILING-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-ATOMIC-TRAILING-NEXT:    li a1, 1
-; RV64-ATOMIC-TRAILING-NEXT:    call __sync_fetch_and_or_4@plt
+; RV64-ATOMIC-TRAILING-NEXT:    call __sync_fetch_and_or_4
 ; RV64-ATOMIC-TRAILING-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-ATOMIC-TRAILING-NEXT:    addi sp, sp, 16
 ; RV64-ATOMIC-TRAILING-NEXT:    ret
@@ -1291,7 +1291,7 @@ define i32 @rmw32_xor_seq_cst(ptr %p) nounwind {
 ; RV32-NO-ATOMIC-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-NO-ATOMIC-NEXT:    li a1, 1
 ; RV32-NO-ATOMIC-NEXT:    li a2, 5
-; RV32-NO-ATOMIC-NEXT:    call __atomic_fetch_xor_4@plt
+; RV32-NO-ATOMIC-NEXT:    call __atomic_fetch_xor_4
 ; RV32-NO-ATOMIC-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-NO-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV32-NO-ATOMIC-NEXT:    ret
@@ -1301,7 +1301,7 @@ define i32 @rmw32_xor_seq_cst(ptr %p) nounwind {
 ; RV32-ATOMIC-NEXT:    addi sp, sp, -16
 ; RV32-ATOMIC-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-ATOMIC-NEXT:    li a1, 1
-; RV32-ATOMIC-NEXT:    call __sync_fetch_and_xor_4@plt
+; RV32-ATOMIC-NEXT:    call __sync_fetch_and_xor_4
 ; RV32-ATOMIC-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV32-ATOMIC-NEXT:    ret
@@ -1311,7 +1311,7 @@ define i32 @rmw32_xor_seq_cst(ptr %p) nounwind {
 ; RV32-ATOMIC-TRAILING-NEXT:    addi sp, sp, -16
 ; RV32-ATOMIC-TRAILING-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-ATOMIC-TRAILING-NEXT:    li a1, 1
-; RV32-ATOMIC-TRAILING-NEXT:    call __sync_fetch_and_xor_4@plt
+; RV32-ATOMIC-TRAILING-NEXT:    call __sync_fetch_and_xor_4
 ; RV32-ATOMIC-TRAILING-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-ATOMIC-TRAILING-NEXT:    addi sp, sp, 16
 ; RV32-ATOMIC-TRAILING-NEXT:    ret
@@ -1322,7 +1322,7 @@ define i32 @rmw32_xor_seq_cst(ptr %p) nounwind {
 ; RV64-NO-ATOMIC-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-NO-ATOMIC-NEXT:    li a1, 1
 ; RV64-NO-ATOMIC-NEXT:    li a2, 5
-; RV64-NO-ATOMIC-NEXT:    call __atomic_fetch_xor_4@plt
+; RV64-NO-ATOMIC-NEXT:    call __atomic_fetch_xor_4
 ; RV64-NO-ATOMIC-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-NO-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV64-NO-ATOMIC-NEXT:    ret
@@ -1332,7 +1332,7 @@ define i32 @rmw32_xor_seq_cst(ptr %p) nounwind {
 ; RV64-ATOMIC-NEXT:    addi sp, sp, -16
 ; RV64-ATOMIC-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-ATOMIC-NEXT:    li a1, 1
-; RV64-ATOMIC-NEXT:    call __sync_fetch_and_xor_4@plt
+; RV64-ATOMIC-NEXT:    call __sync_fetch_and_xor_4
 ; RV64-ATOMIC-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV64-ATOMIC-NEXT:    ret
@@ -1342,7 +1342,7 @@ define i32 @rmw32_xor_seq_cst(ptr %p) nounwind {
 ; RV64-ATOMIC-TRAILING-NEXT:    addi sp, sp, -16
 ; RV64-ATOMIC-TRAILING-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-ATOMIC-TRAILING-NEXT:    li a1, 1
-; RV64-ATOMIC-TRAILING-NEXT:    call __sync_fetch_and_xor_4@plt
+; RV64-ATOMIC-TRAILING-NEXT:    call __sync_fetch_and_xor_4
 ; RV64-ATOMIC-TRAILING-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-ATOMIC-TRAILING-NEXT:    addi sp, sp, 16
 ; RV64-ATOMIC-TRAILING-NEXT:    ret
@@ -1366,7 +1366,7 @@ define i32 @rmw32_max_seq_cst(ptr %p) nounwind {
 ; RV32-NO-ATOMIC-NEXT:    li a3, 5
 ; RV32-NO-ATOMIC-NEXT:    li a4, 5
 ; RV32-NO-ATOMIC-NEXT:    mv a0, s0
-; RV32-NO-ATOMIC-NEXT:    call __atomic_compare_exchange_4@plt
+; RV32-NO-ATOMIC-NEXT:    call __atomic_compare_exchange_4
 ; RV32-NO-ATOMIC-NEXT:    lw a1, 4(sp)
 ; RV32-NO-ATOMIC-NEXT:    bnez a0, .LBB23_4
 ; RV32-NO-ATOMIC-NEXT:  .LBB23_2: # %atomicrmw.start
@@ -1389,7 +1389,7 @@ define i32 @rmw32_max_seq_cst(ptr %p) nounwind {
 ; RV32-ATOMIC-NEXT:    addi sp, sp, -16
 ; RV32-ATOMIC-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-ATOMIC-NEXT:    li a1, 1
-; RV32-ATOMIC-NEXT:    call __sync_fetch_and_max_4@plt
+; RV32-ATOMIC-NEXT:    call __sync_fetch_and_max_4
 ; RV32-ATOMIC-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV32-ATOMIC-NEXT:    ret
@@ -1399,7 +1399,7 @@ define i32 @rmw32_max_seq_cst(ptr %p) nounwind {
 ; RV32-ATOMIC-TRAILING-NEXT:    addi sp, sp, -16
 ; RV32-ATOMIC-TRAILING-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-ATOMIC-TRAILING-NEXT:    li a1, 1
-; RV32-ATOMIC-TRAILING-NEXT:    call __sync_fetch_and_max_4@plt
+; RV32-ATOMIC-TRAILING-NEXT:    call __sync_fetch_and_max_4
 ; RV32-ATOMIC-TRAILING-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-ATOMIC-TRAILING-NEXT:    addi sp, sp, 16
 ; RV32-ATOMIC-TRAILING-NEXT:    ret
@@ -1419,7 +1419,7 @@ define i32 @rmw32_max_seq_cst(ptr %p) nounwind {
 ; RV64-NO-ATOMIC-NEXT:    li a3, 5
 ; RV64-NO-ATOMIC-NEXT:    li a4, 5
 ; RV64-NO-ATOMIC-NEXT:    mv a0, s0
-; RV64-NO-ATOMIC-NEXT:    call __atomic_compare_exchange_4@plt
+; RV64-NO-ATOMIC-NEXT:    call __atomic_compare_exchange_4
 ; RV64-NO-ATOMIC-NEXT:    lw a1, 12(sp)
 ; RV64-NO-ATOMIC-NEXT:    bnez a0, .LBB23_4
 ; RV64-NO-ATOMIC-NEXT:  .LBB23_2: # %atomicrmw.start
@@ -1443,7 +1443,7 @@ define i32 @rmw32_max_seq_cst(ptr %p) nounwind {
 ; RV64-ATOMIC-NEXT:    addi sp, sp, -16
 ; RV64-ATOMIC-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-ATOMIC-NEXT:    li a1, 1
-; RV64-ATOMIC-NEXT:    call __sync_fetch_and_max_4@plt
+; RV64-ATOMIC-NEXT:    call __sync_fetch_and_max_4
 ; RV64-ATOMIC-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV64-ATOMIC-NEXT:    ret
@@ -1453,7 +1453,7 @@ define i32 @rmw32_max_seq_cst(ptr %p) nounwind {
 ; RV64-ATOMIC-TRAILING-NEXT:    addi sp, sp, -16
 ; RV64-ATOMIC-TRAILING-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-ATOMIC-TRAILING-NEXT:    li a1, 1
-; RV64-ATOMIC-TRAILING-NEXT:    call __sync_fetch_and_max_4@plt
+; RV64-ATOMIC-TRAILING-NEXT:    call __sync_fetch_and_max_4
 ; RV64-ATOMIC-TRAILING-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-ATOMIC-TRAILING-NEXT:    addi sp, sp, 16
 ; RV64-ATOMIC-TRAILING-NEXT:    ret
@@ -1479,7 +1479,7 @@ define i32 @rmw32_min_seq_cst(ptr %p) nounwind {
 ; RV32-NO-ATOMIC-NEXT:    li a3, 5
 ; RV32-NO-ATOMIC-NEXT:    li a4, 5
 ; RV32-NO-ATOMIC-NEXT:    mv a0, s0
-; RV32-NO-ATOMIC-NEXT:    call __atomic_compare_exchange_4@plt
+; RV32-NO-ATOMIC-NEXT:    call __atomic_compare_exchange_4
 ; RV32-NO-ATOMIC-NEXT:    lw a1, 0(sp)
 ; RV32-NO-ATOMIC-NEXT:    bnez a0, .LBB24_4
 ; RV32-NO-ATOMIC-NEXT:  .LBB24_2: # %atomicrmw.start
@@ -1503,7 +1503,7 @@ define i32 @rmw32_min_seq_cst(ptr %p) nounwind {
 ; RV32-ATOMIC-NEXT:    addi sp, sp, -16
 ; RV32-ATOMIC-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-ATOMIC-NEXT:    li a1, 1
-; RV32-ATOMIC-NEXT:    call __sync_fetch_and_min_4@plt
+; RV32-ATOMIC-NEXT:    call __sync_fetch_and_min_4
 ; RV32-ATOMIC-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV32-ATOMIC-NEXT:    ret
@@ -1513,7 +1513,7 @@ define i32 @rmw32_min_seq_cst(ptr %p) nounwind {
 ; RV32-ATOMIC-TRAILING-NEXT:    addi sp, sp, -16
 ; RV32-ATOMIC-TRAILING-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-ATOMIC-TRAILING-NEXT:    li a1, 1
-; RV32-ATOMIC-TRAILING-NEXT:    call __sync_fetch_and_min_4@plt
+; RV32-ATOMIC-TRAILING-NEXT:    call __sync_fetch_and_min_4
 ; RV32-ATOMIC-TRAILING-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-ATOMIC-TRAILING-NEXT:    addi sp, sp, 16
 ; RV32-ATOMIC-TRAILING-NEXT:    ret
@@ -1535,7 +1535,7 @@ define i32 @rmw32_min_seq_cst(ptr %p) nounwind {
 ; RV64-NO-ATOMIC-NEXT:    li a3, 5
 ; RV64-NO-ATOMIC-NEXT:    li a4, 5
 ; RV64-NO-ATOMIC-NEXT:    mv a0, s0
-; RV64-NO-ATOMIC-NEXT:    call __atomic_compare_exchange_4@plt
+; RV64-NO-ATOMIC-NEXT:    call __atomic_compare_exchange_4
 ; RV64-NO-ATOMIC-NEXT:    lw a1, 4(sp)
 ; RV64-NO-ATOMIC-NEXT:    bnez a0, .LBB24_4
 ; RV64-NO-ATOMIC-NEXT:  .LBB24_2: # %atomicrmw.start
@@ -1559,7 +1559,7 @@ define i32 @rmw32_min_seq_cst(ptr %p) nounwind {
 ; RV64-ATOMIC-NEXT:    addi sp, sp, -16
 ; RV64-ATOMIC-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-ATOMIC-NEXT:    li a1, 1
-; RV64-ATOMIC-NEXT:    call __sync_fetch_and_min_4@plt
+; RV64-ATOMIC-NEXT:    call __sync_fetch_and_min_4
 ; RV64-ATOMIC-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV64-ATOMIC-NEXT:    ret
@@ -1569,7 +1569,7 @@ define i32 @rmw32_min_seq_cst(ptr %p) nounwind {
 ; RV64-ATOMIC-TRAILING-NEXT:    addi sp, sp, -16
 ; RV64-ATOMIC-TRAILING-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-ATOMIC-TRAILING-NEXT:    li a1, 1
-; RV64-ATOMIC-TRAILING-NEXT:    call __sync_fetch_and_min_4@plt
+; RV64-ATOMIC-TRAILING-NEXT:    call __sync_fetch_and_min_4
 ; RV64-ATOMIC-TRAILING-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-ATOMIC-TRAILING-NEXT:    addi sp, sp, 16
 ; RV64-ATOMIC-TRAILING-NEXT:    ret
@@ -1594,7 +1594,7 @@ define i32 @rmw32_umax_seq_cst(ptr %p) nounwind {
 ; RV32-NO-ATOMIC-NEXT:    li a3, 5
 ; RV32-NO-ATOMIC-NEXT:    li a4, 5
 ; RV32-NO-ATOMIC-NEXT:    mv a0, s0
-; RV32-NO-ATOMIC-NEXT:    call __atomic_compare_exchange_4@plt
+; RV32-NO-ATOMIC-NEXT:    call __atomic_compare_exchange_4
 ; RV32-NO-ATOMIC-NEXT:    lw a1, 4(sp)
 ; RV32-NO-ATOMIC-NEXT:    beqz a0, .LBB25_1
 ; RV32-NO-ATOMIC-NEXT:  # %bb.2: # %atomicrmw.end
@@ -1609,7 +1609,7 @@ define i32 @rmw32_umax_seq_cst(ptr %p) nounwind {
 ; RV32-ATOMIC-NEXT:    addi sp, sp, -16
 ; RV32-ATOMIC-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-ATOMIC-NEXT:    li a1, 1
-; RV32-ATOMIC-NEXT:    call __sync_fetch_and_umax_4@plt
+; RV32-ATOMIC-NEXT:    call __sync_fetch_and_umax_4
 ; RV32-ATOMIC-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV32-ATOMIC-NEXT:    ret
@@ -1619,7 +1619,7 @@ define i32 @rmw32_umax_seq_cst(ptr %p) nounwind {
 ; RV32-ATOMIC-TRAILING-NEXT:    addi sp, sp, -16
 ; RV32-ATOMIC-TRAILING-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-ATOMIC-TRAILING-NEXT:    li a1, 1
-; RV32-ATOMIC-TRAILING-NEXT:    call __sync_fetch_and_umax_4@plt
+; RV32-ATOMIC-TRAILING-NEXT:    call __sync_fetch_and_umax_4
 ; RV32-ATOMIC-TRAILING-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-ATOMIC-TRAILING-NEXT:    addi sp, sp, 16
 ; RV32-ATOMIC-TRAILING-NEXT:    ret
@@ -1639,7 +1639,7 @@ define i32 @rmw32_umax_seq_cst(ptr %p) nounwind {
 ; RV64-NO-ATOMIC-NEXT:    li a3, 5
 ; RV64-NO-ATOMIC-NEXT:    li a4, 5
 ; RV64-NO-ATOMIC-NEXT:    mv a0, s0
-; RV64-NO-ATOMIC-NEXT:    call __atomic_compare_exchange_4@plt
+; RV64-NO-ATOMIC-NEXT:    call __atomic_compare_exchange_4
 ; RV64-NO-ATOMIC-NEXT:    lw a1, 12(sp)
 ; RV64-NO-ATOMIC-NEXT:    bnez a0, .LBB25_4
 ; RV64-NO-ATOMIC-NEXT:  .LBB25_2: # %atomicrmw.start
@@ -1663,7 +1663,7 @@ define i32 @rmw32_umax_seq_cst(ptr %p) nounwind {
 ; RV64-ATOMIC-NEXT:    addi sp, sp, -16
 ; RV64-ATOMIC-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-ATOMIC-NEXT:    li a1, 1
-; RV64-ATOMIC-NEXT:    call __sync_fetch_and_umax_4@plt
+; RV64-ATOMIC-NEXT:    call __sync_fetch_and_umax_4
 ; RV64-ATOMIC-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV64-ATOMIC-NEXT:    ret
@@ -1673,7 +1673,7 @@ define i32 @rmw32_umax_seq_cst(ptr %p) nounwind {
 ; RV64-ATOMIC-TRAILING-NEXT:    addi sp, sp, -16
 ; RV64-ATOMIC-TRAILING-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-ATOMIC-TRAILING-NEXT:    li a1, 1
-; RV64-ATOMIC-TRAILING-NEXT:    call __sync_fetch_and_umax_4@plt
+; RV64-ATOMIC-TRAILING-NEXT:    call __sync_fetch_and_umax_4
 ; RV64-ATOMIC-TRAILING-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-ATOMIC-TRAILING-NEXT:    addi sp, sp, 16
 ; RV64-ATOMIC-TRAILING-NEXT:    ret
@@ -1699,7 +1699,7 @@ define i32 @rmw32_umin_seq_cst(ptr %p) nounwind {
 ; RV32-NO-ATOMIC-NEXT:    li a3, 5
 ; RV32-NO-ATOMIC-NEXT:    li a4, 5
 ; RV32-NO-ATOMIC-NEXT:    mv a0, s0
-; RV32-NO-ATOMIC-NEXT:    call __atomic_compare_exchange_4@plt
+; RV32-NO-ATOMIC-NEXT:    call __atomic_compare_exchange_4
 ; RV32-NO-ATOMIC-NEXT:    lw a1, 0(sp)
 ; RV32-NO-ATOMIC-NEXT:    bnez a0, .LBB26_4
 ; RV32-NO-ATOMIC-NEXT:  .LBB26_2: # %atomicrmw.start
@@ -1723,7 +1723,7 @@ define i32 @rmw32_umin_seq_cst(ptr %p) nounwind {
 ; RV32-ATOMIC-NEXT:    addi sp, sp, -16
 ; RV32-ATOMIC-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-ATOMIC-NEXT:    li a1, 1
-; RV32-ATOMIC-NEXT:    call __sync_fetch_and_umin_4@plt
+; RV32-ATOMIC-NEXT:    call __sync_fetch_and_umin_4
 ; RV32-ATOMIC-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV32-ATOMIC-NEXT:    ret
@@ -1733,7 +1733,7 @@ define i32 @rmw32_umin_seq_cst(ptr %p) nounwind {
 ; RV32-ATOMIC-TRAILING-NEXT:    addi sp, sp, -16
 ; RV32-ATOMIC-TRAILING-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-ATOMIC-TRAILING-NEXT:    li a1, 1
-; RV32-ATOMIC-TRAILING-NEXT:    call __sync_fetch_and_umin_4@plt
+; RV32-ATOMIC-TRAILING-NEXT:    call __sync_fetch_and_umin_4
 ; RV32-ATOMIC-TRAILING-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-ATOMIC-TRAILING-NEXT:    addi sp, sp, 16
 ; RV32-ATOMIC-TRAILING-NEXT:    ret
@@ -1755,7 +1755,7 @@ define i32 @rmw32_umin_seq_cst(ptr %p) nounwind {
 ; RV64-NO-ATOMIC-NEXT:    li a3, 5
 ; RV64-NO-ATOMIC-NEXT:    li a4, 5
 ; RV64-NO-ATOMIC-NEXT:    mv a0, s0
-; RV64-NO-ATOMIC-NEXT:    call __atomic_compare_exchange_4@plt
+; RV64-NO-ATOMIC-NEXT:    call __atomic_compare_exchange_4
 ; RV64-NO-ATOMIC-NEXT:    lw a1, 4(sp)
 ; RV64-NO-ATOMIC-NEXT:    bnez a0, .LBB26_4
 ; RV64-NO-ATOMIC-NEXT:  .LBB26_2: # %atomicrmw.start
@@ -1779,7 +1779,7 @@ define i32 @rmw32_umin_seq_cst(ptr %p) nounwind {
 ; RV64-ATOMIC-NEXT:    addi sp, sp, -16
 ; RV64-ATOMIC-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-ATOMIC-NEXT:    li a1, 1
-; RV64-ATOMIC-NEXT:    call __sync_fetch_and_umin_4@plt
+; RV64-ATOMIC-NEXT:    call __sync_fetch_and_umin_4
 ; RV64-ATOMIC-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV64-ATOMIC-NEXT:    ret
@@ -1789,7 +1789,7 @@ define i32 @rmw32_umin_seq_cst(ptr %p) nounwind {
 ; RV64-ATOMIC-TRAILING-NEXT:    addi sp, sp, -16
 ; RV64-ATOMIC-TRAILING-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-ATOMIC-TRAILING-NEXT:    li a1, 1
-; RV64-ATOMIC-TRAILING-NEXT:    call __sync_fetch_and_umin_4@plt
+; RV64-ATOMIC-TRAILING-NEXT:    call __sync_fetch_and_umin_4
 ; RV64-ATOMIC-TRAILING-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-ATOMIC-TRAILING-NEXT:    addi sp, sp, 16
 ; RV64-ATOMIC-TRAILING-NEXT:    ret
@@ -1804,7 +1804,7 @@ define i32 @rmw32_xchg_seq_cst(ptr %p) nounwind {
 ; RV32-NO-ATOMIC-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-NO-ATOMIC-NEXT:    li a1, 1
 ; RV32-NO-ATOMIC-NEXT:    li a2, 5
-; RV32-NO-ATOMIC-NEXT:    call __atomic_exchange_4@plt
+; RV32-NO-ATOMIC-NEXT:    call __atomic_exchange_4
 ; RV32-NO-ATOMIC-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-NO-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV32-NO-ATOMIC-NEXT:    ret
@@ -1814,7 +1814,7 @@ define i32 @rmw32_xchg_seq_cst(ptr %p) nounwind {
 ; RV32-ATOMIC-NEXT:    addi sp, sp, -16
 ; RV32-ATOMIC-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-ATOMIC-NEXT:    li a1, 1
-; RV32-ATOMIC-NEXT:    call __sync_lock_test_and_set_4@plt
+; RV32-ATOMIC-NEXT:    call __sync_lock_test_and_set_4
 ; RV32-ATOMIC-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV32-ATOMIC-NEXT:    ret
@@ -1824,7 +1824,7 @@ define i32 @rmw32_xchg_seq_cst(ptr %p) nounwind {
 ; RV32-ATOMIC-TRAILING-NEXT:    addi sp, sp, -16
 ; RV32-ATOMIC-TRAILING-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-ATOMIC-TRAILING-NEXT:    li a1, 1
-; RV32-ATOMIC-TRAILING-NEXT:    call __sync_lock_test_and_set_4@plt
+; RV32-ATOMIC-TRAILING-NEXT:    call __sync_lock_test_and_set_4
 ; RV32-ATOMIC-TRAILING-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-ATOMIC-TRAILING-NEXT:    addi sp, sp, 16
 ; RV32-ATOMIC-TRAILING-NEXT:    ret
@@ -1835,7 +1835,7 @@ define i32 @rmw32_xchg_seq_cst(ptr %p) nounwind {
 ; RV64-NO-ATOMIC-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-NO-ATOMIC-NEXT:    li a1, 1
 ; RV64-NO-ATOMIC-NEXT:    li a2, 5
-; RV64-NO-ATOMIC-NEXT:    call __atomic_exchange_4@plt
+; RV64-NO-ATOMIC-NEXT:    call __atomic_exchange_4
 ; RV64-NO-ATOMIC-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-NO-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV64-NO-ATOMIC-NEXT:    ret
@@ -1845,7 +1845,7 @@ define i32 @rmw32_xchg_seq_cst(ptr %p) nounwind {
 ; RV64-ATOMIC-NEXT:    addi sp, sp, -16
 ; RV64-ATOMIC-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-ATOMIC-NEXT:    li a1, 1
-; RV64-ATOMIC-NEXT:    call __sync_lock_test_and_set_4@plt
+; RV64-ATOMIC-NEXT:    call __sync_lock_test_and_set_4
 ; RV64-ATOMIC-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV64-ATOMIC-NEXT:    ret
@@ -1855,7 +1855,7 @@ define i32 @rmw32_xchg_seq_cst(ptr %p) nounwind {
 ; RV64-ATOMIC-TRAILING-NEXT:    addi sp, sp, -16
 ; RV64-ATOMIC-TRAILING-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-ATOMIC-TRAILING-NEXT:    li a1, 1
-; RV64-ATOMIC-TRAILING-NEXT:    call __sync_lock_test_and_set_4@plt
+; RV64-ATOMIC-TRAILING-NEXT:    call __sync_lock_test_and_set_4
 ; RV64-ATOMIC-TRAILING-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-ATOMIC-TRAILING-NEXT:    addi sp, sp, 16
 ; RV64-ATOMIC-TRAILING-NEXT:    ret
@@ -1876,14 +1876,14 @@ define float @rmw32_fadd_seq_cst(ptr %p) nounwind {
 ; RV32-NO-ATOMIC-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32-NO-ATOMIC-NEXT:    lui a1, 260096
 ; RV32-NO-ATOMIC-NEXT:    mv a0, s1
-; RV32-NO-ATOMIC-NEXT:    call __addsf3@plt
+; RV32-NO-ATOMIC-NEXT:    call __addsf3
 ; RV32-NO-ATOMIC-NEXT:    mv a2, a0
 ; RV32-NO-ATOMIC-NEXT:    sw s1, 0(sp)
 ; RV32-NO-ATOMIC-NEXT:    mv a1, sp
 ; RV32-NO-ATOMIC-NEXT:    li a3, 5
 ; RV32-NO-ATOMIC-NEXT:    li a4, 5
 ; RV32-NO-ATOMIC-NEXT:    mv a0, s0
-; RV32-NO-ATOMIC-NEXT:    call __atomic_compare_exchange_4@plt
+; RV32-NO-ATOMIC-NEXT:    call __atomic_compare_exchange_4
 ; RV32-NO-ATOMIC-NEXT:    lw s1, 0(sp)
 ; RV32-NO-ATOMIC-NEXT:    beqz a0, .LBB28_1
 ; RV32-NO-ATOMIC-NEXT:  # %bb.2: # %atomicrmw.end
@@ -1906,11 +1906,11 @@ define float @rmw32_fadd_seq_cst(ptr %p) nounwind {
 ; RV32-ATOMIC-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32-ATOMIC-NEXT:    mv s1, a0
 ; RV32-ATOMIC-NEXT:    lui a1, 260096
-; RV32-ATOMIC-NEXT:    call __addsf3@plt
+; RV32-ATOMIC-NEXT:    call __addsf3
 ; RV32-ATOMIC-NEXT:    mv a2, a0
 ; RV32-ATOMIC-NEXT:    mv a0, s0
 ; RV32-ATOMIC-NEXT:    mv a1, s1
-; RV32-ATOMIC-NEXT:    call __sync_val_compare_and_swap_4@plt
+; RV32-ATOMIC-NEXT:    call __sync_val_compare_and_swap_4
 ; RV32-ATOMIC-NEXT:    bne a0, s1, .LBB28_1
 ; RV32-ATOMIC-NEXT:  # %bb.2: # %atomicrmw.end
 ; RV32-ATOMIC-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -1931,11 +1931,11 @@ define float @rmw32_fadd_seq_cst(ptr %p) nounwind {
 ; RV32-ATOMIC-TRAILING-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32-ATOMIC-TRAILING-NEXT:    mv s1, a0
 ; RV32-ATOMIC-TRAILING-NEXT:    lui a1, 260096
-; RV32-ATOMIC-TRAILING-NEXT:    call __addsf3@plt
+; RV32-ATOMIC-TRAILING-NEXT:    call __addsf3
 ; RV32-ATOMIC-TRAILING-NEXT:    mv a2, a0
 ; RV32-ATOMIC-TRAILING-NEXT:    mv a0, s0
 ; RV32-ATOMIC-TRAILING-NEXT:    mv a1, s1
-; RV32-ATOMIC-TRAILING-NEXT:    call __sync_val_compare_and_swap_4@plt
+; RV32-ATOMIC-TRAILING-NEXT:    call __sync_val_compare_and_swap_4
 ; RV32-ATOMIC-TRAILING-NEXT:    bne a0, s1, .LBB28_1
 ; RV32-ATOMIC-TRAILING-NEXT:  # %bb.2: # %atomicrmw.end
 ; RV32-ATOMIC-TRAILING-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -1956,14 +1956,14 @@ define float @rmw32_fadd_seq_cst(ptr %p) nounwind {
 ; RV64-NO-ATOMIC-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64-NO-ATOMIC-NEXT:    lui a1, 260096
 ; RV64-NO-ATOMIC-NEXT:    mv a0, s1
-; RV64-NO-ATOMIC-NEXT:    call __addsf3@plt
+; RV64-NO-ATOMIC-NEXT:    call __addsf3
 ; RV64-NO-ATOMIC-NEXT:    mv a2, a0
 ; RV64-NO-ATOMIC-NEXT:    sw s1, 4(sp)
 ; RV64-NO-ATOMIC-NEXT:    addi a1, sp, 4
 ; RV64-NO-ATOMIC-NEXT:    li a3, 5
 ; RV64-NO-ATOMIC-NEXT:    li a4, 5
 ; RV64-NO-ATOMIC-NEXT:    mv a0, s0
-; RV64-NO-ATOMIC-NEXT:    call __atomic_compare_exchange_4@plt
+; RV64-NO-ATOMIC-NEXT:    call __atomic_compare_exchange_4
 ; RV64-NO-ATOMIC-NEXT:    lw s1, 4(sp)
 ; RV64-NO-ATOMIC-NEXT:    beqz a0, .LBB28_1
 ; RV64-NO-ATOMIC-NEXT:  # %bb.2: # %atomicrmw.end
@@ -1987,12 +1987,12 @@ define float @rmw32_fadd_seq_cst(ptr %p) nounwind {
 ; RV64-ATOMIC-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64-ATOMIC-NEXT:    lui a1, 260096
 ; RV64-ATOMIC-NEXT:    mv a0, s1
-; RV64-ATOMIC-NEXT:    call __addsf3@plt
+; RV64-ATOMIC-NEXT:    call __addsf3
 ; RV64-ATOMIC-NEXT:    mv a2, a0
 ; RV64-ATOMIC-NEXT:    sext.w s2, s1
 ; RV64-ATOMIC-NEXT:    mv a0, s0
 ; RV64-ATOMIC-NEXT:    mv a1, s2
-; RV64-ATOMIC-NEXT:    call __sync_val_compare_and_swap_4@plt
+; RV64-ATOMIC-NEXT:    call __sync_val_compare_and_swap_4
 ; RV64-ATOMIC-NEXT:    mv s1, a0
 ; RV64-ATOMIC-NEXT:    bne a0, s2, .LBB28_1
 ; RV64-ATOMIC-NEXT:  # %bb.2: # %atomicrmw.end
@@ -2017,12 +2017,12 @@ define float @rmw32_fadd_seq_cst(ptr %p) nounwind {
 ; RV64-ATOMIC-TRAILING-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64-ATOMIC-TRAILING-NEXT:    lui a1, 260096
 ; RV64-ATOMIC-TRAILING-NEXT:    mv a0, s1
-; RV64-ATOMIC-TRAILING-NEXT:    call __addsf3@plt
+; RV64-ATOMIC-TRAILING-NEXT:    call __addsf3
 ; RV64-ATOMIC-TRAILING-NEXT:    mv a2, a0
 ; RV64-ATOMIC-TRAILING-NEXT:    sext.w s2, s1
 ; RV64-ATOMIC-TRAILING-NEXT:    mv a0, s0
 ; RV64-ATOMIC-TRAILING-NEXT:    mv a1, s2
-; RV64-ATOMIC-TRAILING-NEXT:    call __sync_val_compare_and_swap_4@plt
+; RV64-ATOMIC-TRAILING-NEXT:    call __sync_val_compare_and_swap_4
 ; RV64-ATOMIC-TRAILING-NEXT:    mv s1, a0
 ; RV64-ATOMIC-TRAILING-NEXT:    bne a0, s2, .LBB28_1
 ; RV64-ATOMIC-TRAILING-NEXT:  # %bb.2: # %atomicrmw.end
@@ -2050,14 +2050,14 @@ define float @rmw32_fsub_seq_cst(ptr %p) nounwind {
 ; RV32-NO-ATOMIC-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32-NO-ATOMIC-NEXT:    lui a1, 784384
 ; RV32-NO-ATOMIC-NEXT:    mv a0, s1
-; RV32-NO-ATOMIC-NEXT:    call __addsf3@plt
+; RV32-NO-ATOMIC-NEXT:    call __addsf3
 ; RV32-NO-ATOMIC-NEXT:    mv a2, a0
 ; RV32-NO-ATOMIC-NEXT:    sw s1, 0(sp)
 ; RV32-NO-ATOMIC-NEXT:    mv a1, sp
 ; RV32-NO-ATOMIC-NEXT:    li a3, 5
 ; RV32-NO-ATOMIC-NEXT:    li a4, 5
 ; RV32-NO-ATOMIC-NEXT:    mv a0, s0
-; RV32-NO-ATOMIC-NEXT:    call __atomic_compare_exchange_4@plt
+; RV32-NO-ATOMIC-NEXT:    call __atomic_compare_exchange_4
 ; RV32-NO-ATOMIC-NEXT:    lw s1, 0(sp)
 ; RV32-NO-ATOMIC-NEXT:    beqz a0, .LBB29_1
 ; RV32-NO-ATOMIC-NEXT:  # %bb.2: # %atomicrmw.end
@@ -2080,11 +2080,11 @@ define float @rmw32_fsub_seq_cst(ptr %p) nounwind {
 ; RV32-ATOMIC-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32-ATOMIC-NEXT:    mv s1, a0
 ; RV32-ATOMIC-NEXT:    lui a1, 784384
-; RV32-ATOMIC-NEXT:    call __addsf3@plt
+; RV32-ATOMIC-NEXT:    call __addsf3
 ; RV32-ATOMIC-NEXT:    mv a2, a0
 ; RV32-ATOMIC-NEXT:    mv a0, s0
 ; RV32-ATOMIC-NEXT:    mv a1, s1
-; RV32-ATOMIC-NEXT:    call __sync_val_compare_and_swap_4@plt
+; RV32-ATOMIC-NEXT:    call __sync_val_compare_and_swap_4
 ; RV32-ATOMIC-NEXT:    bne a0, s1, .LBB29_1
 ; RV32-ATOMIC-NEXT:  # %bb.2: # %atomicrmw.end
 ; RV32-ATOMIC-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -2105,11 +2105,11 @@ define float @rmw32_fsub_seq_cst(ptr %p) nounwind {
 ; RV32-ATOMIC-TRAILING-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32-ATOMIC-TRAILING-NEXT:    mv s1, a0
 ; RV32-ATOMIC-TRAILING-NEXT:    lui a1, 784384
-; RV32-ATOMIC-TRAILING-NEXT:    call __addsf3@plt
+; RV32-ATOMIC-TRAILING-NEXT:    call __addsf3
 ; RV32-ATOMIC-TRAILING-NEXT:    mv a2, a0
 ; RV32-ATOMIC-TRAILING-NEXT:    mv a0, s0
 ; RV32-ATOMIC-TRAILING-NEXT:    mv a1, s1
-; RV32-ATOMIC-TRAILING-NEXT:    call __sync_val_compare_and_swap_4@plt
+; RV32-ATOMIC-TRAILING-NEXT:    call __sync_val_compare_and_swap_4
 ; RV32-ATOMIC-TRAILING-NEXT:    bne a0, s1, .LBB29_1
 ; RV32-ATOMIC-TRAILING-NEXT:  # %bb.2: # %atomicrmw.end
 ; RV32-ATOMIC-TRAILING-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -2130,14 +2130,14 @@ define float @rmw32_fsub_seq_cst(ptr %p) nounwind {
 ; RV64-NO-ATOMIC-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64-NO-ATOMIC-NEXT:    lui a1, 784384
 ; RV64-NO-ATOMIC-NEXT:    mv a0, s1
-; RV64-NO-ATOMIC-NEXT:    call __addsf3@plt
+; RV64-NO-ATOMIC-NEXT:    call __addsf3
 ; RV64-NO-ATOMIC-NEXT:    mv a2, a0
 ; RV64-NO-ATOMIC-NEXT:    sw s1, 4(sp)
 ; RV64-NO-ATOMIC-NEXT:    addi a1, sp, 4
 ; RV64-NO-ATOMIC-NEXT:    li a3, 5
 ; RV64-NO-ATOMIC-NEXT:    li a4, 5
 ; RV64-NO-ATOMIC-NEXT:    mv a0, s0
-; RV64-NO-ATOMIC-NEXT:    call __atomic_compare_exchange_4@plt
+; RV64-NO-ATOMIC-NEXT:    call __atomic_compare_exchange_4
 ; RV64-NO-ATOMIC-NEXT:    lw s1, 4(sp)
 ; RV64-NO-ATOMIC-NEXT:    beqz a0, .LBB29_1
 ; RV64-NO-ATOMIC-NEXT:  # %bb.2: # %atomicrmw.end
@@ -2161,12 +2161,12 @@ define float @rmw32_fsub_seq_cst(ptr %p) nounwind {
 ; RV64-ATOMIC-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64-ATOMIC-NEXT:    lui a1, 784384
 ; RV64-ATOMIC-NEXT:    mv a0, s1
-; RV64-ATOMIC-NEXT:    call __addsf3@plt
+; RV64-ATOMIC-NEXT:    call __addsf3
 ; RV64-ATOMIC-NEXT:    mv a2, a0
 ; RV64-ATOMIC-NEXT:    sext.w s2, s1
 ; RV64-ATOMIC-NEXT:    mv a0, s0
 ; RV64-ATOMIC-NEXT:    mv a1, s2
-; RV64-ATOMIC-NEXT:    call __sync_val_compare_and_swap_4@plt
+; RV64-ATOMIC-NEXT:    call __sync_val_compare_and_swap_4
 ; RV64-ATOMIC-NEXT:    mv s1, a0
 ; RV64-ATOMIC-NEXT:    bne a0, s2, .LBB29_1
 ; RV64-ATOMIC-NEXT:  # %bb.2: # %atomicrmw.end
@@ -2191,12 +2191,12 @@ define float @rmw32_fsub_seq_cst(ptr %p) nounwind {
 ; RV64-ATOMIC-TRAILING-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64-ATOMIC-TRAILING-NEXT:    lui a1, 784384
 ; RV64-ATOMIC-TRAILING-NEXT:    mv a0, s1
-; RV64-ATOMIC-TRAILING-NEXT:    call __addsf3@plt
+; RV64-ATOMIC-TRAILING-NEXT:    call __addsf3
 ; RV64-ATOMIC-TRAILING-NEXT:    mv a2, a0
 ; RV64-ATOMIC-TRAILING-NEXT:    sext.w s2, s1
 ; RV64-ATOMIC-TRAILING-NEXT:    mv a0, s0
 ; RV64-ATOMIC-TRAILING-NEXT:    mv a1, s2
-; RV64-ATOMIC-TRAILING-NEXT:    call __sync_val_compare_and_swap_4@plt
+; RV64-ATOMIC-TRAILING-NEXT:    call __sync_val_compare_and_swap_4
 ; RV64-ATOMIC-TRAILING-NEXT:    mv s1, a0
 ; RV64-ATOMIC-TRAILING-NEXT:    bne a0, s2, .LBB29_1
 ; RV64-ATOMIC-TRAILING-NEXT:  # %bb.2: # %atomicrmw.end
@@ -2224,14 +2224,14 @@ define float @rmw32_fmin_seq_cst(ptr %p) nounwind {
 ; RV32-NO-ATOMIC-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32-NO-ATOMIC-NEXT:    lui a1, 260096
 ; RV32-NO-ATOMIC-NEXT:    mv a0, s1
-; RV32-NO-ATOMIC-NEXT:    call fminf@plt
+; RV32-NO-ATOMIC-NEXT:    call fminf
 ; RV32-NO-ATOMIC-NEXT:    mv a2, a0
 ; RV32-NO-ATOMIC-NEXT:    sw s1, 0(sp)
 ; RV32-NO-ATOMIC-NEXT:    mv a1, sp
 ; RV32-NO-ATOMIC-NEXT:    li a3, 5
 ; RV32-NO-ATOMIC-NEXT:    li a4, 5
 ; RV32-NO-ATOMIC-NEXT:    mv a0, s0
-; RV32-NO-ATOMIC-NEXT:    call __atomic_compare_exchange_4@plt
+; RV32-NO-ATOMIC-NEXT:    call __atomic_compare_exchange_4
 ; RV32-NO-ATOMIC-NEXT:    lw s1, 0(sp)
 ; RV32-NO-ATOMIC-NEXT:    beqz a0, .LBB30_1
 ; RV32-NO-ATOMIC-NEXT:  # %bb.2: # %atomicrmw.end
@@ -2254,11 +2254,11 @@ define float @rmw32_fmin_seq_cst(ptr %p) nounwind {
 ; RV32-ATOMIC-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32-ATOMIC-NEXT:    mv s1, a0
 ; RV32-ATOMIC-NEXT:    lui a1, 260096
-; RV32-ATOMIC-NEXT:    call fminf@plt
+; RV32-ATOMIC-NEXT:    call fminf
 ; RV32-ATOMIC-NEXT:    mv a2, a0
 ; RV32-ATOMIC-NEXT:    mv a0, s0
 ; RV32-ATOMIC-NEXT:    mv a1, s1
-; RV32-ATOMIC-NEXT:    call __sync_val_compare_and_swap_4@plt
+; RV32-ATOMIC-NEXT:    call __sync_val_compare_and_swap_4
 ; RV32-ATOMIC-NEXT:    bne a0, s1, .LBB30_1
 ; RV32-ATOMIC-NEXT:  # %bb.2: # %atomicrmw.end
 ; RV32-ATOMIC-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -2279,11 +2279,11 @@ define float @rmw32_fmin_seq_cst(ptr %p) nounwind {
 ; RV32-ATOMIC-TRAILING-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32-ATOMIC-TRAILING-NEXT:    mv s1, a0
 ; RV32-ATOMIC-TRAILING-NEXT:    lui a1, 260096
-; RV32-ATOMIC-TRAILING-NEXT:    call fminf@plt
+; RV32-ATOMIC-TRAILING-NEXT:    call fminf
 ; RV32-ATOMIC-TRAILING-NEXT:    mv a2, a0
 ; RV32-ATOMIC-TRAILING-NEXT:    mv a0, s0
 ; RV32-ATOMIC-TRAILING-NEXT:    mv a1, s1
-; RV32-ATOMIC-TRAILING-NEXT:    call __sync_val_compare_and_swap_4@plt
+; RV32-ATOMIC-TRAILING-NEXT:    call __sync_val_compare_and_swap_4
 ; RV32-ATOMIC-TRAILING-NEXT:    bne a0, s1, .LBB30_1
 ; RV32-ATOMIC-TRAILING-NEXT:  # %bb.2: # %atomicrmw.end
 ; RV32-ATOMIC-TRAILING-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -2304,14 +2304,14 @@ define float @rmw32_fmin_seq_cst(ptr %p) nounwind {
 ; RV64-NO-ATOMIC-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64-NO-ATOMIC-NEXT:    lui a1, 260096
 ; RV64-NO-ATOMIC-NEXT:    mv a0, s1
-; RV64-NO-ATOMIC-NEXT:    call fminf@plt
+; RV64-NO-ATOMIC-NEXT:    call fminf
 ; RV64-NO-ATOMIC-NEXT:    mv a2, a0
 ; RV64-NO-ATOMIC-NEXT:    sw s1, 4(sp)
 ; RV64-NO-ATOMIC-NEXT:    addi a1, sp, 4
 ; RV64-NO-ATOMIC-NEXT:    li a3, 5
 ; RV64-NO-ATOMIC-NEXT:    li a4, 5
 ; RV64-NO-ATOMIC-NEXT:    mv a0, s0
-; RV64-NO-ATOMIC-NEXT:    call __atomic_compare_exchange_4@plt
+; RV64-NO-ATOMIC-NEXT:    call __atomic_compare_exchange_4
 ; RV64-NO-ATOMIC-NEXT:    lw s1, 4(sp)
 ; RV64-NO-ATOMIC-NEXT:    beqz a0, .LBB30_1
 ; RV64-NO-ATOMIC-NEXT:  # %bb.2: # %atomicrmw.end
@@ -2335,12 +2335,12 @@ define float @rmw32_fmin_seq_cst(ptr %p) nounwind {
 ; RV64-ATOMIC-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64-ATOMIC-NEXT:    lui a1, 260096
 ; RV64-ATOMIC-NEXT:    mv a0, s1
-; RV64-ATOMIC-NEXT:    call fminf@plt
+; RV64-ATOMIC-NEXT:    call fminf
 ; RV64-ATOMIC-NEXT:    mv a2, a0
 ; RV64-ATOMIC-NEXT:    sext.w s2, s1
 ; RV64-ATOMIC-NEXT:    mv a0, s0
 ; RV64-ATOMIC-NEXT:    mv a1, s2
-; RV64-ATOMIC-NEXT:    call __sync_val_compare_and_swap_4@plt
+; RV64-ATOMIC-NEXT:    call __sync_val_compare_and_swap_4
 ; RV64-ATOMIC-NEXT:    mv s1, a0
 ; RV64-ATOMIC-NEXT:    bne a0, s2, .LBB30_1
 ; RV64-ATOMIC-NEXT:  # %bb.2: # %atomicrmw.end
@@ -2365,12 +2365,12 @@ define float @rmw32_fmin_seq_cst(ptr %p) nounwind {
 ; RV64-ATOMIC-TRAILING-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64-ATOMIC-TRAILING-NEXT:    lui a1, 260096
 ; RV64-ATOMIC-TRAILING-NEXT:    mv a0, s1
-; RV64-ATOMIC-TRAILING-NEXT:    call fminf@plt
+; RV64-ATOMIC-TRAILING-NEXT:    call fminf
 ; RV64-ATOMIC-TRAILING-NEXT:    mv a2, a0
 ; RV64-ATOMIC-TRAILING-NEXT:    sext.w s2, s1
 ; RV64-ATOMIC-TRAILING-NEXT:    mv a0, s0
 ; RV64-ATOMIC-TRAILING-NEXT:    mv a1, s2
-; RV64-ATOMIC-TRAILING-NEXT:    call __sync_val_compare_and_swap_4@plt
+; RV64-ATOMIC-TRAILING-NEXT:    call __sync_val_compare_and_swap_4
 ; RV64-ATOMIC-TRAILING-NEXT:    mv s1, a0
 ; RV64-ATOMIC-TRAILING-NEXT:    bne a0, s2, .LBB30_1
 ; RV64-ATOMIC-TRAILING-NEXT:  # %bb.2: # %atomicrmw.end
@@ -2398,14 +2398,14 @@ define float @rmw32_fmax_seq_cst(ptr %p) nounwind {
 ; RV32-NO-ATOMIC-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32-NO-ATOMIC-NEXT:    lui a1, 260096
 ; RV32-NO-ATOMIC-NEXT:    mv a0, s1
-; RV32-NO-ATOMIC-NEXT:    call fmaxf@plt
+; RV32-NO-ATOMIC-NEXT:    call fmaxf
 ; RV32-NO-ATOMIC-NEXT:    mv a2, a0
 ; RV32-NO-ATOMIC-NEXT:    sw s1, 0(sp)
 ; RV32-NO-ATOMIC-NEXT:    mv a1, sp
 ; RV32-NO-ATOMIC-NEXT:    li a3, 5
 ; RV32-NO-ATOMIC-NEXT:    li a4, 5
 ; RV32-NO-ATOMIC-NEXT:    mv a0, s0
-; RV32-NO-ATOMIC-NEXT:    call __atomic_compare_exchange_4@plt
+; RV32-NO-ATOMIC-NEXT:    call __atomic_compare_exchange_4
 ; RV32-NO-ATOMIC-NEXT:    lw s1, 0(sp)
 ; RV32-NO-ATOMIC-NEXT:    beqz a0, .LBB31_1
 ; RV32-NO-ATOMIC-NEXT:  # %bb.2: # %atomicrmw.end
@@ -2428,11 +2428,11 @@ define float @rmw32_fmax_seq_cst(ptr %p) nounwind {
 ; RV32-ATOMIC-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32-ATOMIC-NEXT:    mv s1, a0
 ; RV32-ATOMIC-NEXT:    lui a1, 260096
-; RV32-ATOMIC-NEXT:    call fmaxf@plt
+; RV32-ATOMIC-NEXT:    call fmaxf
 ; RV32-ATOMIC-NEXT:    mv a2, a0
 ; RV32-ATOMIC-NEXT:    mv a0, s0
 ; RV32-ATOMIC-NEXT:    mv a1, s1
-; RV32-ATOMIC-NEXT:    call __sync_val_compare_and_swap_4@plt
+; RV32-ATOMIC-NEXT:    call __sync_val_compare_and_swap_4
 ; RV32-ATOMIC-NEXT:    bne a0, s1, .LBB31_1
 ; RV32-ATOMIC-NEXT:  # %bb.2: # %atomicrmw.end
 ; RV32-ATOMIC-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -2453,11 +2453,11 @@ define float @rmw32_fmax_seq_cst(ptr %p) nounwind {
 ; RV32-ATOMIC-TRAILING-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32-ATOMIC-TRAILING-NEXT:    mv s1, a0
 ; RV32-ATOMIC-TRAILING-NEXT:    lui a1, 260096
-; RV32-ATOMIC-TRAILING-NEXT:    call fmaxf@plt
+; RV32-ATOMIC-TRAILING-NEXT:    call fmaxf
 ; RV32-ATOMIC-TRAILING-NEXT:    mv a2, a0
 ; RV32-ATOMIC-TRAILING-NEXT:    mv a0, s0
 ; RV32-ATOMIC-TRAILING-NEXT:    mv a1, s1
-; RV32-ATOMIC-TRAILING-NEXT:    call __sync_val_compare_and_swap_4@plt
+; RV32-ATOMIC-TRAILING-NEXT:    call __sync_val_compare_and_swap_4
 ; RV32-ATOMIC-TRAILING-NEXT:    bne a0, s1, .LBB31_1
 ; RV32-ATOMIC-TRAILING-NEXT:  # %bb.2: # %atomicrmw.end
 ; RV32-ATOMIC-TRAILING-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -2478,14 +2478,14 @@ define float @rmw32_fmax_seq_cst(ptr %p) nounwind {
 ; RV64-NO-ATOMIC-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64-NO-ATOMIC-NEXT:    lui a1, 260096
 ; RV64-NO-ATOMIC-NEXT:    mv a0, s1
-; RV64-NO-ATOMIC-NEXT:    call fmaxf@plt
+; RV64-NO-ATOMIC-NEXT:    call fmaxf
 ; RV64-NO-ATOMIC-NEXT:    mv a2, a0
 ; RV64-NO-ATOMIC-NEXT:    sw s1, 4(sp)
 ; RV64-NO-ATOMIC-NEXT:    addi a1, sp, 4
 ; RV64-NO-ATOMIC-NEXT:    li a3, 5
 ; RV64-NO-ATOMIC-NEXT:    li a4, 5
 ; RV64-NO-ATOMIC-NEXT:    mv a0, s0
-; RV64-NO-ATOMIC-NEXT:    call __atomic_compare_exchange_4@plt
+; RV64-NO-ATOMIC-NEXT:    call __atomic_compare_exchange_4
 ; RV64-NO-ATOMIC-NEXT:    lw s1, 4(sp)
 ; RV64-NO-ATOMIC-NEXT:    beqz a0, .LBB31_1
 ; RV64-NO-ATOMIC-NEXT:  # %bb.2: # %atomicrmw.end
@@ -2509,12 +2509,12 @@ define float @rmw32_fmax_seq_cst(ptr %p) nounwind {
 ; RV64-ATOMIC-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64-ATOMIC-NEXT:    lui a1, 260096
 ; RV64-ATOMIC-NEXT:    mv a0, s1
-; RV64-ATOMIC-NEXT:    call fmaxf@plt
+; RV64-ATOMIC-NEXT:    call fmaxf
 ; RV64-ATOMIC-NEXT:    mv a2, a0
 ; RV64-ATOMIC-NEXT:    sext.w s2, s1
 ; RV64-ATOMIC-NEXT:    mv a0, s0
 ; RV64-ATOMIC-NEXT:    mv a1, s2
-; RV64-ATOMIC-NEXT:    call __sync_val_compare_and_swap_4@plt
+; RV64-ATOMIC-NEXT:    call __sync_val_compare_and_swap_4
 ; RV64-ATOMIC-NEXT:    mv s1, a0
 ; RV64-ATOMIC-NEXT:    bne a0, s2, .LBB31_1
 ; RV64-ATOMIC-NEXT:  # %bb.2: # %atomicrmw.end
@@ -2539,12 +2539,12 @@ define float @rmw32_fmax_seq_cst(ptr %p) nounwind {
 ; RV64-ATOMIC-TRAILING-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64-ATOMIC-TRAILING-NEXT:    lui a1, 260096
 ; RV64-ATOMIC-TRAILING-NEXT:    mv a0, s1
-; RV64-ATOMIC-TRAILING-NEXT:    call fmaxf@plt
+; RV64-ATOMIC-TRAILING-NEXT:    call fmaxf
 ; RV64-ATOMIC-TRAILING-NEXT:    mv a2, a0
 ; RV64-ATOMIC-TRAILING-NEXT:    sext.w s2, s1
 ; RV64-ATOMIC-TRAILING-NEXT:    mv a0, s0
 ; RV64-ATOMIC-TRAILING-NEXT:    mv a1, s2
-; RV64-ATOMIC-TRAILING-NEXT:    call __sync_val_compare_and_swap_4@plt
+; RV64-ATOMIC-TRAILING-NEXT:    call __sync_val_compare_and_swap_4
 ; RV64-ATOMIC-TRAILING-NEXT:    mv s1, a0
 ; RV64-ATOMIC-TRAILING-NEXT:    bne a0, s2, .LBB31_1
 ; RV64-ATOMIC-TRAILING-NEXT:  # %bb.2: # %atomicrmw.end
@@ -2569,7 +2569,7 @@ define i32 @cmpxchg32_monotonic(ptr %p) nounwind {
 ; RV32-NO-ATOMIC-NEXT:    li a2, 1
 ; RV32-NO-ATOMIC-NEXT:    li a3, 0
 ; RV32-NO-ATOMIC-NEXT:    li a4, 0
-; RV32-NO-ATOMIC-NEXT:    call __atomic_compare_exchange_4@plt
+; RV32-NO-ATOMIC-NEXT:    call __atomic_compare_exchange_4
 ; RV32-NO-ATOMIC-NEXT:    lw a0, 8(sp)
 ; RV32-NO-ATOMIC-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-NO-ATOMIC-NEXT:    addi sp, sp, 16
@@ -2581,7 +2581,7 @@ define i32 @cmpxchg32_monotonic(ptr %p) nounwind {
 ; RV32-ATOMIC-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-ATOMIC-NEXT:    li a2, 1
 ; RV32-ATOMIC-NEXT:    li a1, 0
-; RV32-ATOMIC-NEXT:    call __sync_val_compare_and_swap_4@plt
+; RV32-ATOMIC-NEXT:    call __sync_val_compare_and_swap_4
 ; RV32-ATOMIC-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV32-ATOMIC-NEXT:    ret
@@ -2592,7 +2592,7 @@ define i32 @cmpxchg32_monotonic(ptr %p) nounwind {
 ; RV32-ATOMIC-TRAILING-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-ATOMIC-TRAILING-NEXT:    li a2, 1
 ; RV32-ATOMIC-TRAILING-NEXT:    li a1, 0
-; RV32-ATOMIC-TRAILING-NEXT:    call __sync_val_compare_and_swap_4@plt
+; RV32-ATOMIC-TRAILING-NEXT:    call __sync_val_compare_and_swap_4
 ; RV32-ATOMIC-TRAILING-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-ATOMIC-TRAILING-NEXT:    addi sp, sp, 16
 ; RV32-ATOMIC-TRAILING-NEXT:    ret
@@ -2606,7 +2606,7 @@ define i32 @cmpxchg32_monotonic(ptr %p) nounwind {
 ; RV64-NO-ATOMIC-NEXT:    li a2, 1
 ; RV64-NO-ATOMIC-NEXT:    li a3, 0
 ; RV64-NO-ATOMIC-NEXT:    li a4, 0
-; RV64-NO-ATOMIC-NEXT:    call __atomic_compare_exchange_4@plt
+; RV64-NO-ATOMIC-NEXT:    call __atomic_compare_exchange_4
 ; RV64-NO-ATOMIC-NEXT:    lw a0, 4(sp)
 ; RV64-NO-ATOMIC-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-NO-ATOMIC-NEXT:    addi sp, sp, 16
@@ -2618,7 +2618,7 @@ define i32 @cmpxchg32_monotonic(ptr %p) nounwind {
 ; RV64-ATOMIC-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-ATOMIC-NEXT:    li a2, 1
 ; RV64-ATOMIC-NEXT:    li a1, 0
-; RV64-ATOMIC-NEXT:    call __sync_val_compare_and_swap_4@plt
+; RV64-ATOMIC-NEXT:    call __sync_val_compare_and_swap_4
 ; RV64-ATOMIC-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV64-ATOMIC-NEXT:    ret
@@ -2629,7 +2629,7 @@ define i32 @cmpxchg32_monotonic(ptr %p) nounwind {
 ; RV64-ATOMIC-TRAILING-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-ATOMIC-TRAILING-NEXT:    li a2, 1
 ; RV64-ATOMIC-TRAILING-NEXT:    li a1, 0
-; RV64-ATOMIC-TRAILING-NEXT:    call __sync_val_compare_and_swap_4@plt
+; RV64-ATOMIC-TRAILING-NEXT:    call __sync_val_compare_and_swap_4
 ; RV64-ATOMIC-TRAILING-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-ATOMIC-TRAILING-NEXT:    addi sp, sp, 16
 ; RV64-ATOMIC-TRAILING-NEXT:    ret
@@ -2648,7 +2648,7 @@ define i32 @cmpxchg32_seq_cst(ptr %p) nounwind {
 ; RV32-NO-ATOMIC-NEXT:    li a2, 1
 ; RV32-NO-ATOMIC-NEXT:    li a3, 5
 ; RV32-NO-ATOMIC-NEXT:    li a4, 5
-; RV32-NO-ATOMIC-NEXT:    call __atomic_compare_exchange_4@plt
+; RV32-NO-ATOMIC-NEXT:    call __atomic_compare_exchange_4
 ; RV32-NO-ATOMIC-NEXT:    lw a0, 8(sp)
 ; RV32-NO-ATOMIC-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-NO-ATOMIC-NEXT:    addi sp, sp, 16
@@ -2660,7 +2660,7 @@ define i32 @cmpxchg32_seq_cst(ptr %p) nounwind {
 ; RV32-ATOMIC-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-ATOMIC-NEXT:    li a2, 1
 ; RV32-ATOMIC-NEXT:    li a1, 0
-; RV32-ATOMIC-NEXT:    call __sync_val_compare_and_swap_4@plt
+; RV32-ATOMIC-NEXT:    call __sync_val_compare_and_swap_4
 ; RV32-ATOMIC-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV32-ATOMIC-NEXT:    ret
@@ -2671,7 +2671,7 @@ define i32 @cmpxchg32_seq_cst(ptr %p) nounwind {
 ; RV32-ATOMIC-TRAILING-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-ATOMIC-TRAILING-NEXT:    li a2, 1
 ; RV32-ATOMIC-TRAILING-NEXT:    li a1, 0
-; RV32-ATOMIC-TRAILING-NEXT:    call __sync_val_compare_and_swap_4@plt
+; RV32-ATOMIC-TRAILING-NEXT:    call __sync_val_compare_and_swap_4
 ; RV32-ATOMIC-TRAILING-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-ATOMIC-TRAILING-NEXT:    addi sp, sp, 16
 ; RV32-ATOMIC-TRAILING-NEXT:    ret
@@ -2685,7 +2685,7 @@ define i32 @cmpxchg32_seq_cst(ptr %p) nounwind {
 ; RV64-NO-ATOMIC-NEXT:    li a2, 1
 ; RV64-NO-ATOMIC-NEXT:    li a3, 5
 ; RV64-NO-ATOMIC-NEXT:    li a4, 5
-; RV64-NO-ATOMIC-NEXT:    call __atomic_compare_exchange_4@plt
+; RV64-NO-ATOMIC-NEXT:    call __atomic_compare_exchange_4
 ; RV64-NO-ATOMIC-NEXT:    lw a0, 4(sp)
 ; RV64-NO-ATOMIC-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-NO-ATOMIC-NEXT:    addi sp, sp, 16
@@ -2697,7 +2697,7 @@ define i32 @cmpxchg32_seq_cst(ptr %p) nounwind {
 ; RV64-ATOMIC-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-ATOMIC-NEXT:    li a2, 1
 ; RV64-ATOMIC-NEXT:    li a1, 0
-; RV64-ATOMIC-NEXT:    call __sync_val_compare_and_swap_4@plt
+; RV64-ATOMIC-NEXT:    call __sync_val_compare_and_swap_4
 ; RV64-ATOMIC-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV64-ATOMIC-NEXT:    ret
@@ -2708,7 +2708,7 @@ define i32 @cmpxchg32_seq_cst(ptr %p) nounwind {
 ; RV64-ATOMIC-TRAILING-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-ATOMIC-TRAILING-NEXT:    li a2, 1
 ; RV64-ATOMIC-TRAILING-NEXT:    li a1, 0
-; RV64-ATOMIC-TRAILING-NEXT:    call __sync_val_compare_and_swap_4@plt
+; RV64-ATOMIC-TRAILING-NEXT:    call __sync_val_compare_and_swap_4
 ; RV64-ATOMIC-TRAILING-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-ATOMIC-TRAILING-NEXT:    addi sp, sp, 16
 ; RV64-ATOMIC-TRAILING-NEXT:    ret
@@ -2723,7 +2723,7 @@ define i64 @load64_unordered(ptr %p) nounwind {
 ; RV32-NEXT:    addi sp, sp, -16
 ; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    li a1, 0
-; RV32-NEXT:    call __atomic_load_8@plt
+; RV32-NEXT:    call __atomic_load_8
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
@@ -2733,7 +2733,7 @@ define i64 @load64_unordered(ptr %p) nounwind {
 ; RV64-NO-ATOMIC-NEXT:    addi sp, sp, -16
 ; RV64-NO-ATOMIC-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-NO-ATOMIC-NEXT:    li a1, 0
-; RV64-NO-ATOMIC-NEXT:    call __atomic_load_8@plt
+; RV64-NO-ATOMIC-NEXT:    call __atomic_load_8
 ; RV64-NO-ATOMIC-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-NO-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV64-NO-ATOMIC-NEXT:    ret
@@ -2757,7 +2757,7 @@ define i64 @load64_monotonic(ptr %p) nounwind {
 ; RV32-NEXT:    addi sp, sp, -16
 ; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    li a1, 0
-; RV32-NEXT:    call __atomic_load_8@plt
+; RV32-NEXT:    call __atomic_load_8
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
@@ -2767,7 +2767,7 @@ define i64 @load64_monotonic(ptr %p) nounwind {
 ; RV64-NO-ATOMIC-NEXT:    addi sp, sp, -16
 ; RV64-NO-ATOMIC-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-NO-ATOMIC-NEXT:    li a1, 0
-; RV64-NO-ATOMIC-NEXT:    call __atomic_load_8@plt
+; RV64-NO-ATOMIC-NEXT:    call __atomic_load_8
 ; RV64-NO-ATOMIC-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-NO-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV64-NO-ATOMIC-NEXT:    ret
@@ -2791,7 +2791,7 @@ define i64 @load64_acquire(ptr %p) nounwind {
 ; RV32-NEXT:    addi sp, sp, -16
 ; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    li a1, 2
-; RV32-NEXT:    call __atomic_load_8@plt
+; RV32-NEXT:    call __atomic_load_8
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
@@ -2801,7 +2801,7 @@ define i64 @load64_acquire(ptr %p) nounwind {
 ; RV64-NO-ATOMIC-NEXT:    addi sp, sp, -16
 ; RV64-NO-ATOMIC-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-NO-ATOMIC-NEXT:    li a1, 2
-; RV64-NO-ATOMIC-NEXT:    call __atomic_load_8@plt
+; RV64-NO-ATOMIC-NEXT:    call __atomic_load_8
 ; RV64-NO-ATOMIC-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-NO-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV64-NO-ATOMIC-NEXT:    ret
@@ -2827,7 +2827,7 @@ define i64 @load64_seq_cst(ptr %p) nounwind {
 ; RV32-NEXT:    addi sp, sp, -16
 ; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    li a1, 5
-; RV32-NEXT:    call __atomic_load_8@plt
+; RV32-NEXT:    call __atomic_load_8
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
@@ -2837,7 +2837,7 @@ define i64 @load64_seq_cst(ptr %p) nounwind {
 ; RV64-NO-ATOMIC-NEXT:    addi sp, sp, -16
 ; RV64-NO-ATOMIC-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-NO-ATOMIC-NEXT:    li a1, 5
-; RV64-NO-ATOMIC-NEXT:    call __atomic_load_8@plt
+; RV64-NO-ATOMIC-NEXT:    call __atomic_load_8
 ; RV64-NO-ATOMIC-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-NO-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV64-NO-ATOMIC-NEXT:    ret
@@ -2867,7 +2867,7 @@ define void @store64_unordered(ptr %p) nounwind {
 ; RV32-NEXT:    li a1, 0
 ; RV32-NEXT:    li a2, 0
 ; RV32-NEXT:    li a3, 0
-; RV32-NEXT:    call __atomic_store_8@plt
+; RV32-NEXT:    call __atomic_store_8
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
@@ -2878,7 +2878,7 @@ define void @store64_unordered(ptr %p) nounwind {
 ; RV64-NO-ATOMIC-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-NO-ATOMIC-NEXT:    li a1, 0
 ; RV64-NO-ATOMIC-NEXT:    li a2, 0
-; RV64-NO-ATOMIC-NEXT:    call __atomic_store_8@plt
+; RV64-NO-ATOMIC-NEXT:    call __atomic_store_8
 ; RV64-NO-ATOMIC-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-NO-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV64-NO-ATOMIC-NEXT:    ret
@@ -2904,7 +2904,7 @@ define void @store64_monotonic(ptr %p) nounwind {
 ; RV32-NEXT:    li a1, 0
 ; RV32-NEXT:    li a2, 0
 ; RV32-NEXT:    li a3, 0
-; RV32-NEXT:    call __atomic_store_8@plt
+; RV32-NEXT:    call __atomic_store_8
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
@@ -2915,7 +2915,7 @@ define void @store64_monotonic(ptr %p) nounwind {
 ; RV64-NO-ATOMIC-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-NO-ATOMIC-NEXT:    li a1, 0
 ; RV64-NO-ATOMIC-NEXT:    li a2, 0
-; RV64-NO-ATOMIC-NEXT:    call __atomic_store_8@plt
+; RV64-NO-ATOMIC-NEXT:    call __atomic_store_8
 ; RV64-NO-ATOMIC-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-NO-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV64-NO-ATOMIC-NEXT:    ret
@@ -2941,7 +2941,7 @@ define void @store64_release(ptr %p) nounwind {
 ; RV32-NEXT:    li a3, 3
 ; RV32-NEXT:    li a1, 0
 ; RV32-NEXT:    li a2, 0
-; RV32-NEXT:    call __atomic_store_8@plt
+; RV32-NEXT:    call __atomic_store_8
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
@@ -2952,7 +2952,7 @@ define void @store64_release(ptr %p) nounwind {
 ; RV64-NO-ATOMIC-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-NO-ATOMIC-NEXT:    li a2, 3
 ; RV64-NO-ATOMIC-NEXT:    li a1, 0
-; RV64-NO-ATOMIC-NEXT:    call __atomic_store_8@plt
+; RV64-NO-ATOMIC-NEXT:    call __atomic_store_8
 ; RV64-NO-ATOMIC-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-NO-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV64-NO-ATOMIC-NEXT:    ret
@@ -2980,7 +2980,7 @@ define void @store64(ptr %p) nounwind {
 ; RV32-NEXT:    li a3, 5
 ; RV32-NEXT:    li a1, 0
 ; RV32-NEXT:    li a2, 0
-; RV32-NEXT:    call __atomic_store_8@plt
+; RV32-NEXT:    call __atomic_store_8
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
@@ -2991,7 +2991,7 @@ define void @store64(ptr %p) nounwind {
 ; RV64-NO-ATOMIC-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-NO-ATOMIC-NEXT:    li a2, 5
 ; RV64-NO-ATOMIC-NEXT:    li a1, 0
-; RV64-NO-ATOMIC-NEXT:    call __atomic_store_8@plt
+; RV64-NO-ATOMIC-NEXT:    call __atomic_store_8
 ; RV64-NO-ATOMIC-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-NO-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV64-NO-ATOMIC-NEXT:    ret
@@ -3020,7 +3020,7 @@ define i64 @rmw64_monotonic(ptr %p) nounwind {
 ; RV32-NEXT:    li a1, 1
 ; RV32-NEXT:    li a2, 0
 ; RV32-NEXT:    li a3, 0
-; RV32-NEXT:    call __atomic_fetch_add_8@plt
+; RV32-NEXT:    call __atomic_fetch_add_8
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
@@ -3031,7 +3031,7 @@ define i64 @rmw64_monotonic(ptr %p) nounwind {
 ; RV64-NO-ATOMIC-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-NO-ATOMIC-NEXT:    li a1, 1
 ; RV64-NO-ATOMIC-NEXT:    li a2, 0
-; RV64-NO-ATOMIC-NEXT:    call __atomic_fetch_add_8@plt
+; RV64-NO-ATOMIC-NEXT:    call __atomic_fetch_add_8
 ; RV64-NO-ATOMIC-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-NO-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV64-NO-ATOMIC-NEXT:    ret
@@ -3041,7 +3041,7 @@ define i64 @rmw64_monotonic(ptr %p) nounwind {
 ; RV64-ATOMIC-NEXT:    addi sp, sp, -16
 ; RV64-ATOMIC-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-ATOMIC-NEXT:    li a1, 1
-; RV64-ATOMIC-NEXT:    call __sync_fetch_and_add_8@plt
+; RV64-ATOMIC-NEXT:    call __sync_fetch_and_add_8
 ; RV64-ATOMIC-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV64-ATOMIC-NEXT:    ret
@@ -3051,7 +3051,7 @@ define i64 @rmw64_monotonic(ptr %p) nounwind {
 ; RV64-ATOMIC-TRAILING-NEXT:    addi sp, sp, -16
 ; RV64-ATOMIC-TRAILING-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-ATOMIC-TRAILING-NEXT:    li a1, 1
-; RV64-ATOMIC-TRAILING-NEXT:    call __sync_fetch_and_add_8@plt
+; RV64-ATOMIC-TRAILING-NEXT:    call __sync_fetch_and_add_8
 ; RV64-ATOMIC-TRAILING-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-ATOMIC-TRAILING-NEXT:    addi sp, sp, 16
 ; RV64-ATOMIC-TRAILING-NEXT:    ret
@@ -3067,7 +3067,7 @@ define i64 @rmw64_add_seq_cst(ptr %p) nounwind {
 ; RV32-NEXT:    li a1, 1
 ; RV32-NEXT:    li a3, 5
 ; RV32-NEXT:    li a2, 0
-; RV32-NEXT:    call __atomic_fetch_add_8@plt
+; RV32-NEXT:    call __atomic_fetch_add_8
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
@@ -3078,7 +3078,7 @@ define i64 @rmw64_add_seq_cst(ptr %p) nounwind {
 ; RV64-NO-ATOMIC-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-NO-ATOMIC-NEXT:    li a1, 1
 ; RV64-NO-ATOMIC-NEXT:    li a2, 5
-; RV64-NO-ATOMIC-NEXT:    call __atomic_fetch_add_8@plt
+; RV64-NO-ATOMIC-NEXT:    call __atomic_fetch_add_8
 ; RV64-NO-ATOMIC-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-NO-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV64-NO-ATOMIC-NEXT:    ret
@@ -3088,7 +3088,7 @@ define i64 @rmw64_add_seq_cst(ptr %p) nounwind {
 ; RV64-ATOMIC-NEXT:    addi sp, sp, -16
 ; RV64-ATOMIC-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-ATOMIC-NEXT:    li a1, 1
-; RV64-ATOMIC-NEXT:    call __sync_fetch_and_add_8@plt
+; RV64-ATOMIC-NEXT:    call __sync_fetch_and_add_8
 ; RV64-ATOMIC-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV64-ATOMIC-NEXT:    ret
@@ -3098,7 +3098,7 @@ define i64 @rmw64_add_seq_cst(ptr %p) nounwind {
 ; RV64-ATOMIC-TRAILING-NEXT:    addi sp, sp, -16
 ; RV64-ATOMIC-TRAILING-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-ATOMIC-TRAILING-NEXT:    li a1, 1
-; RV64-ATOMIC-TRAILING-NEXT:    call __sync_fetch_and_add_8@plt
+; RV64-ATOMIC-TRAILING-NEXT:    call __sync_fetch_and_add_8
 ; RV64-ATOMIC-TRAILING-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-ATOMIC-TRAILING-NEXT:    addi sp, sp, 16
 ; RV64-ATOMIC-TRAILING-NEXT:    ret
@@ -3114,7 +3114,7 @@ define i64 @rmw64_sub_seq_cst(ptr %p) nounwind {
 ; RV32-NEXT:    li a1, 1
 ; RV32-NEXT:    li a3, 5
 ; RV32-NEXT:    li a2, 0
-; RV32-NEXT:    call __atomic_fetch_sub_8@plt
+; RV32-NEXT:    call __atomic_fetch_sub_8
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
@@ -3125,7 +3125,7 @@ define i64 @rmw64_sub_seq_cst(ptr %p) nounwind {
 ; RV64-NO-ATOMIC-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-NO-ATOMIC-NEXT:    li a1, 1
 ; RV64-NO-ATOMIC-NEXT:    li a2, 5
-; RV64-NO-ATOMIC-NEXT:    call __atomic_fetch_sub_8@plt
+; RV64-NO-ATOMIC-NEXT:    call __atomic_fetch_sub_8
 ; RV64-NO-ATOMIC-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-NO-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV64-NO-ATOMIC-NEXT:    ret
@@ -3135,7 +3135,7 @@ define i64 @rmw64_sub_seq_cst(ptr %p) nounwind {
 ; RV64-ATOMIC-NEXT:    addi sp, sp, -16
 ; RV64-ATOMIC-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-ATOMIC-NEXT:    li a1, 1
-; RV64-ATOMIC-NEXT:    call __sync_fetch_and_sub_8@plt
+; RV64-ATOMIC-NEXT:    call __sync_fetch_and_sub_8
 ; RV64-ATOMIC-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV64-ATOMIC-NEXT:    ret
@@ -3145,7 +3145,7 @@ define i64 @rmw64_sub_seq_cst(ptr %p) nounwind {
 ; RV64-ATOMIC-TRAILING-NEXT:    addi sp, sp, -16
 ; RV64-ATOMIC-TRAILING-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-ATOMIC-TRAILING-NEXT:    li a1, 1
-; RV64-ATOMIC-TRAILING-NEXT:    call __sync_fetch_and_sub_8@plt
+; RV64-ATOMIC-TRAILING-NEXT:    call __sync_fetch_and_sub_8
 ; RV64-ATOMIC-TRAILING-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-ATOMIC-TRAILING-NEXT:    addi sp, sp, 16
 ; RV64-ATOMIC-TRAILING-NEXT:    ret
@@ -3161,7 +3161,7 @@ define i64 @rmw64_and_seq_cst(ptr %p) nounwind {
 ; RV32-NEXT:    li a1, 1
 ; RV32-NEXT:    li a3, 5
 ; RV32-NEXT:    li a2, 0
-; RV32-NEXT:    call __atomic_fetch_and_8@plt
+; RV32-NEXT:    call __atomic_fetch_and_8
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
@@ -3172,7 +3172,7 @@ define i64 @rmw64_and_seq_cst(ptr %p) nounwind {
 ; RV64-NO-ATOMIC-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-NO-ATOMIC-NEXT:    li a1, 1
 ; RV64-NO-ATOMIC-NEXT:    li a2, 5
-; RV64-NO-ATOMIC-NEXT:    call __atomic_fetch_and_8@plt
+; RV64-NO-ATOMIC-NEXT:    call __atomic_fetch_and_8
 ; RV64-NO-ATOMIC-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-NO-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV64-NO-ATOMIC-NEXT:    ret
@@ -3182,7 +3182,7 @@ define i64 @rmw64_and_seq_cst(ptr %p) nounwind {
 ; RV64-ATOMIC-NEXT:    addi sp, sp, -16
 ; RV64-ATOMIC-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-ATOMIC-NEXT:    li a1, 1
-; RV64-ATOMIC-NEXT:    call __sync_fetch_and_and_8@plt
+; RV64-ATOMIC-NEXT:    call __sync_fetch_and_and_8
 ; RV64-ATOMIC-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV64-ATOMIC-NEXT:    ret
@@ -3192,7 +3192,7 @@ define i64 @rmw64_and_seq_cst(ptr %p) nounwind {
 ; RV64-ATOMIC-TRAILING-NEXT:    addi sp, sp, -16
 ; RV64-ATOMIC-TRAILING-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-ATOMIC-TRAILING-NEXT:    li a1, 1
-; RV64-ATOMIC-TRAILING-NEXT:    call __sync_fetch_and_and_8@plt
+; RV64-ATOMIC-TRAILING-NEXT:    call __sync_fetch_and_and_8
 ; RV64-ATOMIC-TRAILING-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-ATOMIC-TRAILING-NEXT:    addi sp, sp, 16
 ; RV64-ATOMIC-TRAILING-NEXT:    ret
@@ -3208,7 +3208,7 @@ define i64 @rmw64_nand_seq_cst(ptr %p) nounwind {
 ; RV32-NEXT:    li a1, 1
 ; RV32-NEXT:    li a3, 5
 ; RV32-NEXT:    li a2, 0
-; RV32-NEXT:    call __atomic_fetch_nand_8@plt
+; RV32-NEXT:    call __atomic_fetch_nand_8
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
@@ -3219,7 +3219,7 @@ define i64 @rmw64_nand_seq_cst(ptr %p) nounwind {
 ; RV64-NO-ATOMIC-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-NO-ATOMIC-NEXT:    li a1, 1
 ; RV64-NO-ATOMIC-NEXT:    li a2, 5
-; RV64-NO-ATOMIC-NEXT:    call __atomic_fetch_nand_8@plt
+; RV64-NO-ATOMIC-NEXT:    call __atomic_fetch_nand_8
 ; RV64-NO-ATOMIC-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-NO-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV64-NO-ATOMIC-NEXT:    ret
@@ -3229,7 +3229,7 @@ define i64 @rmw64_nand_seq_cst(ptr %p) nounwind {
 ; RV64-ATOMIC-NEXT:    addi sp, sp, -16
 ; RV64-ATOMIC-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-ATOMIC-NEXT:    li a1, 1
-; RV64-ATOMIC-NEXT:    call __sync_fetch_and_nand_8@plt
+; RV64-ATOMIC-NEXT:    call __sync_fetch_and_nand_8
 ; RV64-ATOMIC-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV64-ATOMIC-NEXT:    ret
@@ -3239,7 +3239,7 @@ define i64 @rmw64_nand_seq_cst(ptr %p) nounwind {
 ; RV64-ATOMIC-TRAILING-NEXT:    addi sp, sp, -16
 ; RV64-ATOMIC-TRAILING-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-ATOMIC-TRAILING-NEXT:    li a1, 1
-; RV64-ATOMIC-TRAILING-NEXT:    call __sync_fetch_and_nand_8@plt
+; RV64-ATOMIC-TRAILING-NEXT:    call __sync_fetch_and_nand_8
 ; RV64-ATOMIC-TRAILING-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-ATOMIC-TRAILING-NEXT:    addi sp, sp, 16
 ; RV64-ATOMIC-TRAILING-NEXT:    ret
@@ -3255,7 +3255,7 @@ define i64 @rmw64_or_seq_cst(ptr %p) nounwind {
 ; RV32-NEXT:    li a1, 1
 ; RV32-NEXT:    li a3, 5
 ; RV32-NEXT:    li a2, 0
-; RV32-NEXT:    call __atomic_fetch_or_8@plt
+; RV32-NEXT:    call __atomic_fetch_or_8
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
@@ -3266,7 +3266,7 @@ define i64 @rmw64_or_seq_cst(ptr %p) nounwind {
 ; RV64-NO-ATOMIC-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-NO-ATOMIC-NEXT:    li a1, 1
 ; RV64-NO-ATOMIC-NEXT:    li a2, 5
-; RV64-NO-ATOMIC-NEXT:    call __atomic_fetch_or_8@plt
+; RV64-NO-ATOMIC-NEXT:    call __atomic_fetch_or_8
 ; RV64-NO-ATOMIC-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-NO-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV64-NO-ATOMIC-NEXT:    ret
@@ -3276,7 +3276,7 @@ define i64 @rmw64_or_seq_cst(ptr %p) nounwind {
 ; RV64-ATOMIC-NEXT:    addi sp, sp, -16
 ; RV64-ATOMIC-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-ATOMIC-NEXT:    li a1, 1
-; RV64-ATOMIC-NEXT:    call __sync_fetch_and_or_8@plt
+; RV64-ATOMIC-NEXT:    call __sync_fetch_and_or_8
 ; RV64-ATOMIC-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV64-ATOMIC-NEXT:    ret
@@ -3286,7 +3286,7 @@ define i64 @rmw64_or_seq_cst(ptr %p) nounwind {
 ; RV64-ATOMIC-TRAILING-NEXT:    addi sp, sp, -16
 ; RV64-ATOMIC-TRAILING-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-ATOMIC-TRAILING-NEXT:    li a1, 1
-; RV64-ATOMIC-TRAILING-NEXT:    call __sync_fetch_and_or_8@plt
+; RV64-ATOMIC-TRAILING-NEXT:    call __sync_fetch_and_or_8
 ; RV64-ATOMIC-TRAILING-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-ATOMIC-TRAILING-NEXT:    addi sp, sp, 16
 ; RV64-ATOMIC-TRAILING-NEXT:    ret
@@ -3302,7 +3302,7 @@ define i64 @rmw64_xor_seq_cst(ptr %p) nounwind {
 ; RV32-NEXT:    li a1, 1
 ; RV32-NEXT:    li a3, 5
 ; RV32-NEXT:    li a2, 0
-; RV32-NEXT:    call __atomic_fetch_xor_8@plt
+; RV32-NEXT:    call __atomic_fetch_xor_8
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
@@ -3313,7 +3313,7 @@ define i64 @rmw64_xor_seq_cst(ptr %p) nounwind {
 ; RV64-NO-ATOMIC-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-NO-ATOMIC-NEXT:    li a1, 1
 ; RV64-NO-ATOMIC-NEXT:    li a2, 5
-; RV64-NO-ATOMIC-NEXT:    call __atomic_fetch_xor_8@plt
+; RV64-NO-ATOMIC-NEXT:    call __atomic_fetch_xor_8
 ; RV64-NO-ATOMIC-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-NO-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV64-NO-ATOMIC-NEXT:    ret
@@ -3323,7 +3323,7 @@ define i64 @rmw64_xor_seq_cst(ptr %p) nounwind {
 ; RV64-ATOMIC-NEXT:    addi sp, sp, -16
 ; RV64-ATOMIC-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-ATOMIC-NEXT:    li a1, 1
-; RV64-ATOMIC-NEXT:    call __sync_fetch_and_xor_8@plt
+; RV64-ATOMIC-NEXT:    call __sync_fetch_and_xor_8
 ; RV64-ATOMIC-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV64-ATOMIC-NEXT:    ret
@@ -3333,7 +3333,7 @@ define i64 @rmw64_xor_seq_cst(ptr %p) nounwind {
 ; RV64-ATOMIC-TRAILING-NEXT:    addi sp, sp, -16
 ; RV64-ATOMIC-TRAILING-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-ATOMIC-TRAILING-NEXT:    li a1, 1
-; RV64-ATOMIC-TRAILING-NEXT:    call __sync_fetch_and_xor_8@plt
+; RV64-ATOMIC-TRAILING-NEXT:    call __sync_fetch_and_xor_8
 ; RV64-ATOMIC-TRAILING-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-ATOMIC-TRAILING-NEXT:    addi sp, sp, 16
 ; RV64-ATOMIC-TRAILING-NEXT:    ret
@@ -3361,7 +3361,7 @@ define i64 @rmw64_max_seq_cst(ptr %p) nounwind {
 ; RV32-NEXT:    li a4, 5
 ; RV32-NEXT:    li a5, 5
 ; RV32-NEXT:    mv a0, s0
-; RV32-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32-NEXT:    call __atomic_compare_exchange_8
 ; RV32-NEXT:    lw a1, 4(sp)
 ; RV32-NEXT:    lw a4, 0(sp)
 ; RV32-NEXT:    bnez a0, .LBB49_6
@@ -3405,7 +3405,7 @@ define i64 @rmw64_max_seq_cst(ptr %p) nounwind {
 ; RV64-NO-ATOMIC-NEXT:    li a3, 5
 ; RV64-NO-ATOMIC-NEXT:    li a4, 5
 ; RV64-NO-ATOMIC-NEXT:    mv a0, s0
-; RV64-NO-ATOMIC-NEXT:    call __atomic_compare_exchange_8@plt
+; RV64-NO-ATOMIC-NEXT:    call __atomic_compare_exchange_8
 ; RV64-NO-ATOMIC-NEXT:    ld a1, 8(sp)
 ; RV64-NO-ATOMIC-NEXT:    bnez a0, .LBB49_4
 ; RV64-NO-ATOMIC-NEXT:  .LBB49_2: # %atomicrmw.start
@@ -3428,7 +3428,7 @@ define i64 @rmw64_max_seq_cst(ptr %p) nounwind {
 ; RV64-ATOMIC-NEXT:    addi sp, sp, -16
 ; RV64-ATOMIC-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-ATOMIC-NEXT:    li a1, 1
-; RV64-ATOMIC-NEXT:    call __sync_fetch_and_max_8@plt
+; RV64-ATOMIC-NEXT:    call __sync_fetch_and_max_8
 ; RV64-ATOMIC-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV64-ATOMIC-NEXT:    ret
@@ -3438,7 +3438,7 @@ define i64 @rmw64_max_seq_cst(ptr %p) nounwind {
 ; RV64-ATOMIC-TRAILING-NEXT:    addi sp, sp, -16
 ; RV64-ATOMIC-TRAILING-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-ATOMIC-TRAILING-NEXT:    li a1, 1
-; RV64-ATOMIC-TRAILING-NEXT:    call __sync_fetch_and_max_8@plt
+; RV64-ATOMIC-TRAILING-NEXT:    call __sync_fetch_and_max_8
 ; RV64-ATOMIC-TRAILING-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-ATOMIC-TRAILING-NEXT:    addi sp, sp, 16
 ; RV64-ATOMIC-TRAILING-NEXT:    ret
@@ -3466,7 +3466,7 @@ define i64 @rmw64_min_seq_cst(ptr %p) nounwind {
 ; RV32-NEXT:    li a4, 5
 ; RV32-NEXT:    li a5, 5
 ; RV32-NEXT:    mv a0, s0
-; RV32-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32-NEXT:    call __atomic_compare_exchange_8
 ; RV32-NEXT:    lw a1, 4(sp)
 ; RV32-NEXT:    lw a4, 0(sp)
 ; RV32-NEXT:    bnez a0, .LBB50_6
@@ -3511,7 +3511,7 @@ define i64 @rmw64_min_seq_cst(ptr %p) nounwind {
 ; RV64-NO-ATOMIC-NEXT:    li a3, 5
 ; RV64-NO-ATOMIC-NEXT:    li a4, 5
 ; RV64-NO-ATOMIC-NEXT:    mv a0, s0
-; RV64-NO-ATOMIC-NEXT:    call __atomic_compare_exchange_8@plt
+; RV64-NO-ATOMIC-NEXT:    call __atomic_compare_exchange_8
 ; RV64-NO-ATOMIC-NEXT:    ld a1, 0(sp)
 ; RV64-NO-ATOMIC-NEXT:    bnez a0, .LBB50_4
 ; RV64-NO-ATOMIC-NEXT:  .LBB50_2: # %atomicrmw.start
@@ -3535,7 +3535,7 @@ define i64 @rmw64_min_seq_cst(ptr %p) nounwind {
 ; RV64-ATOMIC-NEXT:    addi sp, sp, -16
 ; RV64-ATOMIC-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-ATOMIC-NEXT:    li a1, 1
-; RV64-ATOMIC-NEXT:    call __sync_fetch_and_min_8@plt
+; RV64-ATOMIC-NEXT:    call __sync_fetch_and_min_8
 ; RV64-ATOMIC-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV64-ATOMIC-NEXT:    ret
@@ -3545,7 +3545,7 @@ define i64 @rmw64_min_seq_cst(ptr %p) nounwind {
 ; RV64-ATOMIC-TRAILING-NEXT:    addi sp, sp, -16
 ; RV64-ATOMIC-TRAILING-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-ATOMIC-TRAILING-NEXT:    li a1, 1
-; RV64-ATOMIC-TRAILING-NEXT:    call __sync_fetch_and_min_8@plt
+; RV64-ATOMIC-TRAILING-NEXT:    call __sync_fetch_and_min_8
 ; RV64-ATOMIC-TRAILING-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-ATOMIC-TRAILING-NEXT:    addi sp, sp, 16
 ; RV64-ATOMIC-TRAILING-NEXT:    ret
@@ -3573,7 +3573,7 @@ define i64 @rmw64_umax_seq_cst(ptr %p) nounwind {
 ; RV32-NEXT:    li a4, 5
 ; RV32-NEXT:    li a5, 5
 ; RV32-NEXT:    mv a0, s0
-; RV32-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32-NEXT:    call __atomic_compare_exchange_8
 ; RV32-NEXT:    lw a1, 4(sp)
 ; RV32-NEXT:    lw a4, 0(sp)
 ; RV32-NEXT:    bnez a0, .LBB51_4
@@ -3612,7 +3612,7 @@ define i64 @rmw64_umax_seq_cst(ptr %p) nounwind {
 ; RV64-NO-ATOMIC-NEXT:    li a3, 5
 ; RV64-NO-ATOMIC-NEXT:    li a4, 5
 ; RV64-NO-ATOMIC-NEXT:    mv a0, s0
-; RV64-NO-ATOMIC-NEXT:    call __atomic_compare_exchange_8@plt
+; RV64-NO-ATOMIC-NEXT:    call __atomic_compare_exchange_8
 ; RV64-NO-ATOMIC-NEXT:    ld a1, 8(sp)
 ; RV64-NO-ATOMIC-NEXT:    beqz a0, .LBB51_1
 ; RV64-NO-ATOMIC-NEXT:  # %bb.2: # %atomicrmw.end
@@ -3627,7 +3627,7 @@ define i64 @rmw64_umax_seq_cst(ptr %p) nounwind {
 ; RV64-ATOMIC-NEXT:    addi sp, sp, -16
 ; RV64-ATOMIC-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-ATOMIC-NEXT:    li a1, 1
-; RV64-ATOMIC-NEXT:    call __sync_fetch_and_umax_8@plt
+; RV64-ATOMIC-NEXT:    call __sync_fetch_and_umax_8
 ; RV64-ATOMIC-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV64-ATOMIC-NEXT:    ret
@@ -3637,7 +3637,7 @@ define i64 @rmw64_umax_seq_cst(ptr %p) nounwind {
 ; RV64-ATOMIC-TRAILING-NEXT:    addi sp, sp, -16
 ; RV64-ATOMIC-TRAILING-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-ATOMIC-TRAILING-NEXT:    li a1, 1
-; RV64-ATOMIC-TRAILING-NEXT:    call __sync_fetch_and_umax_8@plt
+; RV64-ATOMIC-TRAILING-NEXT:    call __sync_fetch_and_umax_8
 ; RV64-ATOMIC-TRAILING-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-ATOMIC-TRAILING-NEXT:    addi sp, sp, 16
 ; RV64-ATOMIC-TRAILING-NEXT:    ret
@@ -3665,7 +3665,7 @@ define i64 @rmw64_umin_seq_cst(ptr %p) nounwind {
 ; RV32-NEXT:    li a4, 5
 ; RV32-NEXT:    li a5, 5
 ; RV32-NEXT:    mv a0, s0
-; RV32-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32-NEXT:    call __atomic_compare_exchange_8
 ; RV32-NEXT:    lw a1, 4(sp)
 ; RV32-NEXT:    lw a4, 0(sp)
 ; RV32-NEXT:    bnez a0, .LBB52_4
@@ -3704,7 +3704,7 @@ define i64 @rmw64_umin_seq_cst(ptr %p) nounwind {
 ; RV64-NO-ATOMIC-NEXT:    li a3, 5
 ; RV64-NO-ATOMIC-NEXT:    li a4, 5
 ; RV64-NO-ATOMIC-NEXT:    mv a0, s0
-; RV64-NO-ATOMIC-NEXT:    call __atomic_compare_exchange_8@plt
+; RV64-NO-ATOMIC-NEXT:    call __atomic_compare_exchange_8
 ; RV64-NO-ATOMIC-NEXT:    ld a1, 0(sp)
 ; RV64-NO-ATOMIC-NEXT:    bnez a0, .LBB52_4
 ; RV64-NO-ATOMIC-NEXT:  .LBB52_2: # %atomicrmw.start
@@ -3728,7 +3728,7 @@ define i64 @rmw64_umin_seq_cst(ptr %p) nounwind {
 ; RV64-ATOMIC-NEXT:    addi sp, sp, -16
 ; RV64-ATOMIC-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-ATOMIC-NEXT:    li a1, 1
-; RV64-ATOMIC-NEXT:    call __sync_fetch_and_umin_8@plt
+; RV64-ATOMIC-NEXT:    call __sync_fetch_and_umin_8
 ; RV64-ATOMIC-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV64-ATOMIC-NEXT:    ret
@@ -3738,7 +3738,7 @@ define i64 @rmw64_umin_seq_cst(ptr %p) nounwind {
 ; RV64-ATOMIC-TRAILING-NEXT:    addi sp, sp, -16
 ; RV64-ATOMIC-TRAILING-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-ATOMIC-TRAILING-NEXT:    li a1, 1
-; RV64-ATOMIC-TRAILING-NEXT:    call __sync_fetch_and_umin_8@plt
+; RV64-ATOMIC-TRAILING-NEXT:    call __sync_fetch_and_umin_8
 ; RV64-ATOMIC-TRAILING-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-ATOMIC-TRAILING-NEXT:    addi sp, sp, 16
 ; RV64-ATOMIC-TRAILING-NEXT:    ret
@@ -3754,7 +3754,7 @@ define i64 @rmw64_xchg_seq_cst(ptr %p) nounwind {
 ; RV32-NEXT:    li a1, 1
 ; RV32-NEXT:    li a3, 5
 ; RV32-NEXT:    li a2, 0
-; RV32-NEXT:    call __atomic_exchange_8@plt
+; RV32-NEXT:    call __atomic_exchange_8
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
@@ -3765,7 +3765,7 @@ define i64 @rmw64_xchg_seq_cst(ptr %p) nounwind {
 ; RV64-NO-ATOMIC-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-NO-ATOMIC-NEXT:    li a1, 1
 ; RV64-NO-ATOMIC-NEXT:    li a2, 5
-; RV64-NO-ATOMIC-NEXT:    call __atomic_exchange_8@plt
+; RV64-NO-ATOMIC-NEXT:    call __atomic_exchange_8
 ; RV64-NO-ATOMIC-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-NO-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV64-NO-ATOMIC-NEXT:    ret
@@ -3775,7 +3775,7 @@ define i64 @rmw64_xchg_seq_cst(ptr %p) nounwind {
 ; RV64-ATOMIC-NEXT:    addi sp, sp, -16
 ; RV64-ATOMIC-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-ATOMIC-NEXT:    li a1, 1
-; RV64-ATOMIC-NEXT:    call __sync_lock_test_and_set_8@plt
+; RV64-ATOMIC-NEXT:    call __sync_lock_test_and_set_8
 ; RV64-ATOMIC-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV64-ATOMIC-NEXT:    ret
@@ -3785,7 +3785,7 @@ define i64 @rmw64_xchg_seq_cst(ptr %p) nounwind {
 ; RV64-ATOMIC-TRAILING-NEXT:    addi sp, sp, -16
 ; RV64-ATOMIC-TRAILING-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-ATOMIC-TRAILING-NEXT:    li a1, 1
-; RV64-ATOMIC-TRAILING-NEXT:    call __sync_lock_test_and_set_8@plt
+; RV64-ATOMIC-TRAILING-NEXT:    call __sync_lock_test_and_set_8
 ; RV64-ATOMIC-TRAILING-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-ATOMIC-TRAILING-NEXT:    addi sp, sp, 16
 ; RV64-ATOMIC-TRAILING-NEXT:    ret
@@ -3810,7 +3810,7 @@ define double @rmw64_fadd_seq_cst(ptr %p) nounwind {
 ; RV32-NEXT:    mv a0, s2
 ; RV32-NEXT:    mv a1, s1
 ; RV32-NEXT:    li a2, 0
-; RV32-NEXT:    call __adddf3@plt
+; RV32-NEXT:    call __adddf3
 ; RV32-NEXT:    mv a2, a0
 ; RV32-NEXT:    mv a3, a1
 ; RV32-NEXT:    sw s2, 8(sp)
@@ -3819,7 +3819,7 @@ define double @rmw64_fadd_seq_cst(ptr %p) nounwind {
 ; RV32-NEXT:    li a4, 5
 ; RV32-NEXT:    li a5, 5
 ; RV32-NEXT:    mv a0, s0
-; RV32-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32-NEXT:    call __atomic_compare_exchange_8
 ; RV32-NEXT:    lw s1, 12(sp)
 ; RV32-NEXT:    lw s2, 8(sp)
 ; RV32-NEXT:    beqz a0, .LBB54_1
@@ -3848,14 +3848,14 @@ define double @rmw64_fadd_seq_cst(ptr %p) nounwind {
 ; RV64-NO-ATOMIC-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64-NO-ATOMIC-NEXT:    mv a0, s2
 ; RV64-NO-ATOMIC-NEXT:    mv a1, s1
-; RV64-NO-ATOMIC-NEXT:    call __adddf3@plt
+; RV64-NO-ATOMIC-NEXT:    call __adddf3
 ; RV64-NO-ATOMIC-NEXT:    mv a2, a0
 ; RV64-NO-ATOMIC-NEXT:    sd s2, 8(sp)
 ; RV64-NO-ATOMIC-NEXT:    addi a1, sp, 8
 ; RV64-NO-ATOMIC-NEXT:    li a3, 5
 ; RV64-NO-ATOMIC-NEXT:    li a4, 5
 ; RV64-NO-ATOMIC-NEXT:    mv a0, s0
-; RV64-NO-ATOMIC-NEXT:    call __atomic_compare_exchange_8@plt
+; RV64-NO-ATOMIC-NEXT:    call __atomic_compare_exchange_8
 ; RV64-NO-ATOMIC-NEXT:    ld s2, 8(sp)
 ; RV64-NO-ATOMIC-NEXT:    beqz a0, .LBB54_1
 ; RV64-NO-ATOMIC-NEXT:  # %bb.2: # %atomicrmw.end
@@ -3882,11 +3882,11 @@ define double @rmw64_fadd_seq_cst(ptr %p) nounwind {
 ; RV64-ATOMIC-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64-ATOMIC-NEXT:    mv s2, a0
 ; RV64-ATOMIC-NEXT:    mv a1, s1
-; RV64-ATOMIC-NEXT:    call __adddf3@plt
+; RV64-ATOMIC-NEXT:    call __adddf3
 ; RV64-ATOMIC-NEXT:    mv a2, a0
 ; RV64-ATOMIC-NEXT:    mv a0, s0
 ; RV64-ATOMIC-NEXT:    mv a1, s2
-; RV64-ATOMIC-NEXT:    call __sync_val_compare_and_swap_8@plt
+; RV64-ATOMIC-NEXT:    call __sync_val_compare_and_swap_8
 ; RV64-ATOMIC-NEXT:    bne a0, s2, .LBB54_1
 ; RV64-ATOMIC-NEXT:  # %bb.2: # %atomicrmw.end
 ; RV64-ATOMIC-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
@@ -3911,11 +3911,11 @@ define double @rmw64_fadd_seq_cst(ptr %p) nounwind {
 ; RV64-ATOMIC-TRAILING-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64-ATOMIC-TRAILING-NEXT:    mv s2, a0
 ; RV64-ATOMIC-TRAILING-NEXT:    mv a1, s1
-; RV64-ATOMIC-TRAILING-NEXT:    call __adddf3@plt
+; RV64-ATOMIC-TRAILING-NEXT:    call __adddf3
 ; RV64-ATOMIC-TRAILING-NEXT:    mv a2, a0
 ; RV64-ATOMIC-TRAILING-NEXT:    mv a0, s0
 ; RV64-ATOMIC-TRAILING-NEXT:    mv a1, s2
-; RV64-ATOMIC-TRAILING-NEXT:    call __sync_val_compare_and_swap_8@plt
+; RV64-ATOMIC-TRAILING-NEXT:    call __sync_val_compare_and_swap_8
 ; RV64-ATOMIC-TRAILING-NEXT:    bne a0, s2, .LBB54_1
 ; RV64-ATOMIC-TRAILING-NEXT:  # %bb.2: # %atomicrmw.end
 ; RV64-ATOMIC-TRAILING-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
@@ -3945,7 +3945,7 @@ define double @rmw64_fsub_seq_cst(ptr %p) nounwind {
 ; RV32-NEXT:    mv a0, s2
 ; RV32-NEXT:    mv a1, s1
 ; RV32-NEXT:    li a2, 0
-; RV32-NEXT:    call __adddf3@plt
+; RV32-NEXT:    call __adddf3
 ; RV32-NEXT:    mv a2, a0
 ; RV32-NEXT:    mv a3, a1
 ; RV32-NEXT:    sw s2, 8(sp)
@@ -3954,7 +3954,7 @@ define double @rmw64_fsub_seq_cst(ptr %p) nounwind {
 ; RV32-NEXT:    li a4, 5
 ; RV32-NEXT:    li a5, 5
 ; RV32-NEXT:    mv a0, s0
-; RV32-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32-NEXT:    call __atomic_compare_exchange_8
 ; RV32-NEXT:    lw s1, 12(sp)
 ; RV32-NEXT:    lw s2, 8(sp)
 ; RV32-NEXT:    beqz a0, .LBB55_1
@@ -3983,14 +3983,14 @@ define double @rmw64_fsub_seq_cst(ptr %p) nounwind {
 ; RV64-NO-ATOMIC-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64-NO-ATOMIC-NEXT:    mv a0, s2
 ; RV64-NO-ATOMIC-NEXT:    mv a1, s1
-; RV64-NO-ATOMIC-NEXT:    call __adddf3@plt
+; RV64-NO-ATOMIC-NEXT:    call __adddf3
 ; RV64-NO-ATOMIC-NEXT:    mv a2, a0
 ; RV64-NO-ATOMIC-NEXT:    sd s2, 8(sp)
 ; RV64-NO-ATOMIC-NEXT:    addi a1, sp, 8
 ; RV64-NO-ATOMIC-NEXT:    li a3, 5
 ; RV64-NO-ATOMIC-NEXT:    li a4, 5
 ; RV64-NO-ATOMIC-NEXT:    mv a0, s0
-; RV64-NO-ATOMIC-NEXT:    call __atomic_compare_exchange_8@plt
+; RV64-NO-ATOMIC-NEXT:    call __atomic_compare_exchange_8
 ; RV64-NO-ATOMIC-NEXT:    ld s2, 8(sp)
 ; RV64-NO-ATOMIC-NEXT:    beqz a0, .LBB55_1
 ; RV64-NO-ATOMIC-NEXT:  # %bb.2: # %atomicrmw.end
@@ -4017,11 +4017,11 @@ define double @rmw64_fsub_seq_cst(ptr %p) nounwind {
 ; RV64-ATOMIC-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64-ATOMIC-NEXT:    mv s2, a0
 ; RV64-ATOMIC-NEXT:    mv a1, s1
-; RV64-ATOMIC-NEXT:    call __adddf3@plt
+; RV64-ATOMIC-NEXT:    call __adddf3
 ; RV64-ATOMIC-NEXT:    mv a2, a0
 ; RV64-ATOMIC-NEXT:    mv a0, s0
 ; RV64-ATOMIC-NEXT:    mv a1, s2
-; RV64-ATOMIC-NEXT:    call __sync_val_compare_and_swap_8@plt
+; RV64-ATOMIC-NEXT:    call __sync_val_compare_and_swap_8
 ; RV64-ATOMIC-NEXT:    bne a0, s2, .LBB55_1
 ; RV64-ATOMIC-NEXT:  # %bb.2: # %atomicrmw.end
 ; RV64-ATOMIC-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
@@ -4046,11 +4046,11 @@ define double @rmw64_fsub_seq_cst(ptr %p) nounwind {
 ; RV64-ATOMIC-TRAILING-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64-ATOMIC-TRAILING-NEXT:    mv s2, a0
 ; RV64-ATOMIC-TRAILING-NEXT:    mv a1, s1
-; RV64-ATOMIC-TRAILING-NEXT:    call __adddf3@plt
+; RV64-ATOMIC-TRAILING-NEXT:    call __adddf3
 ; RV64-ATOMIC-TRAILING-NEXT:    mv a2, a0
 ; RV64-ATOMIC-TRAILING-NEXT:    mv a0, s0
 ; RV64-ATOMIC-TRAILING-NEXT:    mv a1, s2
-; RV64-ATOMIC-TRAILING-NEXT:    call __sync_val_compare_and_swap_8@plt
+; RV64-ATOMIC-TRAILING-NEXT:    call __sync_val_compare_and_swap_8
 ; RV64-ATOMIC-TRAILING-NEXT:    bne a0, s2, .LBB55_1
 ; RV64-ATOMIC-TRAILING-NEXT:  # %bb.2: # %atomicrmw.end
 ; RV64-ATOMIC-TRAILING-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
@@ -4080,7 +4080,7 @@ define double @rmw64_fmin_seq_cst(ptr %p) nounwind {
 ; RV32-NEXT:    mv a0, s2
 ; RV32-NEXT:    mv a1, s1
 ; RV32-NEXT:    li a2, 0
-; RV32-NEXT:    call fmin@plt
+; RV32-NEXT:    call fmin
 ; RV32-NEXT:    mv a2, a0
 ; RV32-NEXT:    mv a3, a1
 ; RV32-NEXT:    sw s2, 8(sp)
@@ -4089,7 +4089,7 @@ define double @rmw64_fmin_seq_cst(ptr %p) nounwind {
 ; RV32-NEXT:    li a4, 5
 ; RV32-NEXT:    li a5, 5
 ; RV32-NEXT:    mv a0, s0
-; RV32-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32-NEXT:    call __atomic_compare_exchange_8
 ; RV32-NEXT:    lw s1, 12(sp)
 ; RV32-NEXT:    lw s2, 8(sp)
 ; RV32-NEXT:    beqz a0, .LBB56_1
@@ -4118,14 +4118,14 @@ define double @rmw64_fmin_seq_cst(ptr %p) nounwind {
 ; RV64-NO-ATOMIC-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64-NO-ATOMIC-NEXT:    mv a0, s2
 ; RV64-NO-ATOMIC-NEXT:    mv a1, s1
-; RV64-NO-ATOMIC-NEXT:    call fmin@plt
+; RV64-NO-ATOMIC-NEXT:    call fmin
 ; RV64-NO-ATOMIC-NEXT:    mv a2, a0
 ; RV64-NO-ATOMIC-NEXT:    sd s2, 8(sp)
 ; RV64-NO-ATOMIC-NEXT:    addi a1, sp, 8
 ; RV64-NO-ATOMIC-NEXT:    li a3, 5
 ; RV64-NO-ATOMIC-NEXT:    li a4, 5
 ; RV64-NO-ATOMIC-NEXT:    mv a0, s0
-; RV64-NO-ATOMIC-NEXT:    call __atomic_compare_exchange_8@plt
+; RV64-NO-ATOMIC-NEXT:    call __atomic_compare_exchange_8
 ; RV64-NO-ATOMIC-NEXT:    ld s2, 8(sp)
 ; RV64-NO-ATOMIC-NEXT:    beqz a0, .LBB56_1
 ; RV64-NO-ATOMIC-NEXT:  # %bb.2: # %atomicrmw.end
@@ -4152,11 +4152,11 @@ define double @rmw64_fmin_seq_cst(ptr %p) nounwind {
 ; RV64-ATOMIC-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64-ATOMIC-NEXT:    mv s2, a0
 ; RV64-ATOMIC-NEXT:    mv a1, s1
-; RV64-ATOMIC-NEXT:    call fmin@plt
+; RV64-ATOMIC-NEXT:    call fmin
 ; RV64-ATOMIC-NEXT:    mv a2, a0
 ; RV64-ATOMIC-NEXT:    mv a0, s0
 ; RV64-ATOMIC-NEXT:    mv a1, s2
-; RV64-ATOMIC-NEXT:    call __sync_val_compare_and_swap_8@plt
+; RV64-ATOMIC-NEXT:    call __sync_val_compare_and_swap_8
 ; RV64-ATOMIC-NEXT:    bne a0, s2, .LBB56_1
 ; RV64-ATOMIC-NEXT:  # %bb.2: # %atomicrmw.end
 ; RV64-ATOMIC-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
@@ -4181,11 +4181,11 @@ define double @rmw64_fmin_seq_cst(ptr %p) nounwind {
 ; RV64-ATOMIC-TRAILING-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64-ATOMIC-TRAILING-NEXT:    mv s2, a0
 ; RV64-ATOMIC-TRAILING-NEXT:    mv a1, s1
-; RV64-ATOMIC-TRAILING-NEXT:    call fmin@plt
+; RV64-ATOMIC-TRAILING-NEXT:    call fmin
 ; RV64-ATOMIC-TRAILING-NEXT:    mv a2, a0
 ; RV64-ATOMIC-TRAILING-NEXT:    mv a0, s0
 ; RV64-ATOMIC-TRAILING-NEXT:    mv a1, s2
-; RV64-ATOMIC-TRAILING-NEXT:    call __sync_val_compare_and_swap_8@plt
+; RV64-ATOMIC-TRAILING-NEXT:    call __sync_val_compare_and_swap_8
 ; RV64-ATOMIC-TRAILING-NEXT:    bne a0, s2, .LBB56_1
 ; RV64-ATOMIC-TRAILING-NEXT:  # %bb.2: # %atomicrmw.end
 ; RV64-ATOMIC-TRAILING-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
@@ -4215,7 +4215,7 @@ define double @rmw64_fmax_seq_cst(ptr %p) nounwind {
 ; RV32-NEXT:    mv a0, s2
 ; RV32-NEXT:    mv a1, s1
 ; RV32-NEXT:    li a2, 0
-; RV32-NEXT:    call fmax@plt
+; RV32-NEXT:    call fmax
 ; RV32-NEXT:    mv a2, a0
 ; RV32-NEXT:    mv a3, a1
 ; RV32-NEXT:    sw s2, 8(sp)
@@ -4224,7 +4224,7 @@ define double @rmw64_fmax_seq_cst(ptr %p) nounwind {
 ; RV32-NEXT:    li a4, 5
 ; RV32-NEXT:    li a5, 5
 ; RV32-NEXT:    mv a0, s0
-; RV32-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32-NEXT:    call __atomic_compare_exchange_8
 ; RV32-NEXT:    lw s1, 12(sp)
 ; RV32-NEXT:    lw s2, 8(sp)
 ; RV32-NEXT:    beqz a0, .LBB57_1
@@ -4253,14 +4253,14 @@ define double @rmw64_fmax_seq_cst(ptr %p) nounwind {
 ; RV64-NO-ATOMIC-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64-NO-ATOMIC-NEXT:    mv a0, s2
 ; RV64-NO-ATOMIC-NEXT:    mv a1, s1
-; RV64-NO-ATOMIC-NEXT:    call fmax@plt
+; RV64-NO-ATOMIC-NEXT:    call fmax
 ; RV64-NO-ATOMIC-NEXT:    mv a2, a0
 ; RV64-NO-ATOMIC-NEXT:    sd s2, 8(sp)
 ; RV64-NO-ATOMIC-NEXT:    addi a1, sp, 8
 ; RV64-NO-ATOMIC-NEXT:    li a3, 5
 ; RV64-NO-ATOMIC-NEXT:    li a4, 5
 ; RV64-NO-ATOMIC-NEXT:    mv a0, s0
-; RV64-NO-ATOMIC-NEXT:    call __atomic_compare_exchange_8@plt
+; RV64-NO-ATOMIC-NEXT:    call __atomic_compare_exchange_8
 ; RV64-NO-ATOMIC-NEXT:    ld s2, 8(sp)
 ; RV64-NO-ATOMIC-NEXT:    beqz a0, .LBB57_1
 ; RV64-NO-ATOMIC-NEXT:  # %bb.2: # %atomicrmw.end
@@ -4287,11 +4287,11 @@ define double @rmw64_fmax_seq_cst(ptr %p) nounwind {
 ; RV64-ATOMIC-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64-ATOMIC-NEXT:    mv s2, a0
 ; RV64-ATOMIC-NEXT:    mv a1, s1
-; RV64-ATOMIC-NEXT:    call fmax@plt
+; RV64-ATOMIC-NEXT:    call fmax
 ; RV64-ATOMIC-NEXT:    mv a2, a0
 ; RV64-ATOMIC-NEXT:    mv a0, s0
 ; RV64-ATOMIC-NEXT:    mv a1, s2
-; RV64-ATOMIC-NEXT:    call __sync_val_compare_and_swap_8@plt
+; RV64-ATOMIC-NEXT:    call __sync_val_compare_and_swap_8
 ; RV64-ATOMIC-NEXT:    bne a0, s2, .LBB57_1
 ; RV64-ATOMIC-NEXT:  # %bb.2: # %atomicrmw.end
 ; RV64-ATOMIC-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
@@ -4316,11 +4316,11 @@ define double @rmw64_fmax_seq_cst(ptr %p) nounwind {
 ; RV64-ATOMIC-TRAILING-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64-ATOMIC-TRAILING-NEXT:    mv s2, a0
 ; RV64-ATOMIC-TRAILING-NEXT:    mv a1, s1
-; RV64-ATOMIC-TRAILING-NEXT:    call fmax@plt
+; RV64-ATOMIC-TRAILING-NEXT:    call fmax
 ; RV64-ATOMIC-TRAILING-NEXT:    mv a2, a0
 ; RV64-ATOMIC-TRAILING-NEXT:    mv a0, s0
 ; RV64-ATOMIC-TRAILING-NEXT:    mv a1, s2
-; RV64-ATOMIC-TRAILING-NEXT:    call __sync_val_compare_and_swap_8@plt
+; RV64-ATOMIC-TRAILING-NEXT:    call __sync_val_compare_and_swap_8
 ; RV64-ATOMIC-TRAILING-NEXT:    bne a0, s2, .LBB57_1
 ; RV64-ATOMIC-TRAILING-NEXT:  # %bb.2: # %atomicrmw.end
 ; RV64-ATOMIC-TRAILING-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
@@ -4345,7 +4345,7 @@ define i64 @cmpxchg64_monotonic(ptr %p) nounwind {
 ; RV32-NEXT:    li a3, 0
 ; RV32-NEXT:    li a4, 0
 ; RV32-NEXT:    li a5, 0
-; RV32-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32-NEXT:    call __atomic_compare_exchange_8
 ; RV32-NEXT:    lw a1, 4(sp)
 ; RV32-NEXT:    lw a0, 0(sp)
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -4361,7 +4361,7 @@ define i64 @cmpxchg64_monotonic(ptr %p) nounwind {
 ; RV64-NO-ATOMIC-NEXT:    li a2, 1
 ; RV64-NO-ATOMIC-NEXT:    li a3, 0
 ; RV64-NO-ATOMIC-NEXT:    li a4, 0
-; RV64-NO-ATOMIC-NEXT:    call __atomic_compare_exchange_8@plt
+; RV64-NO-ATOMIC-NEXT:    call __atomic_compare_exchange_8
 ; RV64-NO-ATOMIC-NEXT:    ld a0, 0(sp)
 ; RV64-NO-ATOMIC-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-NO-ATOMIC-NEXT:    addi sp, sp, 16
@@ -4373,7 +4373,7 @@ define i64 @cmpxchg64_monotonic(ptr %p) nounwind {
 ; RV64-ATOMIC-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-ATOMIC-NEXT:    li a2, 1
 ; RV64-ATOMIC-NEXT:    li a1, 0
-; RV64-ATOMIC-NEXT:    call __sync_val_compare_and_swap_8@plt
+; RV64-ATOMIC-NEXT:    call __sync_val_compare_and_swap_8
 ; RV64-ATOMIC-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV64-ATOMIC-NEXT:    ret
@@ -4384,7 +4384,7 @@ define i64 @cmpxchg64_monotonic(ptr %p) nounwind {
 ; RV64-ATOMIC-TRAILING-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-ATOMIC-TRAILING-NEXT:    li a2, 1
 ; RV64-ATOMIC-TRAILING-NEXT:    li a1, 0
-; RV64-ATOMIC-TRAILING-NEXT:    call __sync_val_compare_and_swap_8@plt
+; RV64-ATOMIC-TRAILING-NEXT:    call __sync_val_compare_and_swap_8
 ; RV64-ATOMIC-TRAILING-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-ATOMIC-TRAILING-NEXT:    addi sp, sp, 16
 ; RV64-ATOMIC-TRAILING-NEXT:    ret
@@ -4405,7 +4405,7 @@ define i64 @cmpxchg64_seq_cst(ptr %p) nounwind {
 ; RV32-NEXT:    li a4, 5
 ; RV32-NEXT:    li a5, 5
 ; RV32-NEXT:    li a3, 0
-; RV32-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32-NEXT:    call __atomic_compare_exchange_8
 ; RV32-NEXT:    lw a1, 4(sp)
 ; RV32-NEXT:    lw a0, 0(sp)
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -4421,7 +4421,7 @@ define i64 @cmpxchg64_seq_cst(ptr %p) nounwind {
 ; RV64-NO-ATOMIC-NEXT:    li a2, 1
 ; RV64-NO-ATOMIC-NEXT:    li a3, 5
 ; RV64-NO-ATOMIC-NEXT:    li a4, 5
-; RV64-NO-ATOMIC-NEXT:    call __atomic_compare_exchange_8@plt
+; RV64-NO-ATOMIC-NEXT:    call __atomic_compare_exchange_8
 ; RV64-NO-ATOMIC-NEXT:    ld a0, 0(sp)
 ; RV64-NO-ATOMIC-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-NO-ATOMIC-NEXT:    addi sp, sp, 16
@@ -4433,7 +4433,7 @@ define i64 @cmpxchg64_seq_cst(ptr %p) nounwind {
 ; RV64-ATOMIC-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-ATOMIC-NEXT:    li a2, 1
 ; RV64-ATOMIC-NEXT:    li a1, 0
-; RV64-ATOMIC-NEXT:    call __sync_val_compare_and_swap_8@plt
+; RV64-ATOMIC-NEXT:    call __sync_val_compare_and_swap_8
 ; RV64-ATOMIC-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-ATOMIC-NEXT:    addi sp, sp, 16
 ; RV64-ATOMIC-NEXT:    ret
@@ -4444,7 +4444,7 @@ define i64 @cmpxchg64_seq_cst(ptr %p) nounwind {
 ; RV64-ATOMIC-TRAILING-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-ATOMIC-TRAILING-NEXT:    li a2, 1
 ; RV64-ATOMIC-TRAILING-NEXT:    li a1, 0
-; RV64-ATOMIC-TRAILING-NEXT:    call __sync_val_compare_and_swap_8@plt
+; RV64-ATOMIC-TRAILING-NEXT:    call __sync_val_compare_and_swap_8
 ; RV64-ATOMIC-TRAILING-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-ATOMIC-TRAILING-NEXT:    addi sp, sp, 16
 ; RV64-ATOMIC-TRAILING-NEXT:    ret
@@ -4463,7 +4463,7 @@ define i128 @load128(ptr %p) nounwind {
 ; RV32-NEXT:    li a0, 16
 ; RV32-NEXT:    addi a2, sp, 8
 ; RV32-NEXT:    li a3, 5
-; RV32-NEXT:    call __atomic_load@plt
+; RV32-NEXT:    call __atomic_load
 ; RV32-NEXT:    lw a0, 20(sp)
 ; RV32-NEXT:    lw a1, 16(sp)
 ; RV32-NEXT:    lw a2, 12(sp)
@@ -4482,7 +4482,7 @@ define i128 @load128(ptr %p) nounwind {
 ; RV64-NEXT:    addi sp, sp, -16
 ; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-NEXT:    li a1, 5
-; RV64-NEXT:    call __atomic_load_16@plt
+; RV64-NEXT:    call __atomic_load_16
 ; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    addi sp, sp, 16
 ; RV64-NEXT:    ret
@@ -4503,7 +4503,7 @@ define void @store128(ptr %p) nounwind {
 ; RV32-NEXT:    li a0, 16
 ; RV32-NEXT:    addi a2, sp, 8
 ; RV32-NEXT:    li a3, 5
-; RV32-NEXT:    call __atomic_store@plt
+; RV32-NEXT:    call __atomic_store
 ; RV32-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    addi sp, sp, 32
 ; RV32-NEXT:    ret
@@ -4515,7 +4515,7 @@ define void @store128(ptr %p) nounwind {
 ; RV64-NEXT:    li a3, 5
 ; RV64-NEXT:    li a1, 0
 ; RV64-NEXT:    li a2, 0
-; RV64-NEXT:    call __atomic_store_16@plt
+; RV64-NEXT:    call __atomic_store_16
 ; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    addi sp, sp, 16
 ; RV64-NEXT:    ret
@@ -4560,7 +4560,7 @@ define i128 @rmw128(ptr %p) nounwind {
 ; RV32-NEXT:    li a4, 5
 ; RV32-NEXT:    li a5, 5
 ; RV32-NEXT:    mv a1, s0
-; RV32-NEXT:    call __atomic_compare_exchange@plt
+; RV32-NEXT:    call __atomic_compare_exchange
 ; RV32-NEXT:    lw a1, 28(sp)
 ; RV32-NEXT:    lw a2, 24(sp)
 ; RV32-NEXT:    lw a3, 20(sp)
@@ -4584,7 +4584,7 @@ define i128 @rmw128(ptr %p) nounwind {
 ; RV64-NEXT:    li a1, 1
 ; RV64-NEXT:    li a3, 5
 ; RV64-NEXT:    li a2, 0
-; RV64-NEXT:    call __atomic_fetch_add_16@plt
+; RV64-NEXT:    call __atomic_fetch_add_16
 ; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    addi sp, sp, 16
 ; RV64-NEXT:    ret
@@ -4613,7 +4613,7 @@ define i128 @cmpxchg128(ptr %p) nounwind {
 ; RV32-NEXT:    addi a3, sp, 8
 ; RV32-NEXT:    li a4, 5
 ; RV32-NEXT:    li a5, 5
-; RV32-NEXT:    call __atomic_compare_exchange@plt
+; RV32-NEXT:    call __atomic_compare_exchange
 ; RV32-NEXT:    lw a0, 36(sp)
 ; RV32-NEXT:    lw a1, 32(sp)
 ; RV32-NEXT:    lw a2, 28(sp)
@@ -4638,7 +4638,7 @@ define i128 @cmpxchg128(ptr %p) nounwind {
 ; RV64-NEXT:    li a4, 5
 ; RV64-NEXT:    li a5, 5
 ; RV64-NEXT:    li a3, 0
-; RV64-NEXT:    call __atomic_compare_exchange_16@plt
+; RV64-NEXT:    call __atomic_compare_exchange_16
 ; RV64-NEXT:    ld a1, 8(sp)
 ; RV64-NEXT:    ld a0, 0(sp)
 ; RV64-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/fp128.ll b/llvm/test/CodeGen/RISCV/fp128.ll
index 77a7914cdca632..611a70c0ad8b93 100644
--- a/llvm/test/CodeGen/RISCV/fp128.ll
+++ b/llvm/test/CodeGen/RISCV/fp128.ll
@@ -33,7 +33,7 @@ define i32 @test_load_and_cmp() nounwind {
 ; RV32I-NEXT:    addi a0, sp, 24
 ; RV32I-NEXT:    addi a1, sp, 8
 ; RV32I-NEXT:    sw a2, 24(sp)
-; RV32I-NEXT:    call __netf2@plt
+; RV32I-NEXT:    call __netf2
 ; RV32I-NEXT:    snez a0, a0
 ; RV32I-NEXT:    lw ra, 44(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 48
@@ -71,7 +71,7 @@ define i32 @test_add_and_fptosi() nounwind {
 ; RV32I-NEXT:    addi a1, sp, 40
 ; RV32I-NEXT:    addi a2, sp, 24
 ; RV32I-NEXT:    sw a3, 40(sp)
-; RV32I-NEXT:    call __addtf3@plt
+; RV32I-NEXT:    call __addtf3
 ; RV32I-NEXT:    lw a1, 56(sp)
 ; RV32I-NEXT:    lw a0, 60(sp)
 ; RV32I-NEXT:    lw a2, 64(sp)
@@ -81,7 +81,7 @@ define i32 @test_add_and_fptosi() nounwind {
 ; RV32I-NEXT:    sw a0, 12(sp)
 ; RV32I-NEXT:    addi a0, sp, 8
 ; RV32I-NEXT:    sw a1, 8(sp)
-; RV32I-NEXT:    call __fixtfsi@plt
+; RV32I-NEXT:    call __fixtfsi
 ; RV32I-NEXT:    lw ra, 76(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 80
 ; RV32I-NEXT:    ret
diff --git a/llvm/test/CodeGen/RISCV/fp16-promote.ll b/llvm/test/CodeGen/RISCV/fp16-promote.ll
index 2a03746b1f7eda..31842316606cc8 100644
--- a/llvm/test/CodeGen/RISCV/fp16-promote.ll
+++ b/llvm/test/CodeGen/RISCV/fp16-promote.ll
@@ -19,7 +19,7 @@ define float @test_fpextend_float(ptr %p) nounwind {
 ; CHECK-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; CHECK-NEXT:    lhu a0, 0(a0)
 ; CHECK-NEXT:    fmv.w.x fa0, a0
-; CHECK-NEXT:    call __extendhfsf2@plt
+; CHECK-NEXT:    call __extendhfsf2
 ; CHECK-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; CHECK-NEXT:    addi sp, sp, 16
 ; CHECK-NEXT:    ret
@@ -35,7 +35,7 @@ define double @test_fpextend_double(ptr %p) nounwind {
 ; CHECK-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; CHECK-NEXT:    lhu a0, 0(a0)
 ; CHECK-NEXT:    fmv.w.x fa0, a0
-; CHECK-NEXT:    call __extendhfsf2@plt
+; CHECK-NEXT:    call __extendhfsf2
 ; CHECK-NEXT:    fcvt.d.s fa0, fa0
 ; CHECK-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; CHECK-NEXT:    addi sp, sp, 16
@@ -52,7 +52,7 @@ define void @test_fptrunc_float(float %f, ptr %p) nounwind {
 ; CHECK-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; CHECK-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; CHECK-NEXT:    mv s0, a0
-; CHECK-NEXT:    call __truncsfhf2@plt
+; CHECK-NEXT:    call __truncsfhf2
 ; CHECK-NEXT:    fmv.x.w a0, fa0
 ; CHECK-NEXT:    sh a0, 0(s0)
 ; CHECK-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -71,7 +71,7 @@ define void @test_fptrunc_double(double %d, ptr %p) nounwind {
 ; CHECK-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; CHECK-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; CHECK-NEXT:    mv s0, a0
-; CHECK-NEXT:    call __truncdfhf2@plt
+; CHECK-NEXT:    call __truncdfhf2
 ; CHECK-NEXT:    fmv.x.w a0, fa0
 ; CHECK-NEXT:    sh a0, 0(s0)
 ; CHECK-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -96,12 +96,12 @@ define void @test_fadd(ptr %p, ptr %q) nounwind {
 ; CHECK-NEXT:    lhu a1, 0(a1)
 ; CHECK-NEXT:    fmv.w.x fs0, a0
 ; CHECK-NEXT:    fmv.w.x fa0, a1
-; CHECK-NEXT:    call __extendhfsf2@plt
+; CHECK-NEXT:    call __extendhfsf2
 ; CHECK-NEXT:    fmv.s fs1, fa0
 ; CHECK-NEXT:    fmv.s fa0, fs0
-; CHECK-NEXT:    call __extendhfsf2@plt
+; CHECK-NEXT:    call __extendhfsf2
 ; CHECK-NEXT:    fadd.s fa0, fa0, fs1
-; CHECK-NEXT:    call __truncsfhf2@plt
+; CHECK-NEXT:    call __truncsfhf2
 ; CHECK-NEXT:    fmv.x.w a0, fa0
 ; CHECK-NEXT:    sh a0, 0(s0)
 ; CHECK-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
@@ -130,12 +130,12 @@ define void @test_fmul(ptr %p, ptr %q) nounwind {
 ; CHECK-NEXT:    lhu a1, 0(a1)
 ; CHECK-NEXT:    fmv.w.x fs0, a0
 ; CHECK-NEXT:    fmv.w.x fa0, a1
-; CHECK-NEXT:    call __extendhfsf2@plt
+; CHECK-NEXT:    call __extendhfsf2
 ; CHECK-NEXT:    fmv.s fs1, fa0
 ; CHECK-NEXT:    fmv.s fa0, fs0
-; CHECK-NEXT:    call __extendhfsf2@plt
+; CHECK-NEXT:    call __extendhfsf2
 ; CHECK-NEXT:    fmul.s fa0, fa0, fs1
-; CHECK-NEXT:    call __truncsfhf2@plt
+; CHECK-NEXT:    call __truncsfhf2
 ; CHECK-NEXT:    fmv.x.w a0, fa0
 ; CHECK-NEXT:    sh a0, 0(s0)
 ; CHECK-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/fpclamptosat.ll b/llvm/test/CodeGen/RISCV/fpclamptosat.ll
index b091b0613c0f30..3880ac9f4ec623 100644
--- a/llvm/test/CodeGen/RISCV/fpclamptosat.ll
+++ b/llvm/test/CodeGen/RISCV/fpclamptosat.ll
@@ -17,7 +17,7 @@ define i32 @stest_f64i32(double %x) {
 ; RV32IF-NEXT:    .cfi_def_cfa_offset 16
 ; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    .cfi_offset ra, -4
-; RV32IF-NEXT:    call __fixdfdi@plt
+; RV32IF-NEXT:    call __fixdfdi
 ; RV32IF-NEXT:    lui a2, 524288
 ; RV32IF-NEXT:    addi a3, a2, -1
 ; RV32IF-NEXT:    beqz a1, .LBB0_2
@@ -56,7 +56,7 @@ define i32 @stest_f64i32(double %x) {
 ; RV64IF-NEXT:    .cfi_def_cfa_offset 16
 ; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IF-NEXT:    .cfi_offset ra, -8
-; RV64IF-NEXT:    call __fixdfdi@plt
+; RV64IF-NEXT:    call __fixdfdi
 ; RV64IF-NEXT:    lui a1, 524288
 ; RV64IF-NEXT:    addiw a2, a1, -1
 ; RV64IF-NEXT:    blt a0, a2, .LBB0_2
@@ -113,7 +113,7 @@ define i32 @utest_f64i32(double %x) {
 ; RV32IF-NEXT:    .cfi_def_cfa_offset 16
 ; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    .cfi_offset ra, -4
-; RV32IF-NEXT:    call __fixunsdfdi@plt
+; RV32IF-NEXT:    call __fixunsdfdi
 ; RV32IF-NEXT:    sltiu a2, a0, -1
 ; RV32IF-NEXT:    seqz a1, a1
 ; RV32IF-NEXT:    and a1, a1, a2
@@ -129,7 +129,7 @@ define i32 @utest_f64i32(double %x) {
 ; RV64IF-NEXT:    .cfi_def_cfa_offset 16
 ; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IF-NEXT:    .cfi_offset ra, -8
-; RV64IF-NEXT:    call __fixunsdfdi@plt
+; RV64IF-NEXT:    call __fixunsdfdi
 ; RV64IF-NEXT:    li a1, -1
 ; RV64IF-NEXT:    srli a1, a1, 32
 ; RV64IF-NEXT:    bltu a0, a1, .LBB1_2
@@ -174,7 +174,7 @@ define i32 @ustest_f64i32(double %x) {
 ; RV32IF-NEXT:    .cfi_def_cfa_offset 16
 ; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    .cfi_offset ra, -4
-; RV32IF-NEXT:    call __fixdfdi@plt
+; RV32IF-NEXT:    call __fixdfdi
 ; RV32IF-NEXT:    beqz a1, .LBB2_2
 ; RV32IF-NEXT:  # %bb.1: # %entry
 ; RV32IF-NEXT:    slti a2, a1, 0
@@ -205,7 +205,7 @@ define i32 @ustest_f64i32(double %x) {
 ; RV64IF-NEXT:    .cfi_def_cfa_offset 16
 ; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IF-NEXT:    .cfi_offset ra, -8
-; RV64IF-NEXT:    call __fixdfdi@plt
+; RV64IF-NEXT:    call __fixdfdi
 ; RV64IF-NEXT:    li a1, -1
 ; RV64IF-NEXT:    srli a1, a1, 32
 ; RV64IF-NEXT:    blt a0, a1, .LBB2_2
@@ -355,8 +355,8 @@ define i32 @stest_f16i32(half %x) {
 ; RV32-NEXT:    .cfi_def_cfa_offset 16
 ; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    .cfi_offset ra, -4
-; RV32-NEXT:    call __extendhfsf2@plt
-; RV32-NEXT:    call __fixsfdi@plt
+; RV32-NEXT:    call __extendhfsf2
+; RV32-NEXT:    call __fixsfdi
 ; RV32-NEXT:    lui a2, 524288
 ; RV32-NEXT:    addi a3, a2, -1
 ; RV32-NEXT:    beqz a1, .LBB6_2
@@ -395,7 +395,7 @@ define i32 @stest_f16i32(half %x) {
 ; RV64-NEXT:    .cfi_def_cfa_offset 16
 ; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-NEXT:    .cfi_offset ra, -8
-; RV64-NEXT:    call __extendhfsf2@plt
+; RV64-NEXT:    call __extendhfsf2
 ; RV64-NEXT:    fcvt.l.s a0, fa0, rtz
 ; RV64-NEXT:    lui a1, 524288
 ; RV64-NEXT:    addiw a2, a1, -1
@@ -427,8 +427,8 @@ define i32 @utesth_f16i32(half %x) {
 ; RV32-NEXT:    .cfi_def_cfa_offset 16
 ; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    .cfi_offset ra, -4
-; RV32-NEXT:    call __extendhfsf2@plt
-; RV32-NEXT:    call __fixunssfdi@plt
+; RV32-NEXT:    call __extendhfsf2
+; RV32-NEXT:    call __fixunssfdi
 ; RV32-NEXT:    sltiu a2, a0, -1
 ; RV32-NEXT:    seqz a1, a1
 ; RV32-NEXT:    and a1, a1, a2
@@ -444,7 +444,7 @@ define i32 @utesth_f16i32(half %x) {
 ; RV64-NEXT:    .cfi_def_cfa_offset 16
 ; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-NEXT:    .cfi_offset ra, -8
-; RV64-NEXT:    call __extendhfsf2@plt
+; RV64-NEXT:    call __extendhfsf2
 ; RV64-NEXT:    fcvt.lu.s a0, fa0, rtz
 ; RV64-NEXT:    li a1, -1
 ; RV64-NEXT:    srli a1, a1, 32
@@ -470,8 +470,8 @@ define i32 @ustest_f16i32(half %x) {
 ; RV32-NEXT:    .cfi_def_cfa_offset 16
 ; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    .cfi_offset ra, -4
-; RV32-NEXT:    call __extendhfsf2@plt
-; RV32-NEXT:    call __fixsfdi@plt
+; RV32-NEXT:    call __extendhfsf2
+; RV32-NEXT:    call __fixsfdi
 ; RV32-NEXT:    beqz a1, .LBB8_2
 ; RV32-NEXT:  # %bb.1: # %entry
 ; RV32-NEXT:    slti a2, a1, 0
@@ -502,7 +502,7 @@ define i32 @ustest_f16i32(half %x) {
 ; RV64-NEXT:    .cfi_def_cfa_offset 16
 ; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-NEXT:    .cfi_offset ra, -8
-; RV64-NEXT:    call __extendhfsf2@plt
+; RV64-NEXT:    call __extendhfsf2
 ; RV64-NEXT:    fcvt.l.s a0, fa0, rtz
 ; RV64-NEXT:    li a1, -1
 ; RV64-NEXT:    srli a1, a1, 32
@@ -535,7 +535,7 @@ define i16 @stest_f64i16(double %x) {
 ; RV32IF-NEXT:    .cfi_def_cfa_offset 16
 ; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    .cfi_offset ra, -4
-; RV32IF-NEXT:    call __fixdfsi@plt
+; RV32IF-NEXT:    call __fixdfsi
 ; RV32IF-NEXT:    lui a1, 8
 ; RV32IF-NEXT:    addi a1, a1, -1
 ; RV32IF-NEXT:    blt a0, a1, .LBB9_2
@@ -557,7 +557,7 @@ define i16 @stest_f64i16(double %x) {
 ; RV64IF-NEXT:    .cfi_def_cfa_offset 16
 ; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IF-NEXT:    .cfi_offset ra, -8
-; RV64IF-NEXT:    call __fixdfsi@plt
+; RV64IF-NEXT:    call __fixdfsi
 ; RV64IF-NEXT:    lui a1, 8
 ; RV64IF-NEXT:    addiw a1, a1, -1
 ; RV64IF-NEXT:    blt a0, a1, .LBB9_2
@@ -627,7 +627,7 @@ define i16 @utest_f64i16(double %x) {
 ; RV32IF-NEXT:    .cfi_def_cfa_offset 16
 ; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    .cfi_offset ra, -4
-; RV32IF-NEXT:    call __fixunsdfsi@plt
+; RV32IF-NEXT:    call __fixunsdfsi
 ; RV32IF-NEXT:    lui a1, 16
 ; RV32IF-NEXT:    addi a1, a1, -1
 ; RV32IF-NEXT:    bltu a0, a1, .LBB10_2
@@ -644,7 +644,7 @@ define i16 @utest_f64i16(double %x) {
 ; RV64IF-NEXT:    .cfi_def_cfa_offset 16
 ; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IF-NEXT:    .cfi_offset ra, -8
-; RV64IF-NEXT:    call __fixunsdfsi@plt
+; RV64IF-NEXT:    call __fixunsdfsi
 ; RV64IF-NEXT:    lui a1, 16
 ; RV64IF-NEXT:    addiw a1, a1, -1
 ; RV64IF-NEXT:    bltu a0, a1, .LBB10_2
@@ -691,7 +691,7 @@ define i16 @ustest_f64i16(double %x) {
 ; RV32IF-NEXT:    .cfi_def_cfa_offset 16
 ; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    .cfi_offset ra, -4
-; RV32IF-NEXT:    call __fixdfsi@plt
+; RV32IF-NEXT:    call __fixdfsi
 ; RV32IF-NEXT:    lui a1, 16
 ; RV32IF-NEXT:    addi a1, a1, -1
 ; RV32IF-NEXT:    blt a0, a1, .LBB11_2
@@ -711,7 +711,7 @@ define i16 @ustest_f64i16(double %x) {
 ; RV64IF-NEXT:    .cfi_def_cfa_offset 16
 ; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IF-NEXT:    .cfi_offset ra, -8
-; RV64IF-NEXT:    call __fixdfsi@plt
+; RV64IF-NEXT:    call __fixdfsi
 ; RV64IF-NEXT:    lui a1, 16
 ; RV64IF-NEXT:    addiw a1, a1, -1
 ; RV64IF-NEXT:    blt a0, a1, .LBB11_2
@@ -885,7 +885,7 @@ define i16 @stest_f16i16(half %x) {
 ; RV32-NEXT:    .cfi_def_cfa_offset 16
 ; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    .cfi_offset ra, -4
-; RV32-NEXT:    call __extendhfsf2@plt
+; RV32-NEXT:    call __extendhfsf2
 ; RV32-NEXT:    fcvt.w.s a0, fa0, rtz
 ; RV32-NEXT:    lui a1, 8
 ; RV32-NEXT:    addi a1, a1, -1
@@ -908,7 +908,7 @@ define i16 @stest_f16i16(half %x) {
 ; RV64-NEXT:    .cfi_def_cfa_offset 16
 ; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-NEXT:    .cfi_offset ra, -8
-; RV64-NEXT:    call __extendhfsf2@plt
+; RV64-NEXT:    call __extendhfsf2
 ; RV64-NEXT:    fcvt.l.s a0, fa0, rtz
 ; RV64-NEXT:    lui a1, 8
 ; RV64-NEXT:    addiw a1, a1, -1
@@ -941,7 +941,7 @@ define i16 @utesth_f16i16(half %x) {
 ; RV32-NEXT:    .cfi_def_cfa_offset 16
 ; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    .cfi_offset ra, -4
-; RV32-NEXT:    call __extendhfsf2@plt
+; RV32-NEXT:    call __extendhfsf2
 ; RV32-NEXT:    fcvt.wu.s a0, fa0, rtz
 ; RV32-NEXT:    lui a1, 16
 ; RV32-NEXT:    addi a1, a1, -1
@@ -959,7 +959,7 @@ define i16 @utesth_f16i16(half %x) {
 ; RV64-NEXT:    .cfi_def_cfa_offset 16
 ; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-NEXT:    .cfi_offset ra, -8
-; RV64-NEXT:    call __extendhfsf2@plt
+; RV64-NEXT:    call __extendhfsf2
 ; RV64-NEXT:    fcvt.lu.s a0, fa0, rtz
 ; RV64-NEXT:    lui a1, 16
 ; RV64-NEXT:    addiw a1, a1, -1
@@ -985,7 +985,7 @@ define i16 @ustest_f16i16(half %x) {
 ; RV32-NEXT:    .cfi_def_cfa_offset 16
 ; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    .cfi_offset ra, -4
-; RV32-NEXT:    call __extendhfsf2@plt
+; RV32-NEXT:    call __extendhfsf2
 ; RV32-NEXT:    fcvt.w.s a0, fa0, rtz
 ; RV32-NEXT:    lui a1, 16
 ; RV32-NEXT:    addi a1, a1, -1
@@ -1006,7 +1006,7 @@ define i16 @ustest_f16i16(half %x) {
 ; RV64-NEXT:    .cfi_def_cfa_offset 16
 ; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-NEXT:    .cfi_offset ra, -8
-; RV64-NEXT:    call __extendhfsf2@plt
+; RV64-NEXT:    call __extendhfsf2
 ; RV64-NEXT:    fcvt.l.s a0, fa0, rtz
 ; RV64-NEXT:    lui a1, 16
 ; RV64-NEXT:    addiw a1, a1, -1
@@ -1042,7 +1042,7 @@ define i64 @stest_f64i64(double %x) {
 ; RV32IF-NEXT:    mv a2, a1
 ; RV32IF-NEXT:    mv a1, a0
 ; RV32IF-NEXT:    addi a0, sp, 8
-; RV32IF-NEXT:    call __fixdfti@plt
+; RV32IF-NEXT:    call __fixdfti
 ; RV32IF-NEXT:    lw a0, 20(sp)
 ; RV32IF-NEXT:    lw a2, 16(sp)
 ; RV32IF-NEXT:    lw a1, 12(sp)
@@ -1101,7 +1101,7 @@ define i64 @stest_f64i64(double %x) {
 ; RV64IF-NEXT:    .cfi_def_cfa_offset 16
 ; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IF-NEXT:    .cfi_offset ra, -8
-; RV64IF-NEXT:    call __fixdfti@plt
+; RV64IF-NEXT:    call __fixdfti
 ; RV64IF-NEXT:    li a2, -1
 ; RV64IF-NEXT:    srli a3, a2, 1
 ; RV64IF-NEXT:    beqz a1, .LBB18_2
@@ -1141,7 +1141,7 @@ define i64 @stest_f64i64(double %x) {
 ; RV32IFD-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
 ; RV32IFD-NEXT:    .cfi_offset ra, -4
 ; RV32IFD-NEXT:    addi a0, sp, 8
-; RV32IFD-NEXT:    call __fixdfti@plt
+; RV32IFD-NEXT:    call __fixdfti
 ; RV32IFD-NEXT:    lw a0, 20(sp)
 ; RV32IFD-NEXT:    lw a2, 16(sp)
 ; RV32IFD-NEXT:    lw a1, 12(sp)
@@ -1222,7 +1222,7 @@ define i64 @utest_f64i64(double %x) {
 ; RV32IF-NEXT:    mv a2, a1
 ; RV32IF-NEXT:    mv a1, a0
 ; RV32IF-NEXT:    addi a0, sp, 8
-; RV32IF-NEXT:    call __fixunsdfti@plt
+; RV32IF-NEXT:    call __fixunsdfti
 ; RV32IF-NEXT:    lw a0, 16(sp)
 ; RV32IF-NEXT:    lw a1, 20(sp)
 ; RV32IF-NEXT:    lw a2, 12(sp)
@@ -1247,7 +1247,7 @@ define i64 @utest_f64i64(double %x) {
 ; RV64-NEXT:    .cfi_def_cfa_offset 16
 ; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-NEXT:    .cfi_offset ra, -8
-; RV64-NEXT:    call __fixunsdfti@plt
+; RV64-NEXT:    call __fixunsdfti
 ; RV64-NEXT:    snez a1, a1
 ; RV64-NEXT:    addi a1, a1, -1
 ; RV64-NEXT:    and a0, a1, a0
@@ -1262,7 +1262,7 @@ define i64 @utest_f64i64(double %x) {
 ; RV32IFD-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
 ; RV32IFD-NEXT:    .cfi_offset ra, -4
 ; RV32IFD-NEXT:    addi a0, sp, 8
-; RV32IFD-NEXT:    call __fixunsdfti@plt
+; RV32IFD-NEXT:    call __fixunsdfti
 ; RV32IFD-NEXT:    lw a0, 16(sp)
 ; RV32IFD-NEXT:    lw a1, 20(sp)
 ; RV32IFD-NEXT:    lw a2, 12(sp)
@@ -1298,7 +1298,7 @@ define i64 @ustest_f64i64(double %x) {
 ; RV32IF-NEXT:    mv a2, a1
 ; RV32IF-NEXT:    mv a1, a0
 ; RV32IF-NEXT:    addi a0, sp, 8
-; RV32IF-NEXT:    call __fixdfti@plt
+; RV32IF-NEXT:    call __fixdfti
 ; RV32IF-NEXT:    lw a1, 20(sp)
 ; RV32IF-NEXT:    lw a0, 16(sp)
 ; RV32IF-NEXT:    beqz a1, .LBB20_2
@@ -1349,7 +1349,7 @@ define i64 @ustest_f64i64(double %x) {
 ; RV64-NEXT:    .cfi_def_cfa_offset 16
 ; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-NEXT:    .cfi_offset ra, -8
-; RV64-NEXT:    call __fixdfti@plt
+; RV64-NEXT:    call __fixdfti
 ; RV64-NEXT:    slti a2, a1, 1
 ; RV64-NEXT:    blez a1, .LBB20_2
 ; RV64-NEXT:  # %bb.1: # %entry
@@ -1377,7 +1377,7 @@ define i64 @ustest_f64i64(double %x) {
 ; RV32IFD-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
 ; RV32IFD-NEXT:    .cfi_offset ra, -4
 ; RV32IFD-NEXT:    addi a0, sp, 8
-; RV32IFD-NEXT:    call __fixdfti@plt
+; RV32IFD-NEXT:    call __fixdfti
 ; RV32IFD-NEXT:    lw a1, 20(sp)
 ; RV32IFD-NEXT:    lw a0, 16(sp)
 ; RV32IFD-NEXT:    beqz a1, .LBB20_2
@@ -1439,7 +1439,7 @@ define i64 @stest_f32i64(float %x) {
 ; RV32-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    .cfi_offset ra, -4
 ; RV32-NEXT:    addi a0, sp, 8
-; RV32-NEXT:    call __fixsfti@plt
+; RV32-NEXT:    call __fixsfti
 ; RV32-NEXT:    lw a0, 20(sp)
 ; RV32-NEXT:    lw a2, 16(sp)
 ; RV32-NEXT:    lw a1, 12(sp)
@@ -1518,7 +1518,7 @@ define i64 @utest_f32i64(float %x) {
 ; RV32-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    .cfi_offset ra, -4
 ; RV32-NEXT:    addi a0, sp, 8
-; RV32-NEXT:    call __fixunssfti@plt
+; RV32-NEXT:    call __fixunssfti
 ; RV32-NEXT:    lw a0, 16(sp)
 ; RV32-NEXT:    lw a1, 20(sp)
 ; RV32-NEXT:    lw a2, 12(sp)
@@ -1543,7 +1543,7 @@ define i64 @utest_f32i64(float %x) {
 ; RV64-NEXT:    .cfi_def_cfa_offset 16
 ; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-NEXT:    .cfi_offset ra, -8
-; RV64-NEXT:    call __fixunssfti@plt
+; RV64-NEXT:    call __fixunssfti
 ; RV64-NEXT:    snez a1, a1
 ; RV64-NEXT:    addi a1, a1, -1
 ; RV64-NEXT:    and a0, a1, a0
@@ -1566,7 +1566,7 @@ define i64 @ustest_f32i64(float %x) {
 ; RV32-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    .cfi_offset ra, -4
 ; RV32-NEXT:    addi a0, sp, 8
-; RV32-NEXT:    call __fixsfti@plt
+; RV32-NEXT:    call __fixsfti
 ; RV32-NEXT:    lw a1, 20(sp)
 ; RV32-NEXT:    lw a0, 16(sp)
 ; RV32-NEXT:    beqz a1, .LBB23_2
@@ -1617,7 +1617,7 @@ define i64 @ustest_f32i64(float %x) {
 ; RV64-NEXT:    .cfi_def_cfa_offset 16
 ; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-NEXT:    .cfi_offset ra, -8
-; RV64-NEXT:    call __fixsfti@plt
+; RV64-NEXT:    call __fixsfti
 ; RV64-NEXT:    slti a2, a1, 1
 ; RV64-NEXT:    blez a1, .LBB23_2
 ; RV64-NEXT:  # %bb.1: # %entry
@@ -1654,9 +1654,9 @@ define i64 @stest_f16i64(half %x) {
 ; RV32-NEXT:    .cfi_def_cfa_offset 32
 ; RV32-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    .cfi_offset ra, -4
-; RV32-NEXT:    call __extendhfsf2@plt
+; RV32-NEXT:    call __extendhfsf2
 ; RV32-NEXT:    addi a0, sp, 8
-; RV32-NEXT:    call __fixsfti@plt
+; RV32-NEXT:    call __fixsfti
 ; RV32-NEXT:    lw a0, 20(sp)
 ; RV32-NEXT:    lw a2, 16(sp)
 ; RV32-NEXT:    lw a1, 12(sp)
@@ -1715,8 +1715,8 @@ define i64 @stest_f16i64(half %x) {
 ; RV64-NEXT:    .cfi_def_cfa_offset 16
 ; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-NEXT:    .cfi_offset ra, -8
-; RV64-NEXT:    call __extendhfsf2@plt
-; RV64-NEXT:    call __fixsfti@plt
+; RV64-NEXT:    call __extendhfsf2
+; RV64-NEXT:    call __fixsfti
 ; RV64-NEXT:    li a2, -1
 ; RV64-NEXT:    srli a3, a2, 1
 ; RV64-NEXT:    beqz a1, .LBB24_2
@@ -1765,9 +1765,9 @@ define i64 @utesth_f16i64(half %x) {
 ; RV32-NEXT:    .cfi_def_cfa_offset 32
 ; RV32-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    .cfi_offset ra, -4
-; RV32-NEXT:    call __extendhfsf2@plt
+; RV32-NEXT:    call __extendhfsf2
 ; RV32-NEXT:    addi a0, sp, 8
-; RV32-NEXT:    call __fixunssfti@plt
+; RV32-NEXT:    call __fixunssfti
 ; RV32-NEXT:    lw a0, 16(sp)
 ; RV32-NEXT:    lw a1, 20(sp)
 ; RV32-NEXT:    lw a2, 12(sp)
@@ -1792,8 +1792,8 @@ define i64 @utesth_f16i64(half %x) {
 ; RV64-NEXT:    .cfi_def_cfa_offset 16
 ; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-NEXT:    .cfi_offset ra, -8
-; RV64-NEXT:    call __extendhfsf2@plt
-; RV64-NEXT:    call __fixunssfti@plt
+; RV64-NEXT:    call __extendhfsf2
+; RV64-NEXT:    call __fixunssfti
 ; RV64-NEXT:    snez a1, a1
 ; RV64-NEXT:    addi a1, a1, -1
 ; RV64-NEXT:    and a0, a1, a0
@@ -1815,9 +1815,9 @@ define i64 @ustest_f16i64(half %x) {
 ; RV32-NEXT:    .cfi_def_cfa_offset 32
 ; RV32-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    .cfi_offset ra, -4
-; RV32-NEXT:    call __extendhfsf2@plt
+; RV32-NEXT:    call __extendhfsf2
 ; RV32-NEXT:    addi a0, sp, 8
-; RV32-NEXT:    call __fixsfti@plt
+; RV32-NEXT:    call __fixsfti
 ; RV32-NEXT:    lw a1, 20(sp)
 ; RV32-NEXT:    lw a0, 16(sp)
 ; RV32-NEXT:    beqz a1, .LBB26_2
@@ -1868,8 +1868,8 @@ define i64 @ustest_f16i64(half %x) {
 ; RV64-NEXT:    .cfi_def_cfa_offset 16
 ; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-NEXT:    .cfi_offset ra, -8
-; RV64-NEXT:    call __extendhfsf2@plt
-; RV64-NEXT:    call __fixsfti@plt
+; RV64-NEXT:    call __extendhfsf2
+; RV64-NEXT:    call __fixsfti
 ; RV64-NEXT:    slti a2, a1, 1
 ; RV64-NEXT:    blez a1, .LBB26_2
 ; RV64-NEXT:  # %bb.1: # %entry
@@ -1911,7 +1911,7 @@ define i32 @stest_f64i32_mm(double %x) {
 ; RV32IF-NEXT:    .cfi_def_cfa_offset 16
 ; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    .cfi_offset ra, -4
-; RV32IF-NEXT:    call __fixdfdi@plt
+; RV32IF-NEXT:    call __fixdfdi
 ; RV32IF-NEXT:    lui a2, 524288
 ; RV32IF-NEXT:    addi a3, a2, -1
 ; RV32IF-NEXT:    beqz a1, .LBB27_2
@@ -1950,7 +1950,7 @@ define i32 @stest_f64i32_mm(double %x) {
 ; RV64IF-NEXT:    .cfi_def_cfa_offset 16
 ; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IF-NEXT:    .cfi_offset ra, -8
-; RV64IF-NEXT:    call __fixdfdi@plt
+; RV64IF-NEXT:    call __fixdfdi
 ; RV64IF-NEXT:    lui a1, 524288
 ; RV64IF-NEXT:    addiw a2, a1, -1
 ; RV64IF-NEXT:    blt a0, a2, .LBB27_2
@@ -2005,7 +2005,7 @@ define i32 @utest_f64i32_mm(double %x) {
 ; RV32IF-NEXT:    .cfi_def_cfa_offset 16
 ; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    .cfi_offset ra, -4
-; RV32IF-NEXT:    call __fixunsdfdi@plt
+; RV32IF-NEXT:    call __fixunsdfdi
 ; RV32IF-NEXT:    seqz a1, a1
 ; RV32IF-NEXT:    addi a1, a1, -1
 ; RV32IF-NEXT:    or a0, a1, a0
@@ -2019,7 +2019,7 @@ define i32 @utest_f64i32_mm(double %x) {
 ; RV64IF-NEXT:    .cfi_def_cfa_offset 16
 ; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IF-NEXT:    .cfi_offset ra, -8
-; RV64IF-NEXT:    call __fixunsdfdi@plt
+; RV64IF-NEXT:    call __fixunsdfdi
 ; RV64IF-NEXT:    li a1, -1
 ; RV64IF-NEXT:    srli a1, a1, 32
 ; RV64IF-NEXT:    bltu a0, a1, .LBB28_2
@@ -2063,7 +2063,7 @@ define i32 @ustest_f64i32_mm(double %x) {
 ; RV32IF-NEXT:    .cfi_def_cfa_offset 16
 ; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    .cfi_offset ra, -4
-; RV32IF-NEXT:    call __fixdfdi@plt
+; RV32IF-NEXT:    call __fixdfdi
 ; RV32IF-NEXT:    bnez a1, .LBB29_2
 ; RV32IF-NEXT:  # %bb.1: # %entry
 ; RV32IF-NEXT:    li a2, 1
@@ -2088,7 +2088,7 @@ define i32 @ustest_f64i32_mm(double %x) {
 ; RV64IF-NEXT:    .cfi_def_cfa_offset 16
 ; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IF-NEXT:    .cfi_offset ra, -8
-; RV64IF-NEXT:    call __fixdfdi@plt
+; RV64IF-NEXT:    call __fixdfdi
 ; RV64IF-NEXT:    li a1, -1
 ; RV64IF-NEXT:    srli a1, a1, 32
 ; RV64IF-NEXT:    blt a0, a1, .LBB29_2
@@ -2231,8 +2231,8 @@ define i32 @stest_f16i32_mm(half %x) {
 ; RV32-NEXT:    .cfi_def_cfa_offset 16
 ; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    .cfi_offset ra, -4
-; RV32-NEXT:    call __extendhfsf2@plt
-; RV32-NEXT:    call __fixsfdi@plt
+; RV32-NEXT:    call __extendhfsf2
+; RV32-NEXT:    call __fixsfdi
 ; RV32-NEXT:    lui a2, 524288
 ; RV32-NEXT:    addi a3, a2, -1
 ; RV32-NEXT:    beqz a1, .LBB33_2
@@ -2271,7 +2271,7 @@ define i32 @stest_f16i32_mm(half %x) {
 ; RV64-NEXT:    .cfi_def_cfa_offset 16
 ; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-NEXT:    .cfi_offset ra, -8
-; RV64-NEXT:    call __extendhfsf2@plt
+; RV64-NEXT:    call __extendhfsf2
 ; RV64-NEXT:    fcvt.l.s a0, fa0, rtz
 ; RV64-NEXT:    lui a1, 524288
 ; RV64-NEXT:    addiw a2, a1, -1
@@ -2301,8 +2301,8 @@ define i32 @utesth_f16i32_mm(half %x) {
 ; RV32-NEXT:    .cfi_def_cfa_offset 16
 ; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    .cfi_offset ra, -4
-; RV32-NEXT:    call __extendhfsf2@plt
-; RV32-NEXT:    call __fixunssfdi@plt
+; RV32-NEXT:    call __extendhfsf2
+; RV32-NEXT:    call __fixunssfdi
 ; RV32-NEXT:    seqz a1, a1
 ; RV32-NEXT:    addi a1, a1, -1
 ; RV32-NEXT:    or a0, a1, a0
@@ -2316,7 +2316,7 @@ define i32 @utesth_f16i32_mm(half %x) {
 ; RV64-NEXT:    .cfi_def_cfa_offset 16
 ; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-NEXT:    .cfi_offset ra, -8
-; RV64-NEXT:    call __extendhfsf2@plt
+; RV64-NEXT:    call __extendhfsf2
 ; RV64-NEXT:    fcvt.lu.s a0, fa0, rtz
 ; RV64-NEXT:    li a1, -1
 ; RV64-NEXT:    srli a1, a1, 32
@@ -2341,8 +2341,8 @@ define i32 @ustest_f16i32_mm(half %x) {
 ; RV32-NEXT:    .cfi_def_cfa_offset 16
 ; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    .cfi_offset ra, -4
-; RV32-NEXT:    call __extendhfsf2@plt
-; RV32-NEXT:    call __fixsfdi@plt
+; RV32-NEXT:    call __extendhfsf2
+; RV32-NEXT:    call __fixsfdi
 ; RV32-NEXT:    bnez a1, .LBB35_2
 ; RV32-NEXT:  # %bb.1: # %entry
 ; RV32-NEXT:    li a2, 1
@@ -2367,7 +2367,7 @@ define i32 @ustest_f16i32_mm(half %x) {
 ; RV64-NEXT:    .cfi_def_cfa_offset 16
 ; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-NEXT:    .cfi_offset ra, -8
-; RV64-NEXT:    call __extendhfsf2@plt
+; RV64-NEXT:    call __extendhfsf2
 ; RV64-NEXT:    fcvt.l.s a0, fa0, rtz
 ; RV64-NEXT:    li a1, -1
 ; RV64-NEXT:    srli a1, a1, 32
@@ -2398,7 +2398,7 @@ define i16 @stest_f64i16_mm(double %x) {
 ; RV32IF-NEXT:    .cfi_def_cfa_offset 16
 ; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    .cfi_offset ra, -4
-; RV32IF-NEXT:    call __fixdfsi@plt
+; RV32IF-NEXT:    call __fixdfsi
 ; RV32IF-NEXT:    lui a1, 8
 ; RV32IF-NEXT:    addi a1, a1, -1
 ; RV32IF-NEXT:    blt a0, a1, .LBB36_2
@@ -2420,7 +2420,7 @@ define i16 @stest_f64i16_mm(double %x) {
 ; RV64IF-NEXT:    .cfi_def_cfa_offset 16
 ; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IF-NEXT:    .cfi_offset ra, -8
-; RV64IF-NEXT:    call __fixdfsi@plt
+; RV64IF-NEXT:    call __fixdfsi
 ; RV64IF-NEXT:    lui a1, 8
 ; RV64IF-NEXT:    addiw a1, a1, -1
 ; RV64IF-NEXT:    blt a0, a1, .LBB36_2
@@ -2488,7 +2488,7 @@ define i16 @utest_f64i16_mm(double %x) {
 ; RV32IF-NEXT:    .cfi_def_cfa_offset 16
 ; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    .cfi_offset ra, -4
-; RV32IF-NEXT:    call __fixunsdfsi@plt
+; RV32IF-NEXT:    call __fixunsdfsi
 ; RV32IF-NEXT:    lui a1, 16
 ; RV32IF-NEXT:    addi a1, a1, -1
 ; RV32IF-NEXT:    bltu a0, a1, .LBB37_2
@@ -2505,7 +2505,7 @@ define i16 @utest_f64i16_mm(double %x) {
 ; RV64IF-NEXT:    .cfi_def_cfa_offset 16
 ; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IF-NEXT:    .cfi_offset ra, -8
-; RV64IF-NEXT:    call __fixunsdfsi@plt
+; RV64IF-NEXT:    call __fixunsdfsi
 ; RV64IF-NEXT:    lui a1, 16
 ; RV64IF-NEXT:    addiw a1, a1, -1
 ; RV64IF-NEXT:    bltu a0, a1, .LBB37_2
@@ -2551,7 +2551,7 @@ define i16 @ustest_f64i16_mm(double %x) {
 ; RV32IF-NEXT:    .cfi_def_cfa_offset 16
 ; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    .cfi_offset ra, -4
-; RV32IF-NEXT:    call __fixdfsi@plt
+; RV32IF-NEXT:    call __fixdfsi
 ; RV32IF-NEXT:    lui a1, 16
 ; RV32IF-NEXT:    addi a1, a1, -1
 ; RV32IF-NEXT:    blt a0, a1, .LBB38_2
@@ -2571,7 +2571,7 @@ define i16 @ustest_f64i16_mm(double %x) {
 ; RV64IF-NEXT:    .cfi_def_cfa_offset 16
 ; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IF-NEXT:    .cfi_offset ra, -8
-; RV64IF-NEXT:    call __fixdfsi@plt
+; RV64IF-NEXT:    call __fixdfsi
 ; RV64IF-NEXT:    lui a1, 16
 ; RV64IF-NEXT:    addiw a1, a1, -1
 ; RV64IF-NEXT:    blt a0, a1, .LBB38_2
@@ -2738,7 +2738,7 @@ define i16 @stest_f16i16_mm(half %x) {
 ; RV32-NEXT:    .cfi_def_cfa_offset 16
 ; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    .cfi_offset ra, -4
-; RV32-NEXT:    call __extendhfsf2@plt
+; RV32-NEXT:    call __extendhfsf2
 ; RV32-NEXT:    fcvt.w.s a0, fa0, rtz
 ; RV32-NEXT:    lui a1, 8
 ; RV32-NEXT:    addi a1, a1, -1
@@ -2761,7 +2761,7 @@ define i16 @stest_f16i16_mm(half %x) {
 ; RV64-NEXT:    .cfi_def_cfa_offset 16
 ; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-NEXT:    .cfi_offset ra, -8
-; RV64-NEXT:    call __extendhfsf2@plt
+; RV64-NEXT:    call __extendhfsf2
 ; RV64-NEXT:    fcvt.l.s a0, fa0, rtz
 ; RV64-NEXT:    lui a1, 8
 ; RV64-NEXT:    addiw a1, a1, -1
@@ -2792,7 +2792,7 @@ define i16 @utesth_f16i16_mm(half %x) {
 ; RV32-NEXT:    .cfi_def_cfa_offset 16
 ; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    .cfi_offset ra, -4
-; RV32-NEXT:    call __extendhfsf2@plt
+; RV32-NEXT:    call __extendhfsf2
 ; RV32-NEXT:    fcvt.wu.s a0, fa0, rtz
 ; RV32-NEXT:    lui a1, 16
 ; RV32-NEXT:    addi a1, a1, -1
@@ -2810,7 +2810,7 @@ define i16 @utesth_f16i16_mm(half %x) {
 ; RV64-NEXT:    .cfi_def_cfa_offset 16
 ; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-NEXT:    .cfi_offset ra, -8
-; RV64-NEXT:    call __extendhfsf2@plt
+; RV64-NEXT:    call __extendhfsf2
 ; RV64-NEXT:    fcvt.lu.s a0, fa0, rtz
 ; RV64-NEXT:    sext.w a0, a0
 ; RV64-NEXT:    lui a1, 16
@@ -2836,7 +2836,7 @@ define i16 @ustest_f16i16_mm(half %x) {
 ; RV32-NEXT:    .cfi_def_cfa_offset 16
 ; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    .cfi_offset ra, -4
-; RV32-NEXT:    call __extendhfsf2@plt
+; RV32-NEXT:    call __extendhfsf2
 ; RV32-NEXT:    fcvt.w.s a0, fa0, rtz
 ; RV32-NEXT:    lui a1, 16
 ; RV32-NEXT:    addi a1, a1, -1
@@ -2857,7 +2857,7 @@ define i16 @ustest_f16i16_mm(half %x) {
 ; RV64-NEXT:    .cfi_def_cfa_offset 16
 ; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-NEXT:    .cfi_offset ra, -8
-; RV64-NEXT:    call __extendhfsf2@plt
+; RV64-NEXT:    call __extendhfsf2
 ; RV64-NEXT:    fcvt.l.s a0, fa0, rtz
 ; RV64-NEXT:    lui a1, 16
 ; RV64-NEXT:    addiw a1, a1, -1
@@ -2891,7 +2891,7 @@ define i64 @stest_f64i64_mm(double %x) {
 ; RV32IF-NEXT:    mv a2, a1
 ; RV32IF-NEXT:    mv a1, a0
 ; RV32IF-NEXT:    addi a0, sp, 8
-; RV32IF-NEXT:    call __fixdfti@plt
+; RV32IF-NEXT:    call __fixdfti
 ; RV32IF-NEXT:    lw a0, 20(sp)
 ; RV32IF-NEXT:    lw a2, 16(sp)
 ; RV32IF-NEXT:    lw a1, 12(sp)
@@ -2950,7 +2950,7 @@ define i64 @stest_f64i64_mm(double %x) {
 ; RV64IF-NEXT:    .cfi_def_cfa_offset 16
 ; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IF-NEXT:    .cfi_offset ra, -8
-; RV64IF-NEXT:    call __fixdfti@plt
+; RV64IF-NEXT:    call __fixdfti
 ; RV64IF-NEXT:    li a2, -1
 ; RV64IF-NEXT:    srli a3, a2, 1
 ; RV64IF-NEXT:    beqz a1, .LBB45_2
@@ -2990,7 +2990,7 @@ define i64 @stest_f64i64_mm(double %x) {
 ; RV32IFD-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
 ; RV32IFD-NEXT:    .cfi_offset ra, -4
 ; RV32IFD-NEXT:    addi a0, sp, 8
-; RV32IFD-NEXT:    call __fixdfti@plt
+; RV32IFD-NEXT:    call __fixdfti
 ; RV32IFD-NEXT:    lw a0, 20(sp)
 ; RV32IFD-NEXT:    lw a2, 16(sp)
 ; RV32IFD-NEXT:    lw a1, 12(sp)
@@ -3069,7 +3069,7 @@ define i64 @utest_f64i64_mm(double %x) {
 ; RV32IF-NEXT:    mv a2, a1
 ; RV32IF-NEXT:    mv a1, a0
 ; RV32IF-NEXT:    addi a0, sp, 8
-; RV32IF-NEXT:    call __fixunsdfti@plt
+; RV32IF-NEXT:    call __fixunsdfti
 ; RV32IF-NEXT:    lw a0, 16(sp)
 ; RV32IF-NEXT:    lw a1, 20(sp)
 ; RV32IF-NEXT:    lw a2, 12(sp)
@@ -3094,7 +3094,7 @@ define i64 @utest_f64i64_mm(double %x) {
 ; RV64-NEXT:    .cfi_def_cfa_offset 16
 ; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-NEXT:    .cfi_offset ra, -8
-; RV64-NEXT:    call __fixunsdfti@plt
+; RV64-NEXT:    call __fixunsdfti
 ; RV64-NEXT:    snez a1, a1
 ; RV64-NEXT:    addi a1, a1, -1
 ; RV64-NEXT:    and a0, a1, a0
@@ -3109,7 +3109,7 @@ define i64 @utest_f64i64_mm(double %x) {
 ; RV32IFD-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
 ; RV32IFD-NEXT:    .cfi_offset ra, -4
 ; RV32IFD-NEXT:    addi a0, sp, 8
-; RV32IFD-NEXT:    call __fixunsdfti@plt
+; RV32IFD-NEXT:    call __fixunsdfti
 ; RV32IFD-NEXT:    lw a0, 16(sp)
 ; RV32IFD-NEXT:    lw a1, 20(sp)
 ; RV32IFD-NEXT:    lw a2, 12(sp)
@@ -3144,7 +3144,7 @@ define i64 @ustest_f64i64_mm(double %x) {
 ; RV32IF-NEXT:    mv a2, a1
 ; RV32IF-NEXT:    mv a1, a0
 ; RV32IF-NEXT:    addi a0, sp, 8
-; RV32IF-NEXT:    call __fixdfti@plt
+; RV32IF-NEXT:    call __fixdfti
 ; RV32IF-NEXT:    lw a0, 8(sp)
 ; RV32IF-NEXT:    lw a1, 12(sp)
 ; RV32IF-NEXT:    lw a2, 20(sp)
@@ -3179,7 +3179,7 @@ define i64 @ustest_f64i64_mm(double %x) {
 ; RV64-NEXT:    .cfi_def_cfa_offset 16
 ; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-NEXT:    .cfi_offset ra, -8
-; RV64-NEXT:    call __fixdfti@plt
+; RV64-NEXT:    call __fixdfti
 ; RV64-NEXT:    mv a2, a1
 ; RV64-NEXT:    blez a1, .LBB47_2
 ; RV64-NEXT:  # %bb.1: # %entry
@@ -3202,7 +3202,7 @@ define i64 @ustest_f64i64_mm(double %x) {
 ; RV32IFD-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
 ; RV32IFD-NEXT:    .cfi_offset ra, -4
 ; RV32IFD-NEXT:    addi a0, sp, 8
-; RV32IFD-NEXT:    call __fixdfti@plt
+; RV32IFD-NEXT:    call __fixdfti
 ; RV32IFD-NEXT:    lw a0, 8(sp)
 ; RV32IFD-NEXT:    lw a1, 12(sp)
 ; RV32IFD-NEXT:    lw a2, 20(sp)
@@ -3246,7 +3246,7 @@ define i64 @stest_f32i64_mm(float %x) {
 ; RV32-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    .cfi_offset ra, -4
 ; RV32-NEXT:    addi a0, sp, 8
-; RV32-NEXT:    call __fixsfti@plt
+; RV32-NEXT:    call __fixsfti
 ; RV32-NEXT:    lw a0, 20(sp)
 ; RV32-NEXT:    lw a2, 16(sp)
 ; RV32-NEXT:    lw a1, 12(sp)
@@ -3323,7 +3323,7 @@ define i64 @utest_f32i64_mm(float %x) {
 ; RV32-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    .cfi_offset ra, -4
 ; RV32-NEXT:    addi a0, sp, 8
-; RV32-NEXT:    call __fixunssfti@plt
+; RV32-NEXT:    call __fixunssfti
 ; RV32-NEXT:    lw a0, 16(sp)
 ; RV32-NEXT:    lw a1, 20(sp)
 ; RV32-NEXT:    lw a2, 12(sp)
@@ -3348,7 +3348,7 @@ define i64 @utest_f32i64_mm(float %x) {
 ; RV64-NEXT:    .cfi_def_cfa_offset 16
 ; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-NEXT:    .cfi_offset ra, -8
-; RV64-NEXT:    call __fixunssfti@plt
+; RV64-NEXT:    call __fixunssfti
 ; RV64-NEXT:    snez a1, a1
 ; RV64-NEXT:    addi a1, a1, -1
 ; RV64-NEXT:    and a0, a1, a0
@@ -3370,7 +3370,7 @@ define i64 @ustest_f32i64_mm(float %x) {
 ; RV32-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    .cfi_offset ra, -4
 ; RV32-NEXT:    addi a0, sp, 8
-; RV32-NEXT:    call __fixsfti@plt
+; RV32-NEXT:    call __fixsfti
 ; RV32-NEXT:    lw a0, 8(sp)
 ; RV32-NEXT:    lw a1, 12(sp)
 ; RV32-NEXT:    lw a2, 20(sp)
@@ -3405,7 +3405,7 @@ define i64 @ustest_f32i64_mm(float %x) {
 ; RV64-NEXT:    .cfi_def_cfa_offset 16
 ; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-NEXT:    .cfi_offset ra, -8
-; RV64-NEXT:    call __fixsfti@plt
+; RV64-NEXT:    call __fixsfti
 ; RV64-NEXT:    mv a2, a1
 ; RV64-NEXT:    blez a1, .LBB50_2
 ; RV64-NEXT:  # %bb.1: # %entry
@@ -3435,9 +3435,9 @@ define i64 @stest_f16i64_mm(half %x) {
 ; RV32-NEXT:    .cfi_def_cfa_offset 32
 ; RV32-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    .cfi_offset ra, -4
-; RV32-NEXT:    call __extendhfsf2@plt
+; RV32-NEXT:    call __extendhfsf2
 ; RV32-NEXT:    addi a0, sp, 8
-; RV32-NEXT:    call __fixsfti@plt
+; RV32-NEXT:    call __fixsfti
 ; RV32-NEXT:    lw a0, 20(sp)
 ; RV32-NEXT:    lw a2, 16(sp)
 ; RV32-NEXT:    lw a1, 12(sp)
@@ -3496,8 +3496,8 @@ define i64 @stest_f16i64_mm(half %x) {
 ; RV64-NEXT:    .cfi_def_cfa_offset 16
 ; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-NEXT:    .cfi_offset ra, -8
-; RV64-NEXT:    call __extendhfsf2@plt
-; RV64-NEXT:    call __fixsfti@plt
+; RV64-NEXT:    call __extendhfsf2
+; RV64-NEXT:    call __fixsfti
 ; RV64-NEXT:    li a2, -1
 ; RV64-NEXT:    srli a3, a2, 1
 ; RV64-NEXT:    beqz a1, .LBB51_2
@@ -3544,9 +3544,9 @@ define i64 @utesth_f16i64_mm(half %x) {
 ; RV32-NEXT:    .cfi_def_cfa_offset 32
 ; RV32-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    .cfi_offset ra, -4
-; RV32-NEXT:    call __extendhfsf2@plt
+; RV32-NEXT:    call __extendhfsf2
 ; RV32-NEXT:    addi a0, sp, 8
-; RV32-NEXT:    call __fixunssfti@plt
+; RV32-NEXT:    call __fixunssfti
 ; RV32-NEXT:    lw a0, 16(sp)
 ; RV32-NEXT:    lw a1, 20(sp)
 ; RV32-NEXT:    lw a2, 12(sp)
@@ -3571,8 +3571,8 @@ define i64 @utesth_f16i64_mm(half %x) {
 ; RV64-NEXT:    .cfi_def_cfa_offset 16
 ; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-NEXT:    .cfi_offset ra, -8
-; RV64-NEXT:    call __extendhfsf2@plt
-; RV64-NEXT:    call __fixunssfti@plt
+; RV64-NEXT:    call __extendhfsf2
+; RV64-NEXT:    call __fixunssfti
 ; RV64-NEXT:    snez a1, a1
 ; RV64-NEXT:    addi a1, a1, -1
 ; RV64-NEXT:    and a0, a1, a0
@@ -3593,9 +3593,9 @@ define i64 @ustest_f16i64_mm(half %x) {
 ; RV32-NEXT:    .cfi_def_cfa_offset 32
 ; RV32-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    .cfi_offset ra, -4
-; RV32-NEXT:    call __extendhfsf2@plt
+; RV32-NEXT:    call __extendhfsf2
 ; RV32-NEXT:    addi a0, sp, 8
-; RV32-NEXT:    call __fixsfti@plt
+; RV32-NEXT:    call __fixsfti
 ; RV32-NEXT:    lw a0, 8(sp)
 ; RV32-NEXT:    lw a1, 12(sp)
 ; RV32-NEXT:    lw a2, 20(sp)
@@ -3630,8 +3630,8 @@ define i64 @ustest_f16i64_mm(half %x) {
 ; RV64-NEXT:    .cfi_def_cfa_offset 16
 ; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-NEXT:    .cfi_offset ra, -8
-; RV64-NEXT:    call __extendhfsf2@plt
-; RV64-NEXT:    call __fixsfti@plt
+; RV64-NEXT:    call __extendhfsf2
+; RV64-NEXT:    call __fixsfti
 ; RV64-NEXT:    mv a2, a1
 ; RV64-NEXT:    blez a1, .LBB53_2
 ; RV64-NEXT:  # %bb.1: # %entry
diff --git a/llvm/test/CodeGen/RISCV/frame-info.ll b/llvm/test/CodeGen/RISCV/frame-info.ll
index 95c4798c32ece7..bc4f89e9177489 100644
--- a/llvm/test/CodeGen/RISCV/frame-info.ll
+++ b/llvm/test/CodeGen/RISCV/frame-info.ll
@@ -64,7 +64,7 @@ define void @stack_alloc(i32 signext %size) {
 ; RV32-NEXT:    andi a0, a0, -16
 ; RV32-NEXT:    sub a0, sp, a0
 ; RV32-NEXT:    mv sp, a0
-; RV32-NEXT:    call callee_with_args@plt
+; RV32-NEXT:    call callee_with_args
 ; RV32-NEXT:    addi sp, s0, -16
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
@@ -87,7 +87,7 @@ define void @stack_alloc(i32 signext %size) {
 ; RV64-NEXT:    andi a0, a0, -16
 ; RV64-NEXT:    sub a0, sp, a0
 ; RV64-NEXT:    mv sp, a0
-; RV64-NEXT:    call callee_with_args@plt
+; RV64-NEXT:    call callee_with_args
 ; RV64-NEXT:    addi sp, s0, -16
 ; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
@@ -108,7 +108,7 @@ define void @stack_alloc(i32 signext %size) {
 ; RV32-WITHFP-NEXT:    andi a0, a0, -16
 ; RV32-WITHFP-NEXT:    sub a0, sp, a0
 ; RV32-WITHFP-NEXT:    mv sp, a0
-; RV32-WITHFP-NEXT:    call callee_with_args@plt
+; RV32-WITHFP-NEXT:    call callee_with_args
 ; RV32-WITHFP-NEXT:    addi sp, s0, -16
 ; RV32-WITHFP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-WITHFP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
@@ -131,7 +131,7 @@ define void @stack_alloc(i32 signext %size) {
 ; RV64-WITHFP-NEXT:    andi a0, a0, -16
 ; RV64-WITHFP-NEXT:    sub a0, sp, a0
 ; RV64-WITHFP-NEXT:    mv sp, a0
-; RV64-WITHFP-NEXT:    call callee_with_args@plt
+; RV64-WITHFP-NEXT:    call callee_with_args
 ; RV64-WITHFP-NEXT:    addi sp, s0, -16
 ; RV64-WITHFP-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-WITHFP-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
@@ -149,13 +149,13 @@ define void @branch_and_tail_call(i1 %a) {
 ; RV32-NEXT:    andi a0, a0, 1
 ; RV32-NEXT:    beqz a0, .LBB2_2
 ; RV32-NEXT:  # %bb.1: # %blue_pill
-; RV32-NEXT:    tail callee1@plt
+; RV32-NEXT:    tail callee1
 ; RV32-NEXT:  .LBB2_2: # %red_pill
 ; RV32-NEXT:    addi sp, sp, -16
 ; RV32-NEXT:    .cfi_def_cfa_offset 16
 ; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    .cfi_offset ra, -4
-; RV32-NEXT:    call callee2@plt
+; RV32-NEXT:    call callee2
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
@@ -165,13 +165,13 @@ define void @branch_and_tail_call(i1 %a) {
 ; RV64-NEXT:    andi a0, a0, 1
 ; RV64-NEXT:    beqz a0, .LBB2_2
 ; RV64-NEXT:  # %bb.1: # %blue_pill
-; RV64-NEXT:    tail callee1@plt
+; RV64-NEXT:    tail callee1
 ; RV64-NEXT:  .LBB2_2: # %red_pill
 ; RV64-NEXT:    addi sp, sp, -16
 ; RV64-NEXT:    .cfi_def_cfa_offset 16
 ; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-NEXT:    .cfi_offset ra, -8
-; RV64-NEXT:    call callee2@plt
+; RV64-NEXT:    call callee2
 ; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    addi sp, sp, 16
 ; RV64-NEXT:    ret
@@ -181,7 +181,7 @@ define void @branch_and_tail_call(i1 %a) {
 ; RV32-WITHFP-NEXT:    andi a0, a0, 1
 ; RV32-WITHFP-NEXT:    beqz a0, .LBB2_2
 ; RV32-WITHFP-NEXT:  # %bb.1: # %blue_pill
-; RV32-WITHFP-NEXT:    tail callee1@plt
+; RV32-WITHFP-NEXT:    tail callee1
 ; RV32-WITHFP-NEXT:  .LBB2_2: # %red_pill
 ; RV32-WITHFP-NEXT:    addi sp, sp, -16
 ; RV32-WITHFP-NEXT:    .cfi_def_cfa_offset 16
@@ -191,7 +191,7 @@ define void @branch_and_tail_call(i1 %a) {
 ; RV32-WITHFP-NEXT:    .cfi_offset s0, -8
 ; RV32-WITHFP-NEXT:    addi s0, sp, 16
 ; RV32-WITHFP-NEXT:    .cfi_def_cfa s0, 0
-; RV32-WITHFP-NEXT:    call callee2@plt
+; RV32-WITHFP-NEXT:    call callee2
 ; RV32-WITHFP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-WITHFP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; RV32-WITHFP-NEXT:    addi sp, sp, 16
@@ -202,7 +202,7 @@ define void @branch_and_tail_call(i1 %a) {
 ; RV64-WITHFP-NEXT:    andi a0, a0, 1
 ; RV64-WITHFP-NEXT:    beqz a0, .LBB2_2
 ; RV64-WITHFP-NEXT:  # %bb.1: # %blue_pill
-; RV64-WITHFP-NEXT:    tail callee1@plt
+; RV64-WITHFP-NEXT:    tail callee1
 ; RV64-WITHFP-NEXT:  .LBB2_2: # %red_pill
 ; RV64-WITHFP-NEXT:    addi sp, sp, -16
 ; RV64-WITHFP-NEXT:    .cfi_def_cfa_offset 16
@@ -212,7 +212,7 @@ define void @branch_and_tail_call(i1 %a) {
 ; RV64-WITHFP-NEXT:    .cfi_offset s0, -16
 ; RV64-WITHFP-NEXT:    addi s0, sp, 16
 ; RV64-WITHFP-NEXT:    .cfi_def_cfa s0, 0
-; RV64-WITHFP-NEXT:    call callee2@plt
+; RV64-WITHFP-NEXT:    call callee2
 ; RV64-WITHFP-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-WITHFP-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
 ; RV64-WITHFP-NEXT:    addi sp, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/frame.ll b/llvm/test/CodeGen/RISCV/frame.ll
index 183a0f47c68c86..d50f1e55417a72 100644
--- a/llvm/test/CodeGen/RISCV/frame.ll
+++ b/llvm/test/CodeGen/RISCV/frame.ll
@@ -17,7 +17,7 @@ define i32 @test() nounwind {
 ; RV32I-FPELIM-NEXT:    sw zero, 12(sp)
 ; RV32I-FPELIM-NEXT:    sw zero, 8(sp)
 ; RV32I-FPELIM-NEXT:    addi a0, sp, 12
-; RV32I-FPELIM-NEXT:    call test1@plt
+; RV32I-FPELIM-NEXT:    call test1
 ; RV32I-FPELIM-NEXT:    li a0, 0
 ; RV32I-FPELIM-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-FPELIM-NEXT:    addi sp, sp, 32
@@ -35,7 +35,7 @@ define i32 @test() nounwind {
 ; RV32I-WITHFP-NEXT:    sw zero, -28(s0)
 ; RV32I-WITHFP-NEXT:    sw zero, -32(s0)
 ; RV32I-WITHFP-NEXT:    addi a0, s0, -28
-; RV32I-WITHFP-NEXT:    call test1@plt
+; RV32I-WITHFP-NEXT:    call test1
 ; RV32I-WITHFP-NEXT:    li a0, 0
 ; RV32I-WITHFP-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-WITHFP-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/frameaddr-returnaddr.ll b/llvm/test/CodeGen/RISCV/frameaddr-returnaddr.ll
index 23379c9430accb..478c8457997ae3 100644
--- a/llvm/test/CodeGen/RISCV/frameaddr-returnaddr.ll
+++ b/llvm/test/CodeGen/RISCV/frameaddr-returnaddr.ll
@@ -74,7 +74,7 @@ define ptr @test_frameaddress_3_alloca() nounwind {
 ; RV32I-NEXT:    sw s0, 104(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi s0, sp, 112
 ; RV32I-NEXT:    addi a0, s0, -108
-; RV32I-NEXT:    call notdead@plt
+; RV32I-NEXT:    call notdead
 ; RV32I-NEXT:    lw a0, -8(s0)
 ; RV32I-NEXT:    lw a0, -8(a0)
 ; RV32I-NEXT:    lw a0, -8(a0)
@@ -90,7 +90,7 @@ define ptr @test_frameaddress_3_alloca() nounwind {
 ; RV64I-NEXT:    sd s0, 112(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi s0, sp, 128
 ; RV64I-NEXT:    addi a0, s0, -116
-; RV64I-NEXT:    call notdead@plt
+; RV64I-NEXT:    call notdead
 ; RV64I-NEXT:    ld a0, -16(s0)
 ; RV64I-NEXT:    ld a0, -16(a0)
 ; RV64I-NEXT:    ld a0, -16(a0)
diff --git a/llvm/test/CodeGen/RISCV/ghccc-rv32.ll b/llvm/test/CodeGen/RISCV/ghccc-rv32.ll
index bf1f41195c5d90..0f9511125adbae 100644
--- a/llvm/test/CodeGen/RISCV/ghccc-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/ghccc-rv32.ll
@@ -78,7 +78,7 @@ define ghccc void @foo() nounwind {
 ; CHECK-NEXT:    lw s2, %lo(sp)(a0)
 ; CHECK-NEXT:    lui a0, %hi(base)
 ; CHECK-NEXT:    lw s1, %lo(base)(a0)
-; CHECK-NEXT:    tail bar@plt
+; CHECK-NEXT:    tail bar
 entry:
   %0  = load double, ptr @d6
   %1  = load double, ptr @d5
diff --git a/llvm/test/CodeGen/RISCV/ghccc-rv64.ll b/llvm/test/CodeGen/RISCV/ghccc-rv64.ll
index 9d2091df1f1995..79afd4bc375d53 100644
--- a/llvm/test/CodeGen/RISCV/ghccc-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/ghccc-rv64.ll
@@ -78,7 +78,7 @@ define ghccc void @foo() nounwind {
 ; CHECK-NEXT:    ld s2, %lo(sp)(a0)
 ; CHECK-NEXT:    lui a0, %hi(base)
 ; CHECK-NEXT:    ld s1, %lo(base)(a0)
-; CHECK-NEXT:    tail bar@plt
+; CHECK-NEXT:    tail bar
 entry:
   %0  = load double, ptr @d6
   %1  = load double, ptr @d5
diff --git a/llvm/test/CodeGen/RISCV/ghccc-without-f-reg.ll b/llvm/test/CodeGen/RISCV/ghccc-without-f-reg.ll
index 32df34fe2c0f24..6437beae090154 100644
--- a/llvm/test/CodeGen/RISCV/ghccc-without-f-reg.ll
+++ b/llvm/test/CodeGen/RISCV/ghccc-without-f-reg.ll
@@ -25,7 +25,7 @@ define ghccc void @caller_float() nounwind {
 ; CHECK-NEXT:    lw s2, %lo(f2)(a0)
 ; CHECK-NEXT:    lui a0, %hi(f1)
 ; CHECK-NEXT:    lw s1, %lo(f1)(a0)
-; CHECK-NEXT:    tail callee_float@plt
+; CHECK-NEXT:    tail callee_float
 entry:
   %0  = load float, ptr @f6
   %1  = load float, ptr @f5
@@ -61,7 +61,7 @@ define ghccc void @caller_double() nounwind {
 ; CHECK-NEXT:    ld s2, %lo(d2)(a0)
 ; CHECK-NEXT:    lui a0, %hi(d1)
 ; CHECK-NEXT:    ld s1, %lo(d1)(a0)
-; CHECK-NEXT:    tail callee_double@plt
+; CHECK-NEXT:    tail callee_double
 entry:
   %0  = load double, ptr @d6
   %1  = load double, ptr @d5
diff --git a/llvm/test/CodeGen/RISCV/half-arith.ll b/llvm/test/CodeGen/RISCV/half-arith.ll
index 98c732122951c4..f54adaa24b1bb5 100644
--- a/llvm/test/CodeGen/RISCV/half-arith.ll
+++ b/llvm/test/CodeGen/RISCV/half-arith.ll
@@ -47,14 +47,14 @@ define half @fadd_s(half %a, half %b) nounwind {
 ; RV32I-NEXT:    lui a1, 16
 ; RV32I-NEXT:    addi s2, a1, -1
 ; RV32I-NEXT:    and a0, a0, s2
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    and a0, s0, s2
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    mv a1, a0
 ; RV32I-NEXT:    mv a0, s1
-; RV32I-NEXT:    call __addsf3@plt
-; RV32I-NEXT:    call __truncsfhf2@plt
+; RV32I-NEXT:    call __addsf3
+; RV32I-NEXT:    call __truncsfhf2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
@@ -73,14 +73,14 @@ define half @fadd_s(half %a, half %b) nounwind {
 ; RV64I-NEXT:    lui a1, 16
 ; RV64I-NEXT:    addiw s2, a1, -1
 ; RV64I-NEXT:    and a0, a0, s2
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    and a0, s0, s2
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    mv a1, a0
 ; RV64I-NEXT:    mv a0, s1
-; RV64I-NEXT:    call __addsf3@plt
-; RV64I-NEXT:    call __truncsfhf2@plt
+; RV64I-NEXT:    call __addsf3
+; RV64I-NEXT:    call __truncsfhf2
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
@@ -136,14 +136,14 @@ define half @fsub_s(half %a, half %b) nounwind {
 ; RV32I-NEXT:    lui a1, 16
 ; RV32I-NEXT:    addi s2, a1, -1
 ; RV32I-NEXT:    and a0, a0, s2
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    and a0, s0, s2
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    mv a1, a0
 ; RV32I-NEXT:    mv a0, s1
-; RV32I-NEXT:    call __subsf3@plt
-; RV32I-NEXT:    call __truncsfhf2@plt
+; RV32I-NEXT:    call __subsf3
+; RV32I-NEXT:    call __truncsfhf2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
@@ -162,14 +162,14 @@ define half @fsub_s(half %a, half %b) nounwind {
 ; RV64I-NEXT:    lui a1, 16
 ; RV64I-NEXT:    addiw s2, a1, -1
 ; RV64I-NEXT:    and a0, a0, s2
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    and a0, s0, s2
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    mv a1, a0
 ; RV64I-NEXT:    mv a0, s1
-; RV64I-NEXT:    call __subsf3@plt
-; RV64I-NEXT:    call __truncsfhf2@plt
+; RV64I-NEXT:    call __subsf3
+; RV64I-NEXT:    call __truncsfhf2
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
@@ -225,14 +225,14 @@ define half @fmul_s(half %a, half %b) nounwind {
 ; RV32I-NEXT:    lui a1, 16
 ; RV32I-NEXT:    addi s2, a1, -1
 ; RV32I-NEXT:    and a0, a0, s2
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    and a0, s0, s2
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    mv a1, a0
 ; RV32I-NEXT:    mv a0, s1
-; RV32I-NEXT:    call __mulsf3@plt
-; RV32I-NEXT:    call __truncsfhf2@plt
+; RV32I-NEXT:    call __mulsf3
+; RV32I-NEXT:    call __truncsfhf2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
@@ -251,14 +251,14 @@ define half @fmul_s(half %a, half %b) nounwind {
 ; RV64I-NEXT:    lui a1, 16
 ; RV64I-NEXT:    addiw s2, a1, -1
 ; RV64I-NEXT:    and a0, a0, s2
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    and a0, s0, s2
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    mv a1, a0
 ; RV64I-NEXT:    mv a0, s1
-; RV64I-NEXT:    call __mulsf3@plt
-; RV64I-NEXT:    call __truncsfhf2@plt
+; RV64I-NEXT:    call __mulsf3
+; RV64I-NEXT:    call __truncsfhf2
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
@@ -314,14 +314,14 @@ define half @fdiv_s(half %a, half %b) nounwind {
 ; RV32I-NEXT:    lui a1, 16
 ; RV32I-NEXT:    addi s2, a1, -1
 ; RV32I-NEXT:    and a0, a0, s2
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    and a0, s0, s2
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    mv a1, a0
 ; RV32I-NEXT:    mv a0, s1
-; RV32I-NEXT:    call __divsf3@plt
-; RV32I-NEXT:    call __truncsfhf2@plt
+; RV32I-NEXT:    call __divsf3
+; RV32I-NEXT:    call __truncsfhf2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
@@ -340,14 +340,14 @@ define half @fdiv_s(half %a, half %b) nounwind {
 ; RV64I-NEXT:    lui a1, 16
 ; RV64I-NEXT:    addiw s2, a1, -1
 ; RV64I-NEXT:    and a0, a0, s2
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    and a0, s0, s2
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    mv a1, a0
 ; RV64I-NEXT:    mv a0, s1
-; RV64I-NEXT:    call __divsf3@plt
-; RV64I-NEXT:    call __truncsfhf2@plt
+; RV64I-NEXT:    call __divsf3
+; RV64I-NEXT:    call __truncsfhf2
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
@@ -400,9 +400,9 @@ define half @fsqrt_s(half %a) nounwind {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    slli a0, a0, 16
 ; RV32I-NEXT:    srli a0, a0, 16
-; RV32I-NEXT:    call __extendhfsf2@plt
-; RV32I-NEXT:    call sqrtf@plt
-; RV32I-NEXT:    call __truncsfhf2@plt
+; RV32I-NEXT:    call __extendhfsf2
+; RV32I-NEXT:    call sqrtf
+; RV32I-NEXT:    call __truncsfhf2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -413,9 +413,9 @@ define half @fsqrt_s(half %a) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    slli a0, a0, 48
 ; RV64I-NEXT:    srli a0, a0, 48
-; RV64I-NEXT:    call __extendhfsf2@plt
-; RV64I-NEXT:    call sqrtf@plt
-; RV64I-NEXT:    call __truncsfhf2@plt
+; RV64I-NEXT:    call __extendhfsf2
+; RV64I-NEXT:    call sqrtf
+; RV64I-NEXT:    call __truncsfhf2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -577,21 +577,21 @@ define i32 @fneg_s(half %a, half %b) nounwind {
 ; RV32I-NEXT:    lui a1, 16
 ; RV32I-NEXT:    addi s1, a1, -1
 ; RV32I-NEXT:    and a0, a0, s1
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    mv a1, a0
-; RV32I-NEXT:    call __addsf3@plt
-; RV32I-NEXT:    call __truncsfhf2@plt
+; RV32I-NEXT:    call __addsf3
+; RV32I-NEXT:    call __truncsfhf2
 ; RV32I-NEXT:    and a0, a0, s1
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lui a0, 524288
 ; RV32I-NEXT:    xor a0, s0, a0
-; RV32I-NEXT:    call __truncsfhf2@plt
+; RV32I-NEXT:    call __truncsfhf2
 ; RV32I-NEXT:    and a0, a0, s1
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    mv a1, a0
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __eqsf2@plt
+; RV32I-NEXT:    call __eqsf2
 ; RV32I-NEXT:    seqz a0, a0
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
@@ -608,21 +608,21 @@ define i32 @fneg_s(half %a, half %b) nounwind {
 ; RV64I-NEXT:    lui a1, 16
 ; RV64I-NEXT:    addiw s1, a1, -1
 ; RV64I-NEXT:    and a0, a0, s1
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    mv a1, a0
-; RV64I-NEXT:    call __addsf3@plt
-; RV64I-NEXT:    call __truncsfhf2@plt
+; RV64I-NEXT:    call __addsf3
+; RV64I-NEXT:    call __truncsfhf2
 ; RV64I-NEXT:    and a0, a0, s1
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    lui a0, 524288
 ; RV64I-NEXT:    xor a0, s0, a0
-; RV64I-NEXT:    call __truncsfhf2@plt
+; RV64I-NEXT:    call __truncsfhf2
 ; RV64I-NEXT:    and a0, a0, s1
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    mv a1, a0
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __eqsf2@plt
+; RV64I-NEXT:    call __eqsf2
 ; RV64I-NEXT:    seqz a0, a0
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
@@ -699,19 +699,19 @@ define half @fsgnjn_s(half %a, half %b) nounwind {
 ; RV32I-NEXT:    lui a0, 16
 ; RV32I-NEXT:    addi s3, a0, -1
 ; RV32I-NEXT:    and a0, s1, s3
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    mv s2, a0
 ; RV32I-NEXT:    and a0, s0, s3
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    mv a1, a0
 ; RV32I-NEXT:    mv a0, s2
-; RV32I-NEXT:    call __addsf3@plt
-; RV32I-NEXT:    call __truncsfhf2@plt
+; RV32I-NEXT:    call __addsf3
+; RV32I-NEXT:    call __truncsfhf2
 ; RV32I-NEXT:    and a0, a0, s3
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    lui a1, 524288
 ; RV32I-NEXT:    xor a0, a0, a1
-; RV32I-NEXT:    call __truncsfhf2@plt
+; RV32I-NEXT:    call __truncsfhf2
 ; RV32I-NEXT:    lui a1, 1048568
 ; RV32I-NEXT:    and a0, a0, a1
 ; RV32I-NEXT:    slli s1, s1, 17
@@ -738,19 +738,19 @@ define half @fsgnjn_s(half %a, half %b) nounwind {
 ; RV64I-NEXT:    lui a0, 16
 ; RV64I-NEXT:    addiw s3, a0, -1
 ; RV64I-NEXT:    and a0, s1, s3
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    mv s2, a0
 ; RV64I-NEXT:    and a0, s0, s3
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    mv a1, a0
 ; RV64I-NEXT:    mv a0, s2
-; RV64I-NEXT:    call __addsf3@plt
-; RV64I-NEXT:    call __truncsfhf2@plt
+; RV64I-NEXT:    call __addsf3
+; RV64I-NEXT:    call __truncsfhf2
 ; RV64I-NEXT:    and a0, a0, s3
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    lui a1, 524288
 ; RV64I-NEXT:    xor a0, a0, a1
-; RV64I-NEXT:    call __truncsfhf2@plt
+; RV64I-NEXT:    call __truncsfhf2
 ; RV64I-NEXT:    lui a1, 1048568
 ; RV64I-NEXT:    and a0, a0, a1
 ; RV64I-NEXT:    slli s1, s1, 49
@@ -908,25 +908,25 @@ define half @fabs_s(half %a, half %b) nounwind {
 ; RV32I-NEXT:    lui a1, 16
 ; RV32I-NEXT:    addi s2, a1, -1
 ; RV32I-NEXT:    and a0, a0, s2
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    and a0, s0, s2
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    mv a1, a0
 ; RV32I-NEXT:    mv a0, s1
-; RV32I-NEXT:    call __addsf3@plt
-; RV32I-NEXT:    call __truncsfhf2@plt
+; RV32I-NEXT:    call __addsf3
+; RV32I-NEXT:    call __truncsfhf2
 ; RV32I-NEXT:    and a0, a0, s2
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    slli a0, a0, 1
 ; RV32I-NEXT:    srli a0, a0, 1
-; RV32I-NEXT:    call __truncsfhf2@plt
+; RV32I-NEXT:    call __truncsfhf2
 ; RV32I-NEXT:    and a0, a0, s2
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    mv a1, s0
-; RV32I-NEXT:    call __addsf3@plt
-; RV32I-NEXT:    call __truncsfhf2@plt
+; RV32I-NEXT:    call __addsf3
+; RV32I-NEXT:    call __truncsfhf2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
@@ -945,25 +945,25 @@ define half @fabs_s(half %a, half %b) nounwind {
 ; RV64I-NEXT:    lui a1, 16
 ; RV64I-NEXT:    addiw s2, a1, -1
 ; RV64I-NEXT:    and a0, a0, s2
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    and a0, s0, s2
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    mv a1, a0
 ; RV64I-NEXT:    mv a0, s1
-; RV64I-NEXT:    call __addsf3@plt
-; RV64I-NEXT:    call __truncsfhf2@plt
+; RV64I-NEXT:    call __addsf3
+; RV64I-NEXT:    call __truncsfhf2
 ; RV64I-NEXT:    and a0, a0, s2
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    slli a0, a0, 33
 ; RV64I-NEXT:    srli a0, a0, 33
-; RV64I-NEXT:    call __truncsfhf2@plt
+; RV64I-NEXT:    call __truncsfhf2
 ; RV64I-NEXT:    and a0, a0, s2
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    mv a1, s0
-; RV64I-NEXT:    call __addsf3@plt
-; RV64I-NEXT:    call __truncsfhf2@plt
+; RV64I-NEXT:    call __addsf3
+; RV64I-NEXT:    call __truncsfhf2
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
@@ -1041,14 +1041,14 @@ define half @fmin_s(half %a, half %b) nounwind {
 ; RV32I-NEXT:    lui a1, 16
 ; RV32I-NEXT:    addi s2, a1, -1
 ; RV32I-NEXT:    and a0, a0, s2
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    and a0, s0, s2
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    mv a1, a0
 ; RV32I-NEXT:    mv a0, s1
-; RV32I-NEXT:    call fminf@plt
-; RV32I-NEXT:    call __truncsfhf2@plt
+; RV32I-NEXT:    call fminf
+; RV32I-NEXT:    call __truncsfhf2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
@@ -1067,14 +1067,14 @@ define half @fmin_s(half %a, half %b) nounwind {
 ; RV64I-NEXT:    lui a1, 16
 ; RV64I-NEXT:    addiw s2, a1, -1
 ; RV64I-NEXT:    and a0, a0, s2
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    and a0, s0, s2
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    mv a1, a0
 ; RV64I-NEXT:    mv a0, s1
-; RV64I-NEXT:    call fminf@plt
-; RV64I-NEXT:    call __truncsfhf2@plt
+; RV64I-NEXT:    call fminf
+; RV64I-NEXT:    call __truncsfhf2
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
@@ -1132,14 +1132,14 @@ define half @fmax_s(half %a, half %b) nounwind {
 ; RV32I-NEXT:    lui a1, 16
 ; RV32I-NEXT:    addi s2, a1, -1
 ; RV32I-NEXT:    and a0, a0, s2
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    and a0, s0, s2
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    mv a1, a0
 ; RV32I-NEXT:    mv a0, s1
-; RV32I-NEXT:    call fmaxf@plt
-; RV32I-NEXT:    call __truncsfhf2@plt
+; RV32I-NEXT:    call fmaxf
+; RV32I-NEXT:    call __truncsfhf2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
@@ -1158,14 +1158,14 @@ define half @fmax_s(half %a, half %b) nounwind {
 ; RV64I-NEXT:    lui a1, 16
 ; RV64I-NEXT:    addiw s2, a1, -1
 ; RV64I-NEXT:    and a0, a0, s2
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    and a0, s0, s2
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    mv a1, a0
 ; RV64I-NEXT:    mv a0, s1
-; RV64I-NEXT:    call fmaxf@plt
-; RV64I-NEXT:    call __truncsfhf2@plt
+; RV64I-NEXT:    call fmaxf
+; RV64I-NEXT:    call __truncsfhf2
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
@@ -1225,18 +1225,18 @@ define half @fmadd_s(half %a, half %b, half %c) nounwind {
 ; RV32I-NEXT:    lui a1, 16
 ; RV32I-NEXT:    addi s3, a1, -1
 ; RV32I-NEXT:    and a0, a0, s3
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    mv s2, a0
 ; RV32I-NEXT:    and a0, s1, s3
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    and a0, s0, s3
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    mv a2, a0
 ; RV32I-NEXT:    mv a0, s2
 ; RV32I-NEXT:    mv a1, s1
-; RV32I-NEXT:    call fmaf@plt
-; RV32I-NEXT:    call __truncsfhf2@plt
+; RV32I-NEXT:    call fmaf
+; RV32I-NEXT:    call __truncsfhf2
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
@@ -1258,18 +1258,18 @@ define half @fmadd_s(half %a, half %b, half %c) nounwind {
 ; RV64I-NEXT:    lui a1, 16
 ; RV64I-NEXT:    addiw s3, a1, -1
 ; RV64I-NEXT:    and a0, a0, s3
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    mv s2, a0
 ; RV64I-NEXT:    and a0, s1, s3
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    and a0, s0, s3
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    mv a2, a0
 ; RV64I-NEXT:    mv a0, s2
 ; RV64I-NEXT:    mv a1, s1
-; RV64I-NEXT:    call fmaf@plt
-; RV64I-NEXT:    call __truncsfhf2@plt
+; RV64I-NEXT:    call fmaf
+; RV64I-NEXT:    call __truncsfhf2
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
@@ -1334,29 +1334,29 @@ define half @fmsub_s(half %a, half %b, half %c) nounwind {
 ; RV32I-NEXT:    lui a0, 16
 ; RV32I-NEXT:    addi s3, a0, -1
 ; RV32I-NEXT:    and a0, a2, s3
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    li a1, 0
-; RV32I-NEXT:    call __addsf3@plt
-; RV32I-NEXT:    call __truncsfhf2@plt
+; RV32I-NEXT:    call __addsf3
+; RV32I-NEXT:    call __truncsfhf2
 ; RV32I-NEXT:    and a0, a0, s3
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    lui a1, 524288
 ; RV32I-NEXT:    xor a0, a0, a1
-; RV32I-NEXT:    call __truncsfhf2@plt
+; RV32I-NEXT:    call __truncsfhf2
 ; RV32I-NEXT:    mv s2, a0
 ; RV32I-NEXT:    and a0, s1, s3
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    and a0, s0, s3
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    and a0, s2, s3
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    mv a2, a0
 ; RV32I-NEXT:    mv a0, s1
 ; RV32I-NEXT:    mv a1, s0
-; RV32I-NEXT:    call fmaf@plt
-; RV32I-NEXT:    call __truncsfhf2@plt
+; RV32I-NEXT:    call fmaf
+; RV32I-NEXT:    call __truncsfhf2
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
@@ -1378,29 +1378,29 @@ define half @fmsub_s(half %a, half %b, half %c) nounwind {
 ; RV64I-NEXT:    lui a0, 16
 ; RV64I-NEXT:    addiw s3, a0, -1
 ; RV64I-NEXT:    and a0, a2, s3
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    li a1, 0
-; RV64I-NEXT:    call __addsf3@plt
-; RV64I-NEXT:    call __truncsfhf2@plt
+; RV64I-NEXT:    call __addsf3
+; RV64I-NEXT:    call __truncsfhf2
 ; RV64I-NEXT:    and a0, a0, s3
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    lui a1, 524288
 ; RV64I-NEXT:    xor a0, a0, a1
-; RV64I-NEXT:    call __truncsfhf2@plt
+; RV64I-NEXT:    call __truncsfhf2
 ; RV64I-NEXT:    mv s2, a0
 ; RV64I-NEXT:    and a0, s1, s3
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    and a0, s0, s3
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    and a0, s2, s3
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    mv a2, a0
 ; RV64I-NEXT:    mv a0, s1
 ; RV64I-NEXT:    mv a1, s0
-; RV64I-NEXT:    call fmaf@plt
-; RV64I-NEXT:    call __truncsfhf2@plt
+; RV64I-NEXT:    call fmaf
+; RV64I-NEXT:    call __truncsfhf2
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
@@ -1489,41 +1489,41 @@ define half @fnmadd_s(half %a, half %b, half %c) nounwind {
 ; RV32I-NEXT:    lui s3, 16
 ; RV32I-NEXT:    addi s3, s3, -1
 ; RV32I-NEXT:    and a0, a0, s3
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    li a1, 0
-; RV32I-NEXT:    call __addsf3@plt
-; RV32I-NEXT:    call __truncsfhf2@plt
+; RV32I-NEXT:    call __addsf3
+; RV32I-NEXT:    call __truncsfhf2
 ; RV32I-NEXT:    mv s2, a0
 ; RV32I-NEXT:    and a0, s1, s3
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    li a1, 0
-; RV32I-NEXT:    call __addsf3@plt
-; RV32I-NEXT:    call __truncsfhf2@plt
+; RV32I-NEXT:    call __addsf3
+; RV32I-NEXT:    call __truncsfhf2
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    and a0, s2, s3
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    lui s4, 524288
 ; RV32I-NEXT:    xor a0, a0, s4
-; RV32I-NEXT:    call __truncsfhf2@plt
+; RV32I-NEXT:    call __truncsfhf2
 ; RV32I-NEXT:    mv s2, a0
 ; RV32I-NEXT:    and a0, s1, s3
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    xor a0, a0, s4
-; RV32I-NEXT:    call __truncsfhf2@plt
+; RV32I-NEXT:    call __truncsfhf2
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    and a0, s0, s3
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    and a0, s2, s3
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    mv s2, a0
 ; RV32I-NEXT:    and a0, s1, s3
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    mv a2, a0
 ; RV32I-NEXT:    mv a0, s2
 ; RV32I-NEXT:    mv a1, s0
-; RV32I-NEXT:    call fmaf@plt
-; RV32I-NEXT:    call __truncsfhf2@plt
+; RV32I-NEXT:    call fmaf
+; RV32I-NEXT:    call __truncsfhf2
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
@@ -1547,41 +1547,41 @@ define half @fnmadd_s(half %a, half %b, half %c) nounwind {
 ; RV64I-NEXT:    lui s3, 16
 ; RV64I-NEXT:    addiw s3, s3, -1
 ; RV64I-NEXT:    and a0, a0, s3
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    li a1, 0
-; RV64I-NEXT:    call __addsf3@plt
-; RV64I-NEXT:    call __truncsfhf2@plt
+; RV64I-NEXT:    call __addsf3
+; RV64I-NEXT:    call __truncsfhf2
 ; RV64I-NEXT:    mv s2, a0
 ; RV64I-NEXT:    and a0, s1, s3
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    li a1, 0
-; RV64I-NEXT:    call __addsf3@plt
-; RV64I-NEXT:    call __truncsfhf2@plt
+; RV64I-NEXT:    call __addsf3
+; RV64I-NEXT:    call __truncsfhf2
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    and a0, s2, s3
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    lui s4, 524288
 ; RV64I-NEXT:    xor a0, a0, s4
-; RV64I-NEXT:    call __truncsfhf2@plt
+; RV64I-NEXT:    call __truncsfhf2
 ; RV64I-NEXT:    mv s2, a0
 ; RV64I-NEXT:    and a0, s1, s3
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    xor a0, a0, s4
-; RV64I-NEXT:    call __truncsfhf2@plt
+; RV64I-NEXT:    call __truncsfhf2
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    and a0, s0, s3
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    and a0, s2, s3
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    mv s2, a0
 ; RV64I-NEXT:    and a0, s1, s3
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    mv a2, a0
 ; RV64I-NEXT:    mv a0, s2
 ; RV64I-NEXT:    mv a1, s0
-; RV64I-NEXT:    call fmaf@plt
-; RV64I-NEXT:    call __truncsfhf2@plt
+; RV64I-NEXT:    call fmaf
+; RV64I-NEXT:    call __truncsfhf2
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
@@ -1691,41 +1691,41 @@ define half @fnmadd_s_2(half %a, half %b, half %c) nounwind {
 ; RV32I-NEXT:    lui s3, 16
 ; RV32I-NEXT:    addi s3, s3, -1
 ; RV32I-NEXT:    and a0, a1, s3
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    li a1, 0
-; RV32I-NEXT:    call __addsf3@plt
-; RV32I-NEXT:    call __truncsfhf2@plt
+; RV32I-NEXT:    call __addsf3
+; RV32I-NEXT:    call __truncsfhf2
 ; RV32I-NEXT:    mv s2, a0
 ; RV32I-NEXT:    and a0, s1, s3
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    li a1, 0
-; RV32I-NEXT:    call __addsf3@plt
-; RV32I-NEXT:    call __truncsfhf2@plt
+; RV32I-NEXT:    call __addsf3
+; RV32I-NEXT:    call __truncsfhf2
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    and a0, s2, s3
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    lui s4, 524288
 ; RV32I-NEXT:    xor a0, a0, s4
-; RV32I-NEXT:    call __truncsfhf2@plt
+; RV32I-NEXT:    call __truncsfhf2
 ; RV32I-NEXT:    mv s2, a0
 ; RV32I-NEXT:    and a0, s1, s3
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    xor a0, a0, s4
-; RV32I-NEXT:    call __truncsfhf2@plt
+; RV32I-NEXT:    call __truncsfhf2
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    and a0, s0, s3
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    and a0, s2, s3
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    mv s2, a0
 ; RV32I-NEXT:    and a0, s1, s3
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    mv a2, a0
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    mv a1, s2
-; RV32I-NEXT:    call fmaf@plt
-; RV32I-NEXT:    call __truncsfhf2@plt
+; RV32I-NEXT:    call fmaf
+; RV32I-NEXT:    call __truncsfhf2
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
@@ -1749,41 +1749,41 @@ define half @fnmadd_s_2(half %a, half %b, half %c) nounwind {
 ; RV64I-NEXT:    lui s3, 16
 ; RV64I-NEXT:    addiw s3, s3, -1
 ; RV64I-NEXT:    and a0, a1, s3
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    li a1, 0
-; RV64I-NEXT:    call __addsf3@plt
-; RV64I-NEXT:    call __truncsfhf2@plt
+; RV64I-NEXT:    call __addsf3
+; RV64I-NEXT:    call __truncsfhf2
 ; RV64I-NEXT:    mv s2, a0
 ; RV64I-NEXT:    and a0, s1, s3
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    li a1, 0
-; RV64I-NEXT:    call __addsf3@plt
-; RV64I-NEXT:    call __truncsfhf2@plt
+; RV64I-NEXT:    call __addsf3
+; RV64I-NEXT:    call __truncsfhf2
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    and a0, s2, s3
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    lui s4, 524288
 ; RV64I-NEXT:    xor a0, a0, s4
-; RV64I-NEXT:    call __truncsfhf2@plt
+; RV64I-NEXT:    call __truncsfhf2
 ; RV64I-NEXT:    mv s2, a0
 ; RV64I-NEXT:    and a0, s1, s3
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    xor a0, a0, s4
-; RV64I-NEXT:    call __truncsfhf2@plt
+; RV64I-NEXT:    call __truncsfhf2
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    and a0, s0, s3
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    and a0, s2, s3
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    mv s2, a0
 ; RV64I-NEXT:    and a0, s1, s3
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    mv a2, a0
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    mv a1, s2
-; RV64I-NEXT:    call fmaf@plt
-; RV64I-NEXT:    call __truncsfhf2@plt
+; RV64I-NEXT:    call fmaf
+; RV64I-NEXT:    call __truncsfhf2
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
@@ -1902,18 +1902,18 @@ define half @fnmadd_s_3(half %a, half %b, half %c) nounwind {
 ; RV32I-NEXT:    lui a1, 16
 ; RV32I-NEXT:    addi s3, a1, -1
 ; RV32I-NEXT:    and a0, a0, s3
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    mv s2, a0
 ; RV32I-NEXT:    and a0, s1, s3
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    and a0, s0, s3
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    mv a2, a0
 ; RV32I-NEXT:    mv a0, s2
 ; RV32I-NEXT:    mv a1, s1
-; RV32I-NEXT:    call fmaf@plt
-; RV32I-NEXT:    call __truncsfhf2@plt
+; RV32I-NEXT:    call fmaf
+; RV32I-NEXT:    call __truncsfhf2
 ; RV32I-NEXT:    lui a1, 1048568
 ; RV32I-NEXT:    xor a0, a0, a1
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
@@ -1937,18 +1937,18 @@ define half @fnmadd_s_3(half %a, half %b, half %c) nounwind {
 ; RV64I-NEXT:    lui a1, 16
 ; RV64I-NEXT:    addiw s3, a1, -1
 ; RV64I-NEXT:    and a0, a0, s3
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    mv s2, a0
 ; RV64I-NEXT:    and a0, s1, s3
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    and a0, s0, s3
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    mv a2, a0
 ; RV64I-NEXT:    mv a0, s2
 ; RV64I-NEXT:    mv a1, s1
-; RV64I-NEXT:    call fmaf@plt
-; RV64I-NEXT:    call __truncsfhf2@plt
+; RV64I-NEXT:    call fmaf
+; RV64I-NEXT:    call __truncsfhf2
 ; RV64I-NEXT:    lui a1, 1048568
 ; RV64I-NEXT:    xor a0, a0, a1
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
@@ -2033,18 +2033,18 @@ define half @fnmadd_nsz(half %a, half %b, half %c) nounwind {
 ; RV32I-NEXT:    lui a1, 16
 ; RV32I-NEXT:    addi s3, a1, -1
 ; RV32I-NEXT:    and a0, a0, s3
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    mv s2, a0
 ; RV32I-NEXT:    and a0, s1, s3
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    and a0, s0, s3
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    mv a2, a0
 ; RV32I-NEXT:    mv a0, s2
 ; RV32I-NEXT:    mv a1, s1
-; RV32I-NEXT:    call fmaf@plt
-; RV32I-NEXT:    call __truncsfhf2@plt
+; RV32I-NEXT:    call fmaf
+; RV32I-NEXT:    call __truncsfhf2
 ; RV32I-NEXT:    lui a1, 1048568
 ; RV32I-NEXT:    xor a0, a0, a1
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
@@ -2068,18 +2068,18 @@ define half @fnmadd_nsz(half %a, half %b, half %c) nounwind {
 ; RV64I-NEXT:    lui a1, 16
 ; RV64I-NEXT:    addiw s3, a1, -1
 ; RV64I-NEXT:    and a0, a0, s3
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    mv s2, a0
 ; RV64I-NEXT:    and a0, s1, s3
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    and a0, s0, s3
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    mv a2, a0
 ; RV64I-NEXT:    mv a0, s2
 ; RV64I-NEXT:    mv a1, s1
-; RV64I-NEXT:    call fmaf@plt
-; RV64I-NEXT:    call __truncsfhf2@plt
+; RV64I-NEXT:    call fmaf
+; RV64I-NEXT:    call __truncsfhf2
 ; RV64I-NEXT:    lui a1, 1048568
 ; RV64I-NEXT:    xor a0, a0, a1
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
@@ -2154,28 +2154,28 @@ define half @fnmsub_s(half %a, half %b, half %c) nounwind {
 ; RV32I-NEXT:    lui a1, 16
 ; RV32I-NEXT:    addi s3, a1, -1
 ; RV32I-NEXT:    and a0, a0, s3
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    li a1, 0
-; RV32I-NEXT:    call __addsf3@plt
-; RV32I-NEXT:    call __truncsfhf2@plt
+; RV32I-NEXT:    call __addsf3
+; RV32I-NEXT:    call __truncsfhf2
 ; RV32I-NEXT:    and a0, a0, s3
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    lui a1, 524288
 ; RV32I-NEXT:    xor a0, a0, a1
-; RV32I-NEXT:    call __truncsfhf2@plt
+; RV32I-NEXT:    call __truncsfhf2
 ; RV32I-NEXT:    mv s2, a0
 ; RV32I-NEXT:    and a0, s1, s3
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    and a0, s0, s3
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    and a0, s2, s3
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    mv a1, s1
 ; RV32I-NEXT:    mv a2, s0
-; RV32I-NEXT:    call fmaf@plt
-; RV32I-NEXT:    call __truncsfhf2@plt
+; RV32I-NEXT:    call fmaf
+; RV32I-NEXT:    call __truncsfhf2
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
@@ -2197,28 +2197,28 @@ define half @fnmsub_s(half %a, half %b, half %c) nounwind {
 ; RV64I-NEXT:    lui a1, 16
 ; RV64I-NEXT:    addiw s3, a1, -1
 ; RV64I-NEXT:    and a0, a0, s3
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    li a1, 0
-; RV64I-NEXT:    call __addsf3@plt
-; RV64I-NEXT:    call __truncsfhf2@plt
+; RV64I-NEXT:    call __addsf3
+; RV64I-NEXT:    call __truncsfhf2
 ; RV64I-NEXT:    and a0, a0, s3
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    lui a1, 524288
 ; RV64I-NEXT:    xor a0, a0, a1
-; RV64I-NEXT:    call __truncsfhf2@plt
+; RV64I-NEXT:    call __truncsfhf2
 ; RV64I-NEXT:    mv s2, a0
 ; RV64I-NEXT:    and a0, s1, s3
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    and a0, s0, s3
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    and a0, s2, s3
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    mv a1, s1
 ; RV64I-NEXT:    mv a2, s0
-; RV64I-NEXT:    call fmaf@plt
-; RV64I-NEXT:    call __truncsfhf2@plt
+; RV64I-NEXT:    call fmaf
+; RV64I-NEXT:    call __truncsfhf2
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
@@ -2304,29 +2304,29 @@ define half @fnmsub_s_2(half %a, half %b, half %c) nounwind {
 ; RV32I-NEXT:    lui a0, 16
 ; RV32I-NEXT:    addi s3, a0, -1
 ; RV32I-NEXT:    and a0, a1, s3
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    li a1, 0
-; RV32I-NEXT:    call __addsf3@plt
-; RV32I-NEXT:    call __truncsfhf2@plt
+; RV32I-NEXT:    call __addsf3
+; RV32I-NEXT:    call __truncsfhf2
 ; RV32I-NEXT:    and a0, a0, s3
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    lui a1, 524288
 ; RV32I-NEXT:    xor a0, a0, a1
-; RV32I-NEXT:    call __truncsfhf2@plt
+; RV32I-NEXT:    call __truncsfhf2
 ; RV32I-NEXT:    mv s2, a0
 ; RV32I-NEXT:    and a0, s1, s3
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    and a0, s0, s3
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    and a0, s2, s3
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    mv a1, a0
 ; RV32I-NEXT:    mv a0, s1
 ; RV32I-NEXT:    mv a2, s0
-; RV32I-NEXT:    call fmaf@plt
-; RV32I-NEXT:    call __truncsfhf2@plt
+; RV32I-NEXT:    call fmaf
+; RV32I-NEXT:    call __truncsfhf2
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
@@ -2348,29 +2348,29 @@ define half @fnmsub_s_2(half %a, half %b, half %c) nounwind {
 ; RV64I-NEXT:    lui a0, 16
 ; RV64I-NEXT:    addiw s3, a0, -1
 ; RV64I-NEXT:    and a0, a1, s3
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    li a1, 0
-; RV64I-NEXT:    call __addsf3@plt
-; RV64I-NEXT:    call __truncsfhf2@plt
+; RV64I-NEXT:    call __addsf3
+; RV64I-NEXT:    call __truncsfhf2
 ; RV64I-NEXT:    and a0, a0, s3
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    lui a1, 524288
 ; RV64I-NEXT:    xor a0, a0, a1
-; RV64I-NEXT:    call __truncsfhf2@plt
+; RV64I-NEXT:    call __truncsfhf2
 ; RV64I-NEXT:    mv s2, a0
 ; RV64I-NEXT:    and a0, s1, s3
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    and a0, s0, s3
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    and a0, s2, s3
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    mv a1, a0
 ; RV64I-NEXT:    mv a0, s1
 ; RV64I-NEXT:    mv a2, s0
-; RV64I-NEXT:    call fmaf@plt
-; RV64I-NEXT:    call __truncsfhf2@plt
+; RV64I-NEXT:    call fmaf
+; RV64I-NEXT:    call __truncsfhf2
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
@@ -2453,23 +2453,23 @@ define half @fmadd_s_contract(half %a, half %b, half %c) nounwind {
 ; RV32I-NEXT:    lui a1, 16
 ; RV32I-NEXT:    addi s3, a1, -1
 ; RV32I-NEXT:    and a0, a0, s3
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    mv s2, a0
 ; RV32I-NEXT:    and a0, s1, s3
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    mv a1, a0
 ; RV32I-NEXT:    mv a0, s2
-; RV32I-NEXT:    call __mulsf3@plt
-; RV32I-NEXT:    call __truncsfhf2@plt
+; RV32I-NEXT:    call __mulsf3
+; RV32I-NEXT:    call __truncsfhf2
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    and a0, s0, s3
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    and a0, s1, s3
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    mv a1, s0
-; RV32I-NEXT:    call __addsf3@plt
-; RV32I-NEXT:    call __truncsfhf2@plt
+; RV32I-NEXT:    call __addsf3
+; RV32I-NEXT:    call __truncsfhf2
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
@@ -2491,23 +2491,23 @@ define half @fmadd_s_contract(half %a, half %b, half %c) nounwind {
 ; RV64I-NEXT:    lui a1, 16
 ; RV64I-NEXT:    addiw s3, a1, -1
 ; RV64I-NEXT:    and a0, a0, s3
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    mv s2, a0
 ; RV64I-NEXT:    and a0, s1, s3
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    mv a1, a0
 ; RV64I-NEXT:    mv a0, s2
-; RV64I-NEXT:    call __mulsf3@plt
-; RV64I-NEXT:    call __truncsfhf2@plt
+; RV64I-NEXT:    call __mulsf3
+; RV64I-NEXT:    call __truncsfhf2
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    and a0, s0, s3
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    and a0, s1, s3
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    mv a1, s0
-; RV64I-NEXT:    call __addsf3@plt
-; RV64I-NEXT:    call __truncsfhf2@plt
+; RV64I-NEXT:    call __addsf3
+; RV64I-NEXT:    call __truncsfhf2
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
@@ -2582,29 +2582,29 @@ define half @fmsub_s_contract(half %a, half %b, half %c) nounwind {
 ; RV32I-NEXT:    lui a0, 16
 ; RV32I-NEXT:    addi s3, a0, -1
 ; RV32I-NEXT:    and a0, a2, s3
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    li a1, 0
-; RV32I-NEXT:    call __addsf3@plt
-; RV32I-NEXT:    call __truncsfhf2@plt
+; RV32I-NEXT:    call __addsf3
+; RV32I-NEXT:    call __truncsfhf2
 ; RV32I-NEXT:    mv s2, a0
 ; RV32I-NEXT:    and a0, s1, s3
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    and a0, s0, s3
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    mv a1, a0
 ; RV32I-NEXT:    mv a0, s1
-; RV32I-NEXT:    call __mulsf3@plt
-; RV32I-NEXT:    call __truncsfhf2@plt
+; RV32I-NEXT:    call __mulsf3
+; RV32I-NEXT:    call __truncsfhf2
 ; RV32I-NEXT:    and a0, a0, s3
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    and a0, s2, s3
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    mv a1, a0
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __subsf3@plt
-; RV32I-NEXT:    call __truncsfhf2@plt
+; RV32I-NEXT:    call __subsf3
+; RV32I-NEXT:    call __truncsfhf2
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
@@ -2626,29 +2626,29 @@ define half @fmsub_s_contract(half %a, half %b, half %c) nounwind {
 ; RV64I-NEXT:    lui a0, 16
 ; RV64I-NEXT:    addiw s3, a0, -1
 ; RV64I-NEXT:    and a0, a2, s3
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    li a1, 0
-; RV64I-NEXT:    call __addsf3@plt
-; RV64I-NEXT:    call __truncsfhf2@plt
+; RV64I-NEXT:    call __addsf3
+; RV64I-NEXT:    call __truncsfhf2
 ; RV64I-NEXT:    mv s2, a0
 ; RV64I-NEXT:    and a0, s1, s3
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    and a0, s0, s3
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    mv a1, a0
 ; RV64I-NEXT:    mv a0, s1
-; RV64I-NEXT:    call __mulsf3@plt
-; RV64I-NEXT:    call __truncsfhf2@plt
+; RV64I-NEXT:    call __mulsf3
+; RV64I-NEXT:    call __truncsfhf2
 ; RV64I-NEXT:    and a0, a0, s3
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    and a0, s2, s3
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    mv a1, a0
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __subsf3@plt
-; RV64I-NEXT:    call __truncsfhf2@plt
+; RV64I-NEXT:    call __subsf3
+; RV64I-NEXT:    call __truncsfhf2
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
@@ -2738,46 +2738,46 @@ define half @fnmadd_s_contract(half %a, half %b, half %c) nounwind {
 ; RV32I-NEXT:    lui s3, 16
 ; RV32I-NEXT:    addi s3, s3, -1
 ; RV32I-NEXT:    and a0, a0, s3
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    li a1, 0
-; RV32I-NEXT:    call __addsf3@plt
-; RV32I-NEXT:    call __truncsfhf2@plt
+; RV32I-NEXT:    call __addsf3
+; RV32I-NEXT:    call __truncsfhf2
 ; RV32I-NEXT:    mv s2, a0
 ; RV32I-NEXT:    and a0, s1, s3
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    li a1, 0
-; RV32I-NEXT:    call __addsf3@plt
-; RV32I-NEXT:    call __truncsfhf2@plt
+; RV32I-NEXT:    call __addsf3
+; RV32I-NEXT:    call __truncsfhf2
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    and a0, s0, s3
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    li a1, 0
-; RV32I-NEXT:    call __addsf3@plt
-; RV32I-NEXT:    call __truncsfhf2@plt
+; RV32I-NEXT:    call __addsf3
+; RV32I-NEXT:    call __truncsfhf2
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    and a0, s2, s3
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    mv s2, a0
 ; RV32I-NEXT:    and a0, s1, s3
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    mv a1, a0
 ; RV32I-NEXT:    mv a0, s2
-; RV32I-NEXT:    call __mulsf3@plt
-; RV32I-NEXT:    call __truncsfhf2@plt
+; RV32I-NEXT:    call __mulsf3
+; RV32I-NEXT:    call __truncsfhf2
 ; RV32I-NEXT:    and a0, a0, s3
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    lui a1, 524288
 ; RV32I-NEXT:    xor a0, a0, a1
-; RV32I-NEXT:    call __truncsfhf2@plt
+; RV32I-NEXT:    call __truncsfhf2
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    and a0, s0, s3
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    and a0, s1, s3
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    mv a1, s0
-; RV32I-NEXT:    call __subsf3@plt
-; RV32I-NEXT:    call __truncsfhf2@plt
+; RV32I-NEXT:    call __subsf3
+; RV32I-NEXT:    call __truncsfhf2
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
@@ -2799,46 +2799,46 @@ define half @fnmadd_s_contract(half %a, half %b, half %c) nounwind {
 ; RV64I-NEXT:    lui s3, 16
 ; RV64I-NEXT:    addiw s3, s3, -1
 ; RV64I-NEXT:    and a0, a0, s3
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    li a1, 0
-; RV64I-NEXT:    call __addsf3@plt
-; RV64I-NEXT:    call __truncsfhf2@plt
+; RV64I-NEXT:    call __addsf3
+; RV64I-NEXT:    call __truncsfhf2
 ; RV64I-NEXT:    mv s2, a0
 ; RV64I-NEXT:    and a0, s1, s3
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    li a1, 0
-; RV64I-NEXT:    call __addsf3@plt
-; RV64I-NEXT:    call __truncsfhf2@plt
+; RV64I-NEXT:    call __addsf3
+; RV64I-NEXT:    call __truncsfhf2
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    and a0, s0, s3
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    li a1, 0
-; RV64I-NEXT:    call __addsf3@plt
-; RV64I-NEXT:    call __truncsfhf2@plt
+; RV64I-NEXT:    call __addsf3
+; RV64I-NEXT:    call __truncsfhf2
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    and a0, s2, s3
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    mv s2, a0
 ; RV64I-NEXT:    and a0, s1, s3
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    mv a1, a0
 ; RV64I-NEXT:    mv a0, s2
-; RV64I-NEXT:    call __mulsf3@plt
-; RV64I-NEXT:    call __truncsfhf2@plt
+; RV64I-NEXT:    call __mulsf3
+; RV64I-NEXT:    call __truncsfhf2
 ; RV64I-NEXT:    and a0, a0, s3
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    lui a1, 524288
 ; RV64I-NEXT:    xor a0, a0, a1
-; RV64I-NEXT:    call __truncsfhf2@plt
+; RV64I-NEXT:    call __truncsfhf2
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    and a0, s0, s3
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    and a0, s1, s3
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    mv a1, s0
-; RV64I-NEXT:    call __subsf3@plt
-; RV64I-NEXT:    call __truncsfhf2@plt
+; RV64I-NEXT:    call __subsf3
+; RV64I-NEXT:    call __truncsfhf2
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
@@ -2956,36 +2956,36 @@ define half @fnmsub_s_contract(half %a, half %b, half %c) nounwind {
 ; RV32I-NEXT:    lui s3, 16
 ; RV32I-NEXT:    addi s3, s3, -1
 ; RV32I-NEXT:    and a0, a0, s3
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    li a1, 0
-; RV32I-NEXT:    call __addsf3@plt
-; RV32I-NEXT:    call __truncsfhf2@plt
+; RV32I-NEXT:    call __addsf3
+; RV32I-NEXT:    call __truncsfhf2
 ; RV32I-NEXT:    mv s2, a0
 ; RV32I-NEXT:    and a0, s1, s3
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    li a1, 0
-; RV32I-NEXT:    call __addsf3@plt
-; RV32I-NEXT:    call __truncsfhf2@plt
+; RV32I-NEXT:    call __addsf3
+; RV32I-NEXT:    call __truncsfhf2
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    and a0, s2, s3
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    mv s2, a0
 ; RV32I-NEXT:    and a0, s1, s3
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    mv a1, a0
 ; RV32I-NEXT:    mv a0, s2
-; RV32I-NEXT:    call __mulsf3@plt
-; RV32I-NEXT:    call __truncsfhf2@plt
+; RV32I-NEXT:    call __mulsf3
+; RV32I-NEXT:    call __truncsfhf2
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    and a0, s0, s3
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    and a0, s1, s3
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    mv a1, a0
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __subsf3@plt
-; RV32I-NEXT:    call __truncsfhf2@plt
+; RV32I-NEXT:    call __subsf3
+; RV32I-NEXT:    call __truncsfhf2
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
@@ -3007,36 +3007,36 @@ define half @fnmsub_s_contract(half %a, half %b, half %c) nounwind {
 ; RV64I-NEXT:    lui s3, 16
 ; RV64I-NEXT:    addiw s3, s3, -1
 ; RV64I-NEXT:    and a0, a0, s3
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    li a1, 0
-; RV64I-NEXT:    call __addsf3@plt
-; RV64I-NEXT:    call __truncsfhf2@plt
+; RV64I-NEXT:    call __addsf3
+; RV64I-NEXT:    call __truncsfhf2
 ; RV64I-NEXT:    mv s2, a0
 ; RV64I-NEXT:    and a0, s1, s3
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    li a1, 0
-; RV64I-NEXT:    call __addsf3@plt
-; RV64I-NEXT:    call __truncsfhf2@plt
+; RV64I-NEXT:    call __addsf3
+; RV64I-NEXT:    call __truncsfhf2
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    and a0, s2, s3
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    mv s2, a0
 ; RV64I-NEXT:    and a0, s1, s3
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    mv a1, a0
 ; RV64I-NEXT:    mv a0, s2
-; RV64I-NEXT:    call __mulsf3@plt
-; RV64I-NEXT:    call __truncsfhf2@plt
+; RV64I-NEXT:    call __mulsf3
+; RV64I-NEXT:    call __truncsfhf2
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    and a0, s0, s3
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    and a0, s1, s3
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    mv a1, a0
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __subsf3@plt
-; RV64I-NEXT:    call __truncsfhf2@plt
+; RV64I-NEXT:    call __subsf3
+; RV64I-NEXT:    call __truncsfhf2
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
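
(To spot-check the updated expectations, here is a minimal sketch of a
local verification, assuming llvm-mc and llvm-readobj built with this
patch; the file name t.s and the symbols foo/bar are illustrative.
Only the printed form changes; both spellings should still relocate
via R_RISCV_CALL_PLT:

  # t.s -- assemble and inspect with:
  #   llvm-mc -triple=riscv64 -filetype=obj t.s -o t.o
  #   llvm-readobj -r t.o
  call foo        # printed without a suffix after this patch
  call bar@plt    # the explicit @plt spelling should still be accepted

llvm-readobj -r should report R_RISCV_CALL_PLT for both calls.)
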
diff --git a/llvm/test/CodeGen/RISCV/half-br-fcmp.ll b/llvm/test/CodeGen/RISCV/half-br-fcmp.ll
index f4d632449acd9d..6699ee94793796 100644
--- a/llvm/test/CodeGen/RISCV/half-br-fcmp.ll
+++ b/llvm/test/CodeGen/RISCV/half-br-fcmp.ll
@@ -30,7 +30,7 @@ define void @br_fcmp_false(half %a, half %b) nounwind {
 ; RV32IZFH-NEXT:  .LBB0_2: # %if.else
 ; RV32IZFH-NEXT:    addi sp, sp, -16
 ; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFH-NEXT:    call abort@plt
+; RV32IZFH-NEXT:    call abort
 ;
 ; RV64IZFH-LABEL: br_fcmp_false:
 ; RV64IZFH:       # %bb.0:
@@ -41,7 +41,7 @@ define void @br_fcmp_false(half %a, half %b) nounwind {
 ; RV64IZFH-NEXT:  .LBB0_2: # %if.else
 ; RV64IZFH-NEXT:    addi sp, sp, -16
 ; RV64IZFH-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFH-NEXT:    call abort@plt
+; RV64IZFH-NEXT:    call abort
 ;
 ; RV32IZHINX-LABEL: br_fcmp_false:
 ; RV32IZHINX:       # %bb.0:
@@ -52,7 +52,7 @@ define void @br_fcmp_false(half %a, half %b) nounwind {
 ; RV32IZHINX-NEXT:  .LBB0_2: # %if.else
 ; RV32IZHINX-NEXT:    addi sp, sp, -16
 ; RV32IZHINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZHINX-NEXT:    call abort@plt
+; RV32IZHINX-NEXT:    call abort
 ;
 ; RV64IZHINX-LABEL: br_fcmp_false:
 ; RV64IZHINX:       # %bb.0:
@@ -63,7 +63,7 @@ define void @br_fcmp_false(half %a, half %b) nounwind {
 ; RV64IZHINX-NEXT:  .LBB0_2: # %if.else
 ; RV64IZHINX-NEXT:    addi sp, sp, -16
 ; RV64IZHINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZHINX-NEXT:    call abort@plt
+; RV64IZHINX-NEXT:    call abort
 ;
 ; RV32IZFHMIN-LABEL: br_fcmp_false:
 ; RV32IZFHMIN:       # %bb.0:
@@ -74,7 +74,7 @@ define void @br_fcmp_false(half %a, half %b) nounwind {
 ; RV32IZFHMIN-NEXT:  .LBB0_2: # %if.else
 ; RV32IZFHMIN-NEXT:    addi sp, sp, -16
 ; RV32IZFHMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFHMIN-NEXT:    call abort@plt
+; RV32IZFHMIN-NEXT:    call abort
 ;
 ; RV64IZFHMIN-LABEL: br_fcmp_false:
 ; RV64IZFHMIN:       # %bb.0:
@@ -85,7 +85,7 @@ define void @br_fcmp_false(half %a, half %b) nounwind {
 ; RV64IZFHMIN-NEXT:  .LBB0_2: # %if.else
 ; RV64IZFHMIN-NEXT:    addi sp, sp, -16
 ; RV64IZFHMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFHMIN-NEXT:    call abort@plt
+; RV64IZFHMIN-NEXT:    call abort
 ;
 ; RV32IZHINXMIN-LABEL: br_fcmp_false:
 ; RV32IZHINXMIN:       # %bb.0:
@@ -96,7 +96,7 @@ define void @br_fcmp_false(half %a, half %b) nounwind {
 ; RV32IZHINXMIN-NEXT:  .LBB0_2: # %if.else
 ; RV32IZHINXMIN-NEXT:    addi sp, sp, -16
 ; RV32IZHINXMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZHINXMIN-NEXT:    call abort@plt
+; RV32IZHINXMIN-NEXT:    call abort
 ;
 ; RV64IZHINXMIN-LABEL: br_fcmp_false:
 ; RV64IZHINXMIN:       # %bb.0:
@@ -107,7 +107,7 @@ define void @br_fcmp_false(half %a, half %b) nounwind {
 ; RV64IZHINXMIN-NEXT:  .LBB0_2: # %if.else
 ; RV64IZHINXMIN-NEXT:    addi sp, sp, -16
 ; RV64IZHINXMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZHINXMIN-NEXT:    call abort@plt
+; RV64IZHINXMIN-NEXT:    call abort
   %1 = fcmp false half %a, %b
   br i1 %1, label %if.then, label %if.else
 if.then:
@@ -127,7 +127,7 @@ define void @br_fcmp_oeq(half %a, half %b) nounwind {
 ; RV32IZFH-NEXT:  .LBB1_2: # %if.then
 ; RV32IZFH-NEXT:    addi sp, sp, -16
 ; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFH-NEXT:    call abort@plt
+; RV32IZFH-NEXT:    call abort
 ;
 ; RV64IZFH-LABEL: br_fcmp_oeq:
 ; RV64IZFH:       # %bb.0:
@@ -138,7 +138,7 @@ define void @br_fcmp_oeq(half %a, half %b) nounwind {
 ; RV64IZFH-NEXT:  .LBB1_2: # %if.then
 ; RV64IZFH-NEXT:    addi sp, sp, -16
 ; RV64IZFH-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFH-NEXT:    call abort@plt
+; RV64IZFH-NEXT:    call abort
 ;
 ; RV32IZHINX-LABEL: br_fcmp_oeq:
 ; RV32IZHINX:       # %bb.0:
@@ -149,7 +149,7 @@ define void @br_fcmp_oeq(half %a, half %b) nounwind {
 ; RV32IZHINX-NEXT:  .LBB1_2: # %if.then
 ; RV32IZHINX-NEXT:    addi sp, sp, -16
 ; RV32IZHINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZHINX-NEXT:    call abort@plt
+; RV32IZHINX-NEXT:    call abort
 ;
 ; RV64IZHINX-LABEL: br_fcmp_oeq:
 ; RV64IZHINX:       # %bb.0:
@@ -160,7 +160,7 @@ define void @br_fcmp_oeq(half %a, half %b) nounwind {
 ; RV64IZHINX-NEXT:  .LBB1_2: # %if.then
 ; RV64IZHINX-NEXT:    addi sp, sp, -16
 ; RV64IZHINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZHINX-NEXT:    call abort@plt
+; RV64IZHINX-NEXT:    call abort
 ;
 ; RV32IZFHMIN-LABEL: br_fcmp_oeq:
 ; RV32IZFHMIN:       # %bb.0:
@@ -173,7 +173,7 @@ define void @br_fcmp_oeq(half %a, half %b) nounwind {
 ; RV32IZFHMIN-NEXT:  .LBB1_2: # %if.then
 ; RV32IZFHMIN-NEXT:    addi sp, sp, -16
 ; RV32IZFHMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFHMIN-NEXT:    call abort@plt
+; RV32IZFHMIN-NEXT:    call abort
 ;
 ; RV64IZFHMIN-LABEL: br_fcmp_oeq:
 ; RV64IZFHMIN:       # %bb.0:
@@ -186,7 +186,7 @@ define void @br_fcmp_oeq(half %a, half %b) nounwind {
 ; RV64IZFHMIN-NEXT:  .LBB1_2: # %if.then
 ; RV64IZFHMIN-NEXT:    addi sp, sp, -16
 ; RV64IZFHMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFHMIN-NEXT:    call abort@plt
+; RV64IZFHMIN-NEXT:    call abort
 ;
 ; RV32IZHINXMIN-LABEL: br_fcmp_oeq:
 ; RV32IZHINXMIN:       # %bb.0:
@@ -199,7 +199,7 @@ define void @br_fcmp_oeq(half %a, half %b) nounwind {
 ; RV32IZHINXMIN-NEXT:  .LBB1_2: # %if.then
 ; RV32IZHINXMIN-NEXT:    addi sp, sp, -16
 ; RV32IZHINXMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZHINXMIN-NEXT:    call abort@plt
+; RV32IZHINXMIN-NEXT:    call abort
 ;
 ; RV64IZHINXMIN-LABEL: br_fcmp_oeq:
 ; RV64IZHINXMIN:       # %bb.0:
@@ -212,7 +212,7 @@ define void @br_fcmp_oeq(half %a, half %b) nounwind {
 ; RV64IZHINXMIN-NEXT:  .LBB1_2: # %if.then
 ; RV64IZHINXMIN-NEXT:    addi sp, sp, -16
 ; RV64IZHINXMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZHINXMIN-NEXT:    call abort@plt
+; RV64IZHINXMIN-NEXT:    call abort
   %1 = fcmp oeq half %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -235,7 +235,7 @@ define void @br_fcmp_oeq_alt(half %a, half %b) nounwind {
 ; RV32IZFH-NEXT:  .LBB2_2: # %if.then
 ; RV32IZFH-NEXT:    addi sp, sp, -16
 ; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFH-NEXT:    call abort@plt
+; RV32IZFH-NEXT:    call abort
 ;
 ; RV64IZFH-LABEL: br_fcmp_oeq_alt:
 ; RV64IZFH:       # %bb.0:
@@ -246,7 +246,7 @@ define void @br_fcmp_oeq_alt(half %a, half %b) nounwind {
 ; RV64IZFH-NEXT:  .LBB2_2: # %if.then
 ; RV64IZFH-NEXT:    addi sp, sp, -16
 ; RV64IZFH-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFH-NEXT:    call abort@plt
+; RV64IZFH-NEXT:    call abort
 ;
 ; RV32IZHINX-LABEL: br_fcmp_oeq_alt:
 ; RV32IZHINX:       # %bb.0:
@@ -257,7 +257,7 @@ define void @br_fcmp_oeq_alt(half %a, half %b) nounwind {
 ; RV32IZHINX-NEXT:  .LBB2_2: # %if.then
 ; RV32IZHINX-NEXT:    addi sp, sp, -16
 ; RV32IZHINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZHINX-NEXT:    call abort@plt
+; RV32IZHINX-NEXT:    call abort
 ;
 ; RV64IZHINX-LABEL: br_fcmp_oeq_alt:
 ; RV64IZHINX:       # %bb.0:
@@ -268,7 +268,7 @@ define void @br_fcmp_oeq_alt(half %a, half %b) nounwind {
 ; RV64IZHINX-NEXT:  .LBB2_2: # %if.then
 ; RV64IZHINX-NEXT:    addi sp, sp, -16
 ; RV64IZHINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZHINX-NEXT:    call abort@plt
+; RV64IZHINX-NEXT:    call abort
 ;
 ; RV32IZFHMIN-LABEL: br_fcmp_oeq_alt:
 ; RV32IZFHMIN:       # %bb.0:
@@ -281,7 +281,7 @@ define void @br_fcmp_oeq_alt(half %a, half %b) nounwind {
 ; RV32IZFHMIN-NEXT:  .LBB2_2: # %if.then
 ; RV32IZFHMIN-NEXT:    addi sp, sp, -16
 ; RV32IZFHMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFHMIN-NEXT:    call abort@plt
+; RV32IZFHMIN-NEXT:    call abort
 ;
 ; RV64IZFHMIN-LABEL: br_fcmp_oeq_alt:
 ; RV64IZFHMIN:       # %bb.0:
@@ -294,7 +294,7 @@ define void @br_fcmp_oeq_alt(half %a, half %b) nounwind {
 ; RV64IZFHMIN-NEXT:  .LBB2_2: # %if.then
 ; RV64IZFHMIN-NEXT:    addi sp, sp, -16
 ; RV64IZFHMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFHMIN-NEXT:    call abort@plt
+; RV64IZFHMIN-NEXT:    call abort
 ;
 ; RV32IZHINXMIN-LABEL: br_fcmp_oeq_alt:
 ; RV32IZHINXMIN:       # %bb.0:
@@ -307,7 +307,7 @@ define void @br_fcmp_oeq_alt(half %a, half %b) nounwind {
 ; RV32IZHINXMIN-NEXT:  .LBB2_2: # %if.then
 ; RV32IZHINXMIN-NEXT:    addi sp, sp, -16
 ; RV32IZHINXMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZHINXMIN-NEXT:    call abort@plt
+; RV32IZHINXMIN-NEXT:    call abort
 ;
 ; RV64IZHINXMIN-LABEL: br_fcmp_oeq_alt:
 ; RV64IZHINXMIN:       # %bb.0:
@@ -320,7 +320,7 @@ define void @br_fcmp_oeq_alt(half %a, half %b) nounwind {
 ; RV64IZHINXMIN-NEXT:  .LBB2_2: # %if.then
 ; RV64IZHINXMIN-NEXT:    addi sp, sp, -16
 ; RV64IZHINXMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZHINXMIN-NEXT:    call abort@plt
+; RV64IZHINXMIN-NEXT:    call abort
   %1 = fcmp oeq half %a, %b
   br i1 %1, label %if.then, label %if.else
 if.then:
@@ -340,7 +340,7 @@ define void @br_fcmp_ogt(half %a, half %b) nounwind {
 ; RV32IZFH-NEXT:  .LBB3_2: # %if.then
 ; RV32IZFH-NEXT:    addi sp, sp, -16
 ; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFH-NEXT:    call abort@plt
+; RV32IZFH-NEXT:    call abort
 ;
 ; RV64IZFH-LABEL: br_fcmp_ogt:
 ; RV64IZFH:       # %bb.0:
@@ -351,7 +351,7 @@ define void @br_fcmp_ogt(half %a, half %b) nounwind {
 ; RV64IZFH-NEXT:  .LBB3_2: # %if.then
 ; RV64IZFH-NEXT:    addi sp, sp, -16
 ; RV64IZFH-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFH-NEXT:    call abort@plt
+; RV64IZFH-NEXT:    call abort
 ;
 ; RV32IZHINX-LABEL: br_fcmp_ogt:
 ; RV32IZHINX:       # %bb.0:
@@ -362,7 +362,7 @@ define void @br_fcmp_ogt(half %a, half %b) nounwind {
 ; RV32IZHINX-NEXT:  .LBB3_2: # %if.then
 ; RV32IZHINX-NEXT:    addi sp, sp, -16
 ; RV32IZHINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZHINX-NEXT:    call abort@plt
+; RV32IZHINX-NEXT:    call abort
 ;
 ; RV64IZHINX-LABEL: br_fcmp_ogt:
 ; RV64IZHINX:       # %bb.0:
@@ -373,7 +373,7 @@ define void @br_fcmp_ogt(half %a, half %b) nounwind {
 ; RV64IZHINX-NEXT:  .LBB3_2: # %if.then
 ; RV64IZHINX-NEXT:    addi sp, sp, -16
 ; RV64IZHINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZHINX-NEXT:    call abort@plt
+; RV64IZHINX-NEXT:    call abort
 ;
 ; RV32IZFHMIN-LABEL: br_fcmp_ogt:
 ; RV32IZFHMIN:       # %bb.0:
@@ -386,7 +386,7 @@ define void @br_fcmp_ogt(half %a, half %b) nounwind {
 ; RV32IZFHMIN-NEXT:  .LBB3_2: # %if.then
 ; RV32IZFHMIN-NEXT:    addi sp, sp, -16
 ; RV32IZFHMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFHMIN-NEXT:    call abort@plt
+; RV32IZFHMIN-NEXT:    call abort
 ;
 ; RV64IZFHMIN-LABEL: br_fcmp_ogt:
 ; RV64IZFHMIN:       # %bb.0:
@@ -399,7 +399,7 @@ define void @br_fcmp_ogt(half %a, half %b) nounwind {
 ; RV64IZFHMIN-NEXT:  .LBB3_2: # %if.then
 ; RV64IZFHMIN-NEXT:    addi sp, sp, -16
 ; RV64IZFHMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFHMIN-NEXT:    call abort@plt
+; RV64IZFHMIN-NEXT:    call abort
 ;
 ; RV32IZHINXMIN-LABEL: br_fcmp_ogt:
 ; RV32IZHINXMIN:       # %bb.0:
@@ -412,7 +412,7 @@ define void @br_fcmp_ogt(half %a, half %b) nounwind {
 ; RV32IZHINXMIN-NEXT:  .LBB3_2: # %if.then
 ; RV32IZHINXMIN-NEXT:    addi sp, sp, -16
 ; RV32IZHINXMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZHINXMIN-NEXT:    call abort@plt
+; RV32IZHINXMIN-NEXT:    call abort
 ;
 ; RV64IZHINXMIN-LABEL: br_fcmp_ogt:
 ; RV64IZHINXMIN:       # %bb.0:
@@ -425,7 +425,7 @@ define void @br_fcmp_ogt(half %a, half %b) nounwind {
 ; RV64IZHINXMIN-NEXT:  .LBB3_2: # %if.then
 ; RV64IZHINXMIN-NEXT:    addi sp, sp, -16
 ; RV64IZHINXMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZHINXMIN-NEXT:    call abort@plt
+; RV64IZHINXMIN-NEXT:    call abort
   %1 = fcmp ogt half %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -445,7 +445,7 @@ define void @br_fcmp_oge(half %a, half %b) nounwind {
 ; RV32IZFH-NEXT:  .LBB4_2: # %if.then
 ; RV32IZFH-NEXT:    addi sp, sp, -16
 ; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFH-NEXT:    call abort@plt
+; RV32IZFH-NEXT:    call abort
 ;
 ; RV64IZFH-LABEL: br_fcmp_oge:
 ; RV64IZFH:       # %bb.0:
@@ -456,7 +456,7 @@ define void @br_fcmp_oge(half %a, half %b) nounwind {
 ; RV64IZFH-NEXT:  .LBB4_2: # %if.then
 ; RV64IZFH-NEXT:    addi sp, sp, -16
 ; RV64IZFH-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFH-NEXT:    call abort@plt
+; RV64IZFH-NEXT:    call abort
 ;
 ; RV32IZHINX-LABEL: br_fcmp_oge:
 ; RV32IZHINX:       # %bb.0:
@@ -467,7 +467,7 @@ define void @br_fcmp_oge(half %a, half %b) nounwind {
 ; RV32IZHINX-NEXT:  .LBB4_2: # %if.then
 ; RV32IZHINX-NEXT:    addi sp, sp, -16
 ; RV32IZHINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZHINX-NEXT:    call abort@plt
+; RV32IZHINX-NEXT:    call abort
 ;
 ; RV64IZHINX-LABEL: br_fcmp_oge:
 ; RV64IZHINX:       # %bb.0:
@@ -478,7 +478,7 @@ define void @br_fcmp_oge(half %a, half %b) nounwind {
 ; RV64IZHINX-NEXT:  .LBB4_2: # %if.then
 ; RV64IZHINX-NEXT:    addi sp, sp, -16
 ; RV64IZHINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZHINX-NEXT:    call abort@plt
+; RV64IZHINX-NEXT:    call abort
 ;
 ; RV32IZFHMIN-LABEL: br_fcmp_oge:
 ; RV32IZFHMIN:       # %bb.0:
@@ -491,7 +491,7 @@ define void @br_fcmp_oge(half %a, half %b) nounwind {
 ; RV32IZFHMIN-NEXT:  .LBB4_2: # %if.then
 ; RV32IZFHMIN-NEXT:    addi sp, sp, -16
 ; RV32IZFHMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFHMIN-NEXT:    call abort@plt
+; RV32IZFHMIN-NEXT:    call abort
 ;
 ; RV64IZFHMIN-LABEL: br_fcmp_oge:
 ; RV64IZFHMIN:       # %bb.0:
@@ -504,7 +504,7 @@ define void @br_fcmp_oge(half %a, half %b) nounwind {
 ; RV64IZFHMIN-NEXT:  .LBB4_2: # %if.then
 ; RV64IZFHMIN-NEXT:    addi sp, sp, -16
 ; RV64IZFHMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFHMIN-NEXT:    call abort@plt
+; RV64IZFHMIN-NEXT:    call abort
 ;
 ; RV32IZHINXMIN-LABEL: br_fcmp_oge:
 ; RV32IZHINXMIN:       # %bb.0:
@@ -517,7 +517,7 @@ define void @br_fcmp_oge(half %a, half %b) nounwind {
 ; RV32IZHINXMIN-NEXT:  .LBB4_2: # %if.then
 ; RV32IZHINXMIN-NEXT:    addi sp, sp, -16
 ; RV32IZHINXMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZHINXMIN-NEXT:    call abort@plt
+; RV32IZHINXMIN-NEXT:    call abort
 ;
 ; RV64IZHINXMIN-LABEL: br_fcmp_oge:
 ; RV64IZHINXMIN:       # %bb.0:
@@ -530,7 +530,7 @@ define void @br_fcmp_oge(half %a, half %b) nounwind {
 ; RV64IZHINXMIN-NEXT:  .LBB4_2: # %if.then
 ; RV64IZHINXMIN-NEXT:    addi sp, sp, -16
 ; RV64IZHINXMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZHINXMIN-NEXT:    call abort@plt
+; RV64IZHINXMIN-NEXT:    call abort
   %1 = fcmp oge half %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -550,7 +550,7 @@ define void @br_fcmp_olt(half %a, half %b) nounwind {
 ; RV32IZFH-NEXT:  .LBB5_2: # %if.then
 ; RV32IZFH-NEXT:    addi sp, sp, -16
 ; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFH-NEXT:    call abort@plt
+; RV32IZFH-NEXT:    call abort
 ;
 ; RV64IZFH-LABEL: br_fcmp_olt:
 ; RV64IZFH:       # %bb.0:
@@ -561,7 +561,7 @@ define void @br_fcmp_olt(half %a, half %b) nounwind {
 ; RV64IZFH-NEXT:  .LBB5_2: # %if.then
 ; RV64IZFH-NEXT:    addi sp, sp, -16
 ; RV64IZFH-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFH-NEXT:    call abort@plt
+; RV64IZFH-NEXT:    call abort
 ;
 ; RV32IZHINX-LABEL: br_fcmp_olt:
 ; RV32IZHINX:       # %bb.0:
@@ -572,7 +572,7 @@ define void @br_fcmp_olt(half %a, half %b) nounwind {
 ; RV32IZHINX-NEXT:  .LBB5_2: # %if.then
 ; RV32IZHINX-NEXT:    addi sp, sp, -16
 ; RV32IZHINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZHINX-NEXT:    call abort@plt
+; RV32IZHINX-NEXT:    call abort
 ;
 ; RV64IZHINX-LABEL: br_fcmp_olt:
 ; RV64IZHINX:       # %bb.0:
@@ -583,7 +583,7 @@ define void @br_fcmp_olt(half %a, half %b) nounwind {
 ; RV64IZHINX-NEXT:  .LBB5_2: # %if.then
 ; RV64IZHINX-NEXT:    addi sp, sp, -16
 ; RV64IZHINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZHINX-NEXT:    call abort@plt
+; RV64IZHINX-NEXT:    call abort
 ;
 ; RV32IZFHMIN-LABEL: br_fcmp_olt:
 ; RV32IZFHMIN:       # %bb.0:
@@ -596,7 +596,7 @@ define void @br_fcmp_olt(half %a, half %b) nounwind {
 ; RV32IZFHMIN-NEXT:  .LBB5_2: # %if.then
 ; RV32IZFHMIN-NEXT:    addi sp, sp, -16
 ; RV32IZFHMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFHMIN-NEXT:    call abort@plt
+; RV32IZFHMIN-NEXT:    call abort
 ;
 ; RV64IZFHMIN-LABEL: br_fcmp_olt:
 ; RV64IZFHMIN:       # %bb.0:
@@ -609,7 +609,7 @@ define void @br_fcmp_olt(half %a, half %b) nounwind {
 ; RV64IZFHMIN-NEXT:  .LBB5_2: # %if.then
 ; RV64IZFHMIN-NEXT:    addi sp, sp, -16
 ; RV64IZFHMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFHMIN-NEXT:    call abort@plt
+; RV64IZFHMIN-NEXT:    call abort
 ;
 ; RV32IZHINXMIN-LABEL: br_fcmp_olt:
 ; RV32IZHINXMIN:       # %bb.0:
@@ -622,7 +622,7 @@ define void @br_fcmp_olt(half %a, half %b) nounwind {
 ; RV32IZHINXMIN-NEXT:  .LBB5_2: # %if.then
 ; RV32IZHINXMIN-NEXT:    addi sp, sp, -16
 ; RV32IZHINXMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZHINXMIN-NEXT:    call abort@plt
+; RV32IZHINXMIN-NEXT:    call abort
 ;
 ; RV64IZHINXMIN-LABEL: br_fcmp_olt:
 ; RV64IZHINXMIN:       # %bb.0:
@@ -635,7 +635,7 @@ define void @br_fcmp_olt(half %a, half %b) nounwind {
 ; RV64IZHINXMIN-NEXT:  .LBB5_2: # %if.then
 ; RV64IZHINXMIN-NEXT:    addi sp, sp, -16
 ; RV64IZHINXMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZHINXMIN-NEXT:    call abort@plt
+; RV64IZHINXMIN-NEXT:    call abort
   %1 = fcmp olt half %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -655,7 +655,7 @@ define void @br_fcmp_ole(half %a, half %b) nounwind {
 ; RV32IZFH-NEXT:  .LBB6_2: # %if.then
 ; RV32IZFH-NEXT:    addi sp, sp, -16
 ; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFH-NEXT:    call abort@plt
+; RV32IZFH-NEXT:    call abort
 ;
 ; RV64IZFH-LABEL: br_fcmp_ole:
 ; RV64IZFH:       # %bb.0:
@@ -666,7 +666,7 @@ define void @br_fcmp_ole(half %a, half %b) nounwind {
 ; RV64IZFH-NEXT:  .LBB6_2: # %if.then
 ; RV64IZFH-NEXT:    addi sp, sp, -16
 ; RV64IZFH-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFH-NEXT:    call abort@plt
+; RV64IZFH-NEXT:    call abort
 ;
 ; RV32IZHINX-LABEL: br_fcmp_ole:
 ; RV32IZHINX:       # %bb.0:
@@ -677,7 +677,7 @@ define void @br_fcmp_ole(half %a, half %b) nounwind {
 ; RV32IZHINX-NEXT:  .LBB6_2: # %if.then
 ; RV32IZHINX-NEXT:    addi sp, sp, -16
 ; RV32IZHINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZHINX-NEXT:    call abort@plt
+; RV32IZHINX-NEXT:    call abort
 ;
 ; RV64IZHINX-LABEL: br_fcmp_ole:
 ; RV64IZHINX:       # %bb.0:
@@ -688,7 +688,7 @@ define void @br_fcmp_ole(half %a, half %b) nounwind {
 ; RV64IZHINX-NEXT:  .LBB6_2: # %if.then
 ; RV64IZHINX-NEXT:    addi sp, sp, -16
 ; RV64IZHINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZHINX-NEXT:    call abort@plt
+; RV64IZHINX-NEXT:    call abort
 ;
 ; RV32IZFHMIN-LABEL: br_fcmp_ole:
 ; RV32IZFHMIN:       # %bb.0:
@@ -701,7 +701,7 @@ define void @br_fcmp_ole(half %a, half %b) nounwind {
 ; RV32IZFHMIN-NEXT:  .LBB6_2: # %if.then
 ; RV32IZFHMIN-NEXT:    addi sp, sp, -16
 ; RV32IZFHMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFHMIN-NEXT:    call abort@plt
+; RV32IZFHMIN-NEXT:    call abort
 ;
 ; RV64IZFHMIN-LABEL: br_fcmp_ole:
 ; RV64IZFHMIN:       # %bb.0:
@@ -714,7 +714,7 @@ define void @br_fcmp_ole(half %a, half %b) nounwind {
 ; RV64IZFHMIN-NEXT:  .LBB6_2: # %if.then
 ; RV64IZFHMIN-NEXT:    addi sp, sp, -16
 ; RV64IZFHMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFHMIN-NEXT:    call abort@plt
+; RV64IZFHMIN-NEXT:    call abort
 ;
 ; RV32IZHINXMIN-LABEL: br_fcmp_ole:
 ; RV32IZHINXMIN:       # %bb.0:
@@ -727,7 +727,7 @@ define void @br_fcmp_ole(half %a, half %b) nounwind {
 ; RV32IZHINXMIN-NEXT:  .LBB6_2: # %if.then
 ; RV32IZHINXMIN-NEXT:    addi sp, sp, -16
 ; RV32IZHINXMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZHINXMIN-NEXT:    call abort@plt
+; RV32IZHINXMIN-NEXT:    call abort
 ;
 ; RV64IZHINXMIN-LABEL: br_fcmp_ole:
 ; RV64IZHINXMIN:       # %bb.0:
@@ -740,7 +740,7 @@ define void @br_fcmp_ole(half %a, half %b) nounwind {
 ; RV64IZHINXMIN-NEXT:  .LBB6_2: # %if.then
 ; RV64IZHINXMIN-NEXT:    addi sp, sp, -16
 ; RV64IZHINXMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZHINXMIN-NEXT:    call abort@plt
+; RV64IZHINXMIN-NEXT:    call abort
   %1 = fcmp ole half %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -762,7 +762,7 @@ define void @br_fcmp_one(half %a, half %b) nounwind {
 ; RV32IZFH-NEXT:  .LBB7_2: # %if.then
 ; RV32IZFH-NEXT:    addi sp, sp, -16
 ; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFH-NEXT:    call abort@plt
+; RV32IZFH-NEXT:    call abort
 ;
 ; RV64IZFH-LABEL: br_fcmp_one:
 ; RV64IZFH:       # %bb.0:
@@ -775,7 +775,7 @@ define void @br_fcmp_one(half %a, half %b) nounwind {
 ; RV64IZFH-NEXT:  .LBB7_2: # %if.then
 ; RV64IZFH-NEXT:    addi sp, sp, -16
 ; RV64IZFH-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFH-NEXT:    call abort@plt
+; RV64IZFH-NEXT:    call abort
 ;
 ; RV32IZHINX-LABEL: br_fcmp_one:
 ; RV32IZHINX:       # %bb.0:
@@ -788,7 +788,7 @@ define void @br_fcmp_one(half %a, half %b) nounwind {
 ; RV32IZHINX-NEXT:  .LBB7_2: # %if.then
 ; RV32IZHINX-NEXT:    addi sp, sp, -16
 ; RV32IZHINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZHINX-NEXT:    call abort@plt
+; RV32IZHINX-NEXT:    call abort
 ;
 ; RV64IZHINX-LABEL: br_fcmp_one:
 ; RV64IZHINX:       # %bb.0:
@@ -801,7 +801,7 @@ define void @br_fcmp_one(half %a, half %b) nounwind {
 ; RV64IZHINX-NEXT:  .LBB7_2: # %if.then
 ; RV64IZHINX-NEXT:    addi sp, sp, -16
 ; RV64IZHINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZHINX-NEXT:    call abort@plt
+; RV64IZHINX-NEXT:    call abort
 ;
 ; RV32IZFHMIN-LABEL: br_fcmp_one:
 ; RV32IZFHMIN:       # %bb.0:
@@ -816,7 +816,7 @@ define void @br_fcmp_one(half %a, half %b) nounwind {
 ; RV32IZFHMIN-NEXT:  .LBB7_2: # %if.then
 ; RV32IZFHMIN-NEXT:    addi sp, sp, -16
 ; RV32IZFHMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFHMIN-NEXT:    call abort@plt
+; RV32IZFHMIN-NEXT:    call abort
 ;
 ; RV64IZFHMIN-LABEL: br_fcmp_one:
 ; RV64IZFHMIN:       # %bb.0:
@@ -831,7 +831,7 @@ define void @br_fcmp_one(half %a, half %b) nounwind {
 ; RV64IZFHMIN-NEXT:  .LBB7_2: # %if.then
 ; RV64IZFHMIN-NEXT:    addi sp, sp, -16
 ; RV64IZFHMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFHMIN-NEXT:    call abort@plt
+; RV64IZFHMIN-NEXT:    call abort
 ;
 ; RV32IZHINXMIN-LABEL: br_fcmp_one:
 ; RV32IZHINXMIN:       # %bb.0:
@@ -846,7 +846,7 @@ define void @br_fcmp_one(half %a, half %b) nounwind {
 ; RV32IZHINXMIN-NEXT:  .LBB7_2: # %if.then
 ; RV32IZHINXMIN-NEXT:    addi sp, sp, -16
 ; RV32IZHINXMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZHINXMIN-NEXT:    call abort@plt
+; RV32IZHINXMIN-NEXT:    call abort
 ;
 ; RV64IZHINXMIN-LABEL: br_fcmp_one:
 ; RV64IZHINXMIN:       # %bb.0:
@@ -861,7 +861,7 @@ define void @br_fcmp_one(half %a, half %b) nounwind {
 ; RV64IZHINXMIN-NEXT:  .LBB7_2: # %if.then
 ; RV64IZHINXMIN-NEXT:    addi sp, sp, -16
 ; RV64IZHINXMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZHINXMIN-NEXT:    call abort@plt
+; RV64IZHINXMIN-NEXT:    call abort
   %1 = fcmp one half %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -883,7 +883,7 @@ define void @br_fcmp_ord(half %a, half %b) nounwind {
 ; RV32IZFH-NEXT:  .LBB8_2: # %if.then
 ; RV32IZFH-NEXT:    addi sp, sp, -16
 ; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFH-NEXT:    call abort@plt
+; RV32IZFH-NEXT:    call abort
 ;
 ; RV64IZFH-LABEL: br_fcmp_ord:
 ; RV64IZFH:       # %bb.0:
@@ -896,7 +896,7 @@ define void @br_fcmp_ord(half %a, half %b) nounwind {
 ; RV64IZFH-NEXT:  .LBB8_2: # %if.then
 ; RV64IZFH-NEXT:    addi sp, sp, -16
 ; RV64IZFH-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFH-NEXT:    call abort@plt
+; RV64IZFH-NEXT:    call abort
 ;
 ; RV32IZHINX-LABEL: br_fcmp_ord:
 ; RV32IZHINX:       # %bb.0:
@@ -909,7 +909,7 @@ define void @br_fcmp_ord(half %a, half %b) nounwind {
 ; RV32IZHINX-NEXT:  .LBB8_2: # %if.then
 ; RV32IZHINX-NEXT:    addi sp, sp, -16
 ; RV32IZHINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZHINX-NEXT:    call abort@plt
+; RV32IZHINX-NEXT:    call abort
 ;
 ; RV64IZHINX-LABEL: br_fcmp_ord:
 ; RV64IZHINX:       # %bb.0:
@@ -922,7 +922,7 @@ define void @br_fcmp_ord(half %a, half %b) nounwind {
 ; RV64IZHINX-NEXT:  .LBB8_2: # %if.then
 ; RV64IZHINX-NEXT:    addi sp, sp, -16
 ; RV64IZHINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZHINX-NEXT:    call abort@plt
+; RV64IZHINX-NEXT:    call abort
 ;
 ; RV32IZFHMIN-LABEL: br_fcmp_ord:
 ; RV32IZFHMIN:       # %bb.0:
@@ -937,7 +937,7 @@ define void @br_fcmp_ord(half %a, half %b) nounwind {
 ; RV32IZFHMIN-NEXT:  .LBB8_2: # %if.then
 ; RV32IZFHMIN-NEXT:    addi sp, sp, -16
 ; RV32IZFHMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFHMIN-NEXT:    call abort@plt
+; RV32IZFHMIN-NEXT:    call abort
 ;
 ; RV64IZFHMIN-LABEL: br_fcmp_ord:
 ; RV64IZFHMIN:       # %bb.0:
@@ -952,7 +952,7 @@ define void @br_fcmp_ord(half %a, half %b) nounwind {
 ; RV64IZFHMIN-NEXT:  .LBB8_2: # %if.then
 ; RV64IZFHMIN-NEXT:    addi sp, sp, -16
 ; RV64IZFHMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFHMIN-NEXT:    call abort@plt
+; RV64IZFHMIN-NEXT:    call abort
 ;
 ; RV32IZHINXMIN-LABEL: br_fcmp_ord:
 ; RV32IZHINXMIN:       # %bb.0:
@@ -967,7 +967,7 @@ define void @br_fcmp_ord(half %a, half %b) nounwind {
 ; RV32IZHINXMIN-NEXT:  .LBB8_2: # %if.then
 ; RV32IZHINXMIN-NEXT:    addi sp, sp, -16
 ; RV32IZHINXMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZHINXMIN-NEXT:    call abort@plt
+; RV32IZHINXMIN-NEXT:    call abort
 ;
 ; RV64IZHINXMIN-LABEL: br_fcmp_ord:
 ; RV64IZHINXMIN:       # %bb.0:
@@ -982,7 +982,7 @@ define void @br_fcmp_ord(half %a, half %b) nounwind {
 ; RV64IZHINXMIN-NEXT:  .LBB8_2: # %if.then
 ; RV64IZHINXMIN-NEXT:    addi sp, sp, -16
 ; RV64IZHINXMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZHINXMIN-NEXT:    call abort@plt
+; RV64IZHINXMIN-NEXT:    call abort
   %1 = fcmp ord half %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -1004,7 +1004,7 @@ define void @br_fcmp_ueq(half %a, half %b) nounwind {
 ; RV32IZFH-NEXT:  .LBB9_2: # %if.then
 ; RV32IZFH-NEXT:    addi sp, sp, -16
 ; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFH-NEXT:    call abort@plt
+; RV32IZFH-NEXT:    call abort
 ;
 ; RV64IZFH-LABEL: br_fcmp_ueq:
 ; RV64IZFH:       # %bb.0:
@@ -1017,7 +1017,7 @@ define void @br_fcmp_ueq(half %a, half %b) nounwind {
 ; RV64IZFH-NEXT:  .LBB9_2: # %if.then
 ; RV64IZFH-NEXT:    addi sp, sp, -16
 ; RV64IZFH-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFH-NEXT:    call abort@plt
+; RV64IZFH-NEXT:    call abort
 ;
 ; RV32IZHINX-LABEL: br_fcmp_ueq:
 ; RV32IZHINX:       # %bb.0:
@@ -1030,7 +1030,7 @@ define void @br_fcmp_ueq(half %a, half %b) nounwind {
 ; RV32IZHINX-NEXT:  .LBB9_2: # %if.then
 ; RV32IZHINX-NEXT:    addi sp, sp, -16
 ; RV32IZHINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZHINX-NEXT:    call abort@plt
+; RV32IZHINX-NEXT:    call abort
 ;
 ; RV64IZHINX-LABEL: br_fcmp_ueq:
 ; RV64IZHINX:       # %bb.0:
@@ -1043,7 +1043,7 @@ define void @br_fcmp_ueq(half %a, half %b) nounwind {
 ; RV64IZHINX-NEXT:  .LBB9_2: # %if.then
 ; RV64IZHINX-NEXT:    addi sp, sp, -16
 ; RV64IZHINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZHINX-NEXT:    call abort@plt
+; RV64IZHINX-NEXT:    call abort
 ;
 ; RV32IZFHMIN-LABEL: br_fcmp_ueq:
 ; RV32IZFHMIN:       # %bb.0:
@@ -1058,7 +1058,7 @@ define void @br_fcmp_ueq(half %a, half %b) nounwind {
 ; RV32IZFHMIN-NEXT:  .LBB9_2: # %if.then
 ; RV32IZFHMIN-NEXT:    addi sp, sp, -16
 ; RV32IZFHMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFHMIN-NEXT:    call abort@plt
+; RV32IZFHMIN-NEXT:    call abort
 ;
 ; RV64IZFHMIN-LABEL: br_fcmp_ueq:
 ; RV64IZFHMIN:       # %bb.0:
@@ -1073,7 +1073,7 @@ define void @br_fcmp_ueq(half %a, half %b) nounwind {
 ; RV64IZFHMIN-NEXT:  .LBB9_2: # %if.then
 ; RV64IZFHMIN-NEXT:    addi sp, sp, -16
 ; RV64IZFHMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFHMIN-NEXT:    call abort@plt
+; RV64IZFHMIN-NEXT:    call abort
 ;
 ; RV32IZHINXMIN-LABEL: br_fcmp_ueq:
 ; RV32IZHINXMIN:       # %bb.0:
@@ -1088,7 +1088,7 @@ define void @br_fcmp_ueq(half %a, half %b) nounwind {
 ; RV32IZHINXMIN-NEXT:  .LBB9_2: # %if.then
 ; RV32IZHINXMIN-NEXT:    addi sp, sp, -16
 ; RV32IZHINXMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZHINXMIN-NEXT:    call abort@plt
+; RV32IZHINXMIN-NEXT:    call abort
 ;
 ; RV64IZHINXMIN-LABEL: br_fcmp_ueq:
 ; RV64IZHINXMIN:       # %bb.0:
@@ -1103,7 +1103,7 @@ define void @br_fcmp_ueq(half %a, half %b) nounwind {
 ; RV64IZHINXMIN-NEXT:  .LBB9_2: # %if.then
 ; RV64IZHINXMIN-NEXT:    addi sp, sp, -16
 ; RV64IZHINXMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZHINXMIN-NEXT:    call abort@plt
+; RV64IZHINXMIN-NEXT:    call abort
   %1 = fcmp ueq half %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -1123,7 +1123,7 @@ define void @br_fcmp_ugt(half %a, half %b) nounwind {
 ; RV32IZFH-NEXT:  .LBB10_2: # %if.then
 ; RV32IZFH-NEXT:    addi sp, sp, -16
 ; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFH-NEXT:    call abort@plt
+; RV32IZFH-NEXT:    call abort
 ;
 ; RV64IZFH-LABEL: br_fcmp_ugt:
 ; RV64IZFH:       # %bb.0:
@@ -1134,7 +1134,7 @@ define void @br_fcmp_ugt(half %a, half %b) nounwind {
 ; RV64IZFH-NEXT:  .LBB10_2: # %if.then
 ; RV64IZFH-NEXT:    addi sp, sp, -16
 ; RV64IZFH-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFH-NEXT:    call abort@plt
+; RV64IZFH-NEXT:    call abort
 ;
 ; RV32IZHINX-LABEL: br_fcmp_ugt:
 ; RV32IZHINX:       # %bb.0:
@@ -1145,7 +1145,7 @@ define void @br_fcmp_ugt(half %a, half %b) nounwind {
 ; RV32IZHINX-NEXT:  .LBB10_2: # %if.then
 ; RV32IZHINX-NEXT:    addi sp, sp, -16
 ; RV32IZHINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZHINX-NEXT:    call abort@plt
+; RV32IZHINX-NEXT:    call abort
 ;
 ; RV64IZHINX-LABEL: br_fcmp_ugt:
 ; RV64IZHINX:       # %bb.0:
@@ -1156,7 +1156,7 @@ define void @br_fcmp_ugt(half %a, half %b) nounwind {
 ; RV64IZHINX-NEXT:  .LBB10_2: # %if.then
 ; RV64IZHINX-NEXT:    addi sp, sp, -16
 ; RV64IZHINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZHINX-NEXT:    call abort@plt
+; RV64IZHINX-NEXT:    call abort
 ;
 ; RV32IZFHMIN-LABEL: br_fcmp_ugt:
 ; RV32IZFHMIN:       # %bb.0:
@@ -1169,7 +1169,7 @@ define void @br_fcmp_ugt(half %a, half %b) nounwind {
 ; RV32IZFHMIN-NEXT:  .LBB10_2: # %if.then
 ; RV32IZFHMIN-NEXT:    addi sp, sp, -16
 ; RV32IZFHMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFHMIN-NEXT:    call abort@plt
+; RV32IZFHMIN-NEXT:    call abort
 ;
 ; RV64IZFHMIN-LABEL: br_fcmp_ugt:
 ; RV64IZFHMIN:       # %bb.0:
@@ -1182,7 +1182,7 @@ define void @br_fcmp_ugt(half %a, half %b) nounwind {
 ; RV64IZFHMIN-NEXT:  .LBB10_2: # %if.then
 ; RV64IZFHMIN-NEXT:    addi sp, sp, -16
 ; RV64IZFHMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFHMIN-NEXT:    call abort@plt
+; RV64IZFHMIN-NEXT:    call abort
 ;
 ; RV32IZHINXMIN-LABEL: br_fcmp_ugt:
 ; RV32IZHINXMIN:       # %bb.0:
@@ -1195,7 +1195,7 @@ define void @br_fcmp_ugt(half %a, half %b) nounwind {
 ; RV32IZHINXMIN-NEXT:  .LBB10_2: # %if.then
 ; RV32IZHINXMIN-NEXT:    addi sp, sp, -16
 ; RV32IZHINXMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZHINXMIN-NEXT:    call abort@plt
+; RV32IZHINXMIN-NEXT:    call abort
 ;
 ; RV64IZHINXMIN-LABEL: br_fcmp_ugt:
 ; RV64IZHINXMIN:       # %bb.0:
@@ -1208,7 +1208,7 @@ define void @br_fcmp_ugt(half %a, half %b) nounwind {
 ; RV64IZHINXMIN-NEXT:  .LBB10_2: # %if.then
 ; RV64IZHINXMIN-NEXT:    addi sp, sp, -16
 ; RV64IZHINXMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZHINXMIN-NEXT:    call abort@plt
+; RV64IZHINXMIN-NEXT:    call abort
   %1 = fcmp ugt half %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -1228,7 +1228,7 @@ define void @br_fcmp_uge(half %a, half %b) nounwind {
 ; RV32IZFH-NEXT:  .LBB11_2: # %if.then
 ; RV32IZFH-NEXT:    addi sp, sp, -16
 ; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFH-NEXT:    call abort@plt
+; RV32IZFH-NEXT:    call abort
 ;
 ; RV64IZFH-LABEL: br_fcmp_uge:
 ; RV64IZFH:       # %bb.0:
@@ -1239,7 +1239,7 @@ define void @br_fcmp_uge(half %a, half %b) nounwind {
 ; RV64IZFH-NEXT:  .LBB11_2: # %if.then
 ; RV64IZFH-NEXT:    addi sp, sp, -16
 ; RV64IZFH-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFH-NEXT:    call abort@plt
+; RV64IZFH-NEXT:    call abort
 ;
 ; RV32IZHINX-LABEL: br_fcmp_uge:
 ; RV32IZHINX:       # %bb.0:
@@ -1250,7 +1250,7 @@ define void @br_fcmp_uge(half %a, half %b) nounwind {
 ; RV32IZHINX-NEXT:  .LBB11_2: # %if.then
 ; RV32IZHINX-NEXT:    addi sp, sp, -16
 ; RV32IZHINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZHINX-NEXT:    call abort@plt
+; RV32IZHINX-NEXT:    call abort
 ;
 ; RV64IZHINX-LABEL: br_fcmp_uge:
 ; RV64IZHINX:       # %bb.0:
@@ -1261,7 +1261,7 @@ define void @br_fcmp_uge(half %a, half %b) nounwind {
 ; RV64IZHINX-NEXT:  .LBB11_2: # %if.then
 ; RV64IZHINX-NEXT:    addi sp, sp, -16
 ; RV64IZHINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZHINX-NEXT:    call abort@plt
+; RV64IZHINX-NEXT:    call abort
 ;
 ; RV32IZFHMIN-LABEL: br_fcmp_uge:
 ; RV32IZFHMIN:       # %bb.0:
@@ -1274,7 +1274,7 @@ define void @br_fcmp_uge(half %a, half %b) nounwind {
 ; RV32IZFHMIN-NEXT:  .LBB11_2: # %if.then
 ; RV32IZFHMIN-NEXT:    addi sp, sp, -16
 ; RV32IZFHMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFHMIN-NEXT:    call abort@plt
+; RV32IZFHMIN-NEXT:    call abort
 ;
 ; RV64IZFHMIN-LABEL: br_fcmp_uge:
 ; RV64IZFHMIN:       # %bb.0:
@@ -1287,7 +1287,7 @@ define void @br_fcmp_uge(half %a, half %b) nounwind {
 ; RV64IZFHMIN-NEXT:  .LBB11_2: # %if.then
 ; RV64IZFHMIN-NEXT:    addi sp, sp, -16
 ; RV64IZFHMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFHMIN-NEXT:    call abort@plt
+; RV64IZFHMIN-NEXT:    call abort
 ;
 ; RV32IZHINXMIN-LABEL: br_fcmp_uge:
 ; RV32IZHINXMIN:       # %bb.0:
@@ -1300,7 +1300,7 @@ define void @br_fcmp_uge(half %a, half %b) nounwind {
 ; RV32IZHINXMIN-NEXT:  .LBB11_2: # %if.then
 ; RV32IZHINXMIN-NEXT:    addi sp, sp, -16
 ; RV32IZHINXMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZHINXMIN-NEXT:    call abort@plt
+; RV32IZHINXMIN-NEXT:    call abort
 ;
 ; RV64IZHINXMIN-LABEL: br_fcmp_uge:
 ; RV64IZHINXMIN:       # %bb.0:
@@ -1313,7 +1313,7 @@ define void @br_fcmp_uge(half %a, half %b) nounwind {
 ; RV64IZHINXMIN-NEXT:  .LBB11_2: # %if.then
 ; RV64IZHINXMIN-NEXT:    addi sp, sp, -16
 ; RV64IZHINXMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZHINXMIN-NEXT:    call abort@plt
+; RV64IZHINXMIN-NEXT:    call abort
   %1 = fcmp uge half %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -1333,7 +1333,7 @@ define void @br_fcmp_ult(half %a, half %b) nounwind {
 ; RV32IZFH-NEXT:  .LBB12_2: # %if.then
 ; RV32IZFH-NEXT:    addi sp, sp, -16
 ; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFH-NEXT:    call abort@plt
+; RV32IZFH-NEXT:    call abort
 ;
 ; RV64IZFH-LABEL: br_fcmp_ult:
 ; RV64IZFH:       # %bb.0:
@@ -1344,7 +1344,7 @@ define void @br_fcmp_ult(half %a, half %b) nounwind {
 ; RV64IZFH-NEXT:  .LBB12_2: # %if.then
 ; RV64IZFH-NEXT:    addi sp, sp, -16
 ; RV64IZFH-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFH-NEXT:    call abort@plt
+; RV64IZFH-NEXT:    call abort
 ;
 ; RV32IZHINX-LABEL: br_fcmp_ult:
 ; RV32IZHINX:       # %bb.0:
@@ -1355,7 +1355,7 @@ define void @br_fcmp_ult(half %a, half %b) nounwind {
 ; RV32IZHINX-NEXT:  .LBB12_2: # %if.then
 ; RV32IZHINX-NEXT:    addi sp, sp, -16
 ; RV32IZHINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZHINX-NEXT:    call abort@plt
+; RV32IZHINX-NEXT:    call abort
 ;
 ; RV64IZHINX-LABEL: br_fcmp_ult:
 ; RV64IZHINX:       # %bb.0:
@@ -1366,7 +1366,7 @@ define void @br_fcmp_ult(half %a, half %b) nounwind {
 ; RV64IZHINX-NEXT:  .LBB12_2: # %if.then
 ; RV64IZHINX-NEXT:    addi sp, sp, -16
 ; RV64IZHINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZHINX-NEXT:    call abort@plt
+; RV64IZHINX-NEXT:    call abort
 ;
 ; RV32IZFHMIN-LABEL: br_fcmp_ult:
 ; RV32IZFHMIN:       # %bb.0:
@@ -1379,7 +1379,7 @@ define void @br_fcmp_ult(half %a, half %b) nounwind {
 ; RV32IZFHMIN-NEXT:  .LBB12_2: # %if.then
 ; RV32IZFHMIN-NEXT:    addi sp, sp, -16
 ; RV32IZFHMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFHMIN-NEXT:    call abort@plt
+; RV32IZFHMIN-NEXT:    call abort
 ;
 ; RV64IZFHMIN-LABEL: br_fcmp_ult:
 ; RV64IZFHMIN:       # %bb.0:
@@ -1392,7 +1392,7 @@ define void @br_fcmp_ult(half %a, half %b) nounwind {
 ; RV64IZFHMIN-NEXT:  .LBB12_2: # %if.then
 ; RV64IZFHMIN-NEXT:    addi sp, sp, -16
 ; RV64IZFHMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFHMIN-NEXT:    call abort@plt
+; RV64IZFHMIN-NEXT:    call abort
 ;
 ; RV32IZHINXMIN-LABEL: br_fcmp_ult:
 ; RV32IZHINXMIN:       # %bb.0:
@@ -1405,7 +1405,7 @@ define void @br_fcmp_ult(half %a, half %b) nounwind {
 ; RV32IZHINXMIN-NEXT:  .LBB12_2: # %if.then
 ; RV32IZHINXMIN-NEXT:    addi sp, sp, -16
 ; RV32IZHINXMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZHINXMIN-NEXT:    call abort@plt
+; RV32IZHINXMIN-NEXT:    call abort
 ;
 ; RV64IZHINXMIN-LABEL: br_fcmp_ult:
 ; RV64IZHINXMIN:       # %bb.0:
@@ -1418,7 +1418,7 @@ define void @br_fcmp_ult(half %a, half %b) nounwind {
 ; RV64IZHINXMIN-NEXT:  .LBB12_2: # %if.then
 ; RV64IZHINXMIN-NEXT:    addi sp, sp, -16
 ; RV64IZHINXMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZHINXMIN-NEXT:    call abort@plt
+; RV64IZHINXMIN-NEXT:    call abort
   %1 = fcmp ult half %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -1438,7 +1438,7 @@ define void @br_fcmp_ule(half %a, half %b) nounwind {
 ; RV32IZFH-NEXT:  .LBB13_2: # %if.then
 ; RV32IZFH-NEXT:    addi sp, sp, -16
 ; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFH-NEXT:    call abort@plt
+; RV32IZFH-NEXT:    call abort
 ;
 ; RV64IZFH-LABEL: br_fcmp_ule:
 ; RV64IZFH:       # %bb.0:
@@ -1449,7 +1449,7 @@ define void @br_fcmp_ule(half %a, half %b) nounwind {
 ; RV64IZFH-NEXT:  .LBB13_2: # %if.then
 ; RV64IZFH-NEXT:    addi sp, sp, -16
 ; RV64IZFH-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFH-NEXT:    call abort@plt
+; RV64IZFH-NEXT:    call abort
 ;
 ; RV32IZHINX-LABEL: br_fcmp_ule:
 ; RV32IZHINX:       # %bb.0:
@@ -1460,7 +1460,7 @@ define void @br_fcmp_ule(half %a, half %b) nounwind {
 ; RV32IZHINX-NEXT:  .LBB13_2: # %if.then
 ; RV32IZHINX-NEXT:    addi sp, sp, -16
 ; RV32IZHINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZHINX-NEXT:    call abort@plt
+; RV32IZHINX-NEXT:    call abort
 ;
 ; RV64IZHINX-LABEL: br_fcmp_ule:
 ; RV64IZHINX:       # %bb.0:
@@ -1471,7 +1471,7 @@ define void @br_fcmp_ule(half %a, half %b) nounwind {
 ; RV64IZHINX-NEXT:  .LBB13_2: # %if.then
 ; RV64IZHINX-NEXT:    addi sp, sp, -16
 ; RV64IZHINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZHINX-NEXT:    call abort@plt
+; RV64IZHINX-NEXT:    call abort
 ;
 ; RV32IZFHMIN-LABEL: br_fcmp_ule:
 ; RV32IZFHMIN:       # %bb.0:
@@ -1484,7 +1484,7 @@ define void @br_fcmp_ule(half %a, half %b) nounwind {
 ; RV32IZFHMIN-NEXT:  .LBB13_2: # %if.then
 ; RV32IZFHMIN-NEXT:    addi sp, sp, -16
 ; RV32IZFHMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFHMIN-NEXT:    call abort@plt
+; RV32IZFHMIN-NEXT:    call abort
 ;
 ; RV64IZFHMIN-LABEL: br_fcmp_ule:
 ; RV64IZFHMIN:       # %bb.0:
@@ -1497,7 +1497,7 @@ define void @br_fcmp_ule(half %a, half %b) nounwind {
 ; RV64IZFHMIN-NEXT:  .LBB13_2: # %if.then
 ; RV64IZFHMIN-NEXT:    addi sp, sp, -16
 ; RV64IZFHMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFHMIN-NEXT:    call abort@plt
+; RV64IZFHMIN-NEXT:    call abort
 ;
 ; RV32IZHINXMIN-LABEL: br_fcmp_ule:
 ; RV32IZHINXMIN:       # %bb.0:
@@ -1510,7 +1510,7 @@ define void @br_fcmp_ule(half %a, half %b) nounwind {
 ; RV32IZHINXMIN-NEXT:  .LBB13_2: # %if.then
 ; RV32IZHINXMIN-NEXT:    addi sp, sp, -16
 ; RV32IZHINXMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZHINXMIN-NEXT:    call abort@plt
+; RV32IZHINXMIN-NEXT:    call abort
 ;
 ; RV64IZHINXMIN-LABEL: br_fcmp_ule:
 ; RV64IZHINXMIN:       # %bb.0:
@@ -1523,7 +1523,7 @@ define void @br_fcmp_ule(half %a, half %b) nounwind {
 ; RV64IZHINXMIN-NEXT:  .LBB13_2: # %if.then
 ; RV64IZHINXMIN-NEXT:    addi sp, sp, -16
 ; RV64IZHINXMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZHINXMIN-NEXT:    call abort@plt
+; RV64IZHINXMIN-NEXT:    call abort
   %1 = fcmp ule half %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -1543,7 +1543,7 @@ define void @br_fcmp_une(half %a, half %b) nounwind {
 ; RV32IZFH-NEXT:  .LBB14_2: # %if.then
 ; RV32IZFH-NEXT:    addi sp, sp, -16
 ; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFH-NEXT:    call abort@plt
+; RV32IZFH-NEXT:    call abort
 ;
 ; RV64IZFH-LABEL: br_fcmp_une:
 ; RV64IZFH:       # %bb.0:
@@ -1554,7 +1554,7 @@ define void @br_fcmp_une(half %a, half %b) nounwind {
 ; RV64IZFH-NEXT:  .LBB14_2: # %if.then
 ; RV64IZFH-NEXT:    addi sp, sp, -16
 ; RV64IZFH-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFH-NEXT:    call abort@plt
+; RV64IZFH-NEXT:    call abort
 ;
 ; RV32IZHINX-LABEL: br_fcmp_une:
 ; RV32IZHINX:       # %bb.0:
@@ -1565,7 +1565,7 @@ define void @br_fcmp_une(half %a, half %b) nounwind {
 ; RV32IZHINX-NEXT:  .LBB14_2: # %if.then
 ; RV32IZHINX-NEXT:    addi sp, sp, -16
 ; RV32IZHINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZHINX-NEXT:    call abort@plt
+; RV32IZHINX-NEXT:    call abort
 ;
 ; RV64IZHINX-LABEL: br_fcmp_une:
 ; RV64IZHINX:       # %bb.0:
@@ -1576,7 +1576,7 @@ define void @br_fcmp_une(half %a, half %b) nounwind {
 ; RV64IZHINX-NEXT:  .LBB14_2: # %if.then
 ; RV64IZHINX-NEXT:    addi sp, sp, -16
 ; RV64IZHINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZHINX-NEXT:    call abort@plt
+; RV64IZHINX-NEXT:    call abort
 ;
 ; RV32IZFHMIN-LABEL: br_fcmp_une:
 ; RV32IZFHMIN:       # %bb.0:
@@ -1589,7 +1589,7 @@ define void @br_fcmp_une(half %a, half %b) nounwind {
 ; RV32IZFHMIN-NEXT:  .LBB14_2: # %if.then
 ; RV32IZFHMIN-NEXT:    addi sp, sp, -16
 ; RV32IZFHMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFHMIN-NEXT:    call abort@plt
+; RV32IZFHMIN-NEXT:    call abort
 ;
 ; RV64IZFHMIN-LABEL: br_fcmp_une:
 ; RV64IZFHMIN:       # %bb.0:
@@ -1602,7 +1602,7 @@ define void @br_fcmp_une(half %a, half %b) nounwind {
 ; RV64IZFHMIN-NEXT:  .LBB14_2: # %if.then
 ; RV64IZFHMIN-NEXT:    addi sp, sp, -16
 ; RV64IZFHMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFHMIN-NEXT:    call abort@plt
+; RV64IZFHMIN-NEXT:    call abort
 ;
 ; RV32IZHINXMIN-LABEL: br_fcmp_une:
 ; RV32IZHINXMIN:       # %bb.0:
@@ -1615,7 +1615,7 @@ define void @br_fcmp_une(half %a, half %b) nounwind {
 ; RV32IZHINXMIN-NEXT:  .LBB14_2: # %if.then
 ; RV32IZHINXMIN-NEXT:    addi sp, sp, -16
 ; RV32IZHINXMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZHINXMIN-NEXT:    call abort@plt
+; RV32IZHINXMIN-NEXT:    call abort
 ;
 ; RV64IZHINXMIN-LABEL: br_fcmp_une:
 ; RV64IZHINXMIN:       # %bb.0:
@@ -1628,7 +1628,7 @@ define void @br_fcmp_une(half %a, half %b) nounwind {
 ; RV64IZHINXMIN-NEXT:  .LBB14_2: # %if.then
 ; RV64IZHINXMIN-NEXT:    addi sp, sp, -16
 ; RV64IZHINXMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZHINXMIN-NEXT:    call abort@plt
+; RV64IZHINXMIN-NEXT:    call abort
   %1 = fcmp une half %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -1650,7 +1650,7 @@ define void @br_fcmp_uno(half %a, half %b) nounwind {
 ; RV32IZFH-NEXT:  .LBB15_2: # %if.then
 ; RV32IZFH-NEXT:    addi sp, sp, -16
 ; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFH-NEXT:    call abort@plt
+; RV32IZFH-NEXT:    call abort
 ;
 ; RV64IZFH-LABEL: br_fcmp_uno:
 ; RV64IZFH:       # %bb.0:
@@ -1663,7 +1663,7 @@ define void @br_fcmp_uno(half %a, half %b) nounwind {
 ; RV64IZFH-NEXT:  .LBB15_2: # %if.then
 ; RV64IZFH-NEXT:    addi sp, sp, -16
 ; RV64IZFH-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFH-NEXT:    call abort@plt
+; RV64IZFH-NEXT:    call abort
 ;
 ; RV32IZHINX-LABEL: br_fcmp_uno:
 ; RV32IZHINX:       # %bb.0:
@@ -1676,7 +1676,7 @@ define void @br_fcmp_uno(half %a, half %b) nounwind {
 ; RV32IZHINX-NEXT:  .LBB15_2: # %if.then
 ; RV32IZHINX-NEXT:    addi sp, sp, -16
 ; RV32IZHINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZHINX-NEXT:    call abort@plt
+; RV32IZHINX-NEXT:    call abort
 ;
 ; RV64IZHINX-LABEL: br_fcmp_uno:
 ; RV64IZHINX:       # %bb.0:
@@ -1689,7 +1689,7 @@ define void @br_fcmp_uno(half %a, half %b) nounwind {
 ; RV64IZHINX-NEXT:  .LBB15_2: # %if.then
 ; RV64IZHINX-NEXT:    addi sp, sp, -16
 ; RV64IZHINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZHINX-NEXT:    call abort@plt
+; RV64IZHINX-NEXT:    call abort
 ;
 ; RV32IZFHMIN-LABEL: br_fcmp_uno:
 ; RV32IZFHMIN:       # %bb.0:
@@ -1704,7 +1704,7 @@ define void @br_fcmp_uno(half %a, half %b) nounwind {
 ; RV32IZFHMIN-NEXT:  .LBB15_2: # %if.then
 ; RV32IZFHMIN-NEXT:    addi sp, sp, -16
 ; RV32IZFHMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFHMIN-NEXT:    call abort@plt
+; RV32IZFHMIN-NEXT:    call abort
 ;
 ; RV64IZFHMIN-LABEL: br_fcmp_uno:
 ; RV64IZFHMIN:       # %bb.0:
@@ -1719,7 +1719,7 @@ define void @br_fcmp_uno(half %a, half %b) nounwind {
 ; RV64IZFHMIN-NEXT:  .LBB15_2: # %if.then
 ; RV64IZFHMIN-NEXT:    addi sp, sp, -16
 ; RV64IZFHMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFHMIN-NEXT:    call abort@plt
+; RV64IZFHMIN-NEXT:    call abort
 ;
 ; RV32IZHINXMIN-LABEL: br_fcmp_uno:
 ; RV32IZHINXMIN:       # %bb.0:
@@ -1734,7 +1734,7 @@ define void @br_fcmp_uno(half %a, half %b) nounwind {
 ; RV32IZHINXMIN-NEXT:  .LBB15_2: # %if.then
 ; RV32IZHINXMIN-NEXT:    addi sp, sp, -16
 ; RV32IZHINXMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZHINXMIN-NEXT:    call abort@plt
+; RV32IZHINXMIN-NEXT:    call abort
 ;
 ; RV64IZHINXMIN-LABEL: br_fcmp_uno:
 ; RV64IZHINXMIN:       # %bb.0:
@@ -1749,7 +1749,7 @@ define void @br_fcmp_uno(half %a, half %b) nounwind {
 ; RV64IZHINXMIN-NEXT:  .LBB15_2: # %if.then
 ; RV64IZHINXMIN-NEXT:    addi sp, sp, -16
 ; RV64IZHINXMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZHINXMIN-NEXT:    call abort@plt
+; RV64IZHINXMIN-NEXT:    call abort
   %1 = fcmp uno half %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -1769,7 +1769,7 @@ define void @br_fcmp_true(half %a, half %b) nounwind {
 ; RV32IZFH-NEXT:  .LBB16_2: # %if.then
 ; RV32IZFH-NEXT:    addi sp, sp, -16
 ; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFH-NEXT:    call abort@plt
+; RV32IZFH-NEXT:    call abort
 ;
 ; RV64IZFH-LABEL: br_fcmp_true:
 ; RV64IZFH:       # %bb.0:
@@ -1780,7 +1780,7 @@ define void @br_fcmp_true(half %a, half %b) nounwind {
 ; RV64IZFH-NEXT:  .LBB16_2: # %if.then
 ; RV64IZFH-NEXT:    addi sp, sp, -16
 ; RV64IZFH-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFH-NEXT:    call abort@plt
+; RV64IZFH-NEXT:    call abort
 ;
 ; RV32IZHINX-LABEL: br_fcmp_true:
 ; RV32IZHINX:       # %bb.0:
@@ -1791,7 +1791,7 @@ define void @br_fcmp_true(half %a, half %b) nounwind {
 ; RV32IZHINX-NEXT:  .LBB16_2: # %if.then
 ; RV32IZHINX-NEXT:    addi sp, sp, -16
 ; RV32IZHINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZHINX-NEXT:    call abort@plt
+; RV32IZHINX-NEXT:    call abort
 ;
 ; RV64IZHINX-LABEL: br_fcmp_true:
 ; RV64IZHINX:       # %bb.0:
@@ -1802,7 +1802,7 @@ define void @br_fcmp_true(half %a, half %b) nounwind {
 ; RV64IZHINX-NEXT:  .LBB16_2: # %if.then
 ; RV64IZHINX-NEXT:    addi sp, sp, -16
 ; RV64IZHINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZHINX-NEXT:    call abort@plt
+; RV64IZHINX-NEXT:    call abort
 ;
 ; RV32IZFHMIN-LABEL: br_fcmp_true:
 ; RV32IZFHMIN:       # %bb.0:
@@ -1813,7 +1813,7 @@ define void @br_fcmp_true(half %a, half %b) nounwind {
 ; RV32IZFHMIN-NEXT:  .LBB16_2: # %if.then
 ; RV32IZFHMIN-NEXT:    addi sp, sp, -16
 ; RV32IZFHMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFHMIN-NEXT:    call abort@plt
+; RV32IZFHMIN-NEXT:    call abort
 ;
 ; RV64IZFHMIN-LABEL: br_fcmp_true:
 ; RV64IZFHMIN:       # %bb.0:
@@ -1824,7 +1824,7 @@ define void @br_fcmp_true(half %a, half %b) nounwind {
 ; RV64IZFHMIN-NEXT:  .LBB16_2: # %if.then
 ; RV64IZFHMIN-NEXT:    addi sp, sp, -16
 ; RV64IZFHMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFHMIN-NEXT:    call abort@plt
+; RV64IZFHMIN-NEXT:    call abort
 ;
 ; RV32IZHINXMIN-LABEL: br_fcmp_true:
 ; RV32IZHINXMIN:       # %bb.0:
@@ -1835,7 +1835,7 @@ define void @br_fcmp_true(half %a, half %b) nounwind {
 ; RV32IZHINXMIN-NEXT:  .LBB16_2: # %if.then
 ; RV32IZHINXMIN-NEXT:    addi sp, sp, -16
 ; RV32IZHINXMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZHINXMIN-NEXT:    call abort@plt
+; RV32IZHINXMIN-NEXT:    call abort
 ;
 ; RV64IZHINXMIN-LABEL: br_fcmp_true:
 ; RV64IZHINXMIN:       # %bb.0:
@@ -1846,7 +1846,7 @@ define void @br_fcmp_true(half %a, half %b) nounwind {
 ; RV64IZHINXMIN-NEXT:  .LBB16_2: # %if.then
 ; RV64IZHINXMIN-NEXT:    addi sp, sp, -16
 ; RV64IZHINXMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZHINXMIN-NEXT:    call abort@plt
+; RV64IZHINXMIN-NEXT:    call abort
   %1 = fcmp true half %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
diff --git a/llvm/test/CodeGen/RISCV/half-convert-strict.ll b/llvm/test/CodeGen/RISCV/half-convert-strict.ll
index f6f85d3afeb00b..f03a020762bb3c 100644
--- a/llvm/test/CodeGen/RISCV/half-convert-strict.ll
+++ b/llvm/test/CodeGen/RISCV/half-convert-strict.ll
@@ -460,7 +460,7 @@ define i64 @fcvt_l_h(half %a) nounwind strictfp {
 ; RV32IZFH:       # %bb.0:
 ; RV32IZFH-NEXT:    addi sp, sp, -16
 ; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFH-NEXT:    call __fixhfdi@plt
+; RV32IZFH-NEXT:    call __fixhfdi
 ; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFH-NEXT:    addi sp, sp, 16
 ; RV32IZFH-NEXT:    ret
@@ -474,7 +474,7 @@ define i64 @fcvt_l_h(half %a) nounwind strictfp {
 ; RV32IZHINX:       # %bb.0:
 ; RV32IZHINX-NEXT:    addi sp, sp, -16
 ; RV32IZHINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZHINX-NEXT:    call __fixhfdi@plt
+; RV32IZHINX-NEXT:    call __fixhfdi
 ; RV32IZHINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZHINX-NEXT:    addi sp, sp, 16
 ; RV32IZHINX-NEXT:    ret
@@ -488,7 +488,7 @@ define i64 @fcvt_l_h(half %a) nounwind strictfp {
 ; RV32IDZFH:       # %bb.0:
 ; RV32IDZFH-NEXT:    addi sp, sp, -16
 ; RV32IDZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IDZFH-NEXT:    call __fixhfdi@plt
+; RV32IDZFH-NEXT:    call __fixhfdi
 ; RV32IDZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IDZFH-NEXT:    addi sp, sp, 16
 ; RV32IDZFH-NEXT:    ret
@@ -502,7 +502,7 @@ define i64 @fcvt_l_h(half %a) nounwind strictfp {
 ; RV32IZDINXZHINX:       # %bb.0:
 ; RV32IZDINXZHINX-NEXT:    addi sp, sp, -16
 ; RV32IZDINXZHINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZDINXZHINX-NEXT:    call __fixhfdi@plt
+; RV32IZDINXZHINX-NEXT:    call __fixhfdi
 ; RV32IZDINXZHINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZDINXZHINX-NEXT:    addi sp, sp, 16
 ; RV32IZDINXZHINX-NEXT:    ret
@@ -516,7 +516,7 @@ define i64 @fcvt_l_h(half %a) nounwind strictfp {
 ; CHECK32-IZFHMIN:       # %bb.0:
 ; CHECK32-IZFHMIN-NEXT:    addi sp, sp, -16
 ; CHECK32-IZFHMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; CHECK32-IZFHMIN-NEXT:    call __fixhfdi@plt
+; CHECK32-IZFHMIN-NEXT:    call __fixhfdi
 ; CHECK32-IZFHMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; CHECK32-IZFHMIN-NEXT:    addi sp, sp, 16
 ; CHECK32-IZFHMIN-NEXT:    ret
@@ -531,7 +531,7 @@ define i64 @fcvt_l_h(half %a) nounwind strictfp {
 ; CHECK32-IZHINXMIN:       # %bb.0:
 ; CHECK32-IZHINXMIN-NEXT:    addi sp, sp, -16
 ; CHECK32-IZHINXMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; CHECK32-IZHINXMIN-NEXT:    call __fixhfdi@plt
+; CHECK32-IZHINXMIN-NEXT:    call __fixhfdi
 ; CHECK32-IZHINXMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; CHECK32-IZHINXMIN-NEXT:    addi sp, sp, 16
 ; CHECK32-IZHINXMIN-NEXT:    ret
@@ -546,7 +546,7 @@ define i64 @fcvt_l_h(half %a) nounwind strictfp {
 ; CHECK32-IZDINXZHINXMIN:       # %bb.0:
 ; CHECK32-IZDINXZHINXMIN-NEXT:    addi sp, sp, -16
 ; CHECK32-IZDINXZHINXMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; CHECK32-IZDINXZHINXMIN-NEXT:    call __fixhfdi@plt
+; CHECK32-IZDINXZHINXMIN-NEXT:    call __fixhfdi
 ; CHECK32-IZDINXZHINXMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; CHECK32-IZDINXZHINXMIN-NEXT:    addi sp, sp, 16
 ; CHECK32-IZDINXZHINXMIN-NEXT:    ret
@@ -566,7 +566,7 @@ define i64 @fcvt_lu_h(half %a) nounwind strictfp {
 ; RV32IZFH:       # %bb.0:
 ; RV32IZFH-NEXT:    addi sp, sp, -16
 ; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFH-NEXT:    call __fixunshfdi@plt
+; RV32IZFH-NEXT:    call __fixunshfdi
 ; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFH-NEXT:    addi sp, sp, 16
 ; RV32IZFH-NEXT:    ret
@@ -580,7 +580,7 @@ define i64 @fcvt_lu_h(half %a) nounwind strictfp {
 ; RV32IZHINX:       # %bb.0:
 ; RV32IZHINX-NEXT:    addi sp, sp, -16
 ; RV32IZHINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZHINX-NEXT:    call __fixunshfdi@plt
+; RV32IZHINX-NEXT:    call __fixunshfdi
 ; RV32IZHINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZHINX-NEXT:    addi sp, sp, 16
 ; RV32IZHINX-NEXT:    ret
@@ -594,7 +594,7 @@ define i64 @fcvt_lu_h(half %a) nounwind strictfp {
 ; RV32IDZFH:       # %bb.0:
 ; RV32IDZFH-NEXT:    addi sp, sp, -16
 ; RV32IDZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IDZFH-NEXT:    call __fixunshfdi@plt
+; RV32IDZFH-NEXT:    call __fixunshfdi
 ; RV32IDZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IDZFH-NEXT:    addi sp, sp, 16
 ; RV32IDZFH-NEXT:    ret
@@ -608,7 +608,7 @@ define i64 @fcvt_lu_h(half %a) nounwind strictfp {
 ; RV32IZDINXZHINX:       # %bb.0:
 ; RV32IZDINXZHINX-NEXT:    addi sp, sp, -16
 ; RV32IZDINXZHINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZDINXZHINX-NEXT:    call __fixunshfdi@plt
+; RV32IZDINXZHINX-NEXT:    call __fixunshfdi
 ; RV32IZDINXZHINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZDINXZHINX-NEXT:    addi sp, sp, 16
 ; RV32IZDINXZHINX-NEXT:    ret
@@ -622,7 +622,7 @@ define i64 @fcvt_lu_h(half %a) nounwind strictfp {
 ; CHECK32-IZFHMIN:       # %bb.0:
 ; CHECK32-IZFHMIN-NEXT:    addi sp, sp, -16
 ; CHECK32-IZFHMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; CHECK32-IZFHMIN-NEXT:    call __fixunshfdi@plt
+; CHECK32-IZFHMIN-NEXT:    call __fixunshfdi
 ; CHECK32-IZFHMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; CHECK32-IZFHMIN-NEXT:    addi sp, sp, 16
 ; CHECK32-IZFHMIN-NEXT:    ret
@@ -637,7 +637,7 @@ define i64 @fcvt_lu_h(half %a) nounwind strictfp {
 ; CHECK32-IZHINXMIN:       # %bb.0:
 ; CHECK32-IZHINXMIN-NEXT:    addi sp, sp, -16
 ; CHECK32-IZHINXMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; CHECK32-IZHINXMIN-NEXT:    call __fixunshfdi@plt
+; CHECK32-IZHINXMIN-NEXT:    call __fixunshfdi
 ; CHECK32-IZHINXMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; CHECK32-IZHINXMIN-NEXT:    addi sp, sp, 16
 ; CHECK32-IZHINXMIN-NEXT:    ret
@@ -652,7 +652,7 @@ define i64 @fcvt_lu_h(half %a) nounwind strictfp {
 ; CHECK32-IZDINXZHINXMIN:       # %bb.0:
 ; CHECK32-IZDINXZHINXMIN-NEXT:    addi sp, sp, -16
 ; CHECK32-IZDINXZHINXMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; CHECK32-IZDINXZHINXMIN-NEXT:    call __fixunshfdi@plt
+; CHECK32-IZDINXZHINXMIN-NEXT:    call __fixunshfdi
 ; CHECK32-IZDINXZHINXMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; CHECK32-IZDINXZHINXMIN-NEXT:    addi sp, sp, 16
 ; CHECK32-IZDINXZHINXMIN-NEXT:    ret
@@ -1359,7 +1359,7 @@ define half @fcvt_h_l(i64 %a) nounwind strictfp {
 ; RV32IZFH:       # %bb.0:
 ; RV32IZFH-NEXT:    addi sp, sp, -16
 ; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFH-NEXT:    call __floatdihf@plt
+; RV32IZFH-NEXT:    call __floatdihf
 ; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFH-NEXT:    addi sp, sp, 16
 ; RV32IZFH-NEXT:    ret
@@ -1373,7 +1373,7 @@ define half @fcvt_h_l(i64 %a) nounwind strictfp {
 ; RV32IZHINX:       # %bb.0:
 ; RV32IZHINX-NEXT:    addi sp, sp, -16
 ; RV32IZHINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZHINX-NEXT:    call __floatdihf@plt
+; RV32IZHINX-NEXT:    call __floatdihf
 ; RV32IZHINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZHINX-NEXT:    addi sp, sp, 16
 ; RV32IZHINX-NEXT:    ret
@@ -1387,7 +1387,7 @@ define half @fcvt_h_l(i64 %a) nounwind strictfp {
 ; RV32IDZFH:       # %bb.0:
 ; RV32IDZFH-NEXT:    addi sp, sp, -16
 ; RV32IDZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IDZFH-NEXT:    call __floatdihf@plt
+; RV32IDZFH-NEXT:    call __floatdihf
 ; RV32IDZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IDZFH-NEXT:    addi sp, sp, 16
 ; RV32IDZFH-NEXT:    ret
@@ -1401,7 +1401,7 @@ define half @fcvt_h_l(i64 %a) nounwind strictfp {
 ; RV32IZDINXZHINX:       # %bb.0:
 ; RV32IZDINXZHINX-NEXT:    addi sp, sp, -16
 ; RV32IZDINXZHINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZDINXZHINX-NEXT:    call __floatdihf@plt
+; RV32IZDINXZHINX-NEXT:    call __floatdihf
 ; RV32IZDINXZHINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZDINXZHINX-NEXT:    addi sp, sp, 16
 ; RV32IZDINXZHINX-NEXT:    ret
@@ -1415,7 +1415,7 @@ define half @fcvt_h_l(i64 %a) nounwind strictfp {
 ; CHECK32-IZFHMIN:       # %bb.0:
 ; CHECK32-IZFHMIN-NEXT:    addi sp, sp, -16
 ; CHECK32-IZFHMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; CHECK32-IZFHMIN-NEXT:    call __floatdihf@plt
+; CHECK32-IZFHMIN-NEXT:    call __floatdihf
 ; CHECK32-IZFHMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; CHECK32-IZFHMIN-NEXT:    addi sp, sp, 16
 ; CHECK32-IZFHMIN-NEXT:    ret
@@ -1430,7 +1430,7 @@ define half @fcvt_h_l(i64 %a) nounwind strictfp {
 ; CHECK32-IZHINXMIN:       # %bb.0:
 ; CHECK32-IZHINXMIN-NEXT:    addi sp, sp, -16
 ; CHECK32-IZHINXMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; CHECK32-IZHINXMIN-NEXT:    call __floatdihf@plt
+; CHECK32-IZHINXMIN-NEXT:    call __floatdihf
 ; CHECK32-IZHINXMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; CHECK32-IZHINXMIN-NEXT:    addi sp, sp, 16
 ; CHECK32-IZHINXMIN-NEXT:    ret
@@ -1445,7 +1445,7 @@ define half @fcvt_h_l(i64 %a) nounwind strictfp {
 ; CHECK32-IZDINXZHINXMIN:       # %bb.0:
 ; CHECK32-IZDINXZHINXMIN-NEXT:    addi sp, sp, -16
 ; CHECK32-IZDINXZHINXMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; CHECK32-IZDINXZHINXMIN-NEXT:    call __floatdihf@plt
+; CHECK32-IZDINXZHINXMIN-NEXT:    call __floatdihf
 ; CHECK32-IZDINXZHINXMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; CHECK32-IZDINXZHINXMIN-NEXT:    addi sp, sp, 16
 ; CHECK32-IZDINXZHINXMIN-NEXT:    ret
@@ -1465,7 +1465,7 @@ define half @fcvt_h_lu(i64 %a) nounwind strictfp {
 ; RV32IZFH:       # %bb.0:
 ; RV32IZFH-NEXT:    addi sp, sp, -16
 ; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFH-NEXT:    call __floatundihf@plt
+; RV32IZFH-NEXT:    call __floatundihf
 ; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFH-NEXT:    addi sp, sp, 16
 ; RV32IZFH-NEXT:    ret
@@ -1479,7 +1479,7 @@ define half @fcvt_h_lu(i64 %a) nounwind strictfp {
 ; RV32IZHINX:       # %bb.0:
 ; RV32IZHINX-NEXT:    addi sp, sp, -16
 ; RV32IZHINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZHINX-NEXT:    call __floatundihf@plt
+; RV32IZHINX-NEXT:    call __floatundihf
 ; RV32IZHINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZHINX-NEXT:    addi sp, sp, 16
 ; RV32IZHINX-NEXT:    ret
@@ -1493,7 +1493,7 @@ define half @fcvt_h_lu(i64 %a) nounwind strictfp {
 ; RV32IDZFH:       # %bb.0:
 ; RV32IDZFH-NEXT:    addi sp, sp, -16
 ; RV32IDZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IDZFH-NEXT:    call __floatundihf@plt
+; RV32IDZFH-NEXT:    call __floatundihf
 ; RV32IDZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IDZFH-NEXT:    addi sp, sp, 16
 ; RV32IDZFH-NEXT:    ret
@@ -1507,7 +1507,7 @@ define half @fcvt_h_lu(i64 %a) nounwind strictfp {
 ; RV32IZDINXZHINX:       # %bb.0:
 ; RV32IZDINXZHINX-NEXT:    addi sp, sp, -16
 ; RV32IZDINXZHINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZDINXZHINX-NEXT:    call __floatundihf@plt
+; RV32IZDINXZHINX-NEXT:    call __floatundihf
 ; RV32IZDINXZHINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZDINXZHINX-NEXT:    addi sp, sp, 16
 ; RV32IZDINXZHINX-NEXT:    ret
@@ -1521,7 +1521,7 @@ define half @fcvt_h_lu(i64 %a) nounwind strictfp {
 ; CHECK32-IZFHMIN:       # %bb.0:
 ; CHECK32-IZFHMIN-NEXT:    addi sp, sp, -16
 ; CHECK32-IZFHMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; CHECK32-IZFHMIN-NEXT:    call __floatundihf@plt
+; CHECK32-IZFHMIN-NEXT:    call __floatundihf
 ; CHECK32-IZFHMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; CHECK32-IZFHMIN-NEXT:    addi sp, sp, 16
 ; CHECK32-IZFHMIN-NEXT:    ret
@@ -1536,7 +1536,7 @@ define half @fcvt_h_lu(i64 %a) nounwind strictfp {
 ; CHECK32-IZHINXMIN:       # %bb.0:
 ; CHECK32-IZHINXMIN-NEXT:    addi sp, sp, -16
 ; CHECK32-IZHINXMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; CHECK32-IZHINXMIN-NEXT:    call __floatundihf@plt
+; CHECK32-IZHINXMIN-NEXT:    call __floatundihf
 ; CHECK32-IZHINXMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; CHECK32-IZHINXMIN-NEXT:    addi sp, sp, 16
 ; CHECK32-IZHINXMIN-NEXT:    ret
@@ -1551,7 +1551,7 @@ define half @fcvt_h_lu(i64 %a) nounwind strictfp {
 ; CHECK32-IZDINXZHINXMIN:       # %bb.0:
 ; CHECK32-IZDINXZHINXMIN-NEXT:    addi sp, sp, -16
 ; CHECK32-IZDINXZHINXMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; CHECK32-IZDINXZHINXMIN-NEXT:    call __floatundihf@plt
+; CHECK32-IZDINXZHINXMIN-NEXT:    call __floatundihf
 ; CHECK32-IZDINXZHINXMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; CHECK32-IZDINXZHINXMIN-NEXT:    addi sp, sp, 16
 ; CHECK32-IZDINXZHINXMIN-NEXT:    ret
@@ -1701,7 +1701,7 @@ define half @fcvt_h_d(double %a) nounwind strictfp {
 ; RV32IZFH:       # %bb.0:
 ; RV32IZFH-NEXT:    addi sp, sp, -16
 ; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFH-NEXT:    call __truncdfhf2@plt
+; RV32IZFH-NEXT:    call __truncdfhf2
 ; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFH-NEXT:    addi sp, sp, 16
 ; RV32IZFH-NEXT:    ret
@@ -1710,7 +1710,7 @@ define half @fcvt_h_d(double %a) nounwind strictfp {
 ; RV64IZFH:       # %bb.0:
 ; RV64IZFH-NEXT:    addi sp, sp, -16
 ; RV64IZFH-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFH-NEXT:    call __truncdfhf2@plt
+; RV64IZFH-NEXT:    call __truncdfhf2
 ; RV64IZFH-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFH-NEXT:    addi sp, sp, 16
 ; RV64IZFH-NEXT:    ret
@@ -1719,7 +1719,7 @@ define half @fcvt_h_d(double %a) nounwind strictfp {
 ; RV32IZHINX:       # %bb.0:
 ; RV32IZHINX-NEXT:    addi sp, sp, -16
 ; RV32IZHINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZHINX-NEXT:    call __truncdfhf2@plt
+; RV32IZHINX-NEXT:    call __truncdfhf2
 ; RV32IZHINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZHINX-NEXT:    addi sp, sp, 16
 ; RV32IZHINX-NEXT:    ret
@@ -1728,7 +1728,7 @@ define half @fcvt_h_d(double %a) nounwind strictfp {
 ; RV64IZHINX:       # %bb.0:
 ; RV64IZHINX-NEXT:    addi sp, sp, -16
 ; RV64IZHINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZHINX-NEXT:    call __truncdfhf2@plt
+; RV64IZHINX-NEXT:    call __truncdfhf2
 ; RV64IZHINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZHINX-NEXT:    addi sp, sp, 16
 ; RV64IZHINX-NEXT:    ret
@@ -1763,7 +1763,7 @@ define half @fcvt_h_d(double %a) nounwind strictfp {
 ; RV32IFZFHMIN:       # %bb.0:
 ; RV32IFZFHMIN-NEXT:    addi sp, sp, -16
 ; RV32IFZFHMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFZFHMIN-NEXT:    call __truncdfhf2@plt
+; RV32IFZFHMIN-NEXT:    call __truncdfhf2
 ; RV32IFZFHMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFZFHMIN-NEXT:    addi sp, sp, 16
 ; RV32IFZFHMIN-NEXT:    ret
@@ -1772,7 +1772,7 @@ define half @fcvt_h_d(double %a) nounwind strictfp {
 ; RV64IFZFHMIN:       # %bb.0:
 ; RV64IFZFHMIN-NEXT:    addi sp, sp, -16
 ; RV64IFZFHMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFZFHMIN-NEXT:    call __truncdfhf2@plt
+; RV64IFZFHMIN-NEXT:    call __truncdfhf2
 ; RV64IFZFHMIN-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IFZFHMIN-NEXT:    addi sp, sp, 16
 ; RV64IFZFHMIN-NEXT:    ret
@@ -1781,7 +1781,7 @@ define half @fcvt_h_d(double %a) nounwind strictfp {
 ; CHECK32-IZHINXMIN:       # %bb.0:
 ; CHECK32-IZHINXMIN-NEXT:    addi sp, sp, -16
 ; CHECK32-IZHINXMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; CHECK32-IZHINXMIN-NEXT:    call __truncdfhf2@plt
+; CHECK32-IZHINXMIN-NEXT:    call __truncdfhf2
 ; CHECK32-IZHINXMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; CHECK32-IZHINXMIN-NEXT:    addi sp, sp, 16
 ; CHECK32-IZHINXMIN-NEXT:    ret
@@ -1790,7 +1790,7 @@ define half @fcvt_h_d(double %a) nounwind strictfp {
 ; CHECK64-IZHINXMIN:       # %bb.0:
 ; CHECK64-IZHINXMIN-NEXT:    addi sp, sp, -16
 ; CHECK64-IZHINXMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; CHECK64-IZHINXMIN-NEXT:    call __truncdfhf2@plt
+; CHECK64-IZHINXMIN-NEXT:    call __truncdfhf2
 ; CHECK64-IZHINXMIN-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; CHECK64-IZHINXMIN-NEXT:    addi sp, sp, 16
 ; CHECK64-IZHINXMIN-NEXT:    ret
@@ -1831,7 +1831,7 @@ define double @fcvt_d_h(half %a) nounwind strictfp {
 ; RV32IZFH-NEXT:    addi sp, sp, -16
 ; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFH-NEXT:    fcvt.s.h fa0, fa0
-; RV32IZFH-NEXT:    call __extendsfdf2@plt
+; RV32IZFH-NEXT:    call __extendsfdf2
 ; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFH-NEXT:    addi sp, sp, 16
 ; RV32IZFH-NEXT:    ret
@@ -1841,7 +1841,7 @@ define double @fcvt_d_h(half %a) nounwind strictfp {
 ; RV64IZFH-NEXT:    addi sp, sp, -16
 ; RV64IZFH-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZFH-NEXT:    fcvt.s.h fa0, fa0
-; RV64IZFH-NEXT:    call __extendsfdf2@plt
+; RV64IZFH-NEXT:    call __extendsfdf2
 ; RV64IZFH-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFH-NEXT:    addi sp, sp, 16
 ; RV64IZFH-NEXT:    ret
@@ -1851,7 +1851,7 @@ define double @fcvt_d_h(half %a) nounwind strictfp {
 ; RV32IZHINX-NEXT:    addi sp, sp, -16
 ; RV32IZHINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZHINX-NEXT:    fcvt.s.h a0, a0
-; RV32IZHINX-NEXT:    call __extendsfdf2@plt
+; RV32IZHINX-NEXT:    call __extendsfdf2
 ; RV32IZHINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZHINX-NEXT:    addi sp, sp, 16
 ; RV32IZHINX-NEXT:    ret
@@ -1861,7 +1861,7 @@ define double @fcvt_d_h(half %a) nounwind strictfp {
 ; RV64IZHINX-NEXT:    addi sp, sp, -16
 ; RV64IZHINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZHINX-NEXT:    fcvt.s.h a0, a0
-; RV64IZHINX-NEXT:    call __extendsfdf2@plt
+; RV64IZHINX-NEXT:    call __extendsfdf2
 ; RV64IZHINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZHINX-NEXT:    addi sp, sp, 16
 ; RV64IZHINX-NEXT:    ret
@@ -1897,7 +1897,7 @@ define double @fcvt_d_h(half %a) nounwind strictfp {
 ; RV32IFZFHMIN-NEXT:    addi sp, sp, -16
 ; RV32IFZFHMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IFZFHMIN-NEXT:    fcvt.s.h fa0, fa0
-; RV32IFZFHMIN-NEXT:    call __extendsfdf2@plt
+; RV32IFZFHMIN-NEXT:    call __extendsfdf2
 ; RV32IFZFHMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFZFHMIN-NEXT:    addi sp, sp, 16
 ; RV32IFZFHMIN-NEXT:    ret
@@ -1907,7 +1907,7 @@ define double @fcvt_d_h(half %a) nounwind strictfp {
 ; RV64IFZFHMIN-NEXT:    addi sp, sp, -16
 ; RV64IFZFHMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IFZFHMIN-NEXT:    fcvt.s.h fa0, fa0
-; RV64IFZFHMIN-NEXT:    call __extendsfdf2@plt
+; RV64IFZFHMIN-NEXT:    call __extendsfdf2
 ; RV64IFZFHMIN-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IFZFHMIN-NEXT:    addi sp, sp, 16
 ; RV64IFZFHMIN-NEXT:    ret
@@ -1917,7 +1917,7 @@ define double @fcvt_d_h(half %a) nounwind strictfp {
 ; CHECK32-IZHINXMIN-NEXT:    addi sp, sp, -16
 ; CHECK32-IZHINXMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; CHECK32-IZHINXMIN-NEXT:    fcvt.s.h a0, a0
-; CHECK32-IZHINXMIN-NEXT:    call __extendsfdf2@plt
+; CHECK32-IZHINXMIN-NEXT:    call __extendsfdf2
 ; CHECK32-IZHINXMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; CHECK32-IZHINXMIN-NEXT:    addi sp, sp, 16
 ; CHECK32-IZHINXMIN-NEXT:    ret
@@ -1927,7 +1927,7 @@ define double @fcvt_d_h(half %a) nounwind strictfp {
 ; CHECK64-IZHINXMIN-NEXT:    addi sp, sp, -16
 ; CHECK64-IZHINXMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; CHECK64-IZHINXMIN-NEXT:    fcvt.s.h a0, a0
-; CHECK64-IZHINXMIN-NEXT:    call __extendsfdf2@plt
+; CHECK64-IZHINXMIN-NEXT:    call __extendsfdf2
 ; CHECK64-IZHINXMIN-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; CHECK64-IZHINXMIN-NEXT:    addi sp, sp, 16
 ; CHECK64-IZHINXMIN-NEXT:    ret
diff --git a/llvm/test/CodeGen/RISCV/half-convert.ll b/llvm/test/CodeGen/RISCV/half-convert.ll
index 2d3f40e15fe432..daaceed3941c53 100644
--- a/llvm/test/CodeGen/RISCV/half-convert.ll
+++ b/llvm/test/CodeGen/RISCV/half-convert.ll
@@ -91,8 +91,8 @@ define i16 @fcvt_si_h(half %a) nounwind {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    slli a0, a0, 16
 ; RV32I-NEXT:    srli a0, a0, 16
-; RV32I-NEXT:    call __extendhfsf2@plt
-; RV32I-NEXT:    call __fixsfsi@plt
+; RV32I-NEXT:    call __extendhfsf2
+; RV32I-NEXT:    call __fixsfsi
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -103,8 +103,8 @@ define i16 @fcvt_si_h(half %a) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    slli a0, a0, 48
 ; RV64I-NEXT:    srli a0, a0, 48
-; RV64I-NEXT:    call __extendhfsf2@plt
-; RV64I-NEXT:    call __fixsfdi@plt
+; RV64I-NEXT:    call __extendhfsf2
+; RV64I-NEXT:    call __fixsfdi
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -113,7 +113,7 @@ define i16 @fcvt_si_h(half %a) nounwind {
 ; RV32ID-ILP32:       # %bb.0:
 ; RV32ID-ILP32-NEXT:    addi sp, sp, -16
 ; RV32ID-ILP32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-ILP32-NEXT:    call __extendhfsf2@plt
+; RV32ID-ILP32-NEXT:    call __extendhfsf2
 ; RV32ID-ILP32-NEXT:    fmv.w.x fa5, a0
 ; RV32ID-ILP32-NEXT:    fcvt.w.s a0, fa5, rtz
 ; RV32ID-ILP32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -124,7 +124,7 @@ define i16 @fcvt_si_h(half %a) nounwind {
 ; RV64ID-LP64:       # %bb.0:
 ; RV64ID-LP64-NEXT:    addi sp, sp, -16
 ; RV64ID-LP64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-LP64-NEXT:    call __extendhfsf2@plt
+; RV64ID-LP64-NEXT:    call __extendhfsf2
 ; RV64ID-LP64-NEXT:    fmv.w.x fa5, a0
 ; RV64ID-LP64-NEXT:    fcvt.l.s a0, fa5, rtz
 ; RV64ID-LP64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -135,7 +135,7 @@ define i16 @fcvt_si_h(half %a) nounwind {
 ; RV32ID:       # %bb.0:
 ; RV32ID-NEXT:    addi sp, sp, -16
 ; RV32ID-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-NEXT:    call __extendhfsf2@plt
+; RV32ID-NEXT:    call __extendhfsf2
 ; RV32ID-NEXT:    fcvt.w.s a0, fa0, rtz
 ; RV32ID-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32ID-NEXT:    addi sp, sp, 16
@@ -145,7 +145,7 @@ define i16 @fcvt_si_h(half %a) nounwind {
 ; RV64ID:       # %bb.0:
 ; RV64ID-NEXT:    addi sp, sp, -16
 ; RV64ID-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-NEXT:    call __extendhfsf2@plt
+; RV64ID-NEXT:    call __extendhfsf2
 ; RV64ID-NEXT:    fcvt.l.s a0, fa0, rtz
 ; RV64ID-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64ID-NEXT:    addi sp, sp, 16
@@ -316,13 +316,13 @@ define i16 @fcvt_si_h_sat(half %a) nounwind {
 ; RV32I-NEXT:    sw s2, 0(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    slli a0, a0, 16
 ; RV32I-NEXT:    srli a0, a0, 16
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lui a1, 815104
-; RV32I-NEXT:    call __gesf2@plt
+; RV32I-NEXT:    call __gesf2
 ; RV32I-NEXT:    mv s2, a0
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __fixsfsi@plt
+; RV32I-NEXT:    call __fixsfsi
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    bgez s2, .LBB1_2
 ; RV32I-NEXT:  # %bb.1: # %start
@@ -331,7 +331,7 @@ define i16 @fcvt_si_h_sat(half %a) nounwind {
 ; RV32I-NEXT:    lui a0, 290816
 ; RV32I-NEXT:    addi a1, a0, -512
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __gtsf2@plt
+; RV32I-NEXT:    call __gtsf2
 ; RV32I-NEXT:    blez a0, .LBB1_4
 ; RV32I-NEXT:  # %bb.3: # %start
 ; RV32I-NEXT:    lui s1, 8
@@ -339,7 +339,7 @@ define i16 @fcvt_si_h_sat(half %a) nounwind {
 ; RV32I-NEXT:  .LBB1_4: # %start
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    mv a1, s0
-; RV32I-NEXT:    call __unordsf2@plt
+; RV32I-NEXT:    call __unordsf2
 ; RV32I-NEXT:    snez a0, a0
 ; RV32I-NEXT:    addi a0, a0, -1
 ; RV32I-NEXT:    and a0, a0, s1
@@ -359,13 +359,13 @@ define i16 @fcvt_si_h_sat(half %a) nounwind {
 ; RV64I-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    slli a0, a0, 48
 ; RV64I-NEXT:    srli a0, a0, 48
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    lui a1, 815104
-; RV64I-NEXT:    call __gesf2@plt
+; RV64I-NEXT:    call __gesf2
 ; RV64I-NEXT:    mv s2, a0
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __fixsfdi@plt
+; RV64I-NEXT:    call __fixsfdi
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    bgez s2, .LBB1_2
 ; RV64I-NEXT:  # %bb.1: # %start
@@ -374,7 +374,7 @@ define i16 @fcvt_si_h_sat(half %a) nounwind {
 ; RV64I-NEXT:    lui a0, 290816
 ; RV64I-NEXT:    addiw a1, a0, -512
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __gtsf2@plt
+; RV64I-NEXT:    call __gtsf2
 ; RV64I-NEXT:    blez a0, .LBB1_4
 ; RV64I-NEXT:  # %bb.3: # %start
 ; RV64I-NEXT:    lui s1, 8
@@ -382,7 +382,7 @@ define i16 @fcvt_si_h_sat(half %a) nounwind {
 ; RV64I-NEXT:  .LBB1_4: # %start
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    mv a1, s0
-; RV64I-NEXT:    call __unordsf2@plt
+; RV64I-NEXT:    call __unordsf2
 ; RV64I-NEXT:    snez a0, a0
 ; RV64I-NEXT:    addi a0, a0, -1
 ; RV64I-NEXT:    and a0, a0, s1
@@ -397,7 +397,7 @@ define i16 @fcvt_si_h_sat(half %a) nounwind {
 ; RV32ID-ILP32:       # %bb.0: # %start
 ; RV32ID-ILP32-NEXT:    addi sp, sp, -16
 ; RV32ID-ILP32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-ILP32-NEXT:    call __extendhfsf2@plt
+; RV32ID-ILP32-NEXT:    call __extendhfsf2
 ; RV32ID-ILP32-NEXT:    fmv.w.x fa5, a0
 ; RV32ID-ILP32-NEXT:    feq.s a0, fa5, fa5
 ; RV32ID-ILP32-NEXT:    neg a0, a0
@@ -417,7 +417,7 @@ define i16 @fcvt_si_h_sat(half %a) nounwind {
 ; RV64ID-LP64:       # %bb.0: # %start
 ; RV64ID-LP64-NEXT:    addi sp, sp, -16
 ; RV64ID-LP64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-LP64-NEXT:    call __extendhfsf2@plt
+; RV64ID-LP64-NEXT:    call __extendhfsf2
 ; RV64ID-LP64-NEXT:    fmv.w.x fa5, a0
 ; RV64ID-LP64-NEXT:    feq.s a0, fa5, fa5
 ; RV64ID-LP64-NEXT:    lui a1, %hi(.LCPI1_0)
@@ -437,7 +437,7 @@ define i16 @fcvt_si_h_sat(half %a) nounwind {
 ; RV32ID:       # %bb.0: # %start
 ; RV32ID-NEXT:    addi sp, sp, -16
 ; RV32ID-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-NEXT:    call __extendhfsf2@plt
+; RV32ID-NEXT:    call __extendhfsf2
 ; RV32ID-NEXT:    feq.s a0, fa0, fa0
 ; RV32ID-NEXT:    neg a0, a0
 ; RV32ID-NEXT:    lui a1, %hi(.LCPI1_0)
@@ -456,7 +456,7 @@ define i16 @fcvt_si_h_sat(half %a) nounwind {
 ; RV64ID:       # %bb.0: # %start
 ; RV64ID-NEXT:    addi sp, sp, -16
 ; RV64ID-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-NEXT:    call __extendhfsf2@plt
+; RV64ID-NEXT:    call __extendhfsf2
 ; RV64ID-NEXT:    feq.s a0, fa0, fa0
 ; RV64ID-NEXT:    lui a1, %hi(.LCPI1_0)
 ; RV64ID-NEXT:    flw fa5, %lo(.LCPI1_0)(a1)
@@ -609,8 +609,8 @@ define i16 @fcvt_ui_h(half %a) nounwind {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    slli a0, a0, 16
 ; RV32I-NEXT:    srli a0, a0, 16
-; RV32I-NEXT:    call __extendhfsf2@plt
-; RV32I-NEXT:    call __fixunssfsi@plt
+; RV32I-NEXT:    call __extendhfsf2
+; RV32I-NEXT:    call __fixunssfsi
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -621,8 +621,8 @@ define i16 @fcvt_ui_h(half %a) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    slli a0, a0, 48
 ; RV64I-NEXT:    srli a0, a0, 48
-; RV64I-NEXT:    call __extendhfsf2@plt
-; RV64I-NEXT:    call __fixunssfdi@plt
+; RV64I-NEXT:    call __extendhfsf2
+; RV64I-NEXT:    call __fixunssfdi
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -631,7 +631,7 @@ define i16 @fcvt_ui_h(half %a) nounwind {
 ; RV32ID-ILP32:       # %bb.0:
 ; RV32ID-ILP32-NEXT:    addi sp, sp, -16
 ; RV32ID-ILP32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-ILP32-NEXT:    call __extendhfsf2@plt
+; RV32ID-ILP32-NEXT:    call __extendhfsf2
 ; RV32ID-ILP32-NEXT:    fmv.w.x fa5, a0
 ; RV32ID-ILP32-NEXT:    fcvt.wu.s a0, fa5, rtz
 ; RV32ID-ILP32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -642,7 +642,7 @@ define i16 @fcvt_ui_h(half %a) nounwind {
 ; RV64ID-LP64:       # %bb.0:
 ; RV64ID-LP64-NEXT:    addi sp, sp, -16
 ; RV64ID-LP64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-LP64-NEXT:    call __extendhfsf2@plt
+; RV64ID-LP64-NEXT:    call __extendhfsf2
 ; RV64ID-LP64-NEXT:    fmv.w.x fa5, a0
 ; RV64ID-LP64-NEXT:    fcvt.lu.s a0, fa5, rtz
 ; RV64ID-LP64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -653,7 +653,7 @@ define i16 @fcvt_ui_h(half %a) nounwind {
 ; RV32ID:       # %bb.0:
 ; RV32ID-NEXT:    addi sp, sp, -16
 ; RV32ID-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-NEXT:    call __extendhfsf2@plt
+; RV32ID-NEXT:    call __extendhfsf2
 ; RV32ID-NEXT:    fcvt.wu.s a0, fa0, rtz
 ; RV32ID-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32ID-NEXT:    addi sp, sp, 16
@@ -663,7 +663,7 @@ define i16 @fcvt_ui_h(half %a) nounwind {
 ; RV64ID:       # %bb.0:
 ; RV64ID-NEXT:    addi sp, sp, -16
 ; RV64ID-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-NEXT:    call __extendhfsf2@plt
+; RV64ID-NEXT:    call __extendhfsf2
 ; RV64ID-NEXT:    fcvt.lu.s a0, fa0, rtz
 ; RV64ID-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64ID-NEXT:    addi sp, sp, 16
@@ -804,18 +804,18 @@ define i16 @fcvt_ui_h_sat(half %a) nounwind {
 ; RV32I-NEXT:    lui s0, 16
 ; RV32I-NEXT:    addi s0, s0, -1
 ; RV32I-NEXT:    and a0, a0, s0
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    mv s3, a0
-; RV32I-NEXT:    call __fixunssfsi@plt
+; RV32I-NEXT:    call __fixunssfsi
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    mv a0, s3
 ; RV32I-NEXT:    li a1, 0
-; RV32I-NEXT:    call __gesf2@plt
+; RV32I-NEXT:    call __gesf2
 ; RV32I-NEXT:    mv s2, a0
 ; RV32I-NEXT:    lui a0, 292864
 ; RV32I-NEXT:    addi a1, a0, -256
 ; RV32I-NEXT:    mv a0, s3
-; RV32I-NEXT:    call __gtsf2@plt
+; RV32I-NEXT:    call __gtsf2
 ; RV32I-NEXT:    bgtz a0, .LBB3_2
 ; RV32I-NEXT:  # %bb.1:
 ; RV32I-NEXT:    slti a0, s2, 0
@@ -842,18 +842,18 @@ define i16 @fcvt_ui_h_sat(half %a) nounwind {
 ; RV64I-NEXT:    lui s0, 16
 ; RV64I-NEXT:    addiw s0, s0, -1
 ; RV64I-NEXT:    and a0, a0, s0
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    mv s3, a0
-; RV64I-NEXT:    call __fixunssfdi@plt
+; RV64I-NEXT:    call __fixunssfdi
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    mv a0, s3
 ; RV64I-NEXT:    li a1, 0
-; RV64I-NEXT:    call __gesf2@plt
+; RV64I-NEXT:    call __gesf2
 ; RV64I-NEXT:    mv s2, a0
 ; RV64I-NEXT:    lui a0, 292864
 ; RV64I-NEXT:    addiw a1, a0, -256
 ; RV64I-NEXT:    mv a0, s3
-; RV64I-NEXT:    call __gtsf2@plt
+; RV64I-NEXT:    call __gtsf2
 ; RV64I-NEXT:    bgtz a0, .LBB3_2
 ; RV64I-NEXT:  # %bb.1:
 ; RV64I-NEXT:    slti a0, s2, 0
@@ -873,7 +873,7 @@ define i16 @fcvt_ui_h_sat(half %a) nounwind {
 ; RV32ID-ILP32:       # %bb.0: # %start
 ; RV32ID-ILP32-NEXT:    addi sp, sp, -16
 ; RV32ID-ILP32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-ILP32-NEXT:    call __extendhfsf2@plt
+; RV32ID-ILP32-NEXT:    call __extendhfsf2
 ; RV32ID-ILP32-NEXT:    lui a1, %hi(.LCPI3_0)
 ; RV32ID-ILP32-NEXT:    flw fa5, %lo(.LCPI3_0)(a1)
 ; RV32ID-ILP32-NEXT:    fmv.w.x fa4, a0
@@ -889,7 +889,7 @@ define i16 @fcvt_ui_h_sat(half %a) nounwind {
 ; RV64ID-LP64:       # %bb.0: # %start
 ; RV64ID-LP64-NEXT:    addi sp, sp, -16
 ; RV64ID-LP64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-LP64-NEXT:    call __extendhfsf2@plt
+; RV64ID-LP64-NEXT:    call __extendhfsf2
 ; RV64ID-LP64-NEXT:    lui a1, %hi(.LCPI3_0)
 ; RV64ID-LP64-NEXT:    flw fa5, %lo(.LCPI3_0)(a1)
 ; RV64ID-LP64-NEXT:    fmv.w.x fa4, a0
@@ -905,7 +905,7 @@ define i16 @fcvt_ui_h_sat(half %a) nounwind {
 ; RV32ID:       # %bb.0: # %start
 ; RV32ID-NEXT:    addi sp, sp, -16
 ; RV32ID-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-NEXT:    call __extendhfsf2@plt
+; RV32ID-NEXT:    call __extendhfsf2
 ; RV32ID-NEXT:    lui a0, %hi(.LCPI3_0)
 ; RV32ID-NEXT:    flw fa5, %lo(.LCPI3_0)(a0)
 ; RV32ID-NEXT:    fmv.w.x fa4, zero
@@ -920,7 +920,7 @@ define i16 @fcvt_ui_h_sat(half %a) nounwind {
 ; RV64ID:       # %bb.0: # %start
 ; RV64ID-NEXT:    addi sp, sp, -16
 ; RV64ID-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-NEXT:    call __extendhfsf2@plt
+; RV64ID-NEXT:    call __extendhfsf2
 ; RV64ID-NEXT:    lui a0, %hi(.LCPI3_0)
 ; RV64ID-NEXT:    flw fa5, %lo(.LCPI3_0)(a0)
 ; RV64ID-NEXT:    fmv.w.x fa4, zero
@@ -1030,8 +1030,8 @@ define i32 @fcvt_w_h(half %a) nounwind {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    slli a0, a0, 16
 ; RV32I-NEXT:    srli a0, a0, 16
-; RV32I-NEXT:    call __extendhfsf2@plt
-; RV32I-NEXT:    call __fixsfsi@plt
+; RV32I-NEXT:    call __extendhfsf2
+; RV32I-NEXT:    call __fixsfsi
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1042,8 +1042,8 @@ define i32 @fcvt_w_h(half %a) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    slli a0, a0, 48
 ; RV64I-NEXT:    srli a0, a0, 48
-; RV64I-NEXT:    call __extendhfsf2@plt
-; RV64I-NEXT:    call __fixsfdi@plt
+; RV64I-NEXT:    call __extendhfsf2
+; RV64I-NEXT:    call __fixsfdi
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1052,7 +1052,7 @@ define i32 @fcvt_w_h(half %a) nounwind {
 ; RV32ID-ILP32:       # %bb.0:
 ; RV32ID-ILP32-NEXT:    addi sp, sp, -16
 ; RV32ID-ILP32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-ILP32-NEXT:    call __extendhfsf2@plt
+; RV32ID-ILP32-NEXT:    call __extendhfsf2
 ; RV32ID-ILP32-NEXT:    fmv.w.x fa5, a0
 ; RV32ID-ILP32-NEXT:    fcvt.w.s a0, fa5, rtz
 ; RV32ID-ILP32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -1063,7 +1063,7 @@ define i32 @fcvt_w_h(half %a) nounwind {
 ; RV64ID-LP64:       # %bb.0:
 ; RV64ID-LP64-NEXT:    addi sp, sp, -16
 ; RV64ID-LP64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-LP64-NEXT:    call __extendhfsf2@plt
+; RV64ID-LP64-NEXT:    call __extendhfsf2
 ; RV64ID-LP64-NEXT:    fmv.w.x fa5, a0
 ; RV64ID-LP64-NEXT:    fcvt.l.s a0, fa5, rtz
 ; RV64ID-LP64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -1074,7 +1074,7 @@ define i32 @fcvt_w_h(half %a) nounwind {
 ; RV32ID:       # %bb.0:
 ; RV32ID-NEXT:    addi sp, sp, -16
 ; RV32ID-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-NEXT:    call __extendhfsf2@plt
+; RV32ID-NEXT:    call __extendhfsf2
 ; RV32ID-NEXT:    fcvt.w.s a0, fa0, rtz
 ; RV32ID-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32ID-NEXT:    addi sp, sp, 16
@@ -1084,7 +1084,7 @@ define i32 @fcvt_w_h(half %a) nounwind {
 ; RV64ID:       # %bb.0:
 ; RV64ID-NEXT:    addi sp, sp, -16
 ; RV64ID-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-NEXT:    call __extendhfsf2@plt
+; RV64ID-NEXT:    call __extendhfsf2
 ; RV64ID-NEXT:    fcvt.l.s a0, fa0, rtz
 ; RV64ID-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64ID-NEXT:    addi sp, sp, 16
@@ -1185,13 +1185,13 @@ define i32 @fcvt_w_h_sat(half %a) nounwind {
 ; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    slli a0, a0, 16
 ; RV32I-NEXT:    srli a0, a0, 16
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lui a1, 847872
-; RV32I-NEXT:    call __gesf2@plt
+; RV32I-NEXT:    call __gesf2
 ; RV32I-NEXT:    mv s2, a0
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __fixsfsi@plt
+; RV32I-NEXT:    call __fixsfsi
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    lui s3, 524288
 ; RV32I-NEXT:    bgez s2, .LBB5_2
@@ -1201,14 +1201,14 @@ define i32 @fcvt_w_h_sat(half %a) nounwind {
 ; RV32I-NEXT:    lui a1, 323584
 ; RV32I-NEXT:    addi a1, a1, -1
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __gtsf2@plt
+; RV32I-NEXT:    call __gtsf2
 ; RV32I-NEXT:    blez a0, .LBB5_4
 ; RV32I-NEXT:  # %bb.3: # %start
 ; RV32I-NEXT:    addi s1, s3, -1
 ; RV32I-NEXT:  .LBB5_4: # %start
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    mv a1, s0
-; RV32I-NEXT:    call __unordsf2@plt
+; RV32I-NEXT:    call __unordsf2
 ; RV32I-NEXT:    snez a0, a0
 ; RV32I-NEXT:    addi a0, a0, -1
 ; RV32I-NEXT:    and a0, a0, s1
@@ -1230,13 +1230,13 @@ define i32 @fcvt_w_h_sat(half %a) nounwind {
 ; RV64I-NEXT:    sd s3, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    slli a0, a0, 48
 ; RV64I-NEXT:    srli a0, a0, 48
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    lui a1, 847872
-; RV64I-NEXT:    call __gesf2@plt
+; RV64I-NEXT:    call __gesf2
 ; RV64I-NEXT:    mv s2, a0
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __fixsfdi@plt
+; RV64I-NEXT:    call __fixsfdi
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    lui s3, 524288
 ; RV64I-NEXT:    bgez s2, .LBB5_2
@@ -1246,14 +1246,14 @@ define i32 @fcvt_w_h_sat(half %a) nounwind {
 ; RV64I-NEXT:    lui a1, 323584
 ; RV64I-NEXT:    addiw a1, a1, -1
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __gtsf2@plt
+; RV64I-NEXT:    call __gtsf2
 ; RV64I-NEXT:    blez a0, .LBB5_4
 ; RV64I-NEXT:  # %bb.3: # %start
 ; RV64I-NEXT:    addiw s1, s3, -1
 ; RV64I-NEXT:  .LBB5_4: # %start
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    mv a1, s0
-; RV64I-NEXT:    call __unordsf2@plt
+; RV64I-NEXT:    call __unordsf2
 ; RV64I-NEXT:    snez a0, a0
 ; RV64I-NEXT:    addi a0, a0, -1
 ; RV64I-NEXT:    and a0, a0, s1
@@ -1269,7 +1269,7 @@ define i32 @fcvt_w_h_sat(half %a) nounwind {
 ; RV32ID-ILP32:       # %bb.0: # %start
 ; RV32ID-ILP32-NEXT:    addi sp, sp, -16
 ; RV32ID-ILP32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-ILP32-NEXT:    call __extendhfsf2@plt
+; RV32ID-ILP32-NEXT:    call __extendhfsf2
 ; RV32ID-ILP32-NEXT:    fmv.w.x fa5, a0
 ; RV32ID-ILP32-NEXT:    fcvt.w.s a0, fa5, rtz
 ; RV32ID-ILP32-NEXT:    feq.s a1, fa5, fa5
@@ -1284,7 +1284,7 @@ define i32 @fcvt_w_h_sat(half %a) nounwind {
 ; RV64ID-LP64:       # %bb.0: # %start
 ; RV64ID-LP64-NEXT:    addi sp, sp, -16
 ; RV64ID-LP64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-LP64-NEXT:    call __extendhfsf2@plt
+; RV64ID-LP64-NEXT:    call __extendhfsf2
 ; RV64ID-LP64-NEXT:    fmv.w.x fa5, a0
 ; RV64ID-LP64-NEXT:    fcvt.w.s a0, fa5, rtz
 ; RV64ID-LP64-NEXT:    feq.s a1, fa5, fa5
@@ -1299,7 +1299,7 @@ define i32 @fcvt_w_h_sat(half %a) nounwind {
 ; RV32ID:       # %bb.0: # %start
 ; RV32ID-NEXT:    addi sp, sp, -16
 ; RV32ID-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-NEXT:    call __extendhfsf2@plt
+; RV32ID-NEXT:    call __extendhfsf2
 ; RV32ID-NEXT:    fcvt.w.s a0, fa0, rtz
 ; RV32ID-NEXT:    feq.s a1, fa0, fa0
 ; RV32ID-NEXT:    seqz a1, a1
@@ -1313,7 +1313,7 @@ define i32 @fcvt_w_h_sat(half %a) nounwind {
 ; RV64ID:       # %bb.0: # %start
 ; RV64ID-NEXT:    addi sp, sp, -16
 ; RV64ID-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-NEXT:    call __extendhfsf2@plt
+; RV64ID-NEXT:    call __extendhfsf2
 ; RV64ID-NEXT:    fcvt.w.s a0, fa0, rtz
 ; RV64ID-NEXT:    feq.s a1, fa0, fa0
 ; RV64ID-NEXT:    seqz a1, a1
@@ -1420,8 +1420,8 @@ define i32 @fcvt_wu_h(half %a) nounwind {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    slli a0, a0, 16
 ; RV32I-NEXT:    srli a0, a0, 16
-; RV32I-NEXT:    call __extendhfsf2@plt
-; RV32I-NEXT:    call __fixunssfsi@plt
+; RV32I-NEXT:    call __extendhfsf2
+; RV32I-NEXT:    call __fixunssfsi
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1432,8 +1432,8 @@ define i32 @fcvt_wu_h(half %a) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    slli a0, a0, 48
 ; RV64I-NEXT:    srli a0, a0, 48
-; RV64I-NEXT:    call __extendhfsf2@plt
-; RV64I-NEXT:    call __fixunssfdi@plt
+; RV64I-NEXT:    call __extendhfsf2
+; RV64I-NEXT:    call __fixunssfdi
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1442,7 +1442,7 @@ define i32 @fcvt_wu_h(half %a) nounwind {
 ; RV32ID-ILP32:       # %bb.0:
 ; RV32ID-ILP32-NEXT:    addi sp, sp, -16
 ; RV32ID-ILP32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-ILP32-NEXT:    call __extendhfsf2@plt
+; RV32ID-ILP32-NEXT:    call __extendhfsf2
 ; RV32ID-ILP32-NEXT:    fmv.w.x fa5, a0
 ; RV32ID-ILP32-NEXT:    fcvt.wu.s a0, fa5, rtz
 ; RV32ID-ILP32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -1453,7 +1453,7 @@ define i32 @fcvt_wu_h(half %a) nounwind {
 ; RV64ID-LP64:       # %bb.0:
 ; RV64ID-LP64-NEXT:    addi sp, sp, -16
 ; RV64ID-LP64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-LP64-NEXT:    call __extendhfsf2@plt
+; RV64ID-LP64-NEXT:    call __extendhfsf2
 ; RV64ID-LP64-NEXT:    fmv.w.x fa5, a0
 ; RV64ID-LP64-NEXT:    fcvt.lu.s a0, fa5, rtz
 ; RV64ID-LP64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -1464,7 +1464,7 @@ define i32 @fcvt_wu_h(half %a) nounwind {
 ; RV32ID:       # %bb.0:
 ; RV32ID-NEXT:    addi sp, sp, -16
 ; RV32ID-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-NEXT:    call __extendhfsf2@plt
+; RV32ID-NEXT:    call __extendhfsf2
 ; RV32ID-NEXT:    fcvt.wu.s a0, fa0, rtz
 ; RV32ID-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32ID-NEXT:    addi sp, sp, 16
@@ -1474,7 +1474,7 @@ define i32 @fcvt_wu_h(half %a) nounwind {
 ; RV64ID:       # %bb.0:
 ; RV64ID-NEXT:    addi sp, sp, -16
 ; RV64ID-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-NEXT:    call __extendhfsf2@plt
+; RV64ID-NEXT:    call __extendhfsf2
 ; RV64ID-NEXT:    fcvt.lu.s a0, fa0, rtz
 ; RV64ID-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64ID-NEXT:    addi sp, sp, 16
@@ -1563,8 +1563,8 @@ define i32 @fcvt_wu_h_multiple_use(half %x, ptr %y) nounwind {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    slli a0, a0, 16
 ; RV32I-NEXT:    srli a0, a0, 16
-; RV32I-NEXT:    call __extendhfsf2@plt
-; RV32I-NEXT:    call __fixunssfsi@plt
+; RV32I-NEXT:    call __extendhfsf2
+; RV32I-NEXT:    call __fixunssfsi
 ; RV32I-NEXT:    seqz a1, a0
 ; RV32I-NEXT:    add a0, a0, a1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -1577,8 +1577,8 @@ define i32 @fcvt_wu_h_multiple_use(half %x, ptr %y) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    slli a0, a0, 48
 ; RV64I-NEXT:    srli a0, a0, 48
-; RV64I-NEXT:    call __extendhfsf2@plt
-; RV64I-NEXT:    call __fixunssfdi@plt
+; RV64I-NEXT:    call __extendhfsf2
+; RV64I-NEXT:    call __fixunssfdi
 ; RV64I-NEXT:    seqz a1, a0
 ; RV64I-NEXT:    add a0, a0, a1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -1589,7 +1589,7 @@ define i32 @fcvt_wu_h_multiple_use(half %x, ptr %y) nounwind {
 ; RV32ID-ILP32:       # %bb.0:
 ; RV32ID-ILP32-NEXT:    addi sp, sp, -16
 ; RV32ID-ILP32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-ILP32-NEXT:    call __extendhfsf2@plt
+; RV32ID-ILP32-NEXT:    call __extendhfsf2
 ; RV32ID-ILP32-NEXT:    fmv.w.x fa5, a0
 ; RV32ID-ILP32-NEXT:    fcvt.wu.s a0, fa5, rtz
 ; RV32ID-ILP32-NEXT:    seqz a1, a0
@@ -1602,7 +1602,7 @@ define i32 @fcvt_wu_h_multiple_use(half %x, ptr %y) nounwind {
 ; RV64ID-LP64:       # %bb.0:
 ; RV64ID-LP64-NEXT:    addi sp, sp, -16
 ; RV64ID-LP64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-LP64-NEXT:    call __extendhfsf2@plt
+; RV64ID-LP64-NEXT:    call __extendhfsf2
 ; RV64ID-LP64-NEXT:    fmv.w.x fa5, a0
 ; RV64ID-LP64-NEXT:    fcvt.lu.s a0, fa5, rtz
 ; RV64ID-LP64-NEXT:    seqz a1, a0
@@ -1615,7 +1615,7 @@ define i32 @fcvt_wu_h_multiple_use(half %x, ptr %y) nounwind {
 ; RV32ID:       # %bb.0:
 ; RV32ID-NEXT:    addi sp, sp, -16
 ; RV32ID-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-NEXT:    call __extendhfsf2@plt
+; RV32ID-NEXT:    call __extendhfsf2
 ; RV32ID-NEXT:    fcvt.wu.s a0, fa0, rtz
 ; RV32ID-NEXT:    seqz a1, a0
 ; RV32ID-NEXT:    add a0, a0, a1
@@ -1627,7 +1627,7 @@ define i32 @fcvt_wu_h_multiple_use(half %x, ptr %y) nounwind {
 ; RV64ID:       # %bb.0:
 ; RV64ID-NEXT:    addi sp, sp, -16
 ; RV64ID-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-NEXT:    call __extendhfsf2@plt
+; RV64ID-NEXT:    call __extendhfsf2
 ; RV64ID-NEXT:    fcvt.lu.s a0, fa0, rtz
 ; RV64ID-NEXT:    seqz a1, a0
 ; RV64ID-NEXT:    add a0, a0, a1
@@ -1778,20 +1778,20 @@ define i32 @fcvt_wu_h_sat(half %a) nounwind {
 ; RV32I-NEXT:    sw s2, 0(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    slli a0, a0, 16
 ; RV32I-NEXT:    srli a0, a0, 16
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lui a1, 325632
 ; RV32I-NEXT:    addi a1, a1, -1
-; RV32I-NEXT:    call __gtsf2@plt
+; RV32I-NEXT:    call __gtsf2
 ; RV32I-NEXT:    sgtz a0, a0
 ; RV32I-NEXT:    neg s1, a0
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    li a1, 0
-; RV32I-NEXT:    call __gesf2@plt
+; RV32I-NEXT:    call __gesf2
 ; RV32I-NEXT:    slti a0, a0, 0
 ; RV32I-NEXT:    addi s2, a0, -1
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __fixunssfsi@plt
+; RV32I-NEXT:    call __fixunssfsi
 ; RV32I-NEXT:    and a0, s2, a0
 ; RV32I-NEXT:    or a0, s1, a0
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -1810,18 +1810,18 @@ define i32 @fcvt_wu_h_sat(half %a) nounwind {
 ; RV64I-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    slli a0, a0, 48
 ; RV64I-NEXT:    srli a0, a0, 48
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    mv s2, a0
 ; RV64I-NEXT:    li a1, 0
-; RV64I-NEXT:    call __gesf2@plt
+; RV64I-NEXT:    call __gesf2
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    mv a0, s2
-; RV64I-NEXT:    call __fixunssfdi@plt
+; RV64I-NEXT:    call __fixunssfdi
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    lui a1, 325632
 ; RV64I-NEXT:    addiw a1, a1, -1
 ; RV64I-NEXT:    mv a0, s2
-; RV64I-NEXT:    call __gtsf2@plt
+; RV64I-NEXT:    call __gtsf2
 ; RV64I-NEXT:    blez a0, .LBB8_2
 ; RV64I-NEXT:  # %bb.1: # %start
 ; RV64I-NEXT:    li a0, -1
@@ -1843,7 +1843,7 @@ define i32 @fcvt_wu_h_sat(half %a) nounwind {
 ; RV32ID-ILP32:       # %bb.0: # %start
 ; RV32ID-ILP32-NEXT:    addi sp, sp, -16
 ; RV32ID-ILP32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-ILP32-NEXT:    call __extendhfsf2@plt
+; RV32ID-ILP32-NEXT:    call __extendhfsf2
 ; RV32ID-ILP32-NEXT:    fmv.w.x fa5, a0
 ; RV32ID-ILP32-NEXT:    fcvt.wu.s a0, fa5, rtz
 ; RV32ID-ILP32-NEXT:    feq.s a1, fa5, fa5
@@ -1858,7 +1858,7 @@ define i32 @fcvt_wu_h_sat(half %a) nounwind {
 ; RV64ID-LP64:       # %bb.0: # %start
 ; RV64ID-LP64-NEXT:    addi sp, sp, -16
 ; RV64ID-LP64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-LP64-NEXT:    call __extendhfsf2@plt
+; RV64ID-LP64-NEXT:    call __extendhfsf2
 ; RV64ID-LP64-NEXT:    fmv.w.x fa5, a0
 ; RV64ID-LP64-NEXT:    fcvt.wu.s a0, fa5, rtz
 ; RV64ID-LP64-NEXT:    feq.s a1, fa5, fa5
@@ -1875,7 +1875,7 @@ define i32 @fcvt_wu_h_sat(half %a) nounwind {
 ; RV32ID:       # %bb.0: # %start
 ; RV32ID-NEXT:    addi sp, sp, -16
 ; RV32ID-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-NEXT:    call __extendhfsf2@plt
+; RV32ID-NEXT:    call __extendhfsf2
 ; RV32ID-NEXT:    fcvt.wu.s a0, fa0, rtz
 ; RV32ID-NEXT:    feq.s a1, fa0, fa0
 ; RV32ID-NEXT:    seqz a1, a1
@@ -1889,7 +1889,7 @@ define i32 @fcvt_wu_h_sat(half %a) nounwind {
 ; RV64ID:       # %bb.0: # %start
 ; RV64ID-NEXT:    addi sp, sp, -16
 ; RV64ID-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-NEXT:    call __extendhfsf2@plt
+; RV64ID-NEXT:    call __extendhfsf2
 ; RV64ID-NEXT:    fcvt.wu.s a0, fa0, rtz
 ; RV64ID-NEXT:    feq.s a1, fa0, fa0
 ; RV64ID-NEXT:    seqz a1, a1
@@ -1977,7 +1977,7 @@ define i64 @fcvt_l_h(half %a) nounwind {
 ; RV32IZFH:       # %bb.0:
 ; RV32IZFH-NEXT:    addi sp, sp, -16
 ; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFH-NEXT:    call __fixhfdi@plt
+; RV32IZFH-NEXT:    call __fixhfdi
 ; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFH-NEXT:    addi sp, sp, 16
 ; RV32IZFH-NEXT:    ret
@@ -1991,7 +1991,7 @@ define i64 @fcvt_l_h(half %a) nounwind {
 ; RV32IDZFH:       # %bb.0:
 ; RV32IDZFH-NEXT:    addi sp, sp, -16
 ; RV32IDZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IDZFH-NEXT:    call __fixhfdi@plt
+; RV32IDZFH-NEXT:    call __fixhfdi
 ; RV32IDZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IDZFH-NEXT:    addi sp, sp, 16
 ; RV32IDZFH-NEXT:    ret
@@ -2005,7 +2005,7 @@ define i64 @fcvt_l_h(half %a) nounwind {
 ; RV32IZHINX:       # %bb.0:
 ; RV32IZHINX-NEXT:    addi sp, sp, -16
 ; RV32IZHINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZHINX-NEXT:    call __fixhfdi@plt
+; RV32IZHINX-NEXT:    call __fixhfdi
 ; RV32IZHINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZHINX-NEXT:    addi sp, sp, 16
 ; RV32IZHINX-NEXT:    ret
@@ -2019,7 +2019,7 @@ define i64 @fcvt_l_h(half %a) nounwind {
 ; RV32IZDINXZHINX:       # %bb.0:
 ; RV32IZDINXZHINX-NEXT:    addi sp, sp, -16
 ; RV32IZDINXZHINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZDINXZHINX-NEXT:    call __fixhfdi@plt
+; RV32IZDINXZHINX-NEXT:    call __fixhfdi
 ; RV32IZDINXZHINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZDINXZHINX-NEXT:    addi sp, sp, 16
 ; RV32IZDINXZHINX-NEXT:    ret
@@ -2035,8 +2035,8 @@ define i64 @fcvt_l_h(half %a) nounwind {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    slli a0, a0, 16
 ; RV32I-NEXT:    srli a0, a0, 16
-; RV32I-NEXT:    call __extendhfsf2@plt
-; RV32I-NEXT:    call __fixsfdi@plt
+; RV32I-NEXT:    call __extendhfsf2
+; RV32I-NEXT:    call __fixsfdi
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -2047,8 +2047,8 @@ define i64 @fcvt_l_h(half %a) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    slli a0, a0, 48
 ; RV64I-NEXT:    srli a0, a0, 48
-; RV64I-NEXT:    call __extendhfsf2@plt
-; RV64I-NEXT:    call __fixsfdi@plt
+; RV64I-NEXT:    call __extendhfsf2
+; RV64I-NEXT:    call __fixsfdi
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -2057,8 +2057,8 @@ define i64 @fcvt_l_h(half %a) nounwind {
 ; RV32ID-ILP32:       # %bb.0:
 ; RV32ID-ILP32-NEXT:    addi sp, sp, -16
 ; RV32ID-ILP32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-ILP32-NEXT:    call __extendhfsf2@plt
-; RV32ID-ILP32-NEXT:    call __fixsfdi@plt
+; RV32ID-ILP32-NEXT:    call __extendhfsf2
+; RV32ID-ILP32-NEXT:    call __fixsfdi
 ; RV32ID-ILP32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32ID-ILP32-NEXT:    addi sp, sp, 16
 ; RV32ID-ILP32-NEXT:    ret
@@ -2067,7 +2067,7 @@ define i64 @fcvt_l_h(half %a) nounwind {
 ; RV64ID-LP64:       # %bb.0:
 ; RV64ID-LP64-NEXT:    addi sp, sp, -16
 ; RV64ID-LP64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-LP64-NEXT:    call __extendhfsf2@plt
+; RV64ID-LP64-NEXT:    call __extendhfsf2
 ; RV64ID-LP64-NEXT:    fmv.w.x fa5, a0
 ; RV64ID-LP64-NEXT:    fcvt.l.s a0, fa5, rtz
 ; RV64ID-LP64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -2078,8 +2078,8 @@ define i64 @fcvt_l_h(half %a) nounwind {
 ; RV32ID:       # %bb.0:
 ; RV32ID-NEXT:    addi sp, sp, -16
 ; RV32ID-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-NEXT:    call __extendhfsf2@plt
-; RV32ID-NEXT:    call __fixsfdi@plt
+; RV32ID-NEXT:    call __extendhfsf2
+; RV32ID-NEXT:    call __fixsfdi
 ; RV32ID-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32ID-NEXT:    addi sp, sp, 16
 ; RV32ID-NEXT:    ret
@@ -2088,7 +2088,7 @@ define i64 @fcvt_l_h(half %a) nounwind {
 ; RV64ID:       # %bb.0:
 ; RV64ID-NEXT:    addi sp, sp, -16
 ; RV64ID-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-NEXT:    call __extendhfsf2@plt
+; RV64ID-NEXT:    call __extendhfsf2
 ; RV64ID-NEXT:    fcvt.l.s a0, fa0, rtz
 ; RV64ID-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64ID-NEXT:    addi sp, sp, 16
@@ -2098,7 +2098,7 @@ define i64 @fcvt_l_h(half %a) nounwind {
 ; CHECK32-IZFHMIN:       # %bb.0:
 ; CHECK32-IZFHMIN-NEXT:    addi sp, sp, -16
 ; CHECK32-IZFHMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; CHECK32-IZFHMIN-NEXT:    call __fixhfdi@plt
+; CHECK32-IZFHMIN-NEXT:    call __fixhfdi
 ; CHECK32-IZFHMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; CHECK32-IZFHMIN-NEXT:    addi sp, sp, 16
 ; CHECK32-IZFHMIN-NEXT:    ret
@@ -2113,7 +2113,7 @@ define i64 @fcvt_l_h(half %a) nounwind {
 ; CHECK32-IZHINXMIN:       # %bb.0:
 ; CHECK32-IZHINXMIN-NEXT:    addi sp, sp, -16
 ; CHECK32-IZHINXMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; CHECK32-IZHINXMIN-NEXT:    call __fixhfdi@plt
+; CHECK32-IZHINXMIN-NEXT:    call __fixhfdi
 ; CHECK32-IZHINXMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; CHECK32-IZHINXMIN-NEXT:    addi sp, sp, 16
 ; CHECK32-IZHINXMIN-NEXT:    ret
@@ -2128,7 +2128,7 @@ define i64 @fcvt_l_h(half %a) nounwind {
 ; CHECK32-IZDINXZHINXMIN:       # %bb.0:
 ; CHECK32-IZDINXZHINXMIN-NEXT:    addi sp, sp, -16
 ; CHECK32-IZDINXZHINXMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; CHECK32-IZDINXZHINXMIN-NEXT:    call __fixhfdi@plt
+; CHECK32-IZDINXZHINXMIN-NEXT:    call __fixhfdi
 ; CHECK32-IZDINXZHINXMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; CHECK32-IZDINXZHINXMIN-NEXT:    addi sp, sp, 16
 ; CHECK32-IZDINXZHINXMIN-NEXT:    ret
@@ -2154,7 +2154,7 @@ define i64 @fcvt_l_h_sat(half %a) nounwind {
 ; RV32IZFH-NEXT:    fmv.w.x fa5, a0
 ; RV32IZFH-NEXT:    fle.s s0, fa5, fs0
 ; RV32IZFH-NEXT:    fmv.s fa0, fs0
-; RV32IZFH-NEXT:    call __fixsfdi@plt
+; RV32IZFH-NEXT:    call __fixsfdi
 ; RV32IZFH-NEXT:    lui a4, 524288
 ; RV32IZFH-NEXT:    lui a2, 524288
 ; RV32IZFH-NEXT:    beqz s0, .LBB10_2
@@ -2202,7 +2202,7 @@ define i64 @fcvt_l_h_sat(half %a) nounwind {
 ; RV32IDZFH-NEXT:    fmv.w.x fa5, a0
 ; RV32IDZFH-NEXT:    fle.s s0, fa5, fs0
 ; RV32IDZFH-NEXT:    fmv.s fa0, fs0
-; RV32IDZFH-NEXT:    call __fixsfdi@plt
+; RV32IDZFH-NEXT:    call __fixsfdi
 ; RV32IDZFH-NEXT:    lui a4, 524288
 ; RV32IDZFH-NEXT:    lui a2, 524288
 ; RV32IDZFH-NEXT:    beqz s0, .LBB10_2
@@ -2257,7 +2257,7 @@ define i64 @fcvt_l_h_sat(half %a) nounwind {
 ; RV32IZHINX-NEXT:    fle.s s3, a0, s0
 ; RV32IZHINX-NEXT:    neg s4, s3
 ; RV32IZHINX-NEXT:    mv a0, s0
-; RV32IZHINX-NEXT:    call __fixsfdi@plt
+; RV32IZHINX-NEXT:    call __fixsfdi
 ; RV32IZHINX-NEXT:    and a0, s4, a0
 ; RV32IZHINX-NEXT:    or a0, s2, a0
 ; RV32IZHINX-NEXT:    feq.s a2, s0, s0
@@ -2310,7 +2310,7 @@ define i64 @fcvt_l_h_sat(half %a) nounwind {
 ; RV32IZDINXZHINX-NEXT:    fle.s s3, a0, s0
 ; RV32IZDINXZHINX-NEXT:    neg s4, s3
 ; RV32IZDINXZHINX-NEXT:    mv a0, s0
-; RV32IZDINXZHINX-NEXT:    call __fixsfdi@plt
+; RV32IZDINXZHINX-NEXT:    call __fixsfdi
 ; RV32IZDINXZHINX-NEXT:    and a0, s4, a0
 ; RV32IZDINXZHINX-NEXT:    or a0, s2, a0
 ; RV32IZDINXZHINX-NEXT:    feq.s a2, s0, s0
@@ -2355,13 +2355,13 @@ define i64 @fcvt_l_h_sat(half %a) nounwind {
 ; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sw s4, 8(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sw s5, 4(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    lui a1, 913408
-; RV32I-NEXT:    call __gesf2@plt
+; RV32I-NEXT:    call __gesf2
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    mv a0, s1
-; RV32I-NEXT:    call __fixsfdi@plt
+; RV32I-NEXT:    call __fixsfdi
 ; RV32I-NEXT:    mv s2, a0
 ; RV32I-NEXT:    mv s3, a1
 ; RV32I-NEXT:    lui s5, 524288
@@ -2372,7 +2372,7 @@ define i64 @fcvt_l_h_sat(half %a) nounwind {
 ; RV32I-NEXT:    lui a1, 389120
 ; RV32I-NEXT:    addi a1, a1, -1
 ; RV32I-NEXT:    mv a0, s1
-; RV32I-NEXT:    call __gtsf2@plt
+; RV32I-NEXT:    call __gtsf2
 ; RV32I-NEXT:    mv s4, a0
 ; RV32I-NEXT:    blez a0, .LBB10_4
 ; RV32I-NEXT:  # %bb.3: # %start
@@ -2380,7 +2380,7 @@ define i64 @fcvt_l_h_sat(half %a) nounwind {
 ; RV32I-NEXT:  .LBB10_4: # %start
 ; RV32I-NEXT:    mv a0, s1
 ; RV32I-NEXT:    mv a1, s1
-; RV32I-NEXT:    call __unordsf2@plt
+; RV32I-NEXT:    call __unordsf2
 ; RV32I-NEXT:    snez a0, a0
 ; RV32I-NEXT:    addi a0, a0, -1
 ; RV32I-NEXT:    and a1, a0, s3
@@ -2411,13 +2411,13 @@ define i64 @fcvt_l_h_sat(half %a) nounwind {
 ; RV64I-NEXT:    sd s3, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    slli a0, a0, 48
 ; RV64I-NEXT:    srli a0, a0, 48
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    lui a1, 913408
-; RV64I-NEXT:    call __gesf2@plt
+; RV64I-NEXT:    call __gesf2
 ; RV64I-NEXT:    mv s2, a0
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __fixsfdi@plt
+; RV64I-NEXT:    call __fixsfdi
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    li s3, -1
 ; RV64I-NEXT:    bgez s2, .LBB10_2
@@ -2427,14 +2427,14 @@ define i64 @fcvt_l_h_sat(half %a) nounwind {
 ; RV64I-NEXT:    lui a1, 389120
 ; RV64I-NEXT:    addiw a1, a1, -1
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __gtsf2@plt
+; RV64I-NEXT:    call __gtsf2
 ; RV64I-NEXT:    blez a0, .LBB10_4
 ; RV64I-NEXT:  # %bb.3: # %start
 ; RV64I-NEXT:    srli s1, s3, 1
 ; RV64I-NEXT:  .LBB10_4: # %start
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    mv a1, s0
-; RV64I-NEXT:    call __unordsf2@plt
+; RV64I-NEXT:    call __unordsf2
 ; RV64I-NEXT:    snez a0, a0
 ; RV64I-NEXT:    addi a0, a0, -1
 ; RV64I-NEXT:    and a0, a0, s1
@@ -2451,13 +2451,13 @@ define i64 @fcvt_l_h_sat(half %a) nounwind {
 ; RV32ID-ILP32-NEXT:    addi sp, sp, -16
 ; RV32ID-ILP32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32ID-ILP32-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
-; RV32ID-ILP32-NEXT:    call __extendhfsf2@plt
+; RV32ID-ILP32-NEXT:    call __extendhfsf2
 ; RV32ID-ILP32-NEXT:    fmv.w.x fa4, a0
 ; RV32ID-ILP32-NEXT:    lui a1, 913408
 ; RV32ID-ILP32-NEXT:    fmv.w.x fa5, a1
 ; RV32ID-ILP32-NEXT:    fsw fa4, 4(sp) # 4-byte Folded Spill
 ; RV32ID-ILP32-NEXT:    fle.s s0, fa5, fa4
-; RV32ID-ILP32-NEXT:    call __fixsfdi@plt
+; RV32ID-ILP32-NEXT:    call __fixsfdi
 ; RV32ID-ILP32-NEXT:    lui a4, 524288
 ; RV32ID-ILP32-NEXT:    lui a2, 524288
 ; RV32ID-ILP32-NEXT:    beqz s0, .LBB10_2
@@ -2490,7 +2490,7 @@ define i64 @fcvt_l_h_sat(half %a) nounwind {
 ; RV64ID-LP64:       # %bb.0: # %start
 ; RV64ID-LP64-NEXT:    addi sp, sp, -16
 ; RV64ID-LP64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-LP64-NEXT:    call __extendhfsf2@plt
+; RV64ID-LP64-NEXT:    call __extendhfsf2
 ; RV64ID-LP64-NEXT:    fmv.w.x fa5, a0
 ; RV64ID-LP64-NEXT:    fcvt.l.s a0, fa5, rtz
 ; RV64ID-LP64-NEXT:    feq.s a1, fa5, fa5
@@ -2507,12 +2507,12 @@ define i64 @fcvt_l_h_sat(half %a) nounwind {
 ; RV32ID-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32ID-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32ID-NEXT:    fsd fs0, 0(sp) # 8-byte Folded Spill
-; RV32ID-NEXT:    call __extendhfsf2@plt
+; RV32ID-NEXT:    call __extendhfsf2
 ; RV32ID-NEXT:    fmv.s fs0, fa0
 ; RV32ID-NEXT:    lui a0, 913408
 ; RV32ID-NEXT:    fmv.w.x fa5, a0
 ; RV32ID-NEXT:    fle.s s0, fa5, fa0
-; RV32ID-NEXT:    call __fixsfdi@plt
+; RV32ID-NEXT:    call __fixsfdi
 ; RV32ID-NEXT:    lui a4, 524288
 ; RV32ID-NEXT:    lui a2, 524288
 ; RV32ID-NEXT:    beqz s0, .LBB10_2
@@ -2544,7 +2544,7 @@ define i64 @fcvt_l_h_sat(half %a) nounwind {
 ; RV64ID:       # %bb.0: # %start
 ; RV64ID-NEXT:    addi sp, sp, -16
 ; RV64ID-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-NEXT:    call __extendhfsf2@plt
+; RV64ID-NEXT:    call __extendhfsf2
 ; RV64ID-NEXT:    fcvt.l.s a0, fa0, rtz
 ; RV64ID-NEXT:    feq.s a1, fa0, fa0
 ; RV64ID-NEXT:    seqz a1, a1
@@ -2565,7 +2565,7 @@ define i64 @fcvt_l_h_sat(half %a) nounwind {
 ; RV32IFZFHMIN-NEXT:    fmv.w.x fa5, a0
 ; RV32IFZFHMIN-NEXT:    fle.s s0, fa5, fs0
 ; RV32IFZFHMIN-NEXT:    fmv.s fa0, fs0
-; RV32IFZFHMIN-NEXT:    call __fixsfdi@plt
+; RV32IFZFHMIN-NEXT:    call __fixsfdi
 ; RV32IFZFHMIN-NEXT:    lui a4, 524288
 ; RV32IFZFHMIN-NEXT:    lui a2, 524288
 ; RV32IFZFHMIN-NEXT:    beqz s0, .LBB10_2
@@ -2614,7 +2614,7 @@ define i64 @fcvt_l_h_sat(half %a) nounwind {
 ; RV32IDZFHMIN-NEXT:    fmv.w.x fa5, a0
 ; RV32IDZFHMIN-NEXT:    fle.s s0, fa5, fs0
 ; RV32IDZFHMIN-NEXT:    fmv.s fa0, fs0
-; RV32IDZFHMIN-NEXT:    call __fixsfdi@plt
+; RV32IDZFHMIN-NEXT:    call __fixsfdi
 ; RV32IDZFHMIN-NEXT:    lui a4, 524288
 ; RV32IDZFHMIN-NEXT:    lui a2, 524288
 ; RV32IDZFHMIN-NEXT:    beqz s0, .LBB10_2
@@ -2660,7 +2660,7 @@ define i64 @fcvt_l_h_sat(half %a) nounwind {
 ; CHECK32-IZHINXMIN-NEXT:    fle.s s3, a0, s0
 ; CHECK32-IZHINXMIN-NEXT:    neg s4, s3
 ; CHECK32-IZHINXMIN-NEXT:    mv a0, s0
-; CHECK32-IZHINXMIN-NEXT:    call __fixsfdi@plt
+; CHECK32-IZHINXMIN-NEXT:    call __fixsfdi
 ; CHECK32-IZHINXMIN-NEXT:    and a0, s4, a0
 ; CHECK32-IZHINXMIN-NEXT:    or a0, s2, a0
 ; CHECK32-IZHINXMIN-NEXT:    feq.s a2, s0, s0
@@ -2714,7 +2714,7 @@ define i64 @fcvt_l_h_sat(half %a) nounwind {
 ; CHECK32-IZDINXZHINXMIN-NEXT:    fle.s s3, a0, s0
 ; CHECK32-IZDINXZHINXMIN-NEXT:    neg s4, s3
 ; CHECK32-IZDINXZHINXMIN-NEXT:    mv a0, s0
-; CHECK32-IZDINXZHINXMIN-NEXT:    call __fixsfdi@plt
+; CHECK32-IZDINXZHINXMIN-NEXT:    call __fixsfdi
 ; CHECK32-IZDINXZHINXMIN-NEXT:    and a0, s4, a0
 ; CHECK32-IZDINXZHINXMIN-NEXT:    or a0, s2, a0
 ; CHECK32-IZDINXZHINXMIN-NEXT:    feq.s a2, s0, s0
@@ -2760,7 +2760,7 @@ define i64 @fcvt_lu_h(half %a) nounwind {
 ; RV32IZFH:       # %bb.0:
 ; RV32IZFH-NEXT:    addi sp, sp, -16
 ; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFH-NEXT:    call __fixunshfdi@plt
+; RV32IZFH-NEXT:    call __fixunshfdi
 ; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFH-NEXT:    addi sp, sp, 16
 ; RV32IZFH-NEXT:    ret
@@ -2774,7 +2774,7 @@ define i64 @fcvt_lu_h(half %a) nounwind {
 ; RV32IDZFH:       # %bb.0:
 ; RV32IDZFH-NEXT:    addi sp, sp, -16
 ; RV32IDZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IDZFH-NEXT:    call __fixunshfdi@plt
+; RV32IDZFH-NEXT:    call __fixunshfdi
 ; RV32IDZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IDZFH-NEXT:    addi sp, sp, 16
 ; RV32IDZFH-NEXT:    ret
@@ -2788,7 +2788,7 @@ define i64 @fcvt_lu_h(half %a) nounwind {
 ; RV32IZHINX:       # %bb.0:
 ; RV32IZHINX-NEXT:    addi sp, sp, -16
 ; RV32IZHINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZHINX-NEXT:    call __fixunshfdi@plt
+; RV32IZHINX-NEXT:    call __fixunshfdi
 ; RV32IZHINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZHINX-NEXT:    addi sp, sp, 16
 ; RV32IZHINX-NEXT:    ret
@@ -2802,7 +2802,7 @@ define i64 @fcvt_lu_h(half %a) nounwind {
 ; RV32IZDINXZHINX:       # %bb.0:
 ; RV32IZDINXZHINX-NEXT:    addi sp, sp, -16
 ; RV32IZDINXZHINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZDINXZHINX-NEXT:    call __fixunshfdi@plt
+; RV32IZDINXZHINX-NEXT:    call __fixunshfdi
 ; RV32IZDINXZHINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZDINXZHINX-NEXT:    addi sp, sp, 16
 ; RV32IZDINXZHINX-NEXT:    ret
@@ -2818,8 +2818,8 @@ define i64 @fcvt_lu_h(half %a) nounwind {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    slli a0, a0, 16
 ; RV32I-NEXT:    srli a0, a0, 16
-; RV32I-NEXT:    call __extendhfsf2@plt
-; RV32I-NEXT:    call __fixunssfdi@plt
+; RV32I-NEXT:    call __extendhfsf2
+; RV32I-NEXT:    call __fixunssfdi
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -2830,8 +2830,8 @@ define i64 @fcvt_lu_h(half %a) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    slli a0, a0, 48
 ; RV64I-NEXT:    srli a0, a0, 48
-; RV64I-NEXT:    call __extendhfsf2@plt
-; RV64I-NEXT:    call __fixunssfdi@plt
+; RV64I-NEXT:    call __extendhfsf2
+; RV64I-NEXT:    call __fixunssfdi
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -2840,8 +2840,8 @@ define i64 @fcvt_lu_h(half %a) nounwind {
 ; RV32ID-ILP32:       # %bb.0:
 ; RV32ID-ILP32-NEXT:    addi sp, sp, -16
 ; RV32ID-ILP32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-ILP32-NEXT:    call __extendhfsf2@plt
-; RV32ID-ILP32-NEXT:    call __fixunssfdi@plt
+; RV32ID-ILP32-NEXT:    call __extendhfsf2
+; RV32ID-ILP32-NEXT:    call __fixunssfdi
 ; RV32ID-ILP32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32ID-ILP32-NEXT:    addi sp, sp, 16
 ; RV32ID-ILP32-NEXT:    ret
@@ -2850,7 +2850,7 @@ define i64 @fcvt_lu_h(half %a) nounwind {
 ; RV64ID-LP64:       # %bb.0:
 ; RV64ID-LP64-NEXT:    addi sp, sp, -16
 ; RV64ID-LP64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-LP64-NEXT:    call __extendhfsf2@plt
+; RV64ID-LP64-NEXT:    call __extendhfsf2
 ; RV64ID-LP64-NEXT:    fmv.w.x fa5, a0
 ; RV64ID-LP64-NEXT:    fcvt.lu.s a0, fa5, rtz
 ; RV64ID-LP64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -2861,8 +2861,8 @@ define i64 @fcvt_lu_h(half %a) nounwind {
 ; RV32ID:       # %bb.0:
 ; RV32ID-NEXT:    addi sp, sp, -16
 ; RV32ID-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-NEXT:    call __extendhfsf2@plt
-; RV32ID-NEXT:    call __fixunssfdi@plt
+; RV32ID-NEXT:    call __extendhfsf2
+; RV32ID-NEXT:    call __fixunssfdi
 ; RV32ID-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32ID-NEXT:    addi sp, sp, 16
 ; RV32ID-NEXT:    ret
@@ -2871,7 +2871,7 @@ define i64 @fcvt_lu_h(half %a) nounwind {
 ; RV64ID:       # %bb.0:
 ; RV64ID-NEXT:    addi sp, sp, -16
 ; RV64ID-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-NEXT:    call __extendhfsf2@plt
+; RV64ID-NEXT:    call __extendhfsf2
 ; RV64ID-NEXT:    fcvt.lu.s a0, fa0, rtz
 ; RV64ID-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64ID-NEXT:    addi sp, sp, 16
@@ -2881,7 +2881,7 @@ define i64 @fcvt_lu_h(half %a) nounwind {
 ; CHECK32-IZFHMIN:       # %bb.0:
 ; CHECK32-IZFHMIN-NEXT:    addi sp, sp, -16
 ; CHECK32-IZFHMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; CHECK32-IZFHMIN-NEXT:    call __fixunshfdi@plt
+; CHECK32-IZFHMIN-NEXT:    call __fixunshfdi
 ; CHECK32-IZFHMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; CHECK32-IZFHMIN-NEXT:    addi sp, sp, 16
 ; CHECK32-IZFHMIN-NEXT:    ret
@@ -2896,7 +2896,7 @@ define i64 @fcvt_lu_h(half %a) nounwind {
 ; CHECK32-IZHINXMIN:       # %bb.0:
 ; CHECK32-IZHINXMIN-NEXT:    addi sp, sp, -16
 ; CHECK32-IZHINXMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; CHECK32-IZHINXMIN-NEXT:    call __fixunshfdi@plt
+; CHECK32-IZHINXMIN-NEXT:    call __fixunshfdi
 ; CHECK32-IZHINXMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; CHECK32-IZHINXMIN-NEXT:    addi sp, sp, 16
 ; CHECK32-IZHINXMIN-NEXT:    ret
@@ -2911,7 +2911,7 @@ define i64 @fcvt_lu_h(half %a) nounwind {
 ; CHECK32-IZDINXZHINXMIN:       # %bb.0:
 ; CHECK32-IZDINXZHINXMIN-NEXT:    addi sp, sp, -16
 ; CHECK32-IZDINXZHINXMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; CHECK32-IZDINXZHINXMIN-NEXT:    call __fixunshfdi@plt
+; CHECK32-IZDINXZHINXMIN-NEXT:    call __fixunshfdi
 ; CHECK32-IZDINXZHINXMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; CHECK32-IZDINXZHINXMIN-NEXT:    addi sp, sp, 16
 ; CHECK32-IZDINXZHINXMIN-NEXT:    ret
@@ -2940,7 +2940,7 @@ define i64 @fcvt_lu_h_sat(half %a) nounwind {
 ; RV32IZFH-NEXT:    fmv.w.x fa5, zero
 ; RV32IZFH-NEXT:    fle.s a0, fa5, fa0
 ; RV32IZFH-NEXT:    neg s1, a0
-; RV32IZFH-NEXT:    call __fixunssfdi@plt
+; RV32IZFH-NEXT:    call __fixunssfdi
 ; RV32IZFH-NEXT:    and a0, s1, a0
 ; RV32IZFH-NEXT:    or a0, s0, a0
 ; RV32IZFH-NEXT:    and a1, s1, a1
@@ -2974,7 +2974,7 @@ define i64 @fcvt_lu_h_sat(half %a) nounwind {
 ; RV32IDZFH-NEXT:    fmv.w.x fa5, zero
 ; RV32IDZFH-NEXT:    fle.s a0, fa5, fa0
 ; RV32IDZFH-NEXT:    neg s1, a0
-; RV32IDZFH-NEXT:    call __fixunssfdi@plt
+; RV32IDZFH-NEXT:    call __fixunssfdi
 ; RV32IDZFH-NEXT:    and a0, s1, a0
 ; RV32IDZFH-NEXT:    or a0, s0, a0
 ; RV32IDZFH-NEXT:    and a1, s1, a1
@@ -3007,7 +3007,7 @@ define i64 @fcvt_lu_h_sat(half %a) nounwind {
 ; RV32IZHINX-NEXT:    neg s0, a1
 ; RV32IZHINX-NEXT:    fle.s a1, zero, a0
 ; RV32IZHINX-NEXT:    neg s1, a1
-; RV32IZHINX-NEXT:    call __fixunssfdi@plt
+; RV32IZHINX-NEXT:    call __fixunssfdi
 ; RV32IZHINX-NEXT:    and a0, s1, a0
 ; RV32IZHINX-NEXT:    or a0, s0, a0
 ; RV32IZHINX-NEXT:    and a1, s1, a1
@@ -3040,7 +3040,7 @@ define i64 @fcvt_lu_h_sat(half %a) nounwind {
 ; RV32IZDINXZHINX-NEXT:    neg s0, a1
 ; RV32IZDINXZHINX-NEXT:    fle.s a1, zero, a0
 ; RV32IZDINXZHINX-NEXT:    neg s1, a1
-; RV32IZDINXZHINX-NEXT:    call __fixunssfdi@plt
+; RV32IZDINXZHINX-NEXT:    call __fixunssfdi
 ; RV32IZDINXZHINX-NEXT:    and a0, s1, a0
 ; RV32IZDINXZHINX-NEXT:    or a0, s0, a0
 ; RV32IZDINXZHINX-NEXT:    and a1, s1, a1
@@ -3067,20 +3067,20 @@ define i64 @fcvt_lu_h_sat(half %a) nounwind {
 ; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sw s2, 0(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lui a1, 391168
 ; RV32I-NEXT:    addi a1, a1, -1
-; RV32I-NEXT:    call __gtsf2@plt
+; RV32I-NEXT:    call __gtsf2
 ; RV32I-NEXT:    sgtz a0, a0
 ; RV32I-NEXT:    neg s1, a0
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    li a1, 0
-; RV32I-NEXT:    call __gesf2@plt
+; RV32I-NEXT:    call __gesf2
 ; RV32I-NEXT:    slti a0, a0, 0
 ; RV32I-NEXT:    addi s2, a0, -1
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __fixunssfdi@plt
+; RV32I-NEXT:    call __fixunssfdi
 ; RV32I-NEXT:    and a0, s2, a0
 ; RV32I-NEXT:    or a0, s1, a0
 ; RV32I-NEXT:    and a1, s2, a1
@@ -3101,20 +3101,20 @@ define i64 @fcvt_lu_h_sat(half %a) nounwind {
 ; RV64I-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    slli a0, a0, 48
 ; RV64I-NEXT:    srli a0, a0, 48
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    lui a1, 391168
 ; RV64I-NEXT:    addiw a1, a1, -1
-; RV64I-NEXT:    call __gtsf2@plt
+; RV64I-NEXT:    call __gtsf2
 ; RV64I-NEXT:    sgtz a0, a0
 ; RV64I-NEXT:    neg s1, a0
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    li a1, 0
-; RV64I-NEXT:    call __gesf2@plt
+; RV64I-NEXT:    call __gesf2
 ; RV64I-NEXT:    slti a0, a0, 0
 ; RV64I-NEXT:    addi s2, a0, -1
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __fixunssfdi@plt
+; RV64I-NEXT:    call __fixunssfdi
 ; RV64I-NEXT:    and a0, s2, a0
 ; RV64I-NEXT:    or a0, s1, a0
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
@@ -3130,7 +3130,7 @@ define i64 @fcvt_lu_h_sat(half %a) nounwind {
 ; RV32ID-ILP32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32ID-ILP32-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32ID-ILP32-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
-; RV32ID-ILP32-NEXT:    call __extendhfsf2@plt
+; RV32ID-ILP32-NEXT:    call __extendhfsf2
 ; RV32ID-ILP32-NEXT:    lui a1, %hi(.LCPI12_0)
 ; RV32ID-ILP32-NEXT:    flw fa5, %lo(.LCPI12_0)(a1)
 ; RV32ID-ILP32-NEXT:    fmv.w.x fa4, a0
@@ -3139,7 +3139,7 @@ define i64 @fcvt_lu_h_sat(half %a) nounwind {
 ; RV32ID-ILP32-NEXT:    fmv.w.x fa5, zero
 ; RV32ID-ILP32-NEXT:    fle.s a1, fa5, fa4
 ; RV32ID-ILP32-NEXT:    neg s1, a1
-; RV32ID-ILP32-NEXT:    call __fixunssfdi@plt
+; RV32ID-ILP32-NEXT:    call __fixunssfdi
 ; RV32ID-ILP32-NEXT:    and a0, s1, a0
 ; RV32ID-ILP32-NEXT:    or a0, s0, a0
 ; RV32ID-ILP32-NEXT:    and a1, s1, a1
@@ -3154,7 +3154,7 @@ define i64 @fcvt_lu_h_sat(half %a) nounwind {
 ; RV64ID-LP64:       # %bb.0: # %start
 ; RV64ID-LP64-NEXT:    addi sp, sp, -16
 ; RV64ID-LP64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-LP64-NEXT:    call __extendhfsf2@plt
+; RV64ID-LP64-NEXT:    call __extendhfsf2
 ; RV64ID-LP64-NEXT:    fmv.w.x fa5, a0
 ; RV64ID-LP64-NEXT:    fcvt.lu.s a0, fa5, rtz
 ; RV64ID-LP64-NEXT:    feq.s a1, fa5, fa5
@@ -3171,7 +3171,7 @@ define i64 @fcvt_lu_h_sat(half %a) nounwind {
 ; RV32ID-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32ID-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32ID-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
-; RV32ID-NEXT:    call __extendhfsf2@plt
+; RV32ID-NEXT:    call __extendhfsf2
 ; RV32ID-NEXT:    lui a0, %hi(.LCPI12_0)
 ; RV32ID-NEXT:    flw fa5, %lo(.LCPI12_0)(a0)
 ; RV32ID-NEXT:    flt.s a0, fa5, fa0
@@ -3179,7 +3179,7 @@ define i64 @fcvt_lu_h_sat(half %a) nounwind {
 ; RV32ID-NEXT:    fmv.w.x fa5, zero
 ; RV32ID-NEXT:    fle.s a0, fa5, fa0
 ; RV32ID-NEXT:    neg s1, a0
-; RV32ID-NEXT:    call __fixunssfdi@plt
+; RV32ID-NEXT:    call __fixunssfdi
 ; RV32ID-NEXT:    and a0, s1, a0
 ; RV32ID-NEXT:    or a0, s0, a0
 ; RV32ID-NEXT:    and a1, s1, a1
@@ -3194,7 +3194,7 @@ define i64 @fcvt_lu_h_sat(half %a) nounwind {
 ; RV64ID:       # %bb.0: # %start
 ; RV64ID-NEXT:    addi sp, sp, -16
 ; RV64ID-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-NEXT:    call __extendhfsf2@plt
+; RV64ID-NEXT:    call __extendhfsf2
 ; RV64ID-NEXT:    fcvt.lu.s a0, fa0, rtz
 ; RV64ID-NEXT:    feq.s a1, fa0, fa0
 ; RV64ID-NEXT:    seqz a1, a1
@@ -3218,7 +3218,7 @@ define i64 @fcvt_lu_h_sat(half %a) nounwind {
 ; CHECK32-IZFHMIN-NEXT:    fmv.w.x fa5, zero
 ; CHECK32-IZFHMIN-NEXT:    fle.s a0, fa5, fa0
 ; CHECK32-IZFHMIN-NEXT:    neg s1, a0
-; CHECK32-IZFHMIN-NEXT:    call __fixunssfdi@plt
+; CHECK32-IZFHMIN-NEXT:    call __fixunssfdi
 ; CHECK32-IZFHMIN-NEXT:    and a0, s1, a0
 ; CHECK32-IZFHMIN-NEXT:    or a0, s0, a0
 ; CHECK32-IZFHMIN-NEXT:    and a1, s1, a1
@@ -3252,7 +3252,7 @@ define i64 @fcvt_lu_h_sat(half %a) nounwind {
 ; CHECK32-IZHINXMIN-NEXT:    neg s0, a1
 ; CHECK32-IZHINXMIN-NEXT:    fle.s a1, zero, a0
 ; CHECK32-IZHINXMIN-NEXT:    neg s1, a1
-; CHECK32-IZHINXMIN-NEXT:    call __fixunssfdi@plt
+; CHECK32-IZHINXMIN-NEXT:    call __fixunssfdi
 ; CHECK32-IZHINXMIN-NEXT:    and a0, s1, a0
 ; CHECK32-IZHINXMIN-NEXT:    or a0, s0, a0
 ; CHECK32-IZHINXMIN-NEXT:    and a1, s1, a1
@@ -3286,7 +3286,7 @@ define i64 @fcvt_lu_h_sat(half %a) nounwind {
 ; CHECK32-IZDINXZHINXMIN-NEXT:    neg s0, a1
 ; CHECK32-IZDINXZHINXMIN-NEXT:    fle.s a1, zero, a0
 ; CHECK32-IZDINXZHINXMIN-NEXT:    neg s1, a1
-; CHECK32-IZDINXZHINXMIN-NEXT:    call __fixunssfdi@plt
+; CHECK32-IZDINXZHINXMIN-NEXT:    call __fixunssfdi
 ; CHECK32-IZDINXZHINXMIN-NEXT:    and a0, s1, a0
 ; CHECK32-IZDINXZHINXMIN-NEXT:    or a0, s0, a0
 ; CHECK32-IZDINXZHINXMIN-NEXT:    and a1, s1, a1
@@ -3375,8 +3375,8 @@ define half @fcvt_h_si(i16 %a) nounwind {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    slli a0, a0, 16
 ; RV32I-NEXT:    srai a0, a0, 16
-; RV32I-NEXT:    call __floatsisf@plt
-; RV32I-NEXT:    call __truncsfhf2@plt
+; RV32I-NEXT:    call __floatsisf
+; RV32I-NEXT:    call __truncsfhf2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -3387,8 +3387,8 @@ define half @fcvt_h_si(i16 %a) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    slli a0, a0, 48
 ; RV64I-NEXT:    srai a0, a0, 48
-; RV64I-NEXT:    call __floatsisf@plt
-; RV64I-NEXT:    call __truncsfhf2@plt
+; RV64I-NEXT:    call __floatsisf
+; RV64I-NEXT:    call __truncsfhf2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -3401,7 +3401,7 @@ define half @fcvt_h_si(i16 %a) nounwind {
 ; RV32ID-ILP32-NEXT:    srai a0, a0, 16
 ; RV32ID-ILP32-NEXT:    fcvt.s.w fa5, a0
 ; RV32ID-ILP32-NEXT:    fmv.x.w a0, fa5
-; RV32ID-ILP32-NEXT:    call __truncsfhf2@plt
+; RV32ID-ILP32-NEXT:    call __truncsfhf2
 ; RV32ID-ILP32-NEXT:    lui a1, 1048560
 ; RV32ID-ILP32-NEXT:    or a0, a0, a1
 ; RV32ID-ILP32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -3416,7 +3416,7 @@ define half @fcvt_h_si(i16 %a) nounwind {
 ; RV64ID-LP64-NEXT:    srai a0, a0, 48
 ; RV64ID-LP64-NEXT:    fcvt.s.w fa5, a0
 ; RV64ID-LP64-NEXT:    fmv.x.w a0, fa5
-; RV64ID-LP64-NEXT:    call __truncsfhf2@plt
+; RV64ID-LP64-NEXT:    call __truncsfhf2
 ; RV64ID-LP64-NEXT:    lui a1, 1048560
 ; RV64ID-LP64-NEXT:    or a0, a0, a1
 ; RV64ID-LP64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -3430,7 +3430,7 @@ define half @fcvt_h_si(i16 %a) nounwind {
 ; RV32ID-NEXT:    slli a0, a0, 16
 ; RV32ID-NEXT:    srai a0, a0, 16
 ; RV32ID-NEXT:    fcvt.s.w fa0, a0
-; RV32ID-NEXT:    call __truncsfhf2@plt
+; RV32ID-NEXT:    call __truncsfhf2
 ; RV32ID-NEXT:    fmv.x.w a0, fa0
 ; RV32ID-NEXT:    lui a1, 1048560
 ; RV32ID-NEXT:    or a0, a0, a1
@@ -3446,7 +3446,7 @@ define half @fcvt_h_si(i16 %a) nounwind {
 ; RV64ID-NEXT:    slli a0, a0, 48
 ; RV64ID-NEXT:    srai a0, a0, 48
 ; RV64ID-NEXT:    fcvt.s.w fa0, a0
-; RV64ID-NEXT:    call __truncsfhf2@plt
+; RV64ID-NEXT:    call __truncsfhf2
 ; RV64ID-NEXT:    fmv.x.w a0, fa0
 ; RV64ID-NEXT:    lui a1, 1048560
 ; RV64ID-NEXT:    or a0, a0, a1
@@ -3536,8 +3536,8 @@ define half @fcvt_h_si_signext(i16 signext %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __floatsisf@plt
-; RV32I-NEXT:    call __truncsfhf2@plt
+; RV32I-NEXT:    call __floatsisf
+; RV32I-NEXT:    call __truncsfhf2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -3546,8 +3546,8 @@ define half @fcvt_h_si_signext(i16 signext %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __floatsisf@plt
-; RV64I-NEXT:    call __truncsfhf2@plt
+; RV64I-NEXT:    call __floatsisf
+; RV64I-NEXT:    call __truncsfhf2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -3558,7 +3558,7 @@ define half @fcvt_h_si_signext(i16 signext %a) nounwind {
 ; RV32ID-ILP32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32ID-ILP32-NEXT:    fcvt.s.w fa5, a0
 ; RV32ID-ILP32-NEXT:    fmv.x.w a0, fa5
-; RV32ID-ILP32-NEXT:    call __truncsfhf2@plt
+; RV32ID-ILP32-NEXT:    call __truncsfhf2
 ; RV32ID-ILP32-NEXT:    lui a1, 1048560
 ; RV32ID-ILP32-NEXT:    or a0, a0, a1
 ; RV32ID-ILP32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -3571,7 +3571,7 @@ define half @fcvt_h_si_signext(i16 signext %a) nounwind {
 ; RV64ID-LP64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64ID-LP64-NEXT:    fcvt.s.w fa5, a0
 ; RV64ID-LP64-NEXT:    fmv.x.w a0, fa5
-; RV64ID-LP64-NEXT:    call __truncsfhf2@plt
+; RV64ID-LP64-NEXT:    call __truncsfhf2
 ; RV64ID-LP64-NEXT:    lui a1, 1048560
 ; RV64ID-LP64-NEXT:    or a0, a0, a1
 ; RV64ID-LP64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -3583,7 +3583,7 @@ define half @fcvt_h_si_signext(i16 signext %a) nounwind {
 ; RV32ID-NEXT:    addi sp, sp, -16
 ; RV32ID-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32ID-NEXT:    fcvt.s.w fa0, a0
-; RV32ID-NEXT:    call __truncsfhf2@plt
+; RV32ID-NEXT:    call __truncsfhf2
 ; RV32ID-NEXT:    fmv.x.w a0, fa0
 ; RV32ID-NEXT:    lui a1, 1048560
 ; RV32ID-NEXT:    or a0, a0, a1
@@ -3597,7 +3597,7 @@ define half @fcvt_h_si_signext(i16 signext %a) nounwind {
 ; RV64ID-NEXT:    addi sp, sp, -16
 ; RV64ID-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64ID-NEXT:    fcvt.s.w fa0, a0
-; RV64ID-NEXT:    call __truncsfhf2@plt
+; RV64ID-NEXT:    call __truncsfhf2
 ; RV64ID-NEXT:    fmv.x.w a0, fa0
 ; RV64ID-NEXT:    lui a1, 1048560
 ; RV64ID-NEXT:    or a0, a0, a1
@@ -3708,8 +3708,8 @@ define half @fcvt_h_ui(i16 %a) nounwind {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    slli a0, a0, 16
 ; RV32I-NEXT:    srli a0, a0, 16
-; RV32I-NEXT:    call __floatunsisf@plt
-; RV32I-NEXT:    call __truncsfhf2@plt
+; RV32I-NEXT:    call __floatunsisf
+; RV32I-NEXT:    call __truncsfhf2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -3720,8 +3720,8 @@ define half @fcvt_h_ui(i16 %a) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    slli a0, a0, 48
 ; RV64I-NEXT:    srli a0, a0, 48
-; RV64I-NEXT:    call __floatunsisf@plt
-; RV64I-NEXT:    call __truncsfhf2@plt
+; RV64I-NEXT:    call __floatunsisf
+; RV64I-NEXT:    call __truncsfhf2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -3734,7 +3734,7 @@ define half @fcvt_h_ui(i16 %a) nounwind {
 ; RV32ID-ILP32-NEXT:    srli a0, a0, 16
 ; RV32ID-ILP32-NEXT:    fcvt.s.wu fa5, a0
 ; RV32ID-ILP32-NEXT:    fmv.x.w a0, fa5
-; RV32ID-ILP32-NEXT:    call __truncsfhf2@plt
+; RV32ID-ILP32-NEXT:    call __truncsfhf2
 ; RV32ID-ILP32-NEXT:    lui a1, 1048560
 ; RV32ID-ILP32-NEXT:    or a0, a0, a1
 ; RV32ID-ILP32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -3749,7 +3749,7 @@ define half @fcvt_h_ui(i16 %a) nounwind {
 ; RV64ID-LP64-NEXT:    srli a0, a0, 48
 ; RV64ID-LP64-NEXT:    fcvt.s.wu fa5, a0
 ; RV64ID-LP64-NEXT:    fmv.x.w a0, fa5
-; RV64ID-LP64-NEXT:    call __truncsfhf2@plt
+; RV64ID-LP64-NEXT:    call __truncsfhf2
 ; RV64ID-LP64-NEXT:    lui a1, 1048560
 ; RV64ID-LP64-NEXT:    or a0, a0, a1
 ; RV64ID-LP64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -3763,7 +3763,7 @@ define half @fcvt_h_ui(i16 %a) nounwind {
 ; RV32ID-NEXT:    slli a0, a0, 16
 ; RV32ID-NEXT:    srli a0, a0, 16
 ; RV32ID-NEXT:    fcvt.s.wu fa0, a0
-; RV32ID-NEXT:    call __truncsfhf2@plt
+; RV32ID-NEXT:    call __truncsfhf2
 ; RV32ID-NEXT:    fmv.x.w a0, fa0
 ; RV32ID-NEXT:    lui a1, 1048560
 ; RV32ID-NEXT:    or a0, a0, a1
@@ -3779,7 +3779,7 @@ define half @fcvt_h_ui(i16 %a) nounwind {
 ; RV64ID-NEXT:    slli a0, a0, 48
 ; RV64ID-NEXT:    srli a0, a0, 48
 ; RV64ID-NEXT:    fcvt.s.wu fa0, a0
-; RV64ID-NEXT:    call __truncsfhf2@plt
+; RV64ID-NEXT:    call __truncsfhf2
 ; RV64ID-NEXT:    fmv.x.w a0, fa0
 ; RV64ID-NEXT:    lui a1, 1048560
 ; RV64ID-NEXT:    or a0, a0, a1
@@ -3869,8 +3869,8 @@ define half @fcvt_h_ui_zeroext(i16 zeroext %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __floatunsisf@plt
-; RV32I-NEXT:    call __truncsfhf2@plt
+; RV32I-NEXT:    call __floatunsisf
+; RV32I-NEXT:    call __truncsfhf2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -3879,8 +3879,8 @@ define half @fcvt_h_ui_zeroext(i16 zeroext %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __floatunsisf@plt
-; RV64I-NEXT:    call __truncsfhf2@plt
+; RV64I-NEXT:    call __floatunsisf
+; RV64I-NEXT:    call __truncsfhf2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -3891,7 +3891,7 @@ define half @fcvt_h_ui_zeroext(i16 zeroext %a) nounwind {
 ; RV32ID-ILP32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32ID-ILP32-NEXT:    fcvt.s.wu fa5, a0
 ; RV32ID-ILP32-NEXT:    fmv.x.w a0, fa5
-; RV32ID-ILP32-NEXT:    call __truncsfhf2@plt
+; RV32ID-ILP32-NEXT:    call __truncsfhf2
 ; RV32ID-ILP32-NEXT:    lui a1, 1048560
 ; RV32ID-ILP32-NEXT:    or a0, a0, a1
 ; RV32ID-ILP32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -3904,7 +3904,7 @@ define half @fcvt_h_ui_zeroext(i16 zeroext %a) nounwind {
 ; RV64ID-LP64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64ID-LP64-NEXT:    fcvt.s.wu fa5, a0
 ; RV64ID-LP64-NEXT:    fmv.x.w a0, fa5
-; RV64ID-LP64-NEXT:    call __truncsfhf2@plt
+; RV64ID-LP64-NEXT:    call __truncsfhf2
 ; RV64ID-LP64-NEXT:    lui a1, 1048560
 ; RV64ID-LP64-NEXT:    or a0, a0, a1
 ; RV64ID-LP64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -3916,7 +3916,7 @@ define half @fcvt_h_ui_zeroext(i16 zeroext %a) nounwind {
 ; RV32ID-NEXT:    addi sp, sp, -16
 ; RV32ID-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32ID-NEXT:    fcvt.s.wu fa0, a0
-; RV32ID-NEXT:    call __truncsfhf2@plt
+; RV32ID-NEXT:    call __truncsfhf2
 ; RV32ID-NEXT:    fmv.x.w a0, fa0
 ; RV32ID-NEXT:    lui a1, 1048560
 ; RV32ID-NEXT:    or a0, a0, a1
@@ -3930,7 +3930,7 @@ define half @fcvt_h_ui_zeroext(i16 zeroext %a) nounwind {
 ; RV64ID-NEXT:    addi sp, sp, -16
 ; RV64ID-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64ID-NEXT:    fcvt.s.wu fa0, a0
-; RV64ID-NEXT:    call __truncsfhf2@plt
+; RV64ID-NEXT:    call __truncsfhf2
 ; RV64ID-NEXT:    fmv.x.w a0, fa0
 ; RV64ID-NEXT:    lui a1, 1048560
 ; RV64ID-NEXT:    or a0, a0, a1
@@ -4008,8 +4008,8 @@ define half @fcvt_h_w(i32 %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __floatsisf@plt
-; RV32I-NEXT:    call __truncsfhf2@plt
+; RV32I-NEXT:    call __floatsisf
+; RV32I-NEXT:    call __truncsfhf2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -4019,8 +4019,8 @@ define half @fcvt_h_w(i32 %a) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sext.w a0, a0
-; RV64I-NEXT:    call __floatsisf@plt
-; RV64I-NEXT:    call __truncsfhf2@plt
+; RV64I-NEXT:    call __floatsisf
+; RV64I-NEXT:    call __truncsfhf2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -4031,7 +4031,7 @@ define half @fcvt_h_w(i32 %a) nounwind {
 ; RV32ID-ILP32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32ID-ILP32-NEXT:    fcvt.s.w fa5, a0
 ; RV32ID-ILP32-NEXT:    fmv.x.w a0, fa5
-; RV32ID-ILP32-NEXT:    call __truncsfhf2@plt
+; RV32ID-ILP32-NEXT:    call __truncsfhf2
 ; RV32ID-ILP32-NEXT:    lui a1, 1048560
 ; RV32ID-ILP32-NEXT:    or a0, a0, a1
 ; RV32ID-ILP32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -4044,7 +4044,7 @@ define half @fcvt_h_w(i32 %a) nounwind {
 ; RV64ID-LP64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64ID-LP64-NEXT:    fcvt.s.w fa5, a0
 ; RV64ID-LP64-NEXT:    fmv.x.w a0, fa5
-; RV64ID-LP64-NEXT:    call __truncsfhf2@plt
+; RV64ID-LP64-NEXT:    call __truncsfhf2
 ; RV64ID-LP64-NEXT:    lui a1, 1048560
 ; RV64ID-LP64-NEXT:    or a0, a0, a1
 ; RV64ID-LP64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -4056,7 +4056,7 @@ define half @fcvt_h_w(i32 %a) nounwind {
 ; RV32ID-NEXT:    addi sp, sp, -16
 ; RV32ID-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32ID-NEXT:    fcvt.s.w fa0, a0
-; RV32ID-NEXT:    call __truncsfhf2@plt
+; RV32ID-NEXT:    call __truncsfhf2
 ; RV32ID-NEXT:    fmv.x.w a0, fa0
 ; RV32ID-NEXT:    lui a1, 1048560
 ; RV32ID-NEXT:    or a0, a0, a1
@@ -4070,7 +4070,7 @@ define half @fcvt_h_w(i32 %a) nounwind {
 ; RV64ID-NEXT:    addi sp, sp, -16
 ; RV64ID-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64ID-NEXT:    fcvt.s.w fa0, a0
-; RV64ID-NEXT:    call __truncsfhf2@plt
+; RV64ID-NEXT:    call __truncsfhf2
 ; RV64ID-NEXT:    fmv.x.w a0, fa0
 ; RV64ID-NEXT:    lui a1, 1048560
 ; RV64ID-NEXT:    or a0, a0, a1
@@ -4157,8 +4157,8 @@ define half @fcvt_h_w_load(ptr %p) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    lw a0, 0(a0)
-; RV32I-NEXT:    call __floatsisf@plt
-; RV32I-NEXT:    call __truncsfhf2@plt
+; RV32I-NEXT:    call __floatsisf
+; RV32I-NEXT:    call __truncsfhf2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -4168,8 +4168,8 @@ define half @fcvt_h_w_load(ptr %p) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    lw a0, 0(a0)
-; RV64I-NEXT:    call __floatsisf@plt
-; RV64I-NEXT:    call __truncsfhf2@plt
+; RV64I-NEXT:    call __floatsisf
+; RV64I-NEXT:    call __truncsfhf2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -4181,7 +4181,7 @@ define half @fcvt_h_w_load(ptr %p) nounwind {
 ; RV32ID-ILP32-NEXT:    lw a0, 0(a0)
 ; RV32ID-ILP32-NEXT:    fcvt.s.w fa5, a0
 ; RV32ID-ILP32-NEXT:    fmv.x.w a0, fa5
-; RV32ID-ILP32-NEXT:    call __truncsfhf2@plt
+; RV32ID-ILP32-NEXT:    call __truncsfhf2
 ; RV32ID-ILP32-NEXT:    lui a1, 1048560
 ; RV32ID-ILP32-NEXT:    or a0, a0, a1
 ; RV32ID-ILP32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -4195,7 +4195,7 @@ define half @fcvt_h_w_load(ptr %p) nounwind {
 ; RV64ID-LP64-NEXT:    lw a0, 0(a0)
 ; RV64ID-LP64-NEXT:    fcvt.s.w fa5, a0
 ; RV64ID-LP64-NEXT:    fmv.x.w a0, fa5
-; RV64ID-LP64-NEXT:    call __truncsfhf2@plt
+; RV64ID-LP64-NEXT:    call __truncsfhf2
 ; RV64ID-LP64-NEXT:    lui a1, 1048560
 ; RV64ID-LP64-NEXT:    or a0, a0, a1
 ; RV64ID-LP64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -4208,7 +4208,7 @@ define half @fcvt_h_w_load(ptr %p) nounwind {
 ; RV32ID-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32ID-NEXT:    lw a0, 0(a0)
 ; RV32ID-NEXT:    fcvt.s.w fa0, a0
-; RV32ID-NEXT:    call __truncsfhf2@plt
+; RV32ID-NEXT:    call __truncsfhf2
 ; RV32ID-NEXT:    fmv.x.w a0, fa0
 ; RV32ID-NEXT:    lui a1, 1048560
 ; RV32ID-NEXT:    or a0, a0, a1
@@ -4223,7 +4223,7 @@ define half @fcvt_h_w_load(ptr %p) nounwind {
 ; RV64ID-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64ID-NEXT:    lw a0, 0(a0)
 ; RV64ID-NEXT:    fcvt.s.w fa0, a0
-; RV64ID-NEXT:    call __truncsfhf2@plt
+; RV64ID-NEXT:    call __truncsfhf2
 ; RV64ID-NEXT:    fmv.x.w a0, fa0
 ; RV64ID-NEXT:    lui a1, 1048560
 ; RV64ID-NEXT:    or a0, a0, a1
@@ -4308,8 +4308,8 @@ define half @fcvt_h_wu(i32 %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __floatunsisf@plt
-; RV32I-NEXT:    call __truncsfhf2@plt
+; RV32I-NEXT:    call __floatunsisf
+; RV32I-NEXT:    call __truncsfhf2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -4319,8 +4319,8 @@ define half @fcvt_h_wu(i32 %a) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sext.w a0, a0
-; RV64I-NEXT:    call __floatunsisf@plt
-; RV64I-NEXT:    call __truncsfhf2@plt
+; RV64I-NEXT:    call __floatunsisf
+; RV64I-NEXT:    call __truncsfhf2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -4331,7 +4331,7 @@ define half @fcvt_h_wu(i32 %a) nounwind {
 ; RV32ID-ILP32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32ID-ILP32-NEXT:    fcvt.s.wu fa5, a0
 ; RV32ID-ILP32-NEXT:    fmv.x.w a0, fa5
-; RV32ID-ILP32-NEXT:    call __truncsfhf2@plt
+; RV32ID-ILP32-NEXT:    call __truncsfhf2
 ; RV32ID-ILP32-NEXT:    lui a1, 1048560
 ; RV32ID-ILP32-NEXT:    or a0, a0, a1
 ; RV32ID-ILP32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -4344,7 +4344,7 @@ define half @fcvt_h_wu(i32 %a) nounwind {
 ; RV64ID-LP64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64ID-LP64-NEXT:    fcvt.s.wu fa5, a0
 ; RV64ID-LP64-NEXT:    fmv.x.w a0, fa5
-; RV64ID-LP64-NEXT:    call __truncsfhf2@plt
+; RV64ID-LP64-NEXT:    call __truncsfhf2
 ; RV64ID-LP64-NEXT:    lui a1, 1048560
 ; RV64ID-LP64-NEXT:    or a0, a0, a1
 ; RV64ID-LP64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -4356,7 +4356,7 @@ define half @fcvt_h_wu(i32 %a) nounwind {
 ; RV32ID-NEXT:    addi sp, sp, -16
 ; RV32ID-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32ID-NEXT:    fcvt.s.wu fa0, a0
-; RV32ID-NEXT:    call __truncsfhf2@plt
+; RV32ID-NEXT:    call __truncsfhf2
 ; RV32ID-NEXT:    fmv.x.w a0, fa0
 ; RV32ID-NEXT:    lui a1, 1048560
 ; RV32ID-NEXT:    or a0, a0, a1
@@ -4370,7 +4370,7 @@ define half @fcvt_h_wu(i32 %a) nounwind {
 ; RV64ID-NEXT:    addi sp, sp, -16
 ; RV64ID-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64ID-NEXT:    fcvt.s.wu fa0, a0
-; RV64ID-NEXT:    call __truncsfhf2@plt
+; RV64ID-NEXT:    call __truncsfhf2
 ; RV64ID-NEXT:    fmv.x.w a0, fa0
 ; RV64ID-NEXT:    lui a1, 1048560
 ; RV64ID-NEXT:    or a0, a0, a1
@@ -4478,8 +4478,8 @@ define half @fcvt_h_wu_load(ptr %p) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    lw a0, 0(a0)
-; RV32I-NEXT:    call __floatunsisf@plt
-; RV32I-NEXT:    call __truncsfhf2@plt
+; RV32I-NEXT:    call __floatunsisf
+; RV32I-NEXT:    call __truncsfhf2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -4489,8 +4489,8 @@ define half @fcvt_h_wu_load(ptr %p) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    lw a0, 0(a0)
-; RV64I-NEXT:    call __floatunsisf@plt
-; RV64I-NEXT:    call __truncsfhf2@plt
+; RV64I-NEXT:    call __floatunsisf
+; RV64I-NEXT:    call __truncsfhf2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -4502,7 +4502,7 @@ define half @fcvt_h_wu_load(ptr %p) nounwind {
 ; RV32ID-ILP32-NEXT:    lw a0, 0(a0)
 ; RV32ID-ILP32-NEXT:    fcvt.s.wu fa5, a0
 ; RV32ID-ILP32-NEXT:    fmv.x.w a0, fa5
-; RV32ID-ILP32-NEXT:    call __truncsfhf2@plt
+; RV32ID-ILP32-NEXT:    call __truncsfhf2
 ; RV32ID-ILP32-NEXT:    lui a1, 1048560
 ; RV32ID-ILP32-NEXT:    or a0, a0, a1
 ; RV32ID-ILP32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -4516,7 +4516,7 @@ define half @fcvt_h_wu_load(ptr %p) nounwind {
 ; RV64ID-LP64-NEXT:    lwu a0, 0(a0)
 ; RV64ID-LP64-NEXT:    fcvt.s.wu fa5, a0
 ; RV64ID-LP64-NEXT:    fmv.x.w a0, fa5
-; RV64ID-LP64-NEXT:    call __truncsfhf2@plt
+; RV64ID-LP64-NEXT:    call __truncsfhf2
 ; RV64ID-LP64-NEXT:    lui a1, 1048560
 ; RV64ID-LP64-NEXT:    or a0, a0, a1
 ; RV64ID-LP64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -4529,7 +4529,7 @@ define half @fcvt_h_wu_load(ptr %p) nounwind {
 ; RV32ID-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32ID-NEXT:    lw a0, 0(a0)
 ; RV32ID-NEXT:    fcvt.s.wu fa0, a0
-; RV32ID-NEXT:    call __truncsfhf2@plt
+; RV32ID-NEXT:    call __truncsfhf2
 ; RV32ID-NEXT:    fmv.x.w a0, fa0
 ; RV32ID-NEXT:    lui a1, 1048560
 ; RV32ID-NEXT:    or a0, a0, a1
@@ -4544,7 +4544,7 @@ define half @fcvt_h_wu_load(ptr %p) nounwind {
 ; RV64ID-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64ID-NEXT:    lwu a0, 0(a0)
 ; RV64ID-NEXT:    fcvt.s.wu fa0, a0
-; RV64ID-NEXT:    call __truncsfhf2@plt
+; RV64ID-NEXT:    call __truncsfhf2
 ; RV64ID-NEXT:    fmv.x.w a0, fa0
 ; RV64ID-NEXT:    lui a1, 1048560
 ; RV64ID-NEXT:    or a0, a0, a1
@@ -4604,7 +4604,7 @@ define half @fcvt_h_l(i64 %a) nounwind {
 ; RV32IZFH:       # %bb.0:
 ; RV32IZFH-NEXT:    addi sp, sp, -16
 ; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFH-NEXT:    call __floatdihf@plt
+; RV32IZFH-NEXT:    call __floatdihf
 ; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFH-NEXT:    addi sp, sp, 16
 ; RV32IZFH-NEXT:    ret
@@ -4618,7 +4618,7 @@ define half @fcvt_h_l(i64 %a) nounwind {
 ; RV32IDZFH:       # %bb.0:
 ; RV32IDZFH-NEXT:    addi sp, sp, -16
 ; RV32IDZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IDZFH-NEXT:    call __floatdihf@plt
+; RV32IDZFH-NEXT:    call __floatdihf
 ; RV32IDZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IDZFH-NEXT:    addi sp, sp, 16
 ; RV32IDZFH-NEXT:    ret
@@ -4632,7 +4632,7 @@ define half @fcvt_h_l(i64 %a) nounwind {
 ; RV32IZHINX:       # %bb.0:
 ; RV32IZHINX-NEXT:    addi sp, sp, -16
 ; RV32IZHINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZHINX-NEXT:    call __floatdihf@plt
+; RV32IZHINX-NEXT:    call __floatdihf
 ; RV32IZHINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZHINX-NEXT:    addi sp, sp, 16
 ; RV32IZHINX-NEXT:    ret
@@ -4646,7 +4646,7 @@ define half @fcvt_h_l(i64 %a) nounwind {
 ; RV32IZDINXZHINX:       # %bb.0:
 ; RV32IZDINXZHINX-NEXT:    addi sp, sp, -16
 ; RV32IZDINXZHINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZDINXZHINX-NEXT:    call __floatdihf@plt
+; RV32IZDINXZHINX-NEXT:    call __floatdihf
 ; RV32IZDINXZHINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZDINXZHINX-NEXT:    addi sp, sp, 16
 ; RV32IZDINXZHINX-NEXT:    ret
@@ -4660,8 +4660,8 @@ define half @fcvt_h_l(i64 %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __floatdisf@plt
-; RV32I-NEXT:    call __truncsfhf2@plt
+; RV32I-NEXT:    call __floatdisf
+; RV32I-NEXT:    call __truncsfhf2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -4670,8 +4670,8 @@ define half @fcvt_h_l(i64 %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __floatdisf@plt
-; RV64I-NEXT:    call __truncsfhf2@plt
+; RV64I-NEXT:    call __floatdisf
+; RV64I-NEXT:    call __truncsfhf2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -4680,8 +4680,8 @@ define half @fcvt_h_l(i64 %a) nounwind {
 ; RV32ID-ILP32:       # %bb.0:
 ; RV32ID-ILP32-NEXT:    addi sp, sp, -16
 ; RV32ID-ILP32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-ILP32-NEXT:    call __floatdisf@plt
-; RV32ID-ILP32-NEXT:    call __truncsfhf2@plt
+; RV32ID-ILP32-NEXT:    call __floatdisf
+; RV32ID-ILP32-NEXT:    call __truncsfhf2
 ; RV32ID-ILP32-NEXT:    lui a1, 1048560
 ; RV32ID-ILP32-NEXT:    or a0, a0, a1
 ; RV32ID-ILP32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -4694,7 +4694,7 @@ define half @fcvt_h_l(i64 %a) nounwind {
 ; RV64ID-LP64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64ID-LP64-NEXT:    fcvt.s.l fa5, a0
 ; RV64ID-LP64-NEXT:    fmv.x.w a0, fa5
-; RV64ID-LP64-NEXT:    call __truncsfhf2@plt
+; RV64ID-LP64-NEXT:    call __truncsfhf2
 ; RV64ID-LP64-NEXT:    lui a1, 1048560
 ; RV64ID-LP64-NEXT:    or a0, a0, a1
 ; RV64ID-LP64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -4705,8 +4705,8 @@ define half @fcvt_h_l(i64 %a) nounwind {
 ; RV32ID:       # %bb.0:
 ; RV32ID-NEXT:    addi sp, sp, -16
 ; RV32ID-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-NEXT:    call __floatdisf@plt
-; RV32ID-NEXT:    call __truncsfhf2@plt
+; RV32ID-NEXT:    call __floatdisf
+; RV32ID-NEXT:    call __truncsfhf2
 ; RV32ID-NEXT:    fmv.x.w a0, fa0
 ; RV32ID-NEXT:    lui a1, 1048560
 ; RV32ID-NEXT:    or a0, a0, a1
@@ -4720,7 +4720,7 @@ define half @fcvt_h_l(i64 %a) nounwind {
 ; RV64ID-NEXT:    addi sp, sp, -16
 ; RV64ID-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64ID-NEXT:    fcvt.s.l fa0, a0
-; RV64ID-NEXT:    call __truncsfhf2@plt
+; RV64ID-NEXT:    call __truncsfhf2
 ; RV64ID-NEXT:    fmv.x.w a0, fa0
 ; RV64ID-NEXT:    lui a1, 1048560
 ; RV64ID-NEXT:    or a0, a0, a1
@@ -4733,7 +4733,7 @@ define half @fcvt_h_l(i64 %a) nounwind {
 ; CHECK32-IZFHMIN:       # %bb.0:
 ; CHECK32-IZFHMIN-NEXT:    addi sp, sp, -16
 ; CHECK32-IZFHMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; CHECK32-IZFHMIN-NEXT:    call __floatdihf@plt
+; CHECK32-IZFHMIN-NEXT:    call __floatdihf
 ; CHECK32-IZFHMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; CHECK32-IZFHMIN-NEXT:    addi sp, sp, 16
 ; CHECK32-IZFHMIN-NEXT:    ret
@@ -4748,7 +4748,7 @@ define half @fcvt_h_l(i64 %a) nounwind {
 ; CHECK32-IZHINXMIN:       # %bb.0:
 ; CHECK32-IZHINXMIN-NEXT:    addi sp, sp, -16
 ; CHECK32-IZHINXMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; CHECK32-IZHINXMIN-NEXT:    call __floatdihf@plt
+; CHECK32-IZHINXMIN-NEXT:    call __floatdihf
 ; CHECK32-IZHINXMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; CHECK32-IZHINXMIN-NEXT:    addi sp, sp, 16
 ; CHECK32-IZHINXMIN-NEXT:    ret
@@ -4763,7 +4763,7 @@ define half @fcvt_h_l(i64 %a) nounwind {
 ; CHECK32-IZDINXZHINXMIN:       # %bb.0:
 ; CHECK32-IZDINXZHINXMIN-NEXT:    addi sp, sp, -16
 ; CHECK32-IZDINXZHINXMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; CHECK32-IZDINXZHINXMIN-NEXT:    call __floatdihf@plt
+; CHECK32-IZDINXZHINXMIN-NEXT:    call __floatdihf
 ; CHECK32-IZDINXZHINXMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; CHECK32-IZDINXZHINXMIN-NEXT:    addi sp, sp, 16
 ; CHECK32-IZDINXZHINXMIN-NEXT:    ret
@@ -4782,7 +4782,7 @@ define half @fcvt_h_lu(i64 %a) nounwind {
 ; RV32IZFH:       # %bb.0:
 ; RV32IZFH-NEXT:    addi sp, sp, -16
 ; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFH-NEXT:    call __floatundihf@plt
+; RV32IZFH-NEXT:    call __floatundihf
 ; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFH-NEXT:    addi sp, sp, 16
 ; RV32IZFH-NEXT:    ret
@@ -4796,7 +4796,7 @@ define half @fcvt_h_lu(i64 %a) nounwind {
 ; RV32IDZFH:       # %bb.0:
 ; RV32IDZFH-NEXT:    addi sp, sp, -16
 ; RV32IDZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IDZFH-NEXT:    call __floatundihf@plt
+; RV32IDZFH-NEXT:    call __floatundihf
 ; RV32IDZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IDZFH-NEXT:    addi sp, sp, 16
 ; RV32IDZFH-NEXT:    ret
@@ -4810,7 +4810,7 @@ define half @fcvt_h_lu(i64 %a) nounwind {
 ; RV32IZHINX:       # %bb.0:
 ; RV32IZHINX-NEXT:    addi sp, sp, -16
 ; RV32IZHINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZHINX-NEXT:    call __floatundihf@plt
+; RV32IZHINX-NEXT:    call __floatundihf
 ; RV32IZHINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZHINX-NEXT:    addi sp, sp, 16
 ; RV32IZHINX-NEXT:    ret
@@ -4824,7 +4824,7 @@ define half @fcvt_h_lu(i64 %a) nounwind {
 ; RV32IZDINXZHINX:       # %bb.0:
 ; RV32IZDINXZHINX-NEXT:    addi sp, sp, -16
 ; RV32IZDINXZHINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZDINXZHINX-NEXT:    call __floatundihf@plt
+; RV32IZDINXZHINX-NEXT:    call __floatundihf
 ; RV32IZDINXZHINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZDINXZHINX-NEXT:    addi sp, sp, 16
 ; RV32IZDINXZHINX-NEXT:    ret
@@ -4838,8 +4838,8 @@ define half @fcvt_h_lu(i64 %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __floatundisf@plt
-; RV32I-NEXT:    call __truncsfhf2@plt
+; RV32I-NEXT:    call __floatundisf
+; RV32I-NEXT:    call __truncsfhf2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -4848,8 +4848,8 @@ define half @fcvt_h_lu(i64 %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __floatundisf@plt
-; RV64I-NEXT:    call __truncsfhf2@plt
+; RV64I-NEXT:    call __floatundisf
+; RV64I-NEXT:    call __truncsfhf2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -4858,8 +4858,8 @@ define half @fcvt_h_lu(i64 %a) nounwind {
 ; RV32ID-ILP32:       # %bb.0:
 ; RV32ID-ILP32-NEXT:    addi sp, sp, -16
 ; RV32ID-ILP32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-ILP32-NEXT:    call __floatundisf@plt
-; RV32ID-ILP32-NEXT:    call __truncsfhf2@plt
+; RV32ID-ILP32-NEXT:    call __floatundisf
+; RV32ID-ILP32-NEXT:    call __truncsfhf2
 ; RV32ID-ILP32-NEXT:    lui a1, 1048560
 ; RV32ID-ILP32-NEXT:    or a0, a0, a1
 ; RV32ID-ILP32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -4872,7 +4872,7 @@ define half @fcvt_h_lu(i64 %a) nounwind {
 ; RV64ID-LP64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64ID-LP64-NEXT:    fcvt.s.lu fa5, a0
 ; RV64ID-LP64-NEXT:    fmv.x.w a0, fa5
-; RV64ID-LP64-NEXT:    call __truncsfhf2@plt
+; RV64ID-LP64-NEXT:    call __truncsfhf2
 ; RV64ID-LP64-NEXT:    lui a1, 1048560
 ; RV64ID-LP64-NEXT:    or a0, a0, a1
 ; RV64ID-LP64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -4883,8 +4883,8 @@ define half @fcvt_h_lu(i64 %a) nounwind {
 ; RV32ID:       # %bb.0:
 ; RV32ID-NEXT:    addi sp, sp, -16
 ; RV32ID-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-NEXT:    call __floatundisf@plt
-; RV32ID-NEXT:    call __truncsfhf2@plt
+; RV32ID-NEXT:    call __floatundisf
+; RV32ID-NEXT:    call __truncsfhf2
 ; RV32ID-NEXT:    fmv.x.w a0, fa0
 ; RV32ID-NEXT:    lui a1, 1048560
 ; RV32ID-NEXT:    or a0, a0, a1
@@ -4898,7 +4898,7 @@ define half @fcvt_h_lu(i64 %a) nounwind {
 ; RV64ID-NEXT:    addi sp, sp, -16
 ; RV64ID-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64ID-NEXT:    fcvt.s.lu fa0, a0
-; RV64ID-NEXT:    call __truncsfhf2@plt
+; RV64ID-NEXT:    call __truncsfhf2
 ; RV64ID-NEXT:    fmv.x.w a0, fa0
 ; RV64ID-NEXT:    lui a1, 1048560
 ; RV64ID-NEXT:    or a0, a0, a1
@@ -4911,7 +4911,7 @@ define half @fcvt_h_lu(i64 %a) nounwind {
 ; CHECK32-IZFHMIN:       # %bb.0:
 ; CHECK32-IZFHMIN-NEXT:    addi sp, sp, -16
 ; CHECK32-IZFHMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; CHECK32-IZFHMIN-NEXT:    call __floatundihf@plt
+; CHECK32-IZFHMIN-NEXT:    call __floatundihf
 ; CHECK32-IZFHMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; CHECK32-IZFHMIN-NEXT:    addi sp, sp, 16
 ; CHECK32-IZFHMIN-NEXT:    ret
@@ -4926,7 +4926,7 @@ define half @fcvt_h_lu(i64 %a) nounwind {
 ; CHECK32-IZHINXMIN:       # %bb.0:
 ; CHECK32-IZHINXMIN-NEXT:    addi sp, sp, -16
 ; CHECK32-IZHINXMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; CHECK32-IZHINXMIN-NEXT:    call __floatundihf@plt
+; CHECK32-IZHINXMIN-NEXT:    call __floatundihf
 ; CHECK32-IZHINXMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; CHECK32-IZHINXMIN-NEXT:    addi sp, sp, 16
 ; CHECK32-IZHINXMIN-NEXT:    ret
@@ -4941,7 +4941,7 @@ define half @fcvt_h_lu(i64 %a) nounwind {
 ; CHECK32-IZDINXZHINXMIN:       # %bb.0:
 ; CHECK32-IZDINXZHINXMIN-NEXT:    addi sp, sp, -16
 ; CHECK32-IZDINXZHINXMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; CHECK32-IZDINXZHINXMIN-NEXT:    call __floatundihf@plt
+; CHECK32-IZDINXZHINXMIN-NEXT:    call __floatundihf
 ; CHECK32-IZDINXZHINXMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; CHECK32-IZDINXZHINXMIN-NEXT:    addi sp, sp, 16
 ; CHECK32-IZDINXZHINXMIN-NEXT:    ret
@@ -4985,7 +4985,7 @@ define half @fcvt_h_s(float %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __truncsfhf2@plt
+; RV32I-NEXT:    call __truncsfhf2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -4994,7 +4994,7 @@ define half @fcvt_h_s(float %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __truncsfhf2@plt
+; RV64I-NEXT:    call __truncsfhf2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -5003,7 +5003,7 @@ define half @fcvt_h_s(float %a) nounwind {
 ; RV32ID-ILP32:       # %bb.0:
 ; RV32ID-ILP32-NEXT:    addi sp, sp, -16
 ; RV32ID-ILP32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-ILP32-NEXT:    call __truncsfhf2@plt
+; RV32ID-ILP32-NEXT:    call __truncsfhf2
 ; RV32ID-ILP32-NEXT:    lui a1, 1048560
 ; RV32ID-ILP32-NEXT:    or a0, a0, a1
 ; RV32ID-ILP32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -5014,7 +5014,7 @@ define half @fcvt_h_s(float %a) nounwind {
 ; RV64ID-LP64:       # %bb.0:
 ; RV64ID-LP64-NEXT:    addi sp, sp, -16
 ; RV64ID-LP64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-LP64-NEXT:    call __truncsfhf2@plt
+; RV64ID-LP64-NEXT:    call __truncsfhf2
 ; RV64ID-LP64-NEXT:    lui a1, 1048560
 ; RV64ID-LP64-NEXT:    or a0, a0, a1
 ; RV64ID-LP64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -5025,7 +5025,7 @@ define half @fcvt_h_s(float %a) nounwind {
 ; RV32ID:       # %bb.0:
 ; RV32ID-NEXT:    addi sp, sp, -16
 ; RV32ID-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-NEXT:    call __truncsfhf2@plt
+; RV32ID-NEXT:    call __truncsfhf2
 ; RV32ID-NEXT:    fmv.x.w a0, fa0
 ; RV32ID-NEXT:    lui a1, 1048560
 ; RV32ID-NEXT:    or a0, a0, a1
@@ -5038,7 +5038,7 @@ define half @fcvt_h_s(float %a) nounwind {
 ; RV64ID:       # %bb.0:
 ; RV64ID-NEXT:    addi sp, sp, -16
 ; RV64ID-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-NEXT:    call __truncsfhf2@plt
+; RV64ID-NEXT:    call __truncsfhf2
 ; RV64ID-NEXT:    fmv.x.w a0, fa0
 ; RV64ID-NEXT:    lui a1, 1048560
 ; RV64ID-NEXT:    or a0, a0, a1
@@ -5110,7 +5110,7 @@ define float @fcvt_s_h(half %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -5119,7 +5119,7 @@ define float @fcvt_s_h(half %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -5128,7 +5128,7 @@ define float @fcvt_s_h(half %a) nounwind {
 ; RV32ID-ILP32:       # %bb.0:
 ; RV32ID-ILP32-NEXT:    addi sp, sp, -16
 ; RV32ID-ILP32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-ILP32-NEXT:    call __extendhfsf2@plt
+; RV32ID-ILP32-NEXT:    call __extendhfsf2
 ; RV32ID-ILP32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32ID-ILP32-NEXT:    addi sp, sp, 16
 ; RV32ID-ILP32-NEXT:    ret
@@ -5137,7 +5137,7 @@ define float @fcvt_s_h(half %a) nounwind {
 ; RV64ID-LP64:       # %bb.0:
 ; RV64ID-LP64-NEXT:    addi sp, sp, -16
 ; RV64ID-LP64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-LP64-NEXT:    call __extendhfsf2@plt
+; RV64ID-LP64-NEXT:    call __extendhfsf2
 ; RV64ID-LP64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64ID-LP64-NEXT:    addi sp, sp, 16
 ; RV64ID-LP64-NEXT:    ret
@@ -5146,7 +5146,7 @@ define float @fcvt_s_h(half %a) nounwind {
 ; RV32ID:       # %bb.0:
 ; RV32ID-NEXT:    addi sp, sp, -16
 ; RV32ID-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-NEXT:    call __extendhfsf2@plt
+; RV32ID-NEXT:    call __extendhfsf2
 ; RV32ID-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32ID-NEXT:    addi sp, sp, 16
 ; RV32ID-NEXT:    ret
@@ -5155,7 +5155,7 @@ define float @fcvt_s_h(half %a) nounwind {
 ; RV64ID:       # %bb.0:
 ; RV64ID-NEXT:    addi sp, sp, -16
 ; RV64ID-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-NEXT:    call __extendhfsf2@plt
+; RV64ID-NEXT:    call __extendhfsf2
 ; RV64ID-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64ID-NEXT:    addi sp, sp, 16
 ; RV64ID-NEXT:    ret
@@ -5198,7 +5198,7 @@ define half @fcvt_h_d(double %a) nounwind {
 ; RV32IZFH:       # %bb.0:
 ; RV32IZFH-NEXT:    addi sp, sp, -16
 ; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFH-NEXT:    call __truncdfhf2@plt
+; RV32IZFH-NEXT:    call __truncdfhf2
 ; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFH-NEXT:    addi sp, sp, 16
 ; RV32IZFH-NEXT:    ret
@@ -5207,7 +5207,7 @@ define half @fcvt_h_d(double %a) nounwind {
 ; RV64IZFH:       # %bb.0:
 ; RV64IZFH-NEXT:    addi sp, sp, -16
 ; RV64IZFH-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFH-NEXT:    call __truncdfhf2@plt
+; RV64IZFH-NEXT:    call __truncdfhf2
 ; RV64IZFH-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFH-NEXT:    addi sp, sp, 16
 ; RV64IZFH-NEXT:    ret
@@ -5226,7 +5226,7 @@ define half @fcvt_h_d(double %a) nounwind {
 ; RV32IZHINX:       # %bb.0:
 ; RV32IZHINX-NEXT:    addi sp, sp, -16
 ; RV32IZHINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZHINX-NEXT:    call __truncdfhf2@plt
+; RV32IZHINX-NEXT:    call __truncdfhf2
 ; RV32IZHINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZHINX-NEXT:    addi sp, sp, 16
 ; RV32IZHINX-NEXT:    ret
@@ -5235,7 +5235,7 @@ define half @fcvt_h_d(double %a) nounwind {
 ; RV64IZHINX:       # %bb.0:
 ; RV64IZHINX-NEXT:    addi sp, sp, -16
 ; RV64IZHINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZHINX-NEXT:    call __truncdfhf2@plt
+; RV64IZHINX-NEXT:    call __truncdfhf2
 ; RV64IZHINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZHINX-NEXT:    addi sp, sp, 16
 ; RV64IZHINX-NEXT:    ret
@@ -5260,7 +5260,7 @@ define half @fcvt_h_d(double %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __truncdfhf2@plt
+; RV32I-NEXT:    call __truncdfhf2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -5269,7 +5269,7 @@ define half @fcvt_h_d(double %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __truncdfhf2@plt
+; RV64I-NEXT:    call __truncdfhf2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -5278,7 +5278,7 @@ define half @fcvt_h_d(double %a) nounwind {
 ; RV32ID-ILP32:       # %bb.0:
 ; RV32ID-ILP32-NEXT:    addi sp, sp, -16
 ; RV32ID-ILP32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-ILP32-NEXT:    call __truncdfhf2@plt
+; RV32ID-ILP32-NEXT:    call __truncdfhf2
 ; RV32ID-ILP32-NEXT:    lui a1, 1048560
 ; RV32ID-ILP32-NEXT:    or a0, a0, a1
 ; RV32ID-ILP32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -5289,7 +5289,7 @@ define half @fcvt_h_d(double %a) nounwind {
 ; RV64ID-LP64:       # %bb.0:
 ; RV64ID-LP64-NEXT:    addi sp, sp, -16
 ; RV64ID-LP64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-LP64-NEXT:    call __truncdfhf2@plt
+; RV64ID-LP64-NEXT:    call __truncdfhf2
 ; RV64ID-LP64-NEXT:    lui a1, 1048560
 ; RV64ID-LP64-NEXT:    or a0, a0, a1
 ; RV64ID-LP64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -5300,7 +5300,7 @@ define half @fcvt_h_d(double %a) nounwind {
 ; RV32ID:       # %bb.0:
 ; RV32ID-NEXT:    addi sp, sp, -16
 ; RV32ID-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-NEXT:    call __truncdfhf2@plt
+; RV32ID-NEXT:    call __truncdfhf2
 ; RV32ID-NEXT:    fmv.x.w a0, fa0
 ; RV32ID-NEXT:    lui a1, 1048560
 ; RV32ID-NEXT:    or a0, a0, a1
@@ -5313,7 +5313,7 @@ define half @fcvt_h_d(double %a) nounwind {
 ; RV64ID:       # %bb.0:
 ; RV64ID-NEXT:    addi sp, sp, -16
 ; RV64ID-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-NEXT:    call __truncdfhf2@plt
+; RV64ID-NEXT:    call __truncdfhf2
 ; RV64ID-NEXT:    fmv.x.w a0, fa0
 ; RV64ID-NEXT:    lui a1, 1048560
 ; RV64ID-NEXT:    or a0, a0, a1
@@ -5326,7 +5326,7 @@ define half @fcvt_h_d(double %a) nounwind {
 ; RV32IFZFHMIN:       # %bb.0:
 ; RV32IFZFHMIN-NEXT:    addi sp, sp, -16
 ; RV32IFZFHMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFZFHMIN-NEXT:    call __truncdfhf2@plt
+; RV32IFZFHMIN-NEXT:    call __truncdfhf2
 ; RV32IFZFHMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFZFHMIN-NEXT:    addi sp, sp, 16
 ; RV32IFZFHMIN-NEXT:    ret
@@ -5335,7 +5335,7 @@ define half @fcvt_h_d(double %a) nounwind {
 ; RV64IFZFHMIN:       # %bb.0:
 ; RV64IFZFHMIN-NEXT:    addi sp, sp, -16
 ; RV64IFZFHMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFZFHMIN-NEXT:    call __truncdfhf2@plt
+; RV64IFZFHMIN-NEXT:    call __truncdfhf2
 ; RV64IFZFHMIN-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IFZFHMIN-NEXT:    addi sp, sp, 16
 ; RV64IFZFHMIN-NEXT:    ret
@@ -5354,7 +5354,7 @@ define half @fcvt_h_d(double %a) nounwind {
 ; CHECK32-IZHINXMIN:       # %bb.0:
 ; CHECK32-IZHINXMIN-NEXT:    addi sp, sp, -16
 ; CHECK32-IZHINXMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; CHECK32-IZHINXMIN-NEXT:    call __truncdfhf2@plt
+; CHECK32-IZHINXMIN-NEXT:    call __truncdfhf2
 ; CHECK32-IZHINXMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; CHECK32-IZHINXMIN-NEXT:    addi sp, sp, 16
 ; CHECK32-IZHINXMIN-NEXT:    ret
@@ -5363,7 +5363,7 @@ define half @fcvt_h_d(double %a) nounwind {
 ; CHECK64-IZHINXMIN:       # %bb.0:
 ; CHECK64-IZHINXMIN-NEXT:    addi sp, sp, -16
 ; CHECK64-IZHINXMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; CHECK64-IZHINXMIN-NEXT:    call __truncdfhf2@plt
+; CHECK64-IZHINXMIN-NEXT:    call __truncdfhf2
 ; CHECK64-IZHINXMIN-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; CHECK64-IZHINXMIN-NEXT:    addi sp, sp, 16
 ; CHECK64-IZHINXMIN-NEXT:    ret
@@ -5393,7 +5393,7 @@ define double @fcvt_d_h(half %a) nounwind {
 ; RV32IZFH-NEXT:    addi sp, sp, -16
 ; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFH-NEXT:    fcvt.s.h fa0, fa0
-; RV32IZFH-NEXT:    call __extendsfdf2@plt
+; RV32IZFH-NEXT:    call __extendsfdf2
 ; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFH-NEXT:    addi sp, sp, 16
 ; RV32IZFH-NEXT:    ret
@@ -5403,7 +5403,7 @@ define double @fcvt_d_h(half %a) nounwind {
 ; RV64IZFH-NEXT:    addi sp, sp, -16
 ; RV64IZFH-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZFH-NEXT:    fcvt.s.h fa0, fa0
-; RV64IZFH-NEXT:    call __extendsfdf2@plt
+; RV64IZFH-NEXT:    call __extendsfdf2
 ; RV64IZFH-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFH-NEXT:    addi sp, sp, 16
 ; RV64IZFH-NEXT:    ret
@@ -5423,7 +5423,7 @@ define double @fcvt_d_h(half %a) nounwind {
 ; RV32IZHINX-NEXT:    addi sp, sp, -16
 ; RV32IZHINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZHINX-NEXT:    fcvt.s.h a0, a0
-; RV32IZHINX-NEXT:    call __extendsfdf2@plt
+; RV32IZHINX-NEXT:    call __extendsfdf2
 ; RV32IZHINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZHINX-NEXT:    addi sp, sp, 16
 ; RV32IZHINX-NEXT:    ret
@@ -5433,7 +5433,7 @@ define double @fcvt_d_h(half %a) nounwind {
 ; RV64IZHINX-NEXT:    addi sp, sp, -16
 ; RV64IZHINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZHINX-NEXT:    fcvt.s.h a0, a0
-; RV64IZHINX-NEXT:    call __extendsfdf2@plt
+; RV64IZHINX-NEXT:    call __extendsfdf2
 ; RV64IZHINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZHINX-NEXT:    addi sp, sp, 16
 ; RV64IZHINX-NEXT:    ret
@@ -5458,8 +5458,8 @@ define double @fcvt_d_h(half %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __extendhfsf2@plt
-; RV32I-NEXT:    call __extendsfdf2@plt
+; RV32I-NEXT:    call __extendhfsf2
+; RV32I-NEXT:    call __extendsfdf2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -5468,8 +5468,8 @@ define double @fcvt_d_h(half %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __extendhfsf2@plt
-; RV64I-NEXT:    call __extendsfdf2@plt
+; RV64I-NEXT:    call __extendhfsf2
+; RV64I-NEXT:    call __extendsfdf2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -5478,7 +5478,7 @@ define double @fcvt_d_h(half %a) nounwind {
 ; RV32ID-ILP32:       # %bb.0:
 ; RV32ID-ILP32-NEXT:    addi sp, sp, -16
 ; RV32ID-ILP32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-ILP32-NEXT:    call __extendhfsf2@plt
+; RV32ID-ILP32-NEXT:    call __extendhfsf2
 ; RV32ID-ILP32-NEXT:    fmv.w.x fa5, a0
 ; RV32ID-ILP32-NEXT:    fcvt.d.s fa5, fa5
 ; RV32ID-ILP32-NEXT:    fsd fa5, 0(sp)
@@ -5492,7 +5492,7 @@ define double @fcvt_d_h(half %a) nounwind {
 ; RV64ID-LP64:       # %bb.0:
 ; RV64ID-LP64-NEXT:    addi sp, sp, -16
 ; RV64ID-LP64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-LP64-NEXT:    call __extendhfsf2@plt
+; RV64ID-LP64-NEXT:    call __extendhfsf2
 ; RV64ID-LP64-NEXT:    fmv.w.x fa5, a0
 ; RV64ID-LP64-NEXT:    fcvt.d.s fa5, fa5
 ; RV64ID-LP64-NEXT:    fmv.x.d a0, fa5
@@ -5504,7 +5504,7 @@ define double @fcvt_d_h(half %a) nounwind {
 ; RV32ID:       # %bb.0:
 ; RV32ID-NEXT:    addi sp, sp, -16
 ; RV32ID-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-NEXT:    call __extendhfsf2@plt
+; RV32ID-NEXT:    call __extendhfsf2
 ; RV32ID-NEXT:    fcvt.d.s fa0, fa0
 ; RV32ID-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32ID-NEXT:    addi sp, sp, 16
@@ -5514,7 +5514,7 @@ define double @fcvt_d_h(half %a) nounwind {
 ; RV64ID:       # %bb.0:
 ; RV64ID-NEXT:    addi sp, sp, -16
 ; RV64ID-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-NEXT:    call __extendhfsf2@plt
+; RV64ID-NEXT:    call __extendhfsf2
 ; RV64ID-NEXT:    fcvt.d.s fa0, fa0
 ; RV64ID-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64ID-NEXT:    addi sp, sp, 16
@@ -5525,7 +5525,7 @@ define double @fcvt_d_h(half %a) nounwind {
 ; RV32IFZFHMIN-NEXT:    addi sp, sp, -16
 ; RV32IFZFHMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IFZFHMIN-NEXT:    fcvt.s.h fa0, fa0
-; RV32IFZFHMIN-NEXT:    call __extendsfdf2@plt
+; RV32IFZFHMIN-NEXT:    call __extendsfdf2
 ; RV32IFZFHMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFZFHMIN-NEXT:    addi sp, sp, 16
 ; RV32IFZFHMIN-NEXT:    ret
@@ -5535,7 +5535,7 @@ define double @fcvt_d_h(half %a) nounwind {
 ; RV64IFZFHMIN-NEXT:    addi sp, sp, -16
 ; RV64IFZFHMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IFZFHMIN-NEXT:    fcvt.s.h fa0, fa0
-; RV64IFZFHMIN-NEXT:    call __extendsfdf2@plt
+; RV64IFZFHMIN-NEXT:    call __extendsfdf2
 ; RV64IFZFHMIN-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IFZFHMIN-NEXT:    addi sp, sp, 16
 ; RV64IFZFHMIN-NEXT:    ret
@@ -5555,7 +5555,7 @@ define double @fcvt_d_h(half %a) nounwind {
 ; CHECK32-IZHINXMIN-NEXT:    addi sp, sp, -16
 ; CHECK32-IZHINXMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; CHECK32-IZHINXMIN-NEXT:    fcvt.s.h a0, a0
-; CHECK32-IZHINXMIN-NEXT:    call __extendsfdf2@plt
+; CHECK32-IZHINXMIN-NEXT:    call __extendsfdf2
 ; CHECK32-IZHINXMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; CHECK32-IZHINXMIN-NEXT:    addi sp, sp, 16
 ; CHECK32-IZHINXMIN-NEXT:    ret
@@ -5565,7 +5565,7 @@ define double @fcvt_d_h(half %a) nounwind {
 ; CHECK64-IZHINXMIN-NEXT:    addi sp, sp, -16
 ; CHECK64-IZHINXMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; CHECK64-IZHINXMIN-NEXT:    fcvt.s.h a0, a0
-; CHECK64-IZHINXMIN-NEXT:    call __extendsfdf2@plt
+; CHECK64-IZHINXMIN-NEXT:    call __extendsfdf2
 ; CHECK64-IZHINXMIN-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; CHECK64-IZHINXMIN-NEXT:    addi sp, sp, 16
 ; CHECK64-IZHINXMIN-NEXT:    ret
@@ -5826,8 +5826,8 @@ define signext i32 @fcvt_h_w_demanded_bits(i32 signext %0, ptr %1) nounwind {
 ; RV32I-NEXT:    mv s0, a1
 ; RV32I-NEXT:    addi s1, a0, 1
 ; RV32I-NEXT:    mv a0, s1
-; RV32I-NEXT:    call __floatsisf@plt
-; RV32I-NEXT:    call __truncsfhf2@plt
+; RV32I-NEXT:    call __floatsisf
+; RV32I-NEXT:    call __truncsfhf2
 ; RV32I-NEXT:    sh a0, 0(s0)
 ; RV32I-NEXT:    mv a0, s1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -5845,8 +5845,8 @@ define signext i32 @fcvt_h_w_demanded_bits(i32 signext %0, ptr %1) nounwind {
 ; RV64I-NEXT:    mv s0, a1
 ; RV64I-NEXT:    addiw s1, a0, 1
 ; RV64I-NEXT:    mv a0, s1
-; RV64I-NEXT:    call __floatsisf@plt
-; RV64I-NEXT:    call __truncsfhf2@plt
+; RV64I-NEXT:    call __floatsisf
+; RV64I-NEXT:    call __truncsfhf2
 ; RV64I-NEXT:    sh a0, 0(s0)
 ; RV64I-NEXT:    mv a0, s1
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
@@ -5865,7 +5865,7 @@ define signext i32 @fcvt_h_w_demanded_bits(i32 signext %0, ptr %1) nounwind {
 ; RV32ID-ILP32-NEXT:    addi s1, a0, 1
 ; RV32ID-ILP32-NEXT:    fcvt.s.w fa5, s1
 ; RV32ID-ILP32-NEXT:    fmv.x.w a0, fa5
-; RV32ID-ILP32-NEXT:    call __truncsfhf2@plt
+; RV32ID-ILP32-NEXT:    call __truncsfhf2
 ; RV32ID-ILP32-NEXT:    sh a0, 0(s0)
 ; RV32ID-ILP32-NEXT:    mv a0, s1
 ; RV32ID-ILP32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -5884,7 +5884,7 @@ define signext i32 @fcvt_h_w_demanded_bits(i32 signext %0, ptr %1) nounwind {
 ; RV64ID-LP64-NEXT:    addiw s1, a0, 1
 ; RV64ID-LP64-NEXT:    fcvt.s.w fa5, s1
 ; RV64ID-LP64-NEXT:    fmv.x.w a0, fa5
-; RV64ID-LP64-NEXT:    call __truncsfhf2@plt
+; RV64ID-LP64-NEXT:    call __truncsfhf2
 ; RV64ID-LP64-NEXT:    sh a0, 0(s0)
 ; RV64ID-LP64-NEXT:    mv a0, s1
 ; RV64ID-LP64-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
@@ -5902,7 +5902,7 @@ define signext i32 @fcvt_h_w_demanded_bits(i32 signext %0, ptr %1) nounwind {
 ; RV32ID-NEXT:    mv s0, a1
 ; RV32ID-NEXT:    addi s1, a0, 1
 ; RV32ID-NEXT:    fcvt.s.w fa0, s1
-; RV32ID-NEXT:    call __truncsfhf2@plt
+; RV32ID-NEXT:    call __truncsfhf2
 ; RV32ID-NEXT:    fmv.x.w a0, fa0
 ; RV32ID-NEXT:    sh a0, 0(s0)
 ; RV32ID-NEXT:    mv a0, s1
@@ -5921,7 +5921,7 @@ define signext i32 @fcvt_h_w_demanded_bits(i32 signext %0, ptr %1) nounwind {
 ; RV64ID-NEXT:    mv s0, a1
 ; RV64ID-NEXT:    addiw s1, a0, 1
 ; RV64ID-NEXT:    fcvt.s.w fa0, s1
-; RV64ID-NEXT:    call __truncsfhf2@plt
+; RV64ID-NEXT:    call __truncsfhf2
 ; RV64ID-NEXT:    fmv.x.w a0, fa0
 ; RV64ID-NEXT:    sh a0, 0(s0)
 ; RV64ID-NEXT:    mv a0, s1
@@ -6051,8 +6051,8 @@ define signext i32 @fcvt_h_wu_demanded_bits(i32 signext %0, ptr %1) nounwind {
 ; RV32I-NEXT:    mv s0, a1
 ; RV32I-NEXT:    addi s1, a0, 1
 ; RV32I-NEXT:    mv a0, s1
-; RV32I-NEXT:    call __floatunsisf@plt
-; RV32I-NEXT:    call __truncsfhf2@plt
+; RV32I-NEXT:    call __floatunsisf
+; RV32I-NEXT:    call __truncsfhf2
 ; RV32I-NEXT:    sh a0, 0(s0)
 ; RV32I-NEXT:    mv a0, s1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -6070,8 +6070,8 @@ define signext i32 @fcvt_h_wu_demanded_bits(i32 signext %0, ptr %1) nounwind {
 ; RV64I-NEXT:    mv s0, a1
 ; RV64I-NEXT:    addiw s1, a0, 1
 ; RV64I-NEXT:    mv a0, s1
-; RV64I-NEXT:    call __floatunsisf@plt
-; RV64I-NEXT:    call __truncsfhf2@plt
+; RV64I-NEXT:    call __floatunsisf
+; RV64I-NEXT:    call __truncsfhf2
 ; RV64I-NEXT:    sh a0, 0(s0)
 ; RV64I-NEXT:    mv a0, s1
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
@@ -6090,7 +6090,7 @@ define signext i32 @fcvt_h_wu_demanded_bits(i32 signext %0, ptr %1) nounwind {
 ; RV32ID-ILP32-NEXT:    addi s1, a0, 1
 ; RV32ID-ILP32-NEXT:    fcvt.s.wu fa5, s1
 ; RV32ID-ILP32-NEXT:    fmv.x.w a0, fa5
-; RV32ID-ILP32-NEXT:    call __truncsfhf2@plt
+; RV32ID-ILP32-NEXT:    call __truncsfhf2
 ; RV32ID-ILP32-NEXT:    sh a0, 0(s0)
 ; RV32ID-ILP32-NEXT:    mv a0, s1
 ; RV32ID-ILP32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -6109,7 +6109,7 @@ define signext i32 @fcvt_h_wu_demanded_bits(i32 signext %0, ptr %1) nounwind {
 ; RV64ID-LP64-NEXT:    addiw s1, a0, 1
 ; RV64ID-LP64-NEXT:    fcvt.s.wu fa5, s1
 ; RV64ID-LP64-NEXT:    fmv.x.w a0, fa5
-; RV64ID-LP64-NEXT:    call __truncsfhf2@plt
+; RV64ID-LP64-NEXT:    call __truncsfhf2
 ; RV64ID-LP64-NEXT:    sh a0, 0(s0)
 ; RV64ID-LP64-NEXT:    mv a0, s1
 ; RV64ID-LP64-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
@@ -6127,7 +6127,7 @@ define signext i32 @fcvt_h_wu_demanded_bits(i32 signext %0, ptr %1) nounwind {
 ; RV32ID-NEXT:    mv s0, a1
 ; RV32ID-NEXT:    addi s1, a0, 1
 ; RV32ID-NEXT:    fcvt.s.wu fa0, s1
-; RV32ID-NEXT:    call __truncsfhf2@plt
+; RV32ID-NEXT:    call __truncsfhf2
 ; RV32ID-NEXT:    fmv.x.w a0, fa0
 ; RV32ID-NEXT:    sh a0, 0(s0)
 ; RV32ID-NEXT:    mv a0, s1
@@ -6146,7 +6146,7 @@ define signext i32 @fcvt_h_wu_demanded_bits(i32 signext %0, ptr %1) nounwind {
 ; RV64ID-NEXT:    mv s0, a1
 ; RV64ID-NEXT:    addiw s1, a0, 1
 ; RV64ID-NEXT:    fcvt.s.wu fa0, s1
-; RV64ID-NEXT:    call __truncsfhf2@plt
+; RV64ID-NEXT:    call __truncsfhf2
 ; RV64ID-NEXT:    fmv.x.w a0, fa0
 ; RV64ID-NEXT:    sh a0, 0(s0)
 ; RV64ID-NEXT:    mv a0, s1
@@ -6262,8 +6262,8 @@ define signext i16 @fcvt_w_s_i16(half %a) nounwind {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    slli a0, a0, 16
 ; RV32I-NEXT:    srli a0, a0, 16
-; RV32I-NEXT:    call __extendhfsf2@plt
-; RV32I-NEXT:    call __fixsfsi@plt
+; RV32I-NEXT:    call __extendhfsf2
+; RV32I-NEXT:    call __fixsfsi
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -6274,8 +6274,8 @@ define signext i16 @fcvt_w_s_i16(half %a) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    slli a0, a0, 48
 ; RV64I-NEXT:    srli a0, a0, 48
-; RV64I-NEXT:    call __extendhfsf2@plt
-; RV64I-NEXT:    call __fixsfdi@plt
+; RV64I-NEXT:    call __extendhfsf2
+; RV64I-NEXT:    call __fixsfdi
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -6284,7 +6284,7 @@ define signext i16 @fcvt_w_s_i16(half %a) nounwind {
 ; RV32ID-ILP32:       # %bb.0:
 ; RV32ID-ILP32-NEXT:    addi sp, sp, -16
 ; RV32ID-ILP32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-ILP32-NEXT:    call __extendhfsf2@plt
+; RV32ID-ILP32-NEXT:    call __extendhfsf2
 ; RV32ID-ILP32-NEXT:    fmv.w.x fa5, a0
 ; RV32ID-ILP32-NEXT:    fcvt.w.s a0, fa5, rtz
 ; RV32ID-ILP32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -6295,7 +6295,7 @@ define signext i16 @fcvt_w_s_i16(half %a) nounwind {
 ; RV64ID-LP64:       # %bb.0:
 ; RV64ID-LP64-NEXT:    addi sp, sp, -16
 ; RV64ID-LP64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-LP64-NEXT:    call __extendhfsf2@plt
+; RV64ID-LP64-NEXT:    call __extendhfsf2
 ; RV64ID-LP64-NEXT:    fmv.w.x fa5, a0
 ; RV64ID-LP64-NEXT:    fcvt.l.s a0, fa5, rtz
 ; RV64ID-LP64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -6306,7 +6306,7 @@ define signext i16 @fcvt_w_s_i16(half %a) nounwind {
 ; RV32ID:       # %bb.0:
 ; RV32ID-NEXT:    addi sp, sp, -16
 ; RV32ID-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-NEXT:    call __extendhfsf2@plt
+; RV32ID-NEXT:    call __extendhfsf2
 ; RV32ID-NEXT:    fcvt.w.s a0, fa0, rtz
 ; RV32ID-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32ID-NEXT:    addi sp, sp, 16
@@ -6316,7 +6316,7 @@ define signext i16 @fcvt_w_s_i16(half %a) nounwind {
 ; RV64ID:       # %bb.0:
 ; RV64ID-NEXT:    addi sp, sp, -16
 ; RV64ID-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-NEXT:    call __extendhfsf2@plt
+; RV64ID-NEXT:    call __extendhfsf2
 ; RV64ID-NEXT:    fcvt.l.s a0, fa0, rtz
 ; RV64ID-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64ID-NEXT:    addi sp, sp, 16
@@ -6487,13 +6487,13 @@ define signext i16 @fcvt_w_s_sat_i16(half %a) nounwind {
 ; RV32I-NEXT:    sw s2, 0(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    slli a0, a0, 16
 ; RV32I-NEXT:    srli a0, a0, 16
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lui a1, 815104
-; RV32I-NEXT:    call __gesf2@plt
+; RV32I-NEXT:    call __gesf2
 ; RV32I-NEXT:    mv s2, a0
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __fixsfsi@plt
+; RV32I-NEXT:    call __fixsfsi
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    bgez s2, .LBB32_2
 ; RV32I-NEXT:  # %bb.1: # %start
@@ -6502,7 +6502,7 @@ define signext i16 @fcvt_w_s_sat_i16(half %a) nounwind {
 ; RV32I-NEXT:    lui a0, 290816
 ; RV32I-NEXT:    addi a1, a0, -512
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __gtsf2@plt
+; RV32I-NEXT:    call __gtsf2
 ; RV32I-NEXT:    blez a0, .LBB32_4
 ; RV32I-NEXT:  # %bb.3: # %start
 ; RV32I-NEXT:    lui s1, 8
@@ -6510,7 +6510,7 @@ define signext i16 @fcvt_w_s_sat_i16(half %a) nounwind {
 ; RV32I-NEXT:  .LBB32_4: # %start
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    mv a1, s0
-; RV32I-NEXT:    call __unordsf2@plt
+; RV32I-NEXT:    call __unordsf2
 ; RV32I-NEXT:    snez a0, a0
 ; RV32I-NEXT:    addi a0, a0, -1
 ; RV32I-NEXT:    and a0, a0, s1
@@ -6532,13 +6532,13 @@ define signext i16 @fcvt_w_s_sat_i16(half %a) nounwind {
 ; RV64I-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    slli a0, a0, 48
 ; RV64I-NEXT:    srli a0, a0, 48
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    lui a1, 815104
-; RV64I-NEXT:    call __gesf2@plt
+; RV64I-NEXT:    call __gesf2
 ; RV64I-NEXT:    mv s2, a0
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __fixsfdi@plt
+; RV64I-NEXT:    call __fixsfdi
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    bgez s2, .LBB32_2
 ; RV64I-NEXT:  # %bb.1: # %start
@@ -6547,7 +6547,7 @@ define signext i16 @fcvt_w_s_sat_i16(half %a) nounwind {
 ; RV64I-NEXT:    lui a0, 290816
 ; RV64I-NEXT:    addiw a1, a0, -512
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __gtsf2@plt
+; RV64I-NEXT:    call __gtsf2
 ; RV64I-NEXT:    blez a0, .LBB32_4
 ; RV64I-NEXT:  # %bb.3: # %start
 ; RV64I-NEXT:    lui s1, 8
@@ -6555,7 +6555,7 @@ define signext i16 @fcvt_w_s_sat_i16(half %a) nounwind {
 ; RV64I-NEXT:  .LBB32_4: # %start
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    mv a1, s0
-; RV64I-NEXT:    call __unordsf2@plt
+; RV64I-NEXT:    call __unordsf2
 ; RV64I-NEXT:    snez a0, a0
 ; RV64I-NEXT:    addi a0, a0, -1
 ; RV64I-NEXT:    and a0, a0, s1
@@ -6572,7 +6572,7 @@ define signext i16 @fcvt_w_s_sat_i16(half %a) nounwind {
 ; RV32ID-ILP32:       # %bb.0: # %start
 ; RV32ID-ILP32-NEXT:    addi sp, sp, -16
 ; RV32ID-ILP32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-ILP32-NEXT:    call __extendhfsf2@plt
+; RV32ID-ILP32-NEXT:    call __extendhfsf2
 ; RV32ID-ILP32-NEXT:    fmv.w.x fa5, a0
 ; RV32ID-ILP32-NEXT:    feq.s a0, fa5, fa5
 ; RV32ID-ILP32-NEXT:    neg a0, a0
@@ -6592,7 +6592,7 @@ define signext i16 @fcvt_w_s_sat_i16(half %a) nounwind {
 ; RV64ID-LP64:       # %bb.0: # %start
 ; RV64ID-LP64-NEXT:    addi sp, sp, -16
 ; RV64ID-LP64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-LP64-NEXT:    call __extendhfsf2@plt
+; RV64ID-LP64-NEXT:    call __extendhfsf2
 ; RV64ID-LP64-NEXT:    fmv.w.x fa5, a0
 ; RV64ID-LP64-NEXT:    feq.s a0, fa5, fa5
 ; RV64ID-LP64-NEXT:    lui a1, %hi(.LCPI32_0)
@@ -6612,7 +6612,7 @@ define signext i16 @fcvt_w_s_sat_i16(half %a) nounwind {
 ; RV32ID:       # %bb.0: # %start
 ; RV32ID-NEXT:    addi sp, sp, -16
 ; RV32ID-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-NEXT:    call __extendhfsf2@plt
+; RV32ID-NEXT:    call __extendhfsf2
 ; RV32ID-NEXT:    feq.s a0, fa0, fa0
 ; RV32ID-NEXT:    neg a0, a0
 ; RV32ID-NEXT:    lui a1, %hi(.LCPI32_0)
@@ -6631,7 +6631,7 @@ define signext i16 @fcvt_w_s_sat_i16(half %a) nounwind {
 ; RV64ID:       # %bb.0: # %start
 ; RV64ID-NEXT:    addi sp, sp, -16
 ; RV64ID-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-NEXT:    call __extendhfsf2@plt
+; RV64ID-NEXT:    call __extendhfsf2
 ; RV64ID-NEXT:    feq.s a0, fa0, fa0
 ; RV64ID-NEXT:    lui a1, %hi(.LCPI32_0)
 ; RV64ID-NEXT:    flw fa5, %lo(.LCPI32_0)(a1)
@@ -6783,8 +6783,8 @@ define zeroext i16 @fcvt_wu_s_i16(half %a) nounwind {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    slli a0, a0, 16
 ; RV32I-NEXT:    srli a0, a0, 16
-; RV32I-NEXT:    call __extendhfsf2@plt
-; RV32I-NEXT:    call __fixunssfsi@plt
+; RV32I-NEXT:    call __extendhfsf2
+; RV32I-NEXT:    call __fixunssfsi
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -6795,8 +6795,8 @@ define zeroext i16 @fcvt_wu_s_i16(half %a) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    slli a0, a0, 48
 ; RV64I-NEXT:    srli a0, a0, 48
-; RV64I-NEXT:    call __extendhfsf2@plt
-; RV64I-NEXT:    call __fixunssfdi@plt
+; RV64I-NEXT:    call __extendhfsf2
+; RV64I-NEXT:    call __fixunssfdi
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -6805,7 +6805,7 @@ define zeroext i16 @fcvt_wu_s_i16(half %a) nounwind {
 ; RV32ID-ILP32:       # %bb.0:
 ; RV32ID-ILP32-NEXT:    addi sp, sp, -16
 ; RV32ID-ILP32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-ILP32-NEXT:    call __extendhfsf2@plt
+; RV32ID-ILP32-NEXT:    call __extendhfsf2
 ; RV32ID-ILP32-NEXT:    fmv.w.x fa5, a0
 ; RV32ID-ILP32-NEXT:    fcvt.wu.s a0, fa5, rtz
 ; RV32ID-ILP32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -6816,7 +6816,7 @@ define zeroext i16 @fcvt_wu_s_i16(half %a) nounwind {
 ; RV64ID-LP64:       # %bb.0:
 ; RV64ID-LP64-NEXT:    addi sp, sp, -16
 ; RV64ID-LP64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-LP64-NEXT:    call __extendhfsf2@plt
+; RV64ID-LP64-NEXT:    call __extendhfsf2
 ; RV64ID-LP64-NEXT:    fmv.w.x fa5, a0
 ; RV64ID-LP64-NEXT:    fcvt.lu.s a0, fa5, rtz
 ; RV64ID-LP64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -6827,7 +6827,7 @@ define zeroext i16 @fcvt_wu_s_i16(half %a) nounwind {
 ; RV32ID:       # %bb.0:
 ; RV32ID-NEXT:    addi sp, sp, -16
 ; RV32ID-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-NEXT:    call __extendhfsf2@plt
+; RV32ID-NEXT:    call __extendhfsf2
 ; RV32ID-NEXT:    fcvt.wu.s a0, fa0, rtz
 ; RV32ID-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32ID-NEXT:    addi sp, sp, 16
@@ -6837,7 +6837,7 @@ define zeroext i16 @fcvt_wu_s_i16(half %a) nounwind {
 ; RV64ID:       # %bb.0:
 ; RV64ID-NEXT:    addi sp, sp, -16
 ; RV64ID-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-NEXT:    call __extendhfsf2@plt
+; RV64ID-NEXT:    call __extendhfsf2
 ; RV64ID-NEXT:    fcvt.lu.s a0, fa0, rtz
 ; RV64ID-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64ID-NEXT:    addi sp, sp, 16
@@ -6978,18 +6978,18 @@ define zeroext i16 @fcvt_wu_s_sat_i16(half %a) nounwind {
 ; RV32I-NEXT:    lui s3, 16
 ; RV32I-NEXT:    addi s3, s3, -1
 ; RV32I-NEXT:    and a0, a0, s3
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    mv s2, a0
-; RV32I-NEXT:    call __fixunssfsi@plt
+; RV32I-NEXT:    call __fixunssfsi
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    mv a0, s2
 ; RV32I-NEXT:    li a1, 0
-; RV32I-NEXT:    call __gesf2@plt
+; RV32I-NEXT:    call __gesf2
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    lui a0, 292864
 ; RV32I-NEXT:    addi a1, a0, -256
 ; RV32I-NEXT:    mv a0, s2
-; RV32I-NEXT:    call __gtsf2@plt
+; RV32I-NEXT:    call __gtsf2
 ; RV32I-NEXT:    blez a0, .LBB34_2
 ; RV32I-NEXT:  # %bb.1: # %start
 ; RV32I-NEXT:    mv a0, s3
@@ -7019,18 +7019,18 @@ define zeroext i16 @fcvt_wu_s_sat_i16(half %a) nounwind {
 ; RV64I-NEXT:    lui s3, 16
 ; RV64I-NEXT:    addiw s3, s3, -1
 ; RV64I-NEXT:    and a0, a0, s3
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    mv s2, a0
-; RV64I-NEXT:    call __fixunssfdi@plt
+; RV64I-NEXT:    call __fixunssfdi
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    mv a0, s2
 ; RV64I-NEXT:    li a1, 0
-; RV64I-NEXT:    call __gesf2@plt
+; RV64I-NEXT:    call __gesf2
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    lui a0, 292864
 ; RV64I-NEXT:    addiw a1, a0, -256
 ; RV64I-NEXT:    mv a0, s2
-; RV64I-NEXT:    call __gtsf2@plt
+; RV64I-NEXT:    call __gtsf2
 ; RV64I-NEXT:    blez a0, .LBB34_2
 ; RV64I-NEXT:  # %bb.1: # %start
 ; RV64I-NEXT:    mv a0, s3
@@ -7053,7 +7053,7 @@ define zeroext i16 @fcvt_wu_s_sat_i16(half %a) nounwind {
 ; RV32ID-ILP32:       # %bb.0: # %start
 ; RV32ID-ILP32-NEXT:    addi sp, sp, -16
 ; RV32ID-ILP32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-ILP32-NEXT:    call __extendhfsf2@plt
+; RV32ID-ILP32-NEXT:    call __extendhfsf2
 ; RV32ID-ILP32-NEXT:    lui a1, %hi(.LCPI34_0)
 ; RV32ID-ILP32-NEXT:    flw fa5, %lo(.LCPI34_0)(a1)
 ; RV32ID-ILP32-NEXT:    fmv.w.x fa4, a0
@@ -7069,7 +7069,7 @@ define zeroext i16 @fcvt_wu_s_sat_i16(half %a) nounwind {
 ; RV64ID-LP64:       # %bb.0: # %start
 ; RV64ID-LP64-NEXT:    addi sp, sp, -16
 ; RV64ID-LP64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-LP64-NEXT:    call __extendhfsf2@plt
+; RV64ID-LP64-NEXT:    call __extendhfsf2
 ; RV64ID-LP64-NEXT:    lui a1, %hi(.LCPI34_0)
 ; RV64ID-LP64-NEXT:    flw fa5, %lo(.LCPI34_0)(a1)
 ; RV64ID-LP64-NEXT:    fmv.w.x fa4, a0
@@ -7085,7 +7085,7 @@ define zeroext i16 @fcvt_wu_s_sat_i16(half %a) nounwind {
 ; RV32ID:       # %bb.0: # %start
 ; RV32ID-NEXT:    addi sp, sp, -16
 ; RV32ID-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-NEXT:    call __extendhfsf2@plt
+; RV32ID-NEXT:    call __extendhfsf2
 ; RV32ID-NEXT:    lui a0, %hi(.LCPI34_0)
 ; RV32ID-NEXT:    flw fa5, %lo(.LCPI34_0)(a0)
 ; RV32ID-NEXT:    fmv.w.x fa4, zero
@@ -7100,7 +7100,7 @@ define zeroext i16 @fcvt_wu_s_sat_i16(half %a) nounwind {
 ; RV64ID:       # %bb.0: # %start
 ; RV64ID-NEXT:    addi sp, sp, -16
 ; RV64ID-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-NEXT:    call __extendhfsf2@plt
+; RV64ID-NEXT:    call __extendhfsf2
 ; RV64ID-NEXT:    lui a0, %hi(.LCPI34_0)
 ; RV64ID-NEXT:    flw fa5, %lo(.LCPI34_0)(a0)
 ; RV64ID-NEXT:    fmv.w.x fa4, zero
@@ -7224,8 +7224,8 @@ define signext i8 @fcvt_w_s_i8(half %a) nounwind {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    slli a0, a0, 16
 ; RV32I-NEXT:    srli a0, a0, 16
-; RV32I-NEXT:    call __extendhfsf2@plt
-; RV32I-NEXT:    call __fixsfsi@plt
+; RV32I-NEXT:    call __extendhfsf2
+; RV32I-NEXT:    call __fixsfsi
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -7236,8 +7236,8 @@ define signext i8 @fcvt_w_s_i8(half %a) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    slli a0, a0, 48
 ; RV64I-NEXT:    srli a0, a0, 48
-; RV64I-NEXT:    call __extendhfsf2@plt
-; RV64I-NEXT:    call __fixsfdi@plt
+; RV64I-NEXT:    call __extendhfsf2
+; RV64I-NEXT:    call __fixsfdi
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -7246,7 +7246,7 @@ define signext i8 @fcvt_w_s_i8(half %a) nounwind {
 ; RV32ID-ILP32:       # %bb.0:
 ; RV32ID-ILP32-NEXT:    addi sp, sp, -16
 ; RV32ID-ILP32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-ILP32-NEXT:    call __extendhfsf2@plt
+; RV32ID-ILP32-NEXT:    call __extendhfsf2
 ; RV32ID-ILP32-NEXT:    fmv.w.x fa5, a0
 ; RV32ID-ILP32-NEXT:    fcvt.w.s a0, fa5, rtz
 ; RV32ID-ILP32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -7257,7 +7257,7 @@ define signext i8 @fcvt_w_s_i8(half %a) nounwind {
 ; RV64ID-LP64:       # %bb.0:
 ; RV64ID-LP64-NEXT:    addi sp, sp, -16
 ; RV64ID-LP64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-LP64-NEXT:    call __extendhfsf2@plt
+; RV64ID-LP64-NEXT:    call __extendhfsf2
 ; RV64ID-LP64-NEXT:    fmv.w.x fa5, a0
 ; RV64ID-LP64-NEXT:    fcvt.l.s a0, fa5, rtz
 ; RV64ID-LP64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -7268,7 +7268,7 @@ define signext i8 @fcvt_w_s_i8(half %a) nounwind {
 ; RV32ID:       # %bb.0:
 ; RV32ID-NEXT:    addi sp, sp, -16
 ; RV32ID-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-NEXT:    call __extendhfsf2@plt
+; RV32ID-NEXT:    call __extendhfsf2
 ; RV32ID-NEXT:    fcvt.w.s a0, fa0, rtz
 ; RV32ID-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32ID-NEXT:    addi sp, sp, 16
@@ -7278,7 +7278,7 @@ define signext i8 @fcvt_w_s_i8(half %a) nounwind {
 ; RV64ID:       # %bb.0:
 ; RV64ID-NEXT:    addi sp, sp, -16
 ; RV64ID-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-NEXT:    call __extendhfsf2@plt
+; RV64ID-NEXT:    call __extendhfsf2
 ; RV64ID-NEXT:    fcvt.l.s a0, fa0, rtz
 ; RV64ID-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64ID-NEXT:    addi sp, sp, 16
@@ -7445,13 +7445,13 @@ define signext i8 @fcvt_w_s_sat_i8(half %a) nounwind {
 ; RV32I-NEXT:    sw s2, 0(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    slli a0, a0, 16
 ; RV32I-NEXT:    srli a0, a0, 16
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lui a1, 798720
-; RV32I-NEXT:    call __gesf2@plt
+; RV32I-NEXT:    call __gesf2
 ; RV32I-NEXT:    mv s2, a0
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __fixsfsi@plt
+; RV32I-NEXT:    call __fixsfsi
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    bgez s2, .LBB36_2
 ; RV32I-NEXT:  # %bb.1: # %start
@@ -7459,14 +7459,14 @@ define signext i8 @fcvt_w_s_sat_i8(half %a) nounwind {
 ; RV32I-NEXT:  .LBB36_2: # %start
 ; RV32I-NEXT:    lui a1, 274400
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __gtsf2@plt
+; RV32I-NEXT:    call __gtsf2
 ; RV32I-NEXT:    blez a0, .LBB36_4
 ; RV32I-NEXT:  # %bb.3: # %start
 ; RV32I-NEXT:    li s1, 127
 ; RV32I-NEXT:  .LBB36_4: # %start
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    mv a1, s0
-; RV32I-NEXT:    call __unordsf2@plt
+; RV32I-NEXT:    call __unordsf2
 ; RV32I-NEXT:    snez a0, a0
 ; RV32I-NEXT:    addi a0, a0, -1
 ; RV32I-NEXT:    and a0, a0, s1
@@ -7488,13 +7488,13 @@ define signext i8 @fcvt_w_s_sat_i8(half %a) nounwind {
 ; RV64I-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    slli a0, a0, 48
 ; RV64I-NEXT:    srli a0, a0, 48
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    lui a1, 798720
-; RV64I-NEXT:    call __gesf2@plt
+; RV64I-NEXT:    call __gesf2
 ; RV64I-NEXT:    mv s2, a0
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __fixsfdi@plt
+; RV64I-NEXT:    call __fixsfdi
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    bgez s2, .LBB36_2
 ; RV64I-NEXT:  # %bb.1: # %start
@@ -7502,14 +7502,14 @@ define signext i8 @fcvt_w_s_sat_i8(half %a) nounwind {
 ; RV64I-NEXT:  .LBB36_2: # %start
 ; RV64I-NEXT:    lui a1, 274400
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __gtsf2@plt
+; RV64I-NEXT:    call __gtsf2
 ; RV64I-NEXT:    blez a0, .LBB36_4
 ; RV64I-NEXT:  # %bb.3: # %start
 ; RV64I-NEXT:    li s1, 127
 ; RV64I-NEXT:  .LBB36_4: # %start
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    mv a1, s0
-; RV64I-NEXT:    call __unordsf2@plt
+; RV64I-NEXT:    call __unordsf2
 ; RV64I-NEXT:    snez a0, a0
 ; RV64I-NEXT:    addi a0, a0, -1
 ; RV64I-NEXT:    and a0, a0, s1
@@ -7526,7 +7526,7 @@ define signext i8 @fcvt_w_s_sat_i8(half %a) nounwind {
 ; RV32ID-ILP32:       # %bb.0: # %start
 ; RV32ID-ILP32-NEXT:    addi sp, sp, -16
 ; RV32ID-ILP32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-ILP32-NEXT:    call __extendhfsf2@plt
+; RV32ID-ILP32-NEXT:    call __extendhfsf2
 ; RV32ID-ILP32-NEXT:    fmv.w.x fa5, a0
 ; RV32ID-ILP32-NEXT:    feq.s a0, fa5, fa5
 ; RV32ID-ILP32-NEXT:    neg a0, a0
@@ -7546,7 +7546,7 @@ define signext i8 @fcvt_w_s_sat_i8(half %a) nounwind {
 ; RV64ID-LP64:       # %bb.0: # %start
 ; RV64ID-LP64-NEXT:    addi sp, sp, -16
 ; RV64ID-LP64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-LP64-NEXT:    call __extendhfsf2@plt
+; RV64ID-LP64-NEXT:    call __extendhfsf2
 ; RV64ID-LP64-NEXT:    fmv.w.x fa5, a0
 ; RV64ID-LP64-NEXT:    feq.s a0, fa5, fa5
 ; RV64ID-LP64-NEXT:    neg a0, a0
@@ -7566,7 +7566,7 @@ define signext i8 @fcvt_w_s_sat_i8(half %a) nounwind {
 ; RV32ID:       # %bb.0: # %start
 ; RV32ID-NEXT:    addi sp, sp, -16
 ; RV32ID-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-NEXT:    call __extendhfsf2@plt
+; RV32ID-NEXT:    call __extendhfsf2
 ; RV32ID-NEXT:    feq.s a0, fa0, fa0
 ; RV32ID-NEXT:    neg a0, a0
 ; RV32ID-NEXT:    lui a1, 798720
@@ -7585,7 +7585,7 @@ define signext i8 @fcvt_w_s_sat_i8(half %a) nounwind {
 ; RV64ID:       # %bb.0: # %start
 ; RV64ID-NEXT:    addi sp, sp, -16
 ; RV64ID-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-NEXT:    call __extendhfsf2@plt
+; RV64ID-NEXT:    call __extendhfsf2
 ; RV64ID-NEXT:    feq.s a0, fa0, fa0
 ; RV64ID-NEXT:    neg a0, a0
 ; RV64ID-NEXT:    lui a1, 798720
@@ -7734,8 +7734,8 @@ define zeroext i8 @fcvt_wu_s_i8(half %a) nounwind {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    slli a0, a0, 16
 ; RV32I-NEXT:    srli a0, a0, 16
-; RV32I-NEXT:    call __extendhfsf2@plt
-; RV32I-NEXT:    call __fixunssfsi@plt
+; RV32I-NEXT:    call __extendhfsf2
+; RV32I-NEXT:    call __fixunssfsi
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -7746,8 +7746,8 @@ define zeroext i8 @fcvt_wu_s_i8(half %a) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    slli a0, a0, 48
 ; RV64I-NEXT:    srli a0, a0, 48
-; RV64I-NEXT:    call __extendhfsf2@plt
-; RV64I-NEXT:    call __fixunssfdi@plt
+; RV64I-NEXT:    call __extendhfsf2
+; RV64I-NEXT:    call __fixunssfdi
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -7756,7 +7756,7 @@ define zeroext i8 @fcvt_wu_s_i8(half %a) nounwind {
 ; RV32ID-ILP32:       # %bb.0:
 ; RV32ID-ILP32-NEXT:    addi sp, sp, -16
 ; RV32ID-ILP32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-ILP32-NEXT:    call __extendhfsf2@plt
+; RV32ID-ILP32-NEXT:    call __extendhfsf2
 ; RV32ID-ILP32-NEXT:    fmv.w.x fa5, a0
 ; RV32ID-ILP32-NEXT:    fcvt.wu.s a0, fa5, rtz
 ; RV32ID-ILP32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -7767,7 +7767,7 @@ define zeroext i8 @fcvt_wu_s_i8(half %a) nounwind {
 ; RV64ID-LP64:       # %bb.0:
 ; RV64ID-LP64-NEXT:    addi sp, sp, -16
 ; RV64ID-LP64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-LP64-NEXT:    call __extendhfsf2@plt
+; RV64ID-LP64-NEXT:    call __extendhfsf2
 ; RV64ID-LP64-NEXT:    fmv.w.x fa5, a0
 ; RV64ID-LP64-NEXT:    fcvt.lu.s a0, fa5, rtz
 ; RV64ID-LP64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -7778,7 +7778,7 @@ define zeroext i8 @fcvt_wu_s_i8(half %a) nounwind {
 ; RV32ID:       # %bb.0:
 ; RV32ID-NEXT:    addi sp, sp, -16
 ; RV32ID-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-NEXT:    call __extendhfsf2@plt
+; RV32ID-NEXT:    call __extendhfsf2
 ; RV32ID-NEXT:    fcvt.wu.s a0, fa0, rtz
 ; RV32ID-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32ID-NEXT:    addi sp, sp, 16
@@ -7788,7 +7788,7 @@ define zeroext i8 @fcvt_wu_s_i8(half %a) nounwind {
 ; RV64ID:       # %bb.0:
 ; RV64ID-NEXT:    addi sp, sp, -16
 ; RV64ID-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-NEXT:    call __extendhfsf2@plt
+; RV64ID-NEXT:    call __extendhfsf2
 ; RV64ID-NEXT:    fcvt.lu.s a0, fa0, rtz
 ; RV64ID-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64ID-NEXT:    addi sp, sp, 16
@@ -7923,17 +7923,17 @@ define zeroext i8 @fcvt_wu_s_sat_i8(half %a) nounwind {
 ; RV32I-NEXT:    sw s2, 0(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    slli a0, a0, 16
 ; RV32I-NEXT:    srli a0, a0, 16
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    mv s2, a0
 ; RV32I-NEXT:    li a1, 0
-; RV32I-NEXT:    call __gesf2@plt
+; RV32I-NEXT:    call __gesf2
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    mv a0, s2
-; RV32I-NEXT:    call __fixunssfsi@plt
+; RV32I-NEXT:    call __fixunssfsi
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    lui a1, 276464
 ; RV32I-NEXT:    mv a0, s2
-; RV32I-NEXT:    call __gtsf2@plt
+; RV32I-NEXT:    call __gtsf2
 ; RV32I-NEXT:    blez a0, .LBB38_2
 ; RV32I-NEXT:  # %bb.1: # %start
 ; RV32I-NEXT:    li a0, 255
@@ -7960,17 +7960,17 @@ define zeroext i8 @fcvt_wu_s_sat_i8(half %a) nounwind {
 ; RV64I-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    slli a0, a0, 48
 ; RV64I-NEXT:    srli a0, a0, 48
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    mv s2, a0
 ; RV64I-NEXT:    li a1, 0
-; RV64I-NEXT:    call __gesf2@plt
+; RV64I-NEXT:    call __gesf2
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    mv a0, s2
-; RV64I-NEXT:    call __fixunssfdi@plt
+; RV64I-NEXT:    call __fixunssfdi
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    lui a1, 276464
 ; RV64I-NEXT:    mv a0, s2
-; RV64I-NEXT:    call __gtsf2@plt
+; RV64I-NEXT:    call __gtsf2
 ; RV64I-NEXT:    blez a0, .LBB38_2
 ; RV64I-NEXT:  # %bb.1: # %start
 ; RV64I-NEXT:    li a0, 255
@@ -7992,7 +7992,7 @@ define zeroext i8 @fcvt_wu_s_sat_i8(half %a) nounwind {
 ; RV32ID-ILP32:       # %bb.0: # %start
 ; RV32ID-ILP32-NEXT:    addi sp, sp, -16
 ; RV32ID-ILP32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-ILP32-NEXT:    call __extendhfsf2@plt
+; RV32ID-ILP32-NEXT:    call __extendhfsf2
 ; RV32ID-ILP32-NEXT:    fmv.w.x fa5, a0
 ; RV32ID-ILP32-NEXT:    fmv.w.x fa4, zero
 ; RV32ID-ILP32-NEXT:    fmax.s fa5, fa5, fa4
@@ -8008,7 +8008,7 @@ define zeroext i8 @fcvt_wu_s_sat_i8(half %a) nounwind {
 ; RV64ID-LP64:       # %bb.0: # %start
 ; RV64ID-LP64-NEXT:    addi sp, sp, -16
 ; RV64ID-LP64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-LP64-NEXT:    call __extendhfsf2@plt
+; RV64ID-LP64-NEXT:    call __extendhfsf2
 ; RV64ID-LP64-NEXT:    fmv.w.x fa5, a0
 ; RV64ID-LP64-NEXT:    fmv.w.x fa4, zero
 ; RV64ID-LP64-NEXT:    fmax.s fa5, fa5, fa4
@@ -8024,7 +8024,7 @@ define zeroext i8 @fcvt_wu_s_sat_i8(half %a) nounwind {
 ; RV32ID:       # %bb.0: # %start
 ; RV32ID-NEXT:    addi sp, sp, -16
 ; RV32ID-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-NEXT:    call __extendhfsf2@plt
+; RV32ID-NEXT:    call __extendhfsf2
 ; RV32ID-NEXT:    fmv.w.x fa5, zero
 ; RV32ID-NEXT:    fmax.s fa5, fa0, fa5
 ; RV32ID-NEXT:    lui a0, 276464
@@ -8039,7 +8039,7 @@ define zeroext i8 @fcvt_wu_s_sat_i8(half %a) nounwind {
 ; RV64ID:       # %bb.0: # %start
 ; RV64ID-NEXT:    addi sp, sp, -16
 ; RV64ID-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-NEXT:    call __extendhfsf2@plt
+; RV64ID-NEXT:    call __extendhfsf2
 ; RV64ID-NEXT:    fmv.w.x fa5, zero
 ; RV64ID-NEXT:    fmax.s fa5, fa0, fa5
 ; RV64ID-NEXT:    lui a0, 276464
@@ -8203,20 +8203,20 @@ define zeroext i32 @fcvt_wu_h_sat_zext(half %a) nounwind {
 ; RV32I-NEXT:    sw s2, 0(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    slli a0, a0, 16
 ; RV32I-NEXT:    srli a0, a0, 16
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lui a1, 325632
 ; RV32I-NEXT:    addi a1, a1, -1
-; RV32I-NEXT:    call __gtsf2@plt
+; RV32I-NEXT:    call __gtsf2
 ; RV32I-NEXT:    sgtz a0, a0
 ; RV32I-NEXT:    neg s1, a0
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    li a1, 0
-; RV32I-NEXT:    call __gesf2@plt
+; RV32I-NEXT:    call __gesf2
 ; RV32I-NEXT:    slti a0, a0, 0
 ; RV32I-NEXT:    addi s2, a0, -1
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __fixunssfsi@plt
+; RV32I-NEXT:    call __fixunssfsi
 ; RV32I-NEXT:    and a0, s2, a0
 ; RV32I-NEXT:    or a0, s1, a0
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -8235,18 +8235,18 @@ define zeroext i32 @fcvt_wu_h_sat_zext(half %a) nounwind {
 ; RV64I-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    slli a0, a0, 48
 ; RV64I-NEXT:    srli a0, a0, 48
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    mv s2, a0
 ; RV64I-NEXT:    li a1, 0
-; RV64I-NEXT:    call __gesf2@plt
+; RV64I-NEXT:    call __gesf2
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    mv a0, s2
-; RV64I-NEXT:    call __fixunssfdi@plt
+; RV64I-NEXT:    call __fixunssfdi
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    lui a1, 325632
 ; RV64I-NEXT:    addiw a1, a1, -1
 ; RV64I-NEXT:    mv a0, s2
-; RV64I-NEXT:    call __gtsf2@plt
+; RV64I-NEXT:    call __gtsf2
 ; RV64I-NEXT:    blez a0, .LBB39_2
 ; RV64I-NEXT:  # %bb.1: # %start
 ; RV64I-NEXT:    li a0, -1
@@ -8270,7 +8270,7 @@ define zeroext i32 @fcvt_wu_h_sat_zext(half %a) nounwind {
 ; RV32ID-ILP32:       # %bb.0: # %start
 ; RV32ID-ILP32-NEXT:    addi sp, sp, -16
 ; RV32ID-ILP32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-ILP32-NEXT:    call __extendhfsf2@plt
+; RV32ID-ILP32-NEXT:    call __extendhfsf2
 ; RV32ID-ILP32-NEXT:    fmv.w.x fa5, a0
 ; RV32ID-ILP32-NEXT:    fcvt.wu.s a0, fa5, rtz
 ; RV32ID-ILP32-NEXT:    feq.s a1, fa5, fa5
@@ -8285,7 +8285,7 @@ define zeroext i32 @fcvt_wu_h_sat_zext(half %a) nounwind {
 ; RV64ID-LP64:       # %bb.0: # %start
 ; RV64ID-LP64-NEXT:    addi sp, sp, -16
 ; RV64ID-LP64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-LP64-NEXT:    call __extendhfsf2@plt
+; RV64ID-LP64-NEXT:    call __extendhfsf2
 ; RV64ID-LP64-NEXT:    fmv.w.x fa5, a0
 ; RV64ID-LP64-NEXT:    fcvt.wu.s a0, fa5, rtz
 ; RV64ID-LP64-NEXT:    feq.s a1, fa5, fa5
@@ -8302,7 +8302,7 @@ define zeroext i32 @fcvt_wu_h_sat_zext(half %a) nounwind {
 ; RV32ID:       # %bb.0: # %start
 ; RV32ID-NEXT:    addi sp, sp, -16
 ; RV32ID-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-NEXT:    call __extendhfsf2@plt
+; RV32ID-NEXT:    call __extendhfsf2
 ; RV32ID-NEXT:    fcvt.wu.s a0, fa0, rtz
 ; RV32ID-NEXT:    feq.s a1, fa0, fa0
 ; RV32ID-NEXT:    seqz a1, a1
@@ -8316,7 +8316,7 @@ define zeroext i32 @fcvt_wu_h_sat_zext(half %a) nounwind {
 ; RV64ID:       # %bb.0: # %start
 ; RV64ID-NEXT:    addi sp, sp, -16
 ; RV64ID-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-NEXT:    call __extendhfsf2@plt
+; RV64ID-NEXT:    call __extendhfsf2
 ; RV64ID-NEXT:    fcvt.wu.s a0, fa0, rtz
 ; RV64ID-NEXT:    feq.s a1, fa0, fa0
 ; RV64ID-NEXT:    seqz a1, a1
@@ -8454,13 +8454,13 @@ define signext i32 @fcvt_w_h_sat_sext(half %a) nounwind {
 ; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    slli a0, a0, 16
 ; RV32I-NEXT:    srli a0, a0, 16
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lui a1, 847872
-; RV32I-NEXT:    call __gesf2@plt
+; RV32I-NEXT:    call __gesf2
 ; RV32I-NEXT:    mv s2, a0
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __fixsfsi@plt
+; RV32I-NEXT:    call __fixsfsi
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    lui s3, 524288
 ; RV32I-NEXT:    bgez s2, .LBB40_2
@@ -8470,14 +8470,14 @@ define signext i32 @fcvt_w_h_sat_sext(half %a) nounwind {
 ; RV32I-NEXT:    lui a1, 323584
 ; RV32I-NEXT:    addi a1, a1, -1
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __gtsf2@plt
+; RV32I-NEXT:    call __gtsf2
 ; RV32I-NEXT:    blez a0, .LBB40_4
 ; RV32I-NEXT:  # %bb.3: # %start
 ; RV32I-NEXT:    addi s1, s3, -1
 ; RV32I-NEXT:  .LBB40_4: # %start
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    mv a1, s0
-; RV32I-NEXT:    call __unordsf2@plt
+; RV32I-NEXT:    call __unordsf2
 ; RV32I-NEXT:    snez a0, a0
 ; RV32I-NEXT:    addi a0, a0, -1
 ; RV32I-NEXT:    and a0, a0, s1
@@ -8499,13 +8499,13 @@ define signext i32 @fcvt_w_h_sat_sext(half %a) nounwind {
 ; RV64I-NEXT:    sd s3, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    slli a0, a0, 48
 ; RV64I-NEXT:    srli a0, a0, 48
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    lui a1, 847872
-; RV64I-NEXT:    call __gesf2@plt
+; RV64I-NEXT:    call __gesf2
 ; RV64I-NEXT:    mv s2, a0
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __fixsfdi@plt
+; RV64I-NEXT:    call __fixsfdi
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    lui s3, 524288
 ; RV64I-NEXT:    bgez s2, .LBB40_2
@@ -8515,14 +8515,14 @@ define signext i32 @fcvt_w_h_sat_sext(half %a) nounwind {
 ; RV64I-NEXT:    lui a1, 323584
 ; RV64I-NEXT:    addiw a1, a1, -1
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __gtsf2@plt
+; RV64I-NEXT:    call __gtsf2
 ; RV64I-NEXT:    blez a0, .LBB40_4
 ; RV64I-NEXT:  # %bb.3: # %start
 ; RV64I-NEXT:    addi s1, s3, -1
 ; RV64I-NEXT:  .LBB40_4: # %start
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    mv a1, s0
-; RV64I-NEXT:    call __unordsf2@plt
+; RV64I-NEXT:    call __unordsf2
 ; RV64I-NEXT:    snez a0, a0
 ; RV64I-NEXT:    addi a0, a0, -1
 ; RV64I-NEXT:    and a0, a0, s1
@@ -8539,7 +8539,7 @@ define signext i32 @fcvt_w_h_sat_sext(half %a) nounwind {
 ; RV32ID-ILP32:       # %bb.0: # %start
 ; RV32ID-ILP32-NEXT:    addi sp, sp, -16
 ; RV32ID-ILP32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-ILP32-NEXT:    call __extendhfsf2@plt
+; RV32ID-ILP32-NEXT:    call __extendhfsf2
 ; RV32ID-ILP32-NEXT:    fmv.w.x fa5, a0
 ; RV32ID-ILP32-NEXT:    fcvt.w.s a0, fa5, rtz
 ; RV32ID-ILP32-NEXT:    feq.s a1, fa5, fa5
@@ -8554,7 +8554,7 @@ define signext i32 @fcvt_w_h_sat_sext(half %a) nounwind {
 ; RV64ID-LP64:       # %bb.0: # %start
 ; RV64ID-LP64-NEXT:    addi sp, sp, -16
 ; RV64ID-LP64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-LP64-NEXT:    call __extendhfsf2@plt
+; RV64ID-LP64-NEXT:    call __extendhfsf2
 ; RV64ID-LP64-NEXT:    fmv.w.x fa5, a0
 ; RV64ID-LP64-NEXT:    fcvt.w.s a0, fa5, rtz
 ; RV64ID-LP64-NEXT:    feq.s a1, fa5, fa5
@@ -8569,7 +8569,7 @@ define signext i32 @fcvt_w_h_sat_sext(half %a) nounwind {
 ; RV32ID:       # %bb.0: # %start
 ; RV32ID-NEXT:    addi sp, sp, -16
 ; RV32ID-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-NEXT:    call __extendhfsf2@plt
+; RV32ID-NEXT:    call __extendhfsf2
 ; RV32ID-NEXT:    fcvt.w.s a0, fa0, rtz
 ; RV32ID-NEXT:    feq.s a1, fa0, fa0
 ; RV32ID-NEXT:    seqz a1, a1
@@ -8583,7 +8583,7 @@ define signext i32 @fcvt_w_h_sat_sext(half %a) nounwind {
 ; RV64ID:       # %bb.0: # %start
 ; RV64ID-NEXT:    addi sp, sp, -16
 ; RV64ID-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-NEXT:    call __extendhfsf2@plt
+; RV64ID-NEXT:    call __extendhfsf2
 ; RV64ID-NEXT:    fcvt.w.s a0, fa0, rtz
 ; RV64ID-NEXT:    feq.s a1, fa0, fa0
 ; RV64ID-NEXT:    seqz a1, a1
diff --git a/llvm/test/CodeGen/RISCV/half-frem.ll b/llvm/test/CodeGen/RISCV/half-frem.ll
index 73d1760c8596df..a262094190e186 100644
--- a/llvm/test/CodeGen/RISCV/half-frem.ll
+++ b/llvm/test/CodeGen/RISCV/half-frem.ll
@@ -31,7 +31,7 @@ define half @frem_f16(half %a, half %b) nounwind {
 ; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFH-NEXT:    fcvt.s.h fa0, fa0
 ; RV32IZFH-NEXT:    fcvt.s.h fa1, fa1
-; RV32IZFH-NEXT:    call fmodf@plt
+; RV32IZFH-NEXT:    call fmodf
 ; RV32IZFH-NEXT:    fcvt.h.s fa0, fa0
 ; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFH-NEXT:    addi sp, sp, 16
@@ -43,7 +43,7 @@ define half @frem_f16(half %a, half %b) nounwind {
 ; RV64IZFH-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZFH-NEXT:    fcvt.s.h fa0, fa0
 ; RV64IZFH-NEXT:    fcvt.s.h fa1, fa1
-; RV64IZFH-NEXT:    call fmodf@plt
+; RV64IZFH-NEXT:    call fmodf
 ; RV64IZFH-NEXT:    fcvt.h.s fa0, fa0
 ; RV64IZFH-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFH-NEXT:    addi sp, sp, 16
@@ -55,7 +55,7 @@ define half @frem_f16(half %a, half %b) nounwind {
 ; RV32IZHINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZHINX-NEXT:    fcvt.s.h a0, a0
 ; RV32IZHINX-NEXT:    fcvt.s.h a1, a1
-; RV32IZHINX-NEXT:    call fmodf@plt
+; RV32IZHINX-NEXT:    call fmodf
 ; RV32IZHINX-NEXT:    fcvt.h.s a0, a0
 ; RV32IZHINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZHINX-NEXT:    addi sp, sp, 16
@@ -67,7 +67,7 @@ define half @frem_f16(half %a, half %b) nounwind {
 ; RV64IZHINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZHINX-NEXT:    fcvt.s.h a0, a0
 ; RV64IZHINX-NEXT:    fcvt.s.h a1, a1
-; RV64IZHINX-NEXT:    call fmodf@plt
+; RV64IZHINX-NEXT:    call fmodf
 ; RV64IZHINX-NEXT:    fcvt.h.s a0, a0
 ; RV64IZHINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZHINX-NEXT:    addi sp, sp, 16
@@ -79,7 +79,7 @@ define half @frem_f16(half %a, half %b) nounwind {
 ; RV32IZFHMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFHMIN-NEXT:    fcvt.s.h fa0, fa0
 ; RV32IZFHMIN-NEXT:    fcvt.s.h fa1, fa1
-; RV32IZFHMIN-NEXT:    call fmodf@plt
+; RV32IZFHMIN-NEXT:    call fmodf
 ; RV32IZFHMIN-NEXT:    fcvt.h.s fa0, fa0
 ; RV32IZFHMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFHMIN-NEXT:    addi sp, sp, 16
@@ -91,7 +91,7 @@ define half @frem_f16(half %a, half %b) nounwind {
 ; RV64IZFHMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZFHMIN-NEXT:    fcvt.s.h fa0, fa0
 ; RV64IZFHMIN-NEXT:    fcvt.s.h fa1, fa1
-; RV64IZFHMIN-NEXT:    call fmodf@plt
+; RV64IZFHMIN-NEXT:    call fmodf
 ; RV64IZFHMIN-NEXT:    fcvt.h.s fa0, fa0
 ; RV64IZFHMIN-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFHMIN-NEXT:    addi sp, sp, 16
@@ -103,7 +103,7 @@ define half @frem_f16(half %a, half %b) nounwind {
 ; RV32IZHINXMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZHINXMIN-NEXT:    fcvt.s.h a0, a0
 ; RV32IZHINXMIN-NEXT:    fcvt.s.h a1, a1
-; RV32IZHINXMIN-NEXT:    call fmodf@plt
+; RV32IZHINXMIN-NEXT:    call fmodf
 ; RV32IZHINXMIN-NEXT:    fcvt.h.s a0, a0
 ; RV32IZHINXMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZHINXMIN-NEXT:    addi sp, sp, 16
@@ -115,7 +115,7 @@ define half @frem_f16(half %a, half %b) nounwind {
 ; RV64IZHINXMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZHINXMIN-NEXT:    fcvt.s.h a0, a0
 ; RV64IZHINXMIN-NEXT:    fcvt.s.h a1, a1
-; RV64IZHINXMIN-NEXT:    call fmodf@plt
+; RV64IZHINXMIN-NEXT:    call fmodf
 ; RV64IZHINXMIN-NEXT:    fcvt.h.s a0, a0
 ; RV64IZHINXMIN-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZHINXMIN-NEXT:    addi sp, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/half-intrinsics.ll b/llvm/test/CodeGen/RISCV/half-intrinsics.ll
index 2d4b7538c34d13..c493a9b2cb1dfb 100644
--- a/llvm/test/CodeGen/RISCV/half-intrinsics.ll
+++ b/llvm/test/CodeGen/RISCV/half-intrinsics.ll
@@ -75,9 +75,9 @@ define half @sqrt_f16(half %a) nounwind {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    slli a0, a0, 16
 ; RV32I-NEXT:    srli a0, a0, 16
-; RV32I-NEXT:    call __extendhfsf2@plt
-; RV32I-NEXT:    call sqrtf@plt
-; RV32I-NEXT:    call __truncsfhf2@plt
+; RV32I-NEXT:    call __extendhfsf2
+; RV32I-NEXT:    call sqrtf
+; RV32I-NEXT:    call __truncsfhf2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -88,9 +88,9 @@ define half @sqrt_f16(half %a) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    slli a0, a0, 48
 ; RV64I-NEXT:    srli a0, a0, 48
-; RV64I-NEXT:    call __extendhfsf2@plt
-; RV64I-NEXT:    call sqrtf@plt
-; RV64I-NEXT:    call __truncsfhf2@plt
+; RV64I-NEXT:    call __extendhfsf2
+; RV64I-NEXT:    call sqrtf
+; RV64I-NEXT:    call __truncsfhf2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -120,7 +120,7 @@ define half @powi_f16(half %a, i32 %b) nounwind {
 ; RV32IZFH-NEXT:    addi sp, sp, -16
 ; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFH-NEXT:    fcvt.s.h fa0, fa0
-; RV32IZFH-NEXT:    call __powisf2@plt
+; RV32IZFH-NEXT:    call __powisf2
 ; RV32IZFH-NEXT:    fcvt.h.s fa0, fa0
 ; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFH-NEXT:    addi sp, sp, 16
@@ -132,7 +132,7 @@ define half @powi_f16(half %a, i32 %b) nounwind {
 ; RV64IZFH-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZFH-NEXT:    fcvt.s.h fa0, fa0
 ; RV64IZFH-NEXT:    sext.w a0, a0
-; RV64IZFH-NEXT:    call __powisf2@plt
+; RV64IZFH-NEXT:    call __powisf2
 ; RV64IZFH-NEXT:    fcvt.h.s fa0, fa0
 ; RV64IZFH-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFH-NEXT:    addi sp, sp, 16
@@ -143,7 +143,7 @@ define half @powi_f16(half %a, i32 %b) nounwind {
 ; RV32IZHINX-NEXT:    addi sp, sp, -16
 ; RV32IZHINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZHINX-NEXT:    fcvt.s.h a0, a0
-; RV32IZHINX-NEXT:    call __powisf2@plt
+; RV32IZHINX-NEXT:    call __powisf2
 ; RV32IZHINX-NEXT:    fcvt.h.s a0, a0
 ; RV32IZHINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZHINX-NEXT:    addi sp, sp, 16
@@ -155,7 +155,7 @@ define half @powi_f16(half %a, i32 %b) nounwind {
 ; RV64IZHINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZHINX-NEXT:    sext.w a1, a1
 ; RV64IZHINX-NEXT:    fcvt.s.h a0, a0
-; RV64IZHINX-NEXT:    call __powisf2@plt
+; RV64IZHINX-NEXT:    call __powisf2
 ; RV64IZHINX-NEXT:    fcvt.h.s a0, a0
 ; RV64IZHINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZHINX-NEXT:    addi sp, sp, 16
@@ -169,10 +169,10 @@ define half @powi_f16(half %a, i32 %b) nounwind {
 ; RV32I-NEXT:    mv s0, a1
 ; RV32I-NEXT:    slli a0, a0, 16
 ; RV32I-NEXT:    srli a0, a0, 16
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    mv a1, s0
-; RV32I-NEXT:    call __powisf2@plt
-; RV32I-NEXT:    call __truncsfhf2@plt
+; RV32I-NEXT:    call __powisf2
+; RV32I-NEXT:    call __truncsfhf2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -186,10 +186,10 @@ define half @powi_f16(half %a, i32 %b) nounwind {
 ; RV64I-NEXT:    mv s0, a1
 ; RV64I-NEXT:    slli a0, a0, 48
 ; RV64I-NEXT:    srli a0, a0, 48
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    sext.w a1, s0
-; RV64I-NEXT:    call __powisf2@plt
-; RV64I-NEXT:    call __truncsfhf2@plt
+; RV64I-NEXT:    call __powisf2
+; RV64I-NEXT:    call __truncsfhf2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -200,7 +200,7 @@ define half @powi_f16(half %a, i32 %b) nounwind {
 ; RV32IZFHMIN-NEXT:    addi sp, sp, -16
 ; RV32IZFHMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFHMIN-NEXT:    fcvt.s.h fa0, fa0
-; RV32IZFHMIN-NEXT:    call __powisf2@plt
+; RV32IZFHMIN-NEXT:    call __powisf2
 ; RV32IZFHMIN-NEXT:    fcvt.h.s fa0, fa0
 ; RV32IZFHMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFHMIN-NEXT:    addi sp, sp, 16
@@ -212,7 +212,7 @@ define half @powi_f16(half %a, i32 %b) nounwind {
 ; RV64IZFHMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZFHMIN-NEXT:    fcvt.s.h fa0, fa0
 ; RV64IZFHMIN-NEXT:    sext.w a0, a0
-; RV64IZFHMIN-NEXT:    call __powisf2@plt
+; RV64IZFHMIN-NEXT:    call __powisf2
 ; RV64IZFHMIN-NEXT:    fcvt.h.s fa0, fa0
 ; RV64IZFHMIN-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFHMIN-NEXT:    addi sp, sp, 16
@@ -223,7 +223,7 @@ define half @powi_f16(half %a, i32 %b) nounwind {
 ; RV32IZHINXMIN-NEXT:    addi sp, sp, -16
 ; RV32IZHINXMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZHINXMIN-NEXT:    fcvt.s.h a0, a0
-; RV32IZHINXMIN-NEXT:    call __powisf2@plt
+; RV32IZHINXMIN-NEXT:    call __powisf2
 ; RV32IZHINXMIN-NEXT:    fcvt.h.s a0, a0
 ; RV32IZHINXMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZHINXMIN-NEXT:    addi sp, sp, 16
@@ -235,7 +235,7 @@ define half @powi_f16(half %a, i32 %b) nounwind {
 ; RV64IZHINXMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZHINXMIN-NEXT:    sext.w a1, a1
 ; RV64IZHINXMIN-NEXT:    fcvt.s.h a0, a0
-; RV64IZHINXMIN-NEXT:    call __powisf2@plt
+; RV64IZHINXMIN-NEXT:    call __powisf2
 ; RV64IZHINXMIN-NEXT:    fcvt.h.s a0, a0
 ; RV64IZHINXMIN-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZHINXMIN-NEXT:    addi sp, sp, 16
@@ -252,7 +252,7 @@ define half @sin_f16(half %a) nounwind {
 ; RV32IZFH-NEXT:    addi sp, sp, -16
 ; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFH-NEXT:    fcvt.s.h fa0, fa0
-; RV32IZFH-NEXT:    call sinf@plt
+; RV32IZFH-NEXT:    call sinf
 ; RV32IZFH-NEXT:    fcvt.h.s fa0, fa0
 ; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFH-NEXT:    addi sp, sp, 16
@@ -263,7 +263,7 @@ define half @sin_f16(half %a) nounwind {
 ; RV64IZFH-NEXT:    addi sp, sp, -16
 ; RV64IZFH-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZFH-NEXT:    fcvt.s.h fa0, fa0
-; RV64IZFH-NEXT:    call sinf@plt
+; RV64IZFH-NEXT:    call sinf
 ; RV64IZFH-NEXT:    fcvt.h.s fa0, fa0
 ; RV64IZFH-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFH-NEXT:    addi sp, sp, 16
@@ -274,7 +274,7 @@ define half @sin_f16(half %a) nounwind {
 ; RV32IZHINX-NEXT:    addi sp, sp, -16
 ; RV32IZHINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZHINX-NEXT:    fcvt.s.h a0, a0
-; RV32IZHINX-NEXT:    call sinf@plt
+; RV32IZHINX-NEXT:    call sinf
 ; RV32IZHINX-NEXT:    fcvt.h.s a0, a0
 ; RV32IZHINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZHINX-NEXT:    addi sp, sp, 16
@@ -285,7 +285,7 @@ define half @sin_f16(half %a) nounwind {
 ; RV64IZHINX-NEXT:    addi sp, sp, -16
 ; RV64IZHINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZHINX-NEXT:    fcvt.s.h a0, a0
-; RV64IZHINX-NEXT:    call sinf@plt
+; RV64IZHINX-NEXT:    call sinf
 ; RV64IZHINX-NEXT:    fcvt.h.s a0, a0
 ; RV64IZHINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZHINX-NEXT:    addi sp, sp, 16
@@ -297,9 +297,9 @@ define half @sin_f16(half %a) nounwind {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    slli a0, a0, 16
 ; RV32I-NEXT:    srli a0, a0, 16
-; RV32I-NEXT:    call __extendhfsf2@plt
-; RV32I-NEXT:    call sinf@plt
-; RV32I-NEXT:    call __truncsfhf2@plt
+; RV32I-NEXT:    call __extendhfsf2
+; RV32I-NEXT:    call sinf
+; RV32I-NEXT:    call __truncsfhf2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -310,9 +310,9 @@ define half @sin_f16(half %a) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    slli a0, a0, 48
 ; RV64I-NEXT:    srli a0, a0, 48
-; RV64I-NEXT:    call __extendhfsf2@plt
-; RV64I-NEXT:    call sinf@plt
-; RV64I-NEXT:    call __truncsfhf2@plt
+; RV64I-NEXT:    call __extendhfsf2
+; RV64I-NEXT:    call sinf
+; RV64I-NEXT:    call __truncsfhf2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -322,7 +322,7 @@ define half @sin_f16(half %a) nounwind {
 ; RV32IZFHMIN-NEXT:    addi sp, sp, -16
 ; RV32IZFHMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFHMIN-NEXT:    fcvt.s.h fa0, fa0
-; RV32IZFHMIN-NEXT:    call sinf@plt
+; RV32IZFHMIN-NEXT:    call sinf
 ; RV32IZFHMIN-NEXT:    fcvt.h.s fa0, fa0
 ; RV32IZFHMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFHMIN-NEXT:    addi sp, sp, 16
@@ -333,7 +333,7 @@ define half @sin_f16(half %a) nounwind {
 ; RV64IZFHMIN-NEXT:    addi sp, sp, -16
 ; RV64IZFHMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZFHMIN-NEXT:    fcvt.s.h fa0, fa0
-; RV64IZFHMIN-NEXT:    call sinf@plt
+; RV64IZFHMIN-NEXT:    call sinf
 ; RV64IZFHMIN-NEXT:    fcvt.h.s fa0, fa0
 ; RV64IZFHMIN-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFHMIN-NEXT:    addi sp, sp, 16
@@ -344,7 +344,7 @@ define half @sin_f16(half %a) nounwind {
 ; RV32IZHINXMIN-NEXT:    addi sp, sp, -16
 ; RV32IZHINXMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZHINXMIN-NEXT:    fcvt.s.h a0, a0
-; RV32IZHINXMIN-NEXT:    call sinf@plt
+; RV32IZHINXMIN-NEXT:    call sinf
 ; RV32IZHINXMIN-NEXT:    fcvt.h.s a0, a0
 ; RV32IZHINXMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZHINXMIN-NEXT:    addi sp, sp, 16
@@ -355,7 +355,7 @@ define half @sin_f16(half %a) nounwind {
 ; RV64IZHINXMIN-NEXT:    addi sp, sp, -16
 ; RV64IZHINXMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZHINXMIN-NEXT:    fcvt.s.h a0, a0
-; RV64IZHINXMIN-NEXT:    call sinf@plt
+; RV64IZHINXMIN-NEXT:    call sinf
 ; RV64IZHINXMIN-NEXT:    fcvt.h.s a0, a0
 ; RV64IZHINXMIN-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZHINXMIN-NEXT:    addi sp, sp, 16
@@ -372,7 +372,7 @@ define half @cos_f16(half %a) nounwind {
 ; RV32IZFH-NEXT:    addi sp, sp, -16
 ; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFH-NEXT:    fcvt.s.h fa0, fa0
-; RV32IZFH-NEXT:    call cosf@plt
+; RV32IZFH-NEXT:    call cosf
 ; RV32IZFH-NEXT:    fcvt.h.s fa0, fa0
 ; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFH-NEXT:    addi sp, sp, 16
@@ -383,7 +383,7 @@ define half @cos_f16(half %a) nounwind {
 ; RV64IZFH-NEXT:    addi sp, sp, -16
 ; RV64IZFH-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZFH-NEXT:    fcvt.s.h fa0, fa0
-; RV64IZFH-NEXT:    call cosf@plt
+; RV64IZFH-NEXT:    call cosf
 ; RV64IZFH-NEXT:    fcvt.h.s fa0, fa0
 ; RV64IZFH-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFH-NEXT:    addi sp, sp, 16
@@ -394,7 +394,7 @@ define half @cos_f16(half %a) nounwind {
 ; RV32IZHINX-NEXT:    addi sp, sp, -16
 ; RV32IZHINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZHINX-NEXT:    fcvt.s.h a0, a0
-; RV32IZHINX-NEXT:    call cosf@plt
+; RV32IZHINX-NEXT:    call cosf
 ; RV32IZHINX-NEXT:    fcvt.h.s a0, a0
 ; RV32IZHINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZHINX-NEXT:    addi sp, sp, 16
@@ -405,7 +405,7 @@ define half @cos_f16(half %a) nounwind {
 ; RV64IZHINX-NEXT:    addi sp, sp, -16
 ; RV64IZHINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZHINX-NEXT:    fcvt.s.h a0, a0
-; RV64IZHINX-NEXT:    call cosf@plt
+; RV64IZHINX-NEXT:    call cosf
 ; RV64IZHINX-NEXT:    fcvt.h.s a0, a0
 ; RV64IZHINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZHINX-NEXT:    addi sp, sp, 16
@@ -417,9 +417,9 @@ define half @cos_f16(half %a) nounwind {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    slli a0, a0, 16
 ; RV32I-NEXT:    srli a0, a0, 16
-; RV32I-NEXT:    call __extendhfsf2@plt
-; RV32I-NEXT:    call cosf@plt
-; RV32I-NEXT:    call __truncsfhf2@plt
+; RV32I-NEXT:    call __extendhfsf2
+; RV32I-NEXT:    call cosf
+; RV32I-NEXT:    call __truncsfhf2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -430,9 +430,9 @@ define half @cos_f16(half %a) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    slli a0, a0, 48
 ; RV64I-NEXT:    srli a0, a0, 48
-; RV64I-NEXT:    call __extendhfsf2@plt
-; RV64I-NEXT:    call cosf@plt
-; RV64I-NEXT:    call __truncsfhf2@plt
+; RV64I-NEXT:    call __extendhfsf2
+; RV64I-NEXT:    call cosf
+; RV64I-NEXT:    call __truncsfhf2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -442,7 +442,7 @@ define half @cos_f16(half %a) nounwind {
 ; RV32IZFHMIN-NEXT:    addi sp, sp, -16
 ; RV32IZFHMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFHMIN-NEXT:    fcvt.s.h fa0, fa0
-; RV32IZFHMIN-NEXT:    call cosf@plt
+; RV32IZFHMIN-NEXT:    call cosf
 ; RV32IZFHMIN-NEXT:    fcvt.h.s fa0, fa0
 ; RV32IZFHMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFHMIN-NEXT:    addi sp, sp, 16
@@ -453,7 +453,7 @@ define half @cos_f16(half %a) nounwind {
 ; RV64IZFHMIN-NEXT:    addi sp, sp, -16
 ; RV64IZFHMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZFHMIN-NEXT:    fcvt.s.h fa0, fa0
-; RV64IZFHMIN-NEXT:    call cosf@plt
+; RV64IZFHMIN-NEXT:    call cosf
 ; RV64IZFHMIN-NEXT:    fcvt.h.s fa0, fa0
 ; RV64IZFHMIN-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFHMIN-NEXT:    addi sp, sp, 16
@@ -464,7 +464,7 @@ define half @cos_f16(half %a) nounwind {
 ; RV32IZHINXMIN-NEXT:    addi sp, sp, -16
 ; RV32IZHINXMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZHINXMIN-NEXT:    fcvt.s.h a0, a0
-; RV32IZHINXMIN-NEXT:    call cosf@plt
+; RV32IZHINXMIN-NEXT:    call cosf
 ; RV32IZHINXMIN-NEXT:    fcvt.h.s a0, a0
 ; RV32IZHINXMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZHINXMIN-NEXT:    addi sp, sp, 16
@@ -475,7 +475,7 @@ define half @cos_f16(half %a) nounwind {
 ; RV64IZHINXMIN-NEXT:    addi sp, sp, -16
 ; RV64IZHINXMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZHINXMIN-NEXT:    fcvt.s.h a0, a0
-; RV64IZHINXMIN-NEXT:    call cosf@plt
+; RV64IZHINXMIN-NEXT:    call cosf
 ; RV64IZHINXMIN-NEXT:    fcvt.h.s a0, a0
 ; RV64IZHINXMIN-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZHINXMIN-NEXT:    addi sp, sp, 16
@@ -494,10 +494,10 @@ define half @sincos_f16(half %a) nounwind {
 ; RV32IFZFH-NEXT:    fsw fs1, 4(sp) # 4-byte Folded Spill
 ; RV32IFZFH-NEXT:    fcvt.s.h fs0, fa0
 ; RV32IFZFH-NEXT:    fmv.s fa0, fs0
-; RV32IFZFH-NEXT:    call sinf@plt
+; RV32IFZFH-NEXT:    call sinf
 ; RV32IFZFH-NEXT:    fcvt.h.s fs1, fa0
 ; RV32IFZFH-NEXT:    fmv.s fa0, fs0
-; RV32IFZFH-NEXT:    call cosf@plt
+; RV32IFZFH-NEXT:    call cosf
 ; RV32IFZFH-NEXT:    fcvt.h.s fa5, fa0
 ; RV32IFZFH-NEXT:    fadd.h fa0, fs1, fa5
 ; RV32IFZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -514,10 +514,10 @@ define half @sincos_f16(half %a) nounwind {
 ; RV64IFZFH-NEXT:    fsw fs1, 0(sp) # 4-byte Folded Spill
 ; RV64IFZFH-NEXT:    fcvt.s.h fs0, fa0
 ; RV64IFZFH-NEXT:    fmv.s fa0, fs0
-; RV64IFZFH-NEXT:    call sinf@plt
+; RV64IFZFH-NEXT:    call sinf
 ; RV64IFZFH-NEXT:    fcvt.h.s fs1, fa0
 ; RV64IFZFH-NEXT:    fmv.s fa0, fs0
-; RV64IFZFH-NEXT:    call cosf@plt
+; RV64IFZFH-NEXT:    call cosf
 ; RV64IFZFH-NEXT:    fcvt.h.s fa5, fa0
 ; RV64IFZFH-NEXT:    fadd.h fa0, fs1, fa5
 ; RV64IFZFH-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -534,10 +534,10 @@ define half @sincos_f16(half %a) nounwind {
 ; RV32IZHINX-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
 ; RV32IZHINX-NEXT:    fcvt.s.h s0, a0
 ; RV32IZHINX-NEXT:    mv a0, s0
-; RV32IZHINX-NEXT:    call sinf@plt
+; RV32IZHINX-NEXT:    call sinf
 ; RV32IZHINX-NEXT:    fcvt.h.s s1, a0
 ; RV32IZHINX-NEXT:    mv a0, s0
-; RV32IZHINX-NEXT:    call cosf@plt
+; RV32IZHINX-NEXT:    call cosf
 ; RV32IZHINX-NEXT:    fcvt.h.s a0, a0
 ; RV32IZHINX-NEXT:    fadd.h a0, s1, a0
 ; RV32IZHINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -554,10 +554,10 @@ define half @sincos_f16(half %a) nounwind {
 ; RV64IZHINX-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
 ; RV64IZHINX-NEXT:    fcvt.s.h s0, a0
 ; RV64IZHINX-NEXT:    mv a0, s0
-; RV64IZHINX-NEXT:    call sinf@plt
+; RV64IZHINX-NEXT:    call sinf
 ; RV64IZHINX-NEXT:    fcvt.h.s s1, a0
 ; RV64IZHINX-NEXT:    mv a0, s0
-; RV64IZHINX-NEXT:    call cosf@plt
+; RV64IZHINX-NEXT:    call cosf
 ; RV64IZHINX-NEXT:    fcvt.h.s a0, a0
 ; RV64IZHINX-NEXT:    fadd.h a0, s1, a0
 ; RV64IZHINX-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
@@ -574,10 +574,10 @@ define half @sincos_f16(half %a) nounwind {
 ; RV32IDZFH-NEXT:    fsd fs1, 8(sp) # 8-byte Folded Spill
 ; RV32IDZFH-NEXT:    fcvt.s.h fs0, fa0
 ; RV32IDZFH-NEXT:    fmv.s fa0, fs0
-; RV32IDZFH-NEXT:    call sinf@plt
+; RV32IDZFH-NEXT:    call sinf
 ; RV32IDZFH-NEXT:    fcvt.h.s fs1, fa0
 ; RV32IDZFH-NEXT:    fmv.s fa0, fs0
-; RV32IDZFH-NEXT:    call cosf@plt
+; RV32IDZFH-NEXT:    call cosf
 ; RV32IDZFH-NEXT:    fcvt.h.s fa5, fa0
 ; RV32IDZFH-NEXT:    fadd.h fa0, fs1, fa5
 ; RV32IDZFH-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
@@ -594,10 +594,10 @@ define half @sincos_f16(half %a) nounwind {
 ; RV64IDZFH-NEXT:    fsd fs1, 8(sp) # 8-byte Folded Spill
 ; RV64IDZFH-NEXT:    fcvt.s.h fs0, fa0
 ; RV64IDZFH-NEXT:    fmv.s fa0, fs0
-; RV64IDZFH-NEXT:    call sinf@plt
+; RV64IDZFH-NEXT:    call sinf
 ; RV64IDZFH-NEXT:    fcvt.h.s fs1, fa0
 ; RV64IDZFH-NEXT:    fmv.s fa0, fs0
-; RV64IDZFH-NEXT:    call cosf@plt
+; RV64IDZFH-NEXT:    call cosf
 ; RV64IDZFH-NEXT:    fcvt.h.s fa5, fa0
 ; RV64IDZFH-NEXT:    fadd.h fa0, fs1, fa5
 ; RV64IDZFH-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
@@ -616,24 +616,24 @@ define half @sincos_f16(half %a) nounwind {
 ; RV32I-NEXT:    lui a1, 16
 ; RV32I-NEXT:    addi s2, a1, -1
 ; RV32I-NEXT:    and a0, a0, s2
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    mv s0, a0
-; RV32I-NEXT:    call sinf@plt
-; RV32I-NEXT:    call __truncsfhf2@plt
+; RV32I-NEXT:    call sinf
+; RV32I-NEXT:    call __truncsfhf2
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call cosf@plt
-; RV32I-NEXT:    call __truncsfhf2@plt
+; RV32I-NEXT:    call cosf
+; RV32I-NEXT:    call __truncsfhf2
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    and a0, s1, s2
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    and a0, s0, s2
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    mv a1, a0
 ; RV32I-NEXT:    mv a0, s1
-; RV32I-NEXT:    call __addsf3@plt
-; RV32I-NEXT:    call __truncsfhf2@plt
+; RV32I-NEXT:    call __addsf3
+; RV32I-NEXT:    call __truncsfhf2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
@@ -651,24 +651,24 @@ define half @sincos_f16(half %a) nounwind {
 ; RV64I-NEXT:    lui a1, 16
 ; RV64I-NEXT:    addiw s2, a1, -1
 ; RV64I-NEXT:    and a0, a0, s2
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    mv s0, a0
-; RV64I-NEXT:    call sinf@plt
-; RV64I-NEXT:    call __truncsfhf2@plt
+; RV64I-NEXT:    call sinf
+; RV64I-NEXT:    call __truncsfhf2
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call cosf@plt
-; RV64I-NEXT:    call __truncsfhf2@plt
+; RV64I-NEXT:    call cosf
+; RV64I-NEXT:    call __truncsfhf2
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    and a0, s1, s2
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    and a0, s0, s2
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    mv a1, a0
 ; RV64I-NEXT:    mv a0, s1
-; RV64I-NEXT:    call __addsf3@plt
-; RV64I-NEXT:    call __truncsfhf2@plt
+; RV64I-NEXT:    call __addsf3
+; RV64I-NEXT:    call __truncsfhf2
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
@@ -684,10 +684,10 @@ define half @sincos_f16(half %a) nounwind {
 ; RV32IFZFHMIN-NEXT:    fsw fs1, 4(sp) # 4-byte Folded Spill
 ; RV32IFZFHMIN-NEXT:    fcvt.s.h fs0, fa0
 ; RV32IFZFHMIN-NEXT:    fmv.s fa0, fs0
-; RV32IFZFHMIN-NEXT:    call sinf@plt
+; RV32IFZFHMIN-NEXT:    call sinf
 ; RV32IFZFHMIN-NEXT:    fcvt.h.s fs1, fa0
 ; RV32IFZFHMIN-NEXT:    fmv.s fa0, fs0
-; RV32IFZFHMIN-NEXT:    call cosf@plt
+; RV32IFZFHMIN-NEXT:    call cosf
 ; RV32IFZFHMIN-NEXT:    fcvt.h.s fa5, fa0
 ; RV32IFZFHMIN-NEXT:    fcvt.s.h fa5, fa5
 ; RV32IFZFHMIN-NEXT:    fcvt.s.h fa4, fs1
@@ -707,10 +707,10 @@ define half @sincos_f16(half %a) nounwind {
 ; RV64IFZFHMIN-NEXT:    fsw fs1, 0(sp) # 4-byte Folded Spill
 ; RV64IFZFHMIN-NEXT:    fcvt.s.h fs0, fa0
 ; RV64IFZFHMIN-NEXT:    fmv.s fa0, fs0
-; RV64IFZFHMIN-NEXT:    call sinf@plt
+; RV64IFZFHMIN-NEXT:    call sinf
 ; RV64IFZFHMIN-NEXT:    fcvt.h.s fs1, fa0
 ; RV64IFZFHMIN-NEXT:    fmv.s fa0, fs0
-; RV64IFZFHMIN-NEXT:    call cosf@plt
+; RV64IFZFHMIN-NEXT:    call cosf
 ; RV64IFZFHMIN-NEXT:    fcvt.h.s fa5, fa0
 ; RV64IFZFHMIN-NEXT:    fcvt.s.h fa5, fa5
 ; RV64IFZFHMIN-NEXT:    fcvt.s.h fa4, fs1
@@ -730,10 +730,10 @@ define half @sincos_f16(half %a) nounwind {
 ; RV32IDZFHMIN-NEXT:    fsd fs1, 8(sp) # 8-byte Folded Spill
 ; RV32IDZFHMIN-NEXT:    fcvt.s.h fs0, fa0
 ; RV32IDZFHMIN-NEXT:    fmv.s fa0, fs0
-; RV32IDZFHMIN-NEXT:    call sinf@plt
+; RV32IDZFHMIN-NEXT:    call sinf
 ; RV32IDZFHMIN-NEXT:    fcvt.h.s fs1, fa0
 ; RV32IDZFHMIN-NEXT:    fmv.s fa0, fs0
-; RV32IDZFHMIN-NEXT:    call cosf@plt
+; RV32IDZFHMIN-NEXT:    call cosf
 ; RV32IDZFHMIN-NEXT:    fcvt.h.s fa5, fa0
 ; RV32IDZFHMIN-NEXT:    fcvt.s.h fa5, fa5
 ; RV32IDZFHMIN-NEXT:    fcvt.s.h fa4, fs1
@@ -753,10 +753,10 @@ define half @sincos_f16(half %a) nounwind {
 ; RV64IDZFHMIN-NEXT:    fsd fs1, 8(sp) # 8-byte Folded Spill
 ; RV64IDZFHMIN-NEXT:    fcvt.s.h fs0, fa0
 ; RV64IDZFHMIN-NEXT:    fmv.s fa0, fs0
-; RV64IDZFHMIN-NEXT:    call sinf@plt
+; RV64IDZFHMIN-NEXT:    call sinf
 ; RV64IDZFHMIN-NEXT:    fcvt.h.s fs1, fa0
 ; RV64IDZFHMIN-NEXT:    fmv.s fa0, fs0
-; RV64IDZFHMIN-NEXT:    call cosf@plt
+; RV64IDZFHMIN-NEXT:    call cosf
 ; RV64IDZFHMIN-NEXT:    fcvt.h.s fa5, fa0
 ; RV64IDZFHMIN-NEXT:    fcvt.s.h fa5, fa5
 ; RV64IDZFHMIN-NEXT:    fcvt.s.h fa4, fs1
@@ -776,10 +776,10 @@ define half @sincos_f16(half %a) nounwind {
 ; RV32IZHINXMIN-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
 ; RV32IZHINXMIN-NEXT:    fcvt.s.h s0, a0
 ; RV32IZHINXMIN-NEXT:    mv a0, s0
-; RV32IZHINXMIN-NEXT:    call sinf@plt
+; RV32IZHINXMIN-NEXT:    call sinf
 ; RV32IZHINXMIN-NEXT:    fcvt.h.s s1, a0
 ; RV32IZHINXMIN-NEXT:    mv a0, s0
-; RV32IZHINXMIN-NEXT:    call cosf@plt
+; RV32IZHINXMIN-NEXT:    call cosf
 ; RV32IZHINXMIN-NEXT:    fcvt.h.s a0, a0
 ; RV32IZHINXMIN-NEXT:    fcvt.s.h a0, a0
 ; RV32IZHINXMIN-NEXT:    fcvt.s.h a1, s1
@@ -799,10 +799,10 @@ define half @sincos_f16(half %a) nounwind {
 ; RV64IZHINXMIN-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
 ; RV64IZHINXMIN-NEXT:    fcvt.s.h s0, a0
 ; RV64IZHINXMIN-NEXT:    mv a0, s0
-; RV64IZHINXMIN-NEXT:    call sinf@plt
+; RV64IZHINXMIN-NEXT:    call sinf
 ; RV64IZHINXMIN-NEXT:    fcvt.h.s s1, a0
 ; RV64IZHINXMIN-NEXT:    mv a0, s0
-; RV64IZHINXMIN-NEXT:    call cosf@plt
+; RV64IZHINXMIN-NEXT:    call cosf
 ; RV64IZHINXMIN-NEXT:    fcvt.h.s a0, a0
 ; RV64IZHINXMIN-NEXT:    fcvt.s.h a0, a0
 ; RV64IZHINXMIN-NEXT:    fcvt.s.h a1, s1
@@ -828,7 +828,7 @@ define half @pow_f16(half %a, half %b) nounwind {
 ; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFH-NEXT:    fcvt.s.h fa0, fa0
 ; RV32IZFH-NEXT:    fcvt.s.h fa1, fa1
-; RV32IZFH-NEXT:    call powf@plt
+; RV32IZFH-NEXT:    call powf
 ; RV32IZFH-NEXT:    fcvt.h.s fa0, fa0
 ; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFH-NEXT:    addi sp, sp, 16
@@ -840,7 +840,7 @@ define half @pow_f16(half %a, half %b) nounwind {
 ; RV64IZFH-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZFH-NEXT:    fcvt.s.h fa0, fa0
 ; RV64IZFH-NEXT:    fcvt.s.h fa1, fa1
-; RV64IZFH-NEXT:    call powf@plt
+; RV64IZFH-NEXT:    call powf
 ; RV64IZFH-NEXT:    fcvt.h.s fa0, fa0
 ; RV64IZFH-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFH-NEXT:    addi sp, sp, 16
@@ -852,7 +852,7 @@ define half @pow_f16(half %a, half %b) nounwind {
 ; RV32IZHINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZHINX-NEXT:    fcvt.s.h a0, a0
 ; RV32IZHINX-NEXT:    fcvt.s.h a1, a1
-; RV32IZHINX-NEXT:    call powf@plt
+; RV32IZHINX-NEXT:    call powf
 ; RV32IZHINX-NEXT:    fcvt.h.s a0, a0
 ; RV32IZHINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZHINX-NEXT:    addi sp, sp, 16
@@ -864,7 +864,7 @@ define half @pow_f16(half %a, half %b) nounwind {
 ; RV64IZHINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZHINX-NEXT:    fcvt.s.h a0, a0
 ; RV64IZHINX-NEXT:    fcvt.s.h a1, a1
-; RV64IZHINX-NEXT:    call powf@plt
+; RV64IZHINX-NEXT:    call powf
 ; RV64IZHINX-NEXT:    fcvt.h.s a0, a0
 ; RV64IZHINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZHINX-NEXT:    addi sp, sp, 16
@@ -881,14 +881,14 @@ define half @pow_f16(half %a, half %b) nounwind {
 ; RV32I-NEXT:    lui a1, 16
 ; RV32I-NEXT:    addi s2, a1, -1
 ; RV32I-NEXT:    and a0, a0, s2
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    and a0, s0, s2
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    mv a1, a0
 ; RV32I-NEXT:    mv a0, s1
-; RV32I-NEXT:    call powf@plt
-; RV32I-NEXT:    call __truncsfhf2@plt
+; RV32I-NEXT:    call powf
+; RV32I-NEXT:    call __truncsfhf2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
@@ -907,14 +907,14 @@ define half @pow_f16(half %a, half %b) nounwind {
 ; RV64I-NEXT:    lui a1, 16
 ; RV64I-NEXT:    addiw s2, a1, -1
 ; RV64I-NEXT:    and a0, a0, s2
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    and a0, s0, s2
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    mv a1, a0
 ; RV64I-NEXT:    mv a0, s1
-; RV64I-NEXT:    call powf@plt
-; RV64I-NEXT:    call __truncsfhf2@plt
+; RV64I-NEXT:    call powf
+; RV64I-NEXT:    call __truncsfhf2
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
@@ -928,7 +928,7 @@ define half @pow_f16(half %a, half %b) nounwind {
 ; RV32IZFHMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFHMIN-NEXT:    fcvt.s.h fa0, fa0
 ; RV32IZFHMIN-NEXT:    fcvt.s.h fa1, fa1
-; RV32IZFHMIN-NEXT:    call powf@plt
+; RV32IZFHMIN-NEXT:    call powf
 ; RV32IZFHMIN-NEXT:    fcvt.h.s fa0, fa0
 ; RV32IZFHMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFHMIN-NEXT:    addi sp, sp, 16
@@ -940,7 +940,7 @@ define half @pow_f16(half %a, half %b) nounwind {
 ; RV64IZFHMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZFHMIN-NEXT:    fcvt.s.h fa0, fa0
 ; RV64IZFHMIN-NEXT:    fcvt.s.h fa1, fa1
-; RV64IZFHMIN-NEXT:    call powf@plt
+; RV64IZFHMIN-NEXT:    call powf
 ; RV64IZFHMIN-NEXT:    fcvt.h.s fa0, fa0
 ; RV64IZFHMIN-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFHMIN-NEXT:    addi sp, sp, 16
@@ -952,7 +952,7 @@ define half @pow_f16(half %a, half %b) nounwind {
 ; RV32IZHINXMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZHINXMIN-NEXT:    fcvt.s.h a0, a0
 ; RV32IZHINXMIN-NEXT:    fcvt.s.h a1, a1
-; RV32IZHINXMIN-NEXT:    call powf@plt
+; RV32IZHINXMIN-NEXT:    call powf
 ; RV32IZHINXMIN-NEXT:    fcvt.h.s a0, a0
 ; RV32IZHINXMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZHINXMIN-NEXT:    addi sp, sp, 16
@@ -964,7 +964,7 @@ define half @pow_f16(half %a, half %b) nounwind {
 ; RV64IZHINXMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZHINXMIN-NEXT:    fcvt.s.h a0, a0
 ; RV64IZHINXMIN-NEXT:    fcvt.s.h a1, a1
-; RV64IZHINXMIN-NEXT:    call powf@plt
+; RV64IZHINXMIN-NEXT:    call powf
 ; RV64IZHINXMIN-NEXT:    fcvt.h.s a0, a0
 ; RV64IZHINXMIN-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZHINXMIN-NEXT:    addi sp, sp, 16
@@ -981,7 +981,7 @@ define half @exp_f16(half %a) nounwind {
 ; RV32IZFH-NEXT:    addi sp, sp, -16
 ; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFH-NEXT:    fcvt.s.h fa0, fa0
-; RV32IZFH-NEXT:    call expf@plt
+; RV32IZFH-NEXT:    call expf
 ; RV32IZFH-NEXT:    fcvt.h.s fa0, fa0
 ; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFH-NEXT:    addi sp, sp, 16
@@ -992,7 +992,7 @@ define half @exp_f16(half %a) nounwind {
 ; RV64IZFH-NEXT:    addi sp, sp, -16
 ; RV64IZFH-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZFH-NEXT:    fcvt.s.h fa0, fa0
-; RV64IZFH-NEXT:    call expf@plt
+; RV64IZFH-NEXT:    call expf
 ; RV64IZFH-NEXT:    fcvt.h.s fa0, fa0
 ; RV64IZFH-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFH-NEXT:    addi sp, sp, 16
@@ -1003,7 +1003,7 @@ define half @exp_f16(half %a) nounwind {
 ; RV32IZHINX-NEXT:    addi sp, sp, -16
 ; RV32IZHINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZHINX-NEXT:    fcvt.s.h a0, a0
-; RV32IZHINX-NEXT:    call expf@plt
+; RV32IZHINX-NEXT:    call expf
 ; RV32IZHINX-NEXT:    fcvt.h.s a0, a0
 ; RV32IZHINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZHINX-NEXT:    addi sp, sp, 16
@@ -1014,7 +1014,7 @@ define half @exp_f16(half %a) nounwind {
 ; RV64IZHINX-NEXT:    addi sp, sp, -16
 ; RV64IZHINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZHINX-NEXT:    fcvt.s.h a0, a0
-; RV64IZHINX-NEXT:    call expf@plt
+; RV64IZHINX-NEXT:    call expf
 ; RV64IZHINX-NEXT:    fcvt.h.s a0, a0
 ; RV64IZHINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZHINX-NEXT:    addi sp, sp, 16
@@ -1026,9 +1026,9 @@ define half @exp_f16(half %a) nounwind {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    slli a0, a0, 16
 ; RV32I-NEXT:    srli a0, a0, 16
-; RV32I-NEXT:    call __extendhfsf2@plt
-; RV32I-NEXT:    call expf@plt
-; RV32I-NEXT:    call __truncsfhf2@plt
+; RV32I-NEXT:    call __extendhfsf2
+; RV32I-NEXT:    call expf
+; RV32I-NEXT:    call __truncsfhf2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1039,9 +1039,9 @@ define half @exp_f16(half %a) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    slli a0, a0, 48
 ; RV64I-NEXT:    srli a0, a0, 48
-; RV64I-NEXT:    call __extendhfsf2@plt
-; RV64I-NEXT:    call expf@plt
-; RV64I-NEXT:    call __truncsfhf2@plt
+; RV64I-NEXT:    call __extendhfsf2
+; RV64I-NEXT:    call expf
+; RV64I-NEXT:    call __truncsfhf2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1051,7 +1051,7 @@ define half @exp_f16(half %a) nounwind {
 ; RV32IZFHMIN-NEXT:    addi sp, sp, -16
 ; RV32IZFHMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFHMIN-NEXT:    fcvt.s.h fa0, fa0
-; RV32IZFHMIN-NEXT:    call expf@plt
+; RV32IZFHMIN-NEXT:    call expf
 ; RV32IZFHMIN-NEXT:    fcvt.h.s fa0, fa0
 ; RV32IZFHMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFHMIN-NEXT:    addi sp, sp, 16
@@ -1062,7 +1062,7 @@ define half @exp_f16(half %a) nounwind {
 ; RV64IZFHMIN-NEXT:    addi sp, sp, -16
 ; RV64IZFHMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZFHMIN-NEXT:    fcvt.s.h fa0, fa0
-; RV64IZFHMIN-NEXT:    call expf@plt
+; RV64IZFHMIN-NEXT:    call expf
 ; RV64IZFHMIN-NEXT:    fcvt.h.s fa0, fa0
 ; RV64IZFHMIN-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFHMIN-NEXT:    addi sp, sp, 16
@@ -1073,7 +1073,7 @@ define half @exp_f16(half %a) nounwind {
 ; RV32IZHINXMIN-NEXT:    addi sp, sp, -16
 ; RV32IZHINXMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZHINXMIN-NEXT:    fcvt.s.h a0, a0
-; RV32IZHINXMIN-NEXT:    call expf@plt
+; RV32IZHINXMIN-NEXT:    call expf
 ; RV32IZHINXMIN-NEXT:    fcvt.h.s a0, a0
 ; RV32IZHINXMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZHINXMIN-NEXT:    addi sp, sp, 16
@@ -1084,7 +1084,7 @@ define half @exp_f16(half %a) nounwind {
 ; RV64IZHINXMIN-NEXT:    addi sp, sp, -16
 ; RV64IZHINXMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZHINXMIN-NEXT:    fcvt.s.h a0, a0
-; RV64IZHINXMIN-NEXT:    call expf@plt
+; RV64IZHINXMIN-NEXT:    call expf
 ; RV64IZHINXMIN-NEXT:    fcvt.h.s a0, a0
 ; RV64IZHINXMIN-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZHINXMIN-NEXT:    addi sp, sp, 16
@@ -1101,7 +1101,7 @@ define half @exp2_f16(half %a) nounwind {
 ; RV32IZFH-NEXT:    addi sp, sp, -16
 ; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFH-NEXT:    fcvt.s.h fa0, fa0
-; RV32IZFH-NEXT:    call exp2f@plt
+; RV32IZFH-NEXT:    call exp2f
 ; RV32IZFH-NEXT:    fcvt.h.s fa0, fa0
 ; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFH-NEXT:    addi sp, sp, 16
@@ -1112,7 +1112,7 @@ define half @exp2_f16(half %a) nounwind {
 ; RV64IZFH-NEXT:    addi sp, sp, -16
 ; RV64IZFH-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZFH-NEXT:    fcvt.s.h fa0, fa0
-; RV64IZFH-NEXT:    call exp2f@plt
+; RV64IZFH-NEXT:    call exp2f
 ; RV64IZFH-NEXT:    fcvt.h.s fa0, fa0
 ; RV64IZFH-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFH-NEXT:    addi sp, sp, 16
@@ -1123,7 +1123,7 @@ define half @exp2_f16(half %a) nounwind {
 ; RV32IZHINX-NEXT:    addi sp, sp, -16
 ; RV32IZHINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZHINX-NEXT:    fcvt.s.h a0, a0
-; RV32IZHINX-NEXT:    call exp2f@plt
+; RV32IZHINX-NEXT:    call exp2f
 ; RV32IZHINX-NEXT:    fcvt.h.s a0, a0
 ; RV32IZHINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZHINX-NEXT:    addi sp, sp, 16
@@ -1134,7 +1134,7 @@ define half @exp2_f16(half %a) nounwind {
 ; RV64IZHINX-NEXT:    addi sp, sp, -16
 ; RV64IZHINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZHINX-NEXT:    fcvt.s.h a0, a0
-; RV64IZHINX-NEXT:    call exp2f@plt
+; RV64IZHINX-NEXT:    call exp2f
 ; RV64IZHINX-NEXT:    fcvt.h.s a0, a0
 ; RV64IZHINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZHINX-NEXT:    addi sp, sp, 16
@@ -1146,9 +1146,9 @@ define half @exp2_f16(half %a) nounwind {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    slli a0, a0, 16
 ; RV32I-NEXT:    srli a0, a0, 16
-; RV32I-NEXT:    call __extendhfsf2@plt
-; RV32I-NEXT:    call exp2f@plt
-; RV32I-NEXT:    call __truncsfhf2@plt
+; RV32I-NEXT:    call __extendhfsf2
+; RV32I-NEXT:    call exp2f
+; RV32I-NEXT:    call __truncsfhf2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1159,9 +1159,9 @@ define half @exp2_f16(half %a) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    slli a0, a0, 48
 ; RV64I-NEXT:    srli a0, a0, 48
-; RV64I-NEXT:    call __extendhfsf2@plt
-; RV64I-NEXT:    call exp2f@plt
-; RV64I-NEXT:    call __truncsfhf2@plt
+; RV64I-NEXT:    call __extendhfsf2
+; RV64I-NEXT:    call exp2f
+; RV64I-NEXT:    call __truncsfhf2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1171,7 +1171,7 @@ define half @exp2_f16(half %a) nounwind {
 ; RV32IZFHMIN-NEXT:    addi sp, sp, -16
 ; RV32IZFHMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFHMIN-NEXT:    fcvt.s.h fa0, fa0
-; RV32IZFHMIN-NEXT:    call exp2f@plt
+; RV32IZFHMIN-NEXT:    call exp2f
 ; RV32IZFHMIN-NEXT:    fcvt.h.s fa0, fa0
 ; RV32IZFHMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFHMIN-NEXT:    addi sp, sp, 16
@@ -1182,7 +1182,7 @@ define half @exp2_f16(half %a) nounwind {
 ; RV64IZFHMIN-NEXT:    addi sp, sp, -16
 ; RV64IZFHMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZFHMIN-NEXT:    fcvt.s.h fa0, fa0
-; RV64IZFHMIN-NEXT:    call exp2f@plt
+; RV64IZFHMIN-NEXT:    call exp2f
 ; RV64IZFHMIN-NEXT:    fcvt.h.s fa0, fa0
 ; RV64IZFHMIN-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFHMIN-NEXT:    addi sp, sp, 16
@@ -1193,7 +1193,7 @@ define half @exp2_f16(half %a) nounwind {
 ; RV32IZHINXMIN-NEXT:    addi sp, sp, -16
 ; RV32IZHINXMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZHINXMIN-NEXT:    fcvt.s.h a0, a0
-; RV32IZHINXMIN-NEXT:    call exp2f@plt
+; RV32IZHINXMIN-NEXT:    call exp2f
 ; RV32IZHINXMIN-NEXT:    fcvt.h.s a0, a0
 ; RV32IZHINXMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZHINXMIN-NEXT:    addi sp, sp, 16
@@ -1204,7 +1204,7 @@ define half @exp2_f16(half %a) nounwind {
 ; RV64IZHINXMIN-NEXT:    addi sp, sp, -16
 ; RV64IZHINXMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZHINXMIN-NEXT:    fcvt.s.h a0, a0
-; RV64IZHINXMIN-NEXT:    call exp2f@plt
+; RV64IZHINXMIN-NEXT:    call exp2f
 ; RV64IZHINXMIN-NEXT:    fcvt.h.s a0, a0
 ; RV64IZHINXMIN-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZHINXMIN-NEXT:    addi sp, sp, 16
@@ -1221,7 +1221,7 @@ define half @log_f16(half %a) nounwind {
 ; RV32IZFH-NEXT:    addi sp, sp, -16
 ; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFH-NEXT:    fcvt.s.h fa0, fa0
-; RV32IZFH-NEXT:    call logf@plt
+; RV32IZFH-NEXT:    call logf
 ; RV32IZFH-NEXT:    fcvt.h.s fa0, fa0
 ; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFH-NEXT:    addi sp, sp, 16
@@ -1232,7 +1232,7 @@ define half @log_f16(half %a) nounwind {
 ; RV64IZFH-NEXT:    addi sp, sp, -16
 ; RV64IZFH-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZFH-NEXT:    fcvt.s.h fa0, fa0
-; RV64IZFH-NEXT:    call logf@plt
+; RV64IZFH-NEXT:    call logf
 ; RV64IZFH-NEXT:    fcvt.h.s fa0, fa0
 ; RV64IZFH-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFH-NEXT:    addi sp, sp, 16
@@ -1243,7 +1243,7 @@ define half @log_f16(half %a) nounwind {
 ; RV32IZHINX-NEXT:    addi sp, sp, -16
 ; RV32IZHINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZHINX-NEXT:    fcvt.s.h a0, a0
-; RV32IZHINX-NEXT:    call logf@plt
+; RV32IZHINX-NEXT:    call logf
 ; RV32IZHINX-NEXT:    fcvt.h.s a0, a0
 ; RV32IZHINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZHINX-NEXT:    addi sp, sp, 16
@@ -1254,7 +1254,7 @@ define half @log_f16(half %a) nounwind {
 ; RV64IZHINX-NEXT:    addi sp, sp, -16
 ; RV64IZHINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZHINX-NEXT:    fcvt.s.h a0, a0
-; RV64IZHINX-NEXT:    call logf@plt
+; RV64IZHINX-NEXT:    call logf
 ; RV64IZHINX-NEXT:    fcvt.h.s a0, a0
 ; RV64IZHINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZHINX-NEXT:    addi sp, sp, 16
@@ -1266,9 +1266,9 @@ define half @log_f16(half %a) nounwind {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    slli a0, a0, 16
 ; RV32I-NEXT:    srli a0, a0, 16
-; RV32I-NEXT:    call __extendhfsf2@plt
-; RV32I-NEXT:    call logf@plt
-; RV32I-NEXT:    call __truncsfhf2@plt
+; RV32I-NEXT:    call __extendhfsf2
+; RV32I-NEXT:    call logf
+; RV32I-NEXT:    call __truncsfhf2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1279,9 +1279,9 @@ define half @log_f16(half %a) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    slli a0, a0, 48
 ; RV64I-NEXT:    srli a0, a0, 48
-; RV64I-NEXT:    call __extendhfsf2@plt
-; RV64I-NEXT:    call logf@plt
-; RV64I-NEXT:    call __truncsfhf2@plt
+; RV64I-NEXT:    call __extendhfsf2
+; RV64I-NEXT:    call logf
+; RV64I-NEXT:    call __truncsfhf2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1291,7 +1291,7 @@ define half @log_f16(half %a) nounwind {
 ; RV32IZFHMIN-NEXT:    addi sp, sp, -16
 ; RV32IZFHMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFHMIN-NEXT:    fcvt.s.h fa0, fa0
-; RV32IZFHMIN-NEXT:    call logf@plt
+; RV32IZFHMIN-NEXT:    call logf
 ; RV32IZFHMIN-NEXT:    fcvt.h.s fa0, fa0
 ; RV32IZFHMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFHMIN-NEXT:    addi sp, sp, 16
@@ -1302,7 +1302,7 @@ define half @log_f16(half %a) nounwind {
 ; RV64IZFHMIN-NEXT:    addi sp, sp, -16
 ; RV64IZFHMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZFHMIN-NEXT:    fcvt.s.h fa0, fa0
-; RV64IZFHMIN-NEXT:    call logf@plt
+; RV64IZFHMIN-NEXT:    call logf
 ; RV64IZFHMIN-NEXT:    fcvt.h.s fa0, fa0
 ; RV64IZFHMIN-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFHMIN-NEXT:    addi sp, sp, 16
@@ -1313,7 +1313,7 @@ define half @log_f16(half %a) nounwind {
 ; RV32IZHINXMIN-NEXT:    addi sp, sp, -16
 ; RV32IZHINXMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZHINXMIN-NEXT:    fcvt.s.h a0, a0
-; RV32IZHINXMIN-NEXT:    call logf@plt
+; RV32IZHINXMIN-NEXT:    call logf
 ; RV32IZHINXMIN-NEXT:    fcvt.h.s a0, a0
 ; RV32IZHINXMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZHINXMIN-NEXT:    addi sp, sp, 16
@@ -1324,7 +1324,7 @@ define half @log_f16(half %a) nounwind {
 ; RV64IZHINXMIN-NEXT:    addi sp, sp, -16
 ; RV64IZHINXMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZHINXMIN-NEXT:    fcvt.s.h a0, a0
-; RV64IZHINXMIN-NEXT:    call logf@plt
+; RV64IZHINXMIN-NEXT:    call logf
 ; RV64IZHINXMIN-NEXT:    fcvt.h.s a0, a0
 ; RV64IZHINXMIN-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZHINXMIN-NEXT:    addi sp, sp, 16
@@ -1341,7 +1341,7 @@ define half @log10_f16(half %a) nounwind {
 ; RV32IZFH-NEXT:    addi sp, sp, -16
 ; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFH-NEXT:    fcvt.s.h fa0, fa0
-; RV32IZFH-NEXT:    call log10f@plt
+; RV32IZFH-NEXT:    call log10f
 ; RV32IZFH-NEXT:    fcvt.h.s fa0, fa0
 ; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFH-NEXT:    addi sp, sp, 16
@@ -1352,7 +1352,7 @@ define half @log10_f16(half %a) nounwind {
 ; RV64IZFH-NEXT:    addi sp, sp, -16
 ; RV64IZFH-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZFH-NEXT:    fcvt.s.h fa0, fa0
-; RV64IZFH-NEXT:    call log10f@plt
+; RV64IZFH-NEXT:    call log10f
 ; RV64IZFH-NEXT:    fcvt.h.s fa0, fa0
 ; RV64IZFH-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFH-NEXT:    addi sp, sp, 16
@@ -1363,7 +1363,7 @@ define half @log10_f16(half %a) nounwind {
 ; RV32IZHINX-NEXT:    addi sp, sp, -16
 ; RV32IZHINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZHINX-NEXT:    fcvt.s.h a0, a0
-; RV32IZHINX-NEXT:    call log10f@plt
+; RV32IZHINX-NEXT:    call log10f
 ; RV32IZHINX-NEXT:    fcvt.h.s a0, a0
 ; RV32IZHINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZHINX-NEXT:    addi sp, sp, 16
@@ -1374,7 +1374,7 @@ define half @log10_f16(half %a) nounwind {
 ; RV64IZHINX-NEXT:    addi sp, sp, -16
 ; RV64IZHINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZHINX-NEXT:    fcvt.s.h a0, a0
-; RV64IZHINX-NEXT:    call log10f@plt
+; RV64IZHINX-NEXT:    call log10f
 ; RV64IZHINX-NEXT:    fcvt.h.s a0, a0
 ; RV64IZHINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZHINX-NEXT:    addi sp, sp, 16
@@ -1386,9 +1386,9 @@ define half @log10_f16(half %a) nounwind {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    slli a0, a0, 16
 ; RV32I-NEXT:    srli a0, a0, 16
-; RV32I-NEXT:    call __extendhfsf2@plt
-; RV32I-NEXT:    call log10f@plt
-; RV32I-NEXT:    call __truncsfhf2@plt
+; RV32I-NEXT:    call __extendhfsf2
+; RV32I-NEXT:    call log10f
+; RV32I-NEXT:    call __truncsfhf2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1399,9 +1399,9 @@ define half @log10_f16(half %a) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    slli a0, a0, 48
 ; RV64I-NEXT:    srli a0, a0, 48
-; RV64I-NEXT:    call __extendhfsf2@plt
-; RV64I-NEXT:    call log10f@plt
-; RV64I-NEXT:    call __truncsfhf2@plt
+; RV64I-NEXT:    call __extendhfsf2
+; RV64I-NEXT:    call log10f
+; RV64I-NEXT:    call __truncsfhf2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1411,7 +1411,7 @@ define half @log10_f16(half %a) nounwind {
 ; RV32IZFHMIN-NEXT:    addi sp, sp, -16
 ; RV32IZFHMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFHMIN-NEXT:    fcvt.s.h fa0, fa0
-; RV32IZFHMIN-NEXT:    call log10f@plt
+; RV32IZFHMIN-NEXT:    call log10f
 ; RV32IZFHMIN-NEXT:    fcvt.h.s fa0, fa0
 ; RV32IZFHMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFHMIN-NEXT:    addi sp, sp, 16
@@ -1422,7 +1422,7 @@ define half @log10_f16(half %a) nounwind {
 ; RV64IZFHMIN-NEXT:    addi sp, sp, -16
 ; RV64IZFHMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZFHMIN-NEXT:    fcvt.s.h fa0, fa0
-; RV64IZFHMIN-NEXT:    call log10f@plt
+; RV64IZFHMIN-NEXT:    call log10f
 ; RV64IZFHMIN-NEXT:    fcvt.h.s fa0, fa0
 ; RV64IZFHMIN-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFHMIN-NEXT:    addi sp, sp, 16
@@ -1433,7 +1433,7 @@ define half @log10_f16(half %a) nounwind {
 ; RV32IZHINXMIN-NEXT:    addi sp, sp, -16
 ; RV32IZHINXMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZHINXMIN-NEXT:    fcvt.s.h a0, a0
-; RV32IZHINXMIN-NEXT:    call log10f@plt
+; RV32IZHINXMIN-NEXT:    call log10f
 ; RV32IZHINXMIN-NEXT:    fcvt.h.s a0, a0
 ; RV32IZHINXMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZHINXMIN-NEXT:    addi sp, sp, 16
@@ -1444,7 +1444,7 @@ define half @log10_f16(half %a) nounwind {
 ; RV64IZHINXMIN-NEXT:    addi sp, sp, -16
 ; RV64IZHINXMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZHINXMIN-NEXT:    fcvt.s.h a0, a0
-; RV64IZHINXMIN-NEXT:    call log10f@plt
+; RV64IZHINXMIN-NEXT:    call log10f
 ; RV64IZHINXMIN-NEXT:    fcvt.h.s a0, a0
 ; RV64IZHINXMIN-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZHINXMIN-NEXT:    addi sp, sp, 16
@@ -1461,7 +1461,7 @@ define half @log2_f16(half %a) nounwind {
 ; RV32IZFH-NEXT:    addi sp, sp, -16
 ; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFH-NEXT:    fcvt.s.h fa0, fa0
-; RV32IZFH-NEXT:    call log2f@plt
+; RV32IZFH-NEXT:    call log2f
 ; RV32IZFH-NEXT:    fcvt.h.s fa0, fa0
 ; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFH-NEXT:    addi sp, sp, 16
@@ -1472,7 +1472,7 @@ define half @log2_f16(half %a) nounwind {
 ; RV64IZFH-NEXT:    addi sp, sp, -16
 ; RV64IZFH-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZFH-NEXT:    fcvt.s.h fa0, fa0
-; RV64IZFH-NEXT:    call log2f@plt
+; RV64IZFH-NEXT:    call log2f
 ; RV64IZFH-NEXT:    fcvt.h.s fa0, fa0
 ; RV64IZFH-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFH-NEXT:    addi sp, sp, 16
@@ -1483,7 +1483,7 @@ define half @log2_f16(half %a) nounwind {
 ; RV32IZHINX-NEXT:    addi sp, sp, -16
 ; RV32IZHINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZHINX-NEXT:    fcvt.s.h a0, a0
-; RV32IZHINX-NEXT:    call log2f@plt
+; RV32IZHINX-NEXT:    call log2f
 ; RV32IZHINX-NEXT:    fcvt.h.s a0, a0
 ; RV32IZHINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZHINX-NEXT:    addi sp, sp, 16
@@ -1494,7 +1494,7 @@ define half @log2_f16(half %a) nounwind {
 ; RV64IZHINX-NEXT:    addi sp, sp, -16
 ; RV64IZHINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZHINX-NEXT:    fcvt.s.h a0, a0
-; RV64IZHINX-NEXT:    call log2f@plt
+; RV64IZHINX-NEXT:    call log2f
 ; RV64IZHINX-NEXT:    fcvt.h.s a0, a0
 ; RV64IZHINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZHINX-NEXT:    addi sp, sp, 16
@@ -1506,9 +1506,9 @@ define half @log2_f16(half %a) nounwind {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    slli a0, a0, 16
 ; RV32I-NEXT:    srli a0, a0, 16
-; RV32I-NEXT:    call __extendhfsf2@plt
-; RV32I-NEXT:    call log2f@plt
-; RV32I-NEXT:    call __truncsfhf2@plt
+; RV32I-NEXT:    call __extendhfsf2
+; RV32I-NEXT:    call log2f
+; RV32I-NEXT:    call __truncsfhf2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1519,9 +1519,9 @@ define half @log2_f16(half %a) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    slli a0, a0, 48
 ; RV64I-NEXT:    srli a0, a0, 48
-; RV64I-NEXT:    call __extendhfsf2@plt
-; RV64I-NEXT:    call log2f@plt
-; RV64I-NEXT:    call __truncsfhf2@plt
+; RV64I-NEXT:    call __extendhfsf2
+; RV64I-NEXT:    call log2f
+; RV64I-NEXT:    call __truncsfhf2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1531,7 +1531,7 @@ define half @log2_f16(half %a) nounwind {
 ; RV32IZFHMIN-NEXT:    addi sp, sp, -16
 ; RV32IZFHMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFHMIN-NEXT:    fcvt.s.h fa0, fa0
-; RV32IZFHMIN-NEXT:    call log2f@plt
+; RV32IZFHMIN-NEXT:    call log2f
 ; RV32IZFHMIN-NEXT:    fcvt.h.s fa0, fa0
 ; RV32IZFHMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFHMIN-NEXT:    addi sp, sp, 16
@@ -1542,7 +1542,7 @@ define half @log2_f16(half %a) nounwind {
 ; RV64IZFHMIN-NEXT:    addi sp, sp, -16
 ; RV64IZFHMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZFHMIN-NEXT:    fcvt.s.h fa0, fa0
-; RV64IZFHMIN-NEXT:    call log2f@plt
+; RV64IZFHMIN-NEXT:    call log2f
 ; RV64IZFHMIN-NEXT:    fcvt.h.s fa0, fa0
 ; RV64IZFHMIN-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFHMIN-NEXT:    addi sp, sp, 16
@@ -1553,7 +1553,7 @@ define half @log2_f16(half %a) nounwind {
 ; RV32IZHINXMIN-NEXT:    addi sp, sp, -16
 ; RV32IZHINXMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZHINXMIN-NEXT:    fcvt.s.h a0, a0
-; RV32IZHINXMIN-NEXT:    call log2f@plt
+; RV32IZHINXMIN-NEXT:    call log2f
 ; RV32IZHINXMIN-NEXT:    fcvt.h.s a0, a0
 ; RV32IZHINXMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZHINXMIN-NEXT:    addi sp, sp, 16
@@ -1564,7 +1564,7 @@ define half @log2_f16(half %a) nounwind {
 ; RV64IZHINXMIN-NEXT:    addi sp, sp, -16
 ; RV64IZHINXMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZHINXMIN-NEXT:    fcvt.s.h a0, a0
-; RV64IZHINXMIN-NEXT:    call log2f@plt
+; RV64IZHINXMIN-NEXT:    call log2f
 ; RV64IZHINXMIN-NEXT:    fcvt.h.s a0, a0
 ; RV64IZHINXMIN-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZHINXMIN-NEXT:    addi sp, sp, 16
@@ -1599,18 +1599,18 @@ define half @fma_f16(half %a, half %b, half %c) nounwind {
 ; RV32I-NEXT:    lui a1, 16
 ; RV32I-NEXT:    addi s3, a1, -1
 ; RV32I-NEXT:    and a0, a0, s3
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    mv s2, a0
 ; RV32I-NEXT:    and a0, s1, s3
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    and a0, s0, s3
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    mv a2, a0
 ; RV32I-NEXT:    mv a0, s2
 ; RV32I-NEXT:    mv a1, s1
-; RV32I-NEXT:    call fmaf@plt
-; RV32I-NEXT:    call __truncsfhf2@plt
+; RV32I-NEXT:    call fmaf
+; RV32I-NEXT:    call __truncsfhf2
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
@@ -1632,18 +1632,18 @@ define half @fma_f16(half %a, half %b, half %c) nounwind {
 ; RV64I-NEXT:    lui a1, 16
 ; RV64I-NEXT:    addiw s3, a1, -1
 ; RV64I-NEXT:    and a0, a0, s3
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    mv s2, a0
 ; RV64I-NEXT:    and a0, s1, s3
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    and a0, s0, s3
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    mv a2, a0
 ; RV64I-NEXT:    mv a0, s2
 ; RV64I-NEXT:    mv a1, s1
-; RV64I-NEXT:    call fmaf@plt
-; RV64I-NEXT:    call __truncsfhf2@plt
+; RV64I-NEXT:    call fmaf
+; RV64I-NEXT:    call __truncsfhf2
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
@@ -1699,23 +1699,23 @@ define half @fmuladd_f16(half %a, half %b, half %c) nounwind {
 ; RV32I-NEXT:    lui a1, 16
 ; RV32I-NEXT:    addi s3, a1, -1
 ; RV32I-NEXT:    and a0, a0, s3
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    mv s2, a0
 ; RV32I-NEXT:    and a0, s1, s3
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    mv a1, a0
 ; RV32I-NEXT:    mv a0, s2
-; RV32I-NEXT:    call __mulsf3@plt
-; RV32I-NEXT:    call __truncsfhf2@plt
+; RV32I-NEXT:    call __mulsf3
+; RV32I-NEXT:    call __truncsfhf2
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    and a0, s0, s3
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    and a0, s1, s3
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    mv a1, s0
-; RV32I-NEXT:    call __addsf3@plt
-; RV32I-NEXT:    call __truncsfhf2@plt
+; RV32I-NEXT:    call __addsf3
+; RV32I-NEXT:    call __truncsfhf2
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
@@ -1737,23 +1737,23 @@ define half @fmuladd_f16(half %a, half %b, half %c) nounwind {
 ; RV64I-NEXT:    lui a1, 16
 ; RV64I-NEXT:    addiw s3, a1, -1
 ; RV64I-NEXT:    and a0, a0, s3
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    mv s2, a0
 ; RV64I-NEXT:    and a0, s1, s3
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    mv a1, a0
 ; RV64I-NEXT:    mv a0, s2
-; RV64I-NEXT:    call __mulsf3@plt
-; RV64I-NEXT:    call __truncsfhf2@plt
+; RV64I-NEXT:    call __mulsf3
+; RV64I-NEXT:    call __truncsfhf2
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    and a0, s0, s3
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    and a0, s1, s3
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    mv a1, s0
-; RV64I-NEXT:    call __addsf3@plt
-; RV64I-NEXT:    call __truncsfhf2@plt
+; RV64I-NEXT:    call __addsf3
+; RV64I-NEXT:    call __truncsfhf2
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
@@ -1867,14 +1867,14 @@ define half @minnum_f16(half %a, half %b) nounwind {
 ; RV32I-NEXT:    lui a1, 16
 ; RV32I-NEXT:    addi s2, a1, -1
 ; RV32I-NEXT:    and a0, a0, s2
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    and a0, s0, s2
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    mv a1, a0
 ; RV32I-NEXT:    mv a0, s1
-; RV32I-NEXT:    call fminf@plt
-; RV32I-NEXT:    call __truncsfhf2@plt
+; RV32I-NEXT:    call fminf
+; RV32I-NEXT:    call __truncsfhf2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
@@ -1893,14 +1893,14 @@ define half @minnum_f16(half %a, half %b) nounwind {
 ; RV64I-NEXT:    lui a1, 16
 ; RV64I-NEXT:    addiw s2, a1, -1
 ; RV64I-NEXT:    and a0, a0, s2
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    and a0, s0, s2
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    mv a1, a0
 ; RV64I-NEXT:    mv a0, s1
-; RV64I-NEXT:    call fminf@plt
-; RV64I-NEXT:    call __truncsfhf2@plt
+; RV64I-NEXT:    call fminf
+; RV64I-NEXT:    call __truncsfhf2
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
@@ -1951,14 +1951,14 @@ define half @maxnum_f16(half %a, half %b) nounwind {
 ; RV32I-NEXT:    lui a1, 16
 ; RV32I-NEXT:    addi s2, a1, -1
 ; RV32I-NEXT:    and a0, a0, s2
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    and a0, s0, s2
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    mv a1, a0
 ; RV32I-NEXT:    mv a0, s1
-; RV32I-NEXT:    call fmaxf@plt
-; RV32I-NEXT:    call __truncsfhf2@plt
+; RV32I-NEXT:    call fmaxf
+; RV32I-NEXT:    call __truncsfhf2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
@@ -1977,14 +1977,14 @@ define half @maxnum_f16(half %a, half %b) nounwind {
 ; RV64I-NEXT:    lui a1, 16
 ; RV64I-NEXT:    addiw s2, a1, -1
 ; RV64I-NEXT:    and a0, a0, s2
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    and a0, s0, s2
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    mv a1, a0
 ; RV64I-NEXT:    mv a0, s1
-; RV64I-NEXT:    call fmaxf@plt
-; RV64I-NEXT:    call __truncsfhf2@plt
+; RV64I-NEXT:    call fmaxf
+; RV64I-NEXT:    call __truncsfhf2
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
@@ -2159,9 +2159,9 @@ define half @floor_f16(half %a) nounwind {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    slli a0, a0, 16
 ; RV32I-NEXT:    srli a0, a0, 16
-; RV32I-NEXT:    call __extendhfsf2@plt
-; RV32I-NEXT:    call floorf@plt
-; RV32I-NEXT:    call __truncsfhf2@plt
+; RV32I-NEXT:    call __extendhfsf2
+; RV32I-NEXT:    call floorf
+; RV32I-NEXT:    call __truncsfhf2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -2172,9 +2172,9 @@ define half @floor_f16(half %a) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    slli a0, a0, 48
 ; RV64I-NEXT:    srli a0, a0, 48
-; RV64I-NEXT:    call __extendhfsf2@plt
-; RV64I-NEXT:    call floorf@plt
-; RV64I-NEXT:    call __truncsfhf2@plt
+; RV64I-NEXT:    call __extendhfsf2
+; RV64I-NEXT:    call floorf
+; RV64I-NEXT:    call __truncsfhf2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -2250,9 +2250,9 @@ define half @ceil_f16(half %a) nounwind {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    slli a0, a0, 16
 ; RV32I-NEXT:    srli a0, a0, 16
-; RV32I-NEXT:    call __extendhfsf2@plt
-; RV32I-NEXT:    call ceilf@plt
-; RV32I-NEXT:    call __truncsfhf2@plt
+; RV32I-NEXT:    call __extendhfsf2
+; RV32I-NEXT:    call ceilf
+; RV32I-NEXT:    call __truncsfhf2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -2263,9 +2263,9 @@ define half @ceil_f16(half %a) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    slli a0, a0, 48
 ; RV64I-NEXT:    srli a0, a0, 48
-; RV64I-NEXT:    call __extendhfsf2@plt
-; RV64I-NEXT:    call ceilf@plt
-; RV64I-NEXT:    call __truncsfhf2@plt
+; RV64I-NEXT:    call __extendhfsf2
+; RV64I-NEXT:    call ceilf
+; RV64I-NEXT:    call __truncsfhf2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -2341,9 +2341,9 @@ define half @trunc_f16(half %a) nounwind {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    slli a0, a0, 16
 ; RV32I-NEXT:    srli a0, a0, 16
-; RV32I-NEXT:    call __extendhfsf2@plt
-; RV32I-NEXT:    call truncf@plt
-; RV32I-NEXT:    call __truncsfhf2@plt
+; RV32I-NEXT:    call __extendhfsf2
+; RV32I-NEXT:    call truncf
+; RV32I-NEXT:    call __truncsfhf2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -2354,9 +2354,9 @@ define half @trunc_f16(half %a) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    slli a0, a0, 48
 ; RV64I-NEXT:    srli a0, a0, 48
-; RV64I-NEXT:    call __extendhfsf2@plt
-; RV64I-NEXT:    call truncf@plt
-; RV64I-NEXT:    call __truncsfhf2@plt
+; RV64I-NEXT:    call __extendhfsf2
+; RV64I-NEXT:    call truncf
+; RV64I-NEXT:    call __truncsfhf2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -2432,9 +2432,9 @@ define half @rint_f16(half %a) nounwind {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    slli a0, a0, 16
 ; RV32I-NEXT:    srli a0, a0, 16
-; RV32I-NEXT:    call __extendhfsf2@plt
-; RV32I-NEXT:    call rintf@plt
-; RV32I-NEXT:    call __truncsfhf2@plt
+; RV32I-NEXT:    call __extendhfsf2
+; RV32I-NEXT:    call rintf
+; RV32I-NEXT:    call __truncsfhf2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -2445,9 +2445,9 @@ define half @rint_f16(half %a) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    slli a0, a0, 48
 ; RV64I-NEXT:    srli a0, a0, 48
-; RV64I-NEXT:    call __extendhfsf2@plt
-; RV64I-NEXT:    call rintf@plt
-; RV64I-NEXT:    call __truncsfhf2@plt
+; RV64I-NEXT:    call __extendhfsf2
+; RV64I-NEXT:    call rintf
+; RV64I-NEXT:    call __truncsfhf2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -2494,7 +2494,7 @@ define half @nearbyint_f16(half %a) nounwind {
 ; RV32IZFH-NEXT:    addi sp, sp, -16
 ; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFH-NEXT:    fcvt.s.h fa0, fa0
-; RV32IZFH-NEXT:    call nearbyintf@plt
+; RV32IZFH-NEXT:    call nearbyintf
 ; RV32IZFH-NEXT:    fcvt.h.s fa0, fa0
 ; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFH-NEXT:    addi sp, sp, 16
@@ -2505,7 +2505,7 @@ define half @nearbyint_f16(half %a) nounwind {
 ; RV64IZFH-NEXT:    addi sp, sp, -16
 ; RV64IZFH-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZFH-NEXT:    fcvt.s.h fa0, fa0
-; RV64IZFH-NEXT:    call nearbyintf@plt
+; RV64IZFH-NEXT:    call nearbyintf
 ; RV64IZFH-NEXT:    fcvt.h.s fa0, fa0
 ; RV64IZFH-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFH-NEXT:    addi sp, sp, 16
@@ -2516,7 +2516,7 @@ define half @nearbyint_f16(half %a) nounwind {
 ; RV32IZHINX-NEXT:    addi sp, sp, -16
 ; RV32IZHINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZHINX-NEXT:    fcvt.s.h a0, a0
-; RV32IZHINX-NEXT:    call nearbyintf@plt
+; RV32IZHINX-NEXT:    call nearbyintf
 ; RV32IZHINX-NEXT:    fcvt.h.s a0, a0
 ; RV32IZHINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZHINX-NEXT:    addi sp, sp, 16
@@ -2527,7 +2527,7 @@ define half @nearbyint_f16(half %a) nounwind {
 ; RV64IZHINX-NEXT:    addi sp, sp, -16
 ; RV64IZHINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZHINX-NEXT:    fcvt.s.h a0, a0
-; RV64IZHINX-NEXT:    call nearbyintf@plt
+; RV64IZHINX-NEXT:    call nearbyintf
 ; RV64IZHINX-NEXT:    fcvt.h.s a0, a0
 ; RV64IZHINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZHINX-NEXT:    addi sp, sp, 16
@@ -2539,9 +2539,9 @@ define half @nearbyint_f16(half %a) nounwind {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    slli a0, a0, 16
 ; RV32I-NEXT:    srli a0, a0, 16
-; RV32I-NEXT:    call __extendhfsf2@plt
-; RV32I-NEXT:    call nearbyintf@plt
-; RV32I-NEXT:    call __truncsfhf2@plt
+; RV32I-NEXT:    call __extendhfsf2
+; RV32I-NEXT:    call nearbyintf
+; RV32I-NEXT:    call __truncsfhf2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -2552,9 +2552,9 @@ define half @nearbyint_f16(half %a) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    slli a0, a0, 48
 ; RV64I-NEXT:    srli a0, a0, 48
-; RV64I-NEXT:    call __extendhfsf2@plt
-; RV64I-NEXT:    call nearbyintf@plt
-; RV64I-NEXT:    call __truncsfhf2@plt
+; RV64I-NEXT:    call __extendhfsf2
+; RV64I-NEXT:    call nearbyintf
+; RV64I-NEXT:    call __truncsfhf2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -2564,7 +2564,7 @@ define half @nearbyint_f16(half %a) nounwind {
 ; RV32IZFHMIN-NEXT:    addi sp, sp, -16
 ; RV32IZFHMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFHMIN-NEXT:    fcvt.s.h fa0, fa0
-; RV32IZFHMIN-NEXT:    call nearbyintf@plt
+; RV32IZFHMIN-NEXT:    call nearbyintf
 ; RV32IZFHMIN-NEXT:    fcvt.h.s fa0, fa0
 ; RV32IZFHMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFHMIN-NEXT:    addi sp, sp, 16
@@ -2575,7 +2575,7 @@ define half @nearbyint_f16(half %a) nounwind {
 ; RV64IZFHMIN-NEXT:    addi sp, sp, -16
 ; RV64IZFHMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZFHMIN-NEXT:    fcvt.s.h fa0, fa0
-; RV64IZFHMIN-NEXT:    call nearbyintf@plt
+; RV64IZFHMIN-NEXT:    call nearbyintf
 ; RV64IZFHMIN-NEXT:    fcvt.h.s fa0, fa0
 ; RV64IZFHMIN-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFHMIN-NEXT:    addi sp, sp, 16
@@ -2586,7 +2586,7 @@ define half @nearbyint_f16(half %a) nounwind {
 ; RV32IZHINXMIN-NEXT:    addi sp, sp, -16
 ; RV32IZHINXMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZHINXMIN-NEXT:    fcvt.s.h a0, a0
-; RV32IZHINXMIN-NEXT:    call nearbyintf@plt
+; RV32IZHINXMIN-NEXT:    call nearbyintf
 ; RV32IZHINXMIN-NEXT:    fcvt.h.s a0, a0
 ; RV32IZHINXMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZHINXMIN-NEXT:    addi sp, sp, 16
@@ -2597,7 +2597,7 @@ define half @nearbyint_f16(half %a) nounwind {
 ; RV64IZHINXMIN-NEXT:    addi sp, sp, -16
 ; RV64IZHINXMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZHINXMIN-NEXT:    fcvt.s.h a0, a0
-; RV64IZHINXMIN-NEXT:    call nearbyintf@plt
+; RV64IZHINXMIN-NEXT:    call nearbyintf
 ; RV64IZHINXMIN-NEXT:    fcvt.h.s a0, a0
 ; RV64IZHINXMIN-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZHINXMIN-NEXT:    addi sp, sp, 16
@@ -2643,9 +2643,9 @@ define half @round_f16(half %a) nounwind {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    slli a0, a0, 16
 ; RV32I-NEXT:    srli a0, a0, 16
-; RV32I-NEXT:    call __extendhfsf2@plt
-; RV32I-NEXT:    call roundf@plt
-; RV32I-NEXT:    call __truncsfhf2@plt
+; RV32I-NEXT:    call __extendhfsf2
+; RV32I-NEXT:    call roundf
+; RV32I-NEXT:    call __truncsfhf2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -2656,9 +2656,9 @@ define half @round_f16(half %a) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    slli a0, a0, 48
 ; RV64I-NEXT:    srli a0, a0, 48
-; RV64I-NEXT:    call __extendhfsf2@plt
-; RV64I-NEXT:    call roundf@plt
-; RV64I-NEXT:    call __truncsfhf2@plt
+; RV64I-NEXT:    call __extendhfsf2
+; RV64I-NEXT:    call roundf
+; RV64I-NEXT:    call __truncsfhf2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -2734,9 +2734,9 @@ define half @roundeven_f16(half %a) nounwind {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    slli a0, a0, 16
 ; RV32I-NEXT:    srli a0, a0, 16
-; RV32I-NEXT:    call __extendhfsf2@plt
-; RV32I-NEXT:    call roundevenf@plt
-; RV32I-NEXT:    call __truncsfhf2@plt
+; RV32I-NEXT:    call __extendhfsf2
+; RV32I-NEXT:    call roundevenf
+; RV32I-NEXT:    call __truncsfhf2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -2747,9 +2747,9 @@ define half @roundeven_f16(half %a) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    slli a0, a0, 48
 ; RV64I-NEXT:    srli a0, a0, 48
-; RV64I-NEXT:    call __extendhfsf2@plt
-; RV64I-NEXT:    call roundevenf@plt
-; RV64I-NEXT:    call __truncsfhf2@plt
+; RV64I-NEXT:    call __extendhfsf2
+; RV64I-NEXT:    call roundevenf
+; RV64I-NEXT:    call __truncsfhf2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
diff --git a/llvm/test/CodeGen/RISCV/half-mem.ll b/llvm/test/CodeGen/RISCV/half-mem.ll
index bb25d2a7443df8..5b6a94a83f94bf 100644
--- a/llvm/test/CodeGen/RISCV/half-mem.ll
+++ b/llvm/test/CodeGen/RISCV/half-mem.ll
@@ -264,7 +264,7 @@ define half @flh_stack(half %a) nounwind {
 ; RV32IZFH-NEXT:    fsw fs0, 8(sp) # 4-byte Folded Spill
 ; RV32IZFH-NEXT:    fmv.h fs0, fa0
 ; RV32IZFH-NEXT:    addi a0, sp, 4
-; RV32IZFH-NEXT:    call notdead@plt
+; RV32IZFH-NEXT:    call notdead
 ; RV32IZFH-NEXT:    flh fa5, 4(sp)
 ; RV32IZFH-NEXT:    fadd.h fa0, fa5, fs0
 ; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -279,7 +279,7 @@ define half @flh_stack(half %a) nounwind {
 ; RV64IZFH-NEXT:    fsw fs0, 4(sp) # 4-byte Folded Spill
 ; RV64IZFH-NEXT:    fmv.h fs0, fa0
 ; RV64IZFH-NEXT:    mv a0, sp
-; RV64IZFH-NEXT:    call notdead@plt
+; RV64IZFH-NEXT:    call notdead
 ; RV64IZFH-NEXT:    flh fa5, 0(sp)
 ; RV64IZFH-NEXT:    fadd.h fa0, fa5, fs0
 ; RV64IZFH-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -294,7 +294,7 @@ define half @flh_stack(half %a) nounwind {
 ; RV32IZHINX-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32IZHINX-NEXT:    mv s0, a0
 ; RV32IZHINX-NEXT:    addi a0, sp, 4
-; RV32IZHINX-NEXT:    call notdead@plt
+; RV32IZHINX-NEXT:    call notdead
 ; RV32IZHINX-NEXT:    lh a0, 4(sp)
 ; RV32IZHINX-NEXT:    fadd.h a0, a0, s0
 ; RV32IZHINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -309,7 +309,7 @@ define half @flh_stack(half %a) nounwind {
 ; RV64IZHINX-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
 ; RV64IZHINX-NEXT:    mv s0, a0
 ; RV64IZHINX-NEXT:    addi a0, sp, 12
-; RV64IZHINX-NEXT:    call notdead@plt
+; RV64IZHINX-NEXT:    call notdead
 ; RV64IZHINX-NEXT:    lh a0, 12(sp)
 ; RV64IZHINX-NEXT:    fadd.h a0, a0, s0
 ; RV64IZHINX-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
@@ -324,7 +324,7 @@ define half @flh_stack(half %a) nounwind {
 ; RV32IZFHMIN-NEXT:    fsw fs0, 8(sp) # 4-byte Folded Spill
 ; RV32IZFHMIN-NEXT:    fmv.s fs0, fa0
 ; RV32IZFHMIN-NEXT:    addi a0, sp, 4
-; RV32IZFHMIN-NEXT:    call notdead@plt
+; RV32IZFHMIN-NEXT:    call notdead
 ; RV32IZFHMIN-NEXT:    flh fa5, 4(sp)
 ; RV32IZFHMIN-NEXT:    fcvt.s.h fa4, fs0
 ; RV32IZFHMIN-NEXT:    fcvt.s.h fa5, fa5
@@ -342,7 +342,7 @@ define half @flh_stack(half %a) nounwind {
 ; RV64IZFHMIN-NEXT:    fsw fs0, 4(sp) # 4-byte Folded Spill
 ; RV64IZFHMIN-NEXT:    fmv.s fs0, fa0
 ; RV64IZFHMIN-NEXT:    mv a0, sp
-; RV64IZFHMIN-NEXT:    call notdead@plt
+; RV64IZFHMIN-NEXT:    call notdead
 ; RV64IZFHMIN-NEXT:    flh fa5, 0(sp)
 ; RV64IZFHMIN-NEXT:    fcvt.s.h fa4, fs0
 ; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa5
@@ -360,7 +360,7 @@ define half @flh_stack(half %a) nounwind {
 ; RV32IZHINXMIN-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32IZHINXMIN-NEXT:    mv s0, a0
 ; RV32IZHINXMIN-NEXT:    addi a0, sp, 4
-; RV32IZHINXMIN-NEXT:    call notdead@plt
+; RV32IZHINXMIN-NEXT:    call notdead
 ; RV32IZHINXMIN-NEXT:    lh a0, 4(sp)
 ; RV32IZHINXMIN-NEXT:    fcvt.s.h a1, s0
 ; RV32IZHINXMIN-NEXT:    fcvt.s.h a0, a0
@@ -378,7 +378,7 @@ define half @flh_stack(half %a) nounwind {
 ; RV64IZHINXMIN-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
 ; RV64IZHINXMIN-NEXT:    mv s0, a0
 ; RV64IZHINXMIN-NEXT:    addi a0, sp, 12
-; RV64IZHINXMIN-NEXT:    call notdead@plt
+; RV64IZHINXMIN-NEXT:    call notdead
 ; RV64IZHINXMIN-NEXT:    lh a0, 12(sp)
 ; RV64IZHINXMIN-NEXT:    fcvt.s.h a1, s0
 ; RV64IZHINXMIN-NEXT:    fcvt.s.h a0, a0
@@ -403,7 +403,7 @@ define dso_local void @fsh_stack(half %a, half %b) nounwind {
 ; RV32IZFH-NEXT:    fadd.h fa5, fa0, fa1
 ; RV32IZFH-NEXT:    fsh fa5, 8(sp)
 ; RV32IZFH-NEXT:    addi a0, sp, 8
-; RV32IZFH-NEXT:    call notdead@plt
+; RV32IZFH-NEXT:    call notdead
 ; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFH-NEXT:    addi sp, sp, 16
 ; RV32IZFH-NEXT:    ret
@@ -415,7 +415,7 @@ define dso_local void @fsh_stack(half %a, half %b) nounwind {
 ; RV64IZFH-NEXT:    fadd.h fa5, fa0, fa1
 ; RV64IZFH-NEXT:    fsh fa5, 4(sp)
 ; RV64IZFH-NEXT:    addi a0, sp, 4
-; RV64IZFH-NEXT:    call notdead@plt
+; RV64IZFH-NEXT:    call notdead
 ; RV64IZFH-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFH-NEXT:    addi sp, sp, 16
 ; RV64IZFH-NEXT:    ret
@@ -427,7 +427,7 @@ define dso_local void @fsh_stack(half %a, half %b) nounwind {
 ; RV32IZHINX-NEXT:    fadd.h a0, a0, a1
 ; RV32IZHINX-NEXT:    sh a0, 8(sp)
 ; RV32IZHINX-NEXT:    addi a0, sp, 8
-; RV32IZHINX-NEXT:    call notdead@plt
+; RV32IZHINX-NEXT:    call notdead
 ; RV32IZHINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZHINX-NEXT:    addi sp, sp, 16
 ; RV32IZHINX-NEXT:    ret
@@ -439,7 +439,7 @@ define dso_local void @fsh_stack(half %a, half %b) nounwind {
 ; RV64IZHINX-NEXT:    fadd.h a0, a0, a1
 ; RV64IZHINX-NEXT:    sh a0, 4(sp)
 ; RV64IZHINX-NEXT:    addi a0, sp, 4
-; RV64IZHINX-NEXT:    call notdead@plt
+; RV64IZHINX-NEXT:    call notdead
 ; RV64IZHINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZHINX-NEXT:    addi sp, sp, 16
 ; RV64IZHINX-NEXT:    ret
@@ -454,7 +454,7 @@ define dso_local void @fsh_stack(half %a, half %b) nounwind {
 ; RV32IZFHMIN-NEXT:    fcvt.h.s fa5, fa5
 ; RV32IZFHMIN-NEXT:    fsh fa5, 8(sp)
 ; RV32IZFHMIN-NEXT:    addi a0, sp, 8
-; RV32IZFHMIN-NEXT:    call notdead@plt
+; RV32IZFHMIN-NEXT:    call notdead
 ; RV32IZFHMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFHMIN-NEXT:    addi sp, sp, 16
 ; RV32IZFHMIN-NEXT:    ret
@@ -469,7 +469,7 @@ define dso_local void @fsh_stack(half %a, half %b) nounwind {
 ; RV64IZFHMIN-NEXT:    fcvt.h.s fa5, fa5
 ; RV64IZFHMIN-NEXT:    fsh fa5, 4(sp)
 ; RV64IZFHMIN-NEXT:    addi a0, sp, 4
-; RV64IZFHMIN-NEXT:    call notdead@plt
+; RV64IZFHMIN-NEXT:    call notdead
 ; RV64IZFHMIN-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFHMIN-NEXT:    addi sp, sp, 16
 ; RV64IZFHMIN-NEXT:    ret
@@ -484,7 +484,7 @@ define dso_local void @fsh_stack(half %a, half %b) nounwind {
 ; RV32IZHINXMIN-NEXT:    fcvt.h.s a0, a0
 ; RV32IZHINXMIN-NEXT:    sh a0, 8(sp)
 ; RV32IZHINXMIN-NEXT:    addi a0, sp, 8
-; RV32IZHINXMIN-NEXT:    call notdead@plt
+; RV32IZHINXMIN-NEXT:    call notdead
 ; RV32IZHINXMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZHINXMIN-NEXT:    addi sp, sp, 16
 ; RV32IZHINXMIN-NEXT:    ret
@@ -499,7 +499,7 @@ define dso_local void @fsh_stack(half %a, half %b) nounwind {
 ; RV64IZHINXMIN-NEXT:    fcvt.h.s a0, a0
 ; RV64IZHINXMIN-NEXT:    sh a0, 4(sp)
 ; RV64IZHINXMIN-NEXT:    addi a0, sp, 4
-; RV64IZHINXMIN-NEXT:    call notdead@plt
+; RV64IZHINXMIN-NEXT:    call notdead
 ; RV64IZHINXMIN-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZHINXMIN-NEXT:    addi sp, sp, 16
 ; RV64IZHINXMIN-NEXT:    ret
diff --git a/llvm/test/CodeGen/RISCV/half-round-conv-sat.ll b/llvm/test/CodeGen/RISCV/half-round-conv-sat.ll
index 962ed8393b7269..3f385909b0b510 100644
--- a/llvm/test/CodeGen/RISCV/half-round-conv-sat.ll
+++ b/llvm/test/CodeGen/RISCV/half-round-conv-sat.ll
@@ -114,7 +114,7 @@ define i64 @test_floor_si64(half %x) nounwind {
 ; RV32IZFH-NEXT:    fmv.w.x fa5, a0
 ; RV32IZFH-NEXT:    fle.s s0, fa5, fs0
 ; RV32IZFH-NEXT:    fmv.s fa0, fs0
-; RV32IZFH-NEXT:    call __fixsfdi@plt
+; RV32IZFH-NEXT:    call __fixsfdi
 ; RV32IZFH-NEXT:    lui a4, 524288
 ; RV32IZFH-NEXT:    lui a2, 524288
 ; RV32IZFH-NEXT:    beqz s0, .LBB1_4
@@ -173,7 +173,7 @@ define i64 @test_floor_si64(half %x) nounwind {
 ; RV32IZHINX-NEXT:    fle.s s1, a0, s0
 ; RV32IZHINX-NEXT:    neg s2, s1
 ; RV32IZHINX-NEXT:    mv a0, s0
-; RV32IZHINX-NEXT:    call __fixsfdi@plt
+; RV32IZHINX-NEXT:    call __fixsfdi
 ; RV32IZHINX-NEXT:    lui a2, %hi(.LCPI1_1)
 ; RV32IZHINX-NEXT:    lw a2, %lo(.LCPI1_1)(a2)
 ; RV32IZHINX-NEXT:    and a0, s2, a0
@@ -243,7 +243,7 @@ define i64 @test_floor_si64(half %x) nounwind {
 ; RV32IZFHMIN-NEXT:    fmv.w.x fa5, a0
 ; RV32IZFHMIN-NEXT:    fle.s s0, fa5, fs0
 ; RV32IZFHMIN-NEXT:    fmv.s fa0, fs0
-; RV32IZFHMIN-NEXT:    call __fixsfdi@plt
+; RV32IZFHMIN-NEXT:    call __fixsfdi
 ; RV32IZFHMIN-NEXT:    lui a4, 524288
 ; RV32IZFHMIN-NEXT:    lui a2, 524288
 ; RV32IZFHMIN-NEXT:    beqz s0, .LBB1_4
@@ -316,7 +316,7 @@ define i64 @test_floor_si64(half %x) nounwind {
 ; RV32IZHINXMIN-NEXT:    fle.s s1, a0, s0
 ; RV32IZHINXMIN-NEXT:    neg s2, s1
 ; RV32IZHINXMIN-NEXT:    mv a0, s0
-; RV32IZHINXMIN-NEXT:    call __fixsfdi@plt
+; RV32IZHINXMIN-NEXT:    call __fixsfdi
 ; RV32IZHINXMIN-NEXT:    lui a2, %hi(.LCPI1_0)
 ; RV32IZHINXMIN-NEXT:    lw a2, %lo(.LCPI1_0)(a2)
 ; RV32IZHINXMIN-NEXT:    and a0, s2, a0
@@ -529,7 +529,7 @@ define i64 @test_floor_ui64(half %x) nounwind {
 ; RV32IZFH-NEXT:    fle.s a0, fa5, fs0
 ; RV32IZFH-NEXT:    neg s0, a0
 ; RV32IZFH-NEXT:    fmv.s fa0, fs0
-; RV32IZFH-NEXT:    call __fixunssfdi@plt
+; RV32IZFH-NEXT:    call __fixunssfdi
 ; RV32IZFH-NEXT:    lui a2, %hi(.LCPI3_1)
 ; RV32IZFH-NEXT:    flw fa5, %lo(.LCPI3_1)(a2)
 ; RV32IZFH-NEXT:    and a0, s0, a0
@@ -573,7 +573,7 @@ define i64 @test_floor_ui64(half %x) nounwind {
 ; RV32IZHINX-NEXT:    fle.s a0, zero, s0
 ; RV32IZHINX-NEXT:    neg s1, a0
 ; RV32IZHINX-NEXT:    mv a0, s0
-; RV32IZHINX-NEXT:    call __fixunssfdi@plt
+; RV32IZHINX-NEXT:    call __fixunssfdi
 ; RV32IZHINX-NEXT:    lui a2, %hi(.LCPI3_1)
 ; RV32IZHINX-NEXT:    lw a2, %lo(.LCPI3_1)(a2)
 ; RV32IZHINX-NEXT:    and a0, s1, a0
@@ -630,7 +630,7 @@ define i64 @test_floor_ui64(half %x) nounwind {
 ; RV32IZFHMIN-NEXT:    fle.s a0, fa5, fs0
 ; RV32IZFHMIN-NEXT:    neg s0, a0
 ; RV32IZFHMIN-NEXT:    fmv.s fa0, fs0
-; RV32IZFHMIN-NEXT:    call __fixunssfdi@plt
+; RV32IZFHMIN-NEXT:    call __fixunssfdi
 ; RV32IZFHMIN-NEXT:    lui a2, %hi(.LCPI3_0)
 ; RV32IZFHMIN-NEXT:    flw fa5, %lo(.LCPI3_0)(a2)
 ; RV32IZFHMIN-NEXT:    and a0, s0, a0
@@ -688,7 +688,7 @@ define i64 @test_floor_ui64(half %x) nounwind {
 ; RV32IZHINXMIN-NEXT:    fle.s a0, zero, s0
 ; RV32IZHINXMIN-NEXT:    neg s1, a0
 ; RV32IZHINXMIN-NEXT:    mv a0, s0
-; RV32IZHINXMIN-NEXT:    call __fixunssfdi@plt
+; RV32IZHINXMIN-NEXT:    call __fixunssfdi
 ; RV32IZHINXMIN-NEXT:    lui a2, %hi(.LCPI3_0)
 ; RV32IZHINXMIN-NEXT:    lw a2, %lo(.LCPI3_0)(a2)
 ; RV32IZHINXMIN-NEXT:    and a0, s1, a0
@@ -826,7 +826,7 @@ define i64 @test_ceil_si64(half %x) nounwind {
 ; RV32IZFH-NEXT:    fmv.w.x fa5, a0
 ; RV32IZFH-NEXT:    fle.s s0, fa5, fs0
 ; RV32IZFH-NEXT:    fmv.s fa0, fs0
-; RV32IZFH-NEXT:    call __fixsfdi@plt
+; RV32IZFH-NEXT:    call __fixsfdi
 ; RV32IZFH-NEXT:    lui a4, 524288
 ; RV32IZFH-NEXT:    lui a2, 524288
 ; RV32IZFH-NEXT:    beqz s0, .LBB5_4
@@ -885,7 +885,7 @@ define i64 @test_ceil_si64(half %x) nounwind {
 ; RV32IZHINX-NEXT:    fle.s s1, a0, s0
 ; RV32IZHINX-NEXT:    neg s2, s1
 ; RV32IZHINX-NEXT:    mv a0, s0
-; RV32IZHINX-NEXT:    call __fixsfdi@plt
+; RV32IZHINX-NEXT:    call __fixsfdi
 ; RV32IZHINX-NEXT:    lui a2, %hi(.LCPI5_1)
 ; RV32IZHINX-NEXT:    lw a2, %lo(.LCPI5_1)(a2)
 ; RV32IZHINX-NEXT:    and a0, s2, a0
@@ -955,7 +955,7 @@ define i64 @test_ceil_si64(half %x) nounwind {
 ; RV32IZFHMIN-NEXT:    fmv.w.x fa5, a0
 ; RV32IZFHMIN-NEXT:    fle.s s0, fa5, fs0
 ; RV32IZFHMIN-NEXT:    fmv.s fa0, fs0
-; RV32IZFHMIN-NEXT:    call __fixsfdi@plt
+; RV32IZFHMIN-NEXT:    call __fixsfdi
 ; RV32IZFHMIN-NEXT:    lui a4, 524288
 ; RV32IZFHMIN-NEXT:    lui a2, 524288
 ; RV32IZFHMIN-NEXT:    beqz s0, .LBB5_4
@@ -1028,7 +1028,7 @@ define i64 @test_ceil_si64(half %x) nounwind {
 ; RV32IZHINXMIN-NEXT:    fle.s s1, a0, s0
 ; RV32IZHINXMIN-NEXT:    neg s2, s1
 ; RV32IZHINXMIN-NEXT:    mv a0, s0
-; RV32IZHINXMIN-NEXT:    call __fixsfdi@plt
+; RV32IZHINXMIN-NEXT:    call __fixsfdi
 ; RV32IZHINXMIN-NEXT:    lui a2, %hi(.LCPI5_0)
 ; RV32IZHINXMIN-NEXT:    lw a2, %lo(.LCPI5_0)(a2)
 ; RV32IZHINXMIN-NEXT:    and a0, s2, a0
@@ -1241,7 +1241,7 @@ define i64 @test_ceil_ui64(half %x) nounwind {
 ; RV32IZFH-NEXT:    fle.s a0, fa5, fs0
 ; RV32IZFH-NEXT:    neg s0, a0
 ; RV32IZFH-NEXT:    fmv.s fa0, fs0
-; RV32IZFH-NEXT:    call __fixunssfdi@plt
+; RV32IZFH-NEXT:    call __fixunssfdi
 ; RV32IZFH-NEXT:    lui a2, %hi(.LCPI7_1)
 ; RV32IZFH-NEXT:    flw fa5, %lo(.LCPI7_1)(a2)
 ; RV32IZFH-NEXT:    and a0, s0, a0
@@ -1285,7 +1285,7 @@ define i64 @test_ceil_ui64(half %x) nounwind {
 ; RV32IZHINX-NEXT:    fle.s a0, zero, s0
 ; RV32IZHINX-NEXT:    neg s1, a0
 ; RV32IZHINX-NEXT:    mv a0, s0
-; RV32IZHINX-NEXT:    call __fixunssfdi@plt
+; RV32IZHINX-NEXT:    call __fixunssfdi
 ; RV32IZHINX-NEXT:    lui a2, %hi(.LCPI7_1)
 ; RV32IZHINX-NEXT:    lw a2, %lo(.LCPI7_1)(a2)
 ; RV32IZHINX-NEXT:    and a0, s1, a0
@@ -1342,7 +1342,7 @@ define i64 @test_ceil_ui64(half %x) nounwind {
 ; RV32IZFHMIN-NEXT:    fle.s a0, fa5, fs0
 ; RV32IZFHMIN-NEXT:    neg s0, a0
 ; RV32IZFHMIN-NEXT:    fmv.s fa0, fs0
-; RV32IZFHMIN-NEXT:    call __fixunssfdi@plt
+; RV32IZFHMIN-NEXT:    call __fixunssfdi
 ; RV32IZFHMIN-NEXT:    lui a2, %hi(.LCPI7_0)
 ; RV32IZFHMIN-NEXT:    flw fa5, %lo(.LCPI7_0)(a2)
 ; RV32IZFHMIN-NEXT:    and a0, s0, a0
@@ -1400,7 +1400,7 @@ define i64 @test_ceil_ui64(half %x) nounwind {
 ; RV32IZHINXMIN-NEXT:    fle.s a0, zero, s0
 ; RV32IZHINXMIN-NEXT:    neg s1, a0
 ; RV32IZHINXMIN-NEXT:    mv a0, s0
-; RV32IZHINXMIN-NEXT:    call __fixunssfdi@plt
+; RV32IZHINXMIN-NEXT:    call __fixunssfdi
 ; RV32IZHINXMIN-NEXT:    lui a2, %hi(.LCPI7_0)
 ; RV32IZHINXMIN-NEXT:    lw a2, %lo(.LCPI7_0)(a2)
 ; RV32IZHINXMIN-NEXT:    and a0, s1, a0
@@ -1538,7 +1538,7 @@ define i64 @test_trunc_si64(half %x) nounwind {
 ; RV32IZFH-NEXT:    fmv.w.x fa5, a0
 ; RV32IZFH-NEXT:    fle.s s0, fa5, fs0
 ; RV32IZFH-NEXT:    fmv.s fa0, fs0
-; RV32IZFH-NEXT:    call __fixsfdi@plt
+; RV32IZFH-NEXT:    call __fixsfdi
 ; RV32IZFH-NEXT:    lui a4, 524288
 ; RV32IZFH-NEXT:    lui a2, 524288
 ; RV32IZFH-NEXT:    beqz s0, .LBB9_4
@@ -1597,7 +1597,7 @@ define i64 @test_trunc_si64(half %x) nounwind {
 ; RV32IZHINX-NEXT:    fle.s s1, a0, s0
 ; RV32IZHINX-NEXT:    neg s2, s1
 ; RV32IZHINX-NEXT:    mv a0, s0
-; RV32IZHINX-NEXT:    call __fixsfdi@plt
+; RV32IZHINX-NEXT:    call __fixsfdi
 ; RV32IZHINX-NEXT:    lui a2, %hi(.LCPI9_1)
 ; RV32IZHINX-NEXT:    lw a2, %lo(.LCPI9_1)(a2)
 ; RV32IZHINX-NEXT:    and a0, s2, a0
@@ -1667,7 +1667,7 @@ define i64 @test_trunc_si64(half %x) nounwind {
 ; RV32IZFHMIN-NEXT:    fmv.w.x fa5, a0
 ; RV32IZFHMIN-NEXT:    fle.s s0, fa5, fs0
 ; RV32IZFHMIN-NEXT:    fmv.s fa0, fs0
-; RV32IZFHMIN-NEXT:    call __fixsfdi@plt
+; RV32IZFHMIN-NEXT:    call __fixsfdi
 ; RV32IZFHMIN-NEXT:    lui a4, 524288
 ; RV32IZFHMIN-NEXT:    lui a2, 524288
 ; RV32IZFHMIN-NEXT:    beqz s0, .LBB9_4
@@ -1740,7 +1740,7 @@ define i64 @test_trunc_si64(half %x) nounwind {
 ; RV32IZHINXMIN-NEXT:    fle.s s1, a0, s0
 ; RV32IZHINXMIN-NEXT:    neg s2, s1
 ; RV32IZHINXMIN-NEXT:    mv a0, s0
-; RV32IZHINXMIN-NEXT:    call __fixsfdi@plt
+; RV32IZHINXMIN-NEXT:    call __fixsfdi
 ; RV32IZHINXMIN-NEXT:    lui a2, %hi(.LCPI9_0)
 ; RV32IZHINXMIN-NEXT:    lw a2, %lo(.LCPI9_0)(a2)
 ; RV32IZHINXMIN-NEXT:    and a0, s2, a0
@@ -1953,7 +1953,7 @@ define i64 @test_trunc_ui64(half %x) nounwind {
 ; RV32IZFH-NEXT:    fle.s a0, fa5, fs0
 ; RV32IZFH-NEXT:    neg s0, a0
 ; RV32IZFH-NEXT:    fmv.s fa0, fs0
-; RV32IZFH-NEXT:    call __fixunssfdi@plt
+; RV32IZFH-NEXT:    call __fixunssfdi
 ; RV32IZFH-NEXT:    lui a2, %hi(.LCPI11_1)
 ; RV32IZFH-NEXT:    flw fa5, %lo(.LCPI11_1)(a2)
 ; RV32IZFH-NEXT:    and a0, s0, a0
@@ -1997,7 +1997,7 @@ define i64 @test_trunc_ui64(half %x) nounwind {
 ; RV32IZHINX-NEXT:    fle.s a0, zero, s0
 ; RV32IZHINX-NEXT:    neg s1, a0
 ; RV32IZHINX-NEXT:    mv a0, s0
-; RV32IZHINX-NEXT:    call __fixunssfdi@plt
+; RV32IZHINX-NEXT:    call __fixunssfdi
 ; RV32IZHINX-NEXT:    lui a2, %hi(.LCPI11_1)
 ; RV32IZHINX-NEXT:    lw a2, %lo(.LCPI11_1)(a2)
 ; RV32IZHINX-NEXT:    and a0, s1, a0
@@ -2054,7 +2054,7 @@ define i64 @test_trunc_ui64(half %x) nounwind {
 ; RV32IZFHMIN-NEXT:    fle.s a0, fa5, fs0
 ; RV32IZFHMIN-NEXT:    neg s0, a0
 ; RV32IZFHMIN-NEXT:    fmv.s fa0, fs0
-; RV32IZFHMIN-NEXT:    call __fixunssfdi@plt
+; RV32IZFHMIN-NEXT:    call __fixunssfdi
 ; RV32IZFHMIN-NEXT:    lui a2, %hi(.LCPI11_0)
 ; RV32IZFHMIN-NEXT:    flw fa5, %lo(.LCPI11_0)(a2)
 ; RV32IZFHMIN-NEXT:    and a0, s0, a0
@@ -2112,7 +2112,7 @@ define i64 @test_trunc_ui64(half %x) nounwind {
 ; RV32IZHINXMIN-NEXT:    fle.s a0, zero, s0
 ; RV32IZHINXMIN-NEXT:    neg s1, a0
 ; RV32IZHINXMIN-NEXT:    mv a0, s0
-; RV32IZHINXMIN-NEXT:    call __fixunssfdi@plt
+; RV32IZHINXMIN-NEXT:    call __fixunssfdi
 ; RV32IZHINXMIN-NEXT:    lui a2, %hi(.LCPI11_0)
 ; RV32IZHINXMIN-NEXT:    lw a2, %lo(.LCPI11_0)(a2)
 ; RV32IZHINXMIN-NEXT:    and a0, s1, a0
@@ -2250,7 +2250,7 @@ define i64 @test_round_si64(half %x) nounwind {
 ; RV32IZFH-NEXT:    fmv.w.x fa5, a0
 ; RV32IZFH-NEXT:    fle.s s0, fa5, fs0
 ; RV32IZFH-NEXT:    fmv.s fa0, fs0
-; RV32IZFH-NEXT:    call __fixsfdi@plt
+; RV32IZFH-NEXT:    call __fixsfdi
 ; RV32IZFH-NEXT:    lui a4, 524288
 ; RV32IZFH-NEXT:    lui a2, 524288
 ; RV32IZFH-NEXT:    beqz s0, .LBB13_4
@@ -2309,7 +2309,7 @@ define i64 @test_round_si64(half %x) nounwind {
 ; RV32IZHINX-NEXT:    fle.s s1, a0, s0
 ; RV32IZHINX-NEXT:    neg s2, s1
 ; RV32IZHINX-NEXT:    mv a0, s0
-; RV32IZHINX-NEXT:    call __fixsfdi@plt
+; RV32IZHINX-NEXT:    call __fixsfdi
 ; RV32IZHINX-NEXT:    lui a2, %hi(.LCPI13_1)
 ; RV32IZHINX-NEXT:    lw a2, %lo(.LCPI13_1)(a2)
 ; RV32IZHINX-NEXT:    and a0, s2, a0
@@ -2379,7 +2379,7 @@ define i64 @test_round_si64(half %x) nounwind {
 ; RV32IZFHMIN-NEXT:    fmv.w.x fa5, a0
 ; RV32IZFHMIN-NEXT:    fle.s s0, fa5, fs0
 ; RV32IZFHMIN-NEXT:    fmv.s fa0, fs0
-; RV32IZFHMIN-NEXT:    call __fixsfdi@plt
+; RV32IZFHMIN-NEXT:    call __fixsfdi
 ; RV32IZFHMIN-NEXT:    lui a4, 524288
 ; RV32IZFHMIN-NEXT:    lui a2, 524288
 ; RV32IZFHMIN-NEXT:    beqz s0, .LBB13_4
@@ -2452,7 +2452,7 @@ define i64 @test_round_si64(half %x) nounwind {
 ; RV32IZHINXMIN-NEXT:    fle.s s1, a0, s0
 ; RV32IZHINXMIN-NEXT:    neg s2, s1
 ; RV32IZHINXMIN-NEXT:    mv a0, s0
-; RV32IZHINXMIN-NEXT:    call __fixsfdi@plt
+; RV32IZHINXMIN-NEXT:    call __fixsfdi
 ; RV32IZHINXMIN-NEXT:    lui a2, %hi(.LCPI13_0)
 ; RV32IZHINXMIN-NEXT:    lw a2, %lo(.LCPI13_0)(a2)
 ; RV32IZHINXMIN-NEXT:    and a0, s2, a0
@@ -2665,7 +2665,7 @@ define i64 @test_round_ui64(half %x) nounwind {
 ; RV32IZFH-NEXT:    fle.s a0, fa5, fs0
 ; RV32IZFH-NEXT:    neg s0, a0
 ; RV32IZFH-NEXT:    fmv.s fa0, fs0
-; RV32IZFH-NEXT:    call __fixunssfdi@plt
+; RV32IZFH-NEXT:    call __fixunssfdi
 ; RV32IZFH-NEXT:    lui a2, %hi(.LCPI15_1)
 ; RV32IZFH-NEXT:    flw fa5, %lo(.LCPI15_1)(a2)
 ; RV32IZFH-NEXT:    and a0, s0, a0
@@ -2709,7 +2709,7 @@ define i64 @test_round_ui64(half %x) nounwind {
 ; RV32IZHINX-NEXT:    fle.s a0, zero, s0
 ; RV32IZHINX-NEXT:    neg s1, a0
 ; RV32IZHINX-NEXT:    mv a0, s0
-; RV32IZHINX-NEXT:    call __fixunssfdi@plt
+; RV32IZHINX-NEXT:    call __fixunssfdi
 ; RV32IZHINX-NEXT:    lui a2, %hi(.LCPI15_1)
 ; RV32IZHINX-NEXT:    lw a2, %lo(.LCPI15_1)(a2)
 ; RV32IZHINX-NEXT:    and a0, s1, a0
@@ -2766,7 +2766,7 @@ define i64 @test_round_ui64(half %x) nounwind {
 ; RV32IZFHMIN-NEXT:    fle.s a0, fa5, fs0
 ; RV32IZFHMIN-NEXT:    neg s0, a0
 ; RV32IZFHMIN-NEXT:    fmv.s fa0, fs0
-; RV32IZFHMIN-NEXT:    call __fixunssfdi@plt
+; RV32IZFHMIN-NEXT:    call __fixunssfdi
 ; RV32IZFHMIN-NEXT:    lui a2, %hi(.LCPI15_0)
 ; RV32IZFHMIN-NEXT:    flw fa5, %lo(.LCPI15_0)(a2)
 ; RV32IZFHMIN-NEXT:    and a0, s0, a0
@@ -2824,7 +2824,7 @@ define i64 @test_round_ui64(half %x) nounwind {
 ; RV32IZHINXMIN-NEXT:    fle.s a0, zero, s0
 ; RV32IZHINXMIN-NEXT:    neg s1, a0
 ; RV32IZHINXMIN-NEXT:    mv a0, s0
-; RV32IZHINXMIN-NEXT:    call __fixunssfdi@plt
+; RV32IZHINXMIN-NEXT:    call __fixunssfdi
 ; RV32IZHINXMIN-NEXT:    lui a2, %hi(.LCPI15_0)
 ; RV32IZHINXMIN-NEXT:    lw a2, %lo(.LCPI15_0)(a2)
 ; RV32IZHINXMIN-NEXT:    and a0, s1, a0
@@ -2962,7 +2962,7 @@ define i64 @test_roundeven_si64(half %x) nounwind {
 ; RV32IZFH-NEXT:    fmv.w.x fa5, a0
 ; RV32IZFH-NEXT:    fle.s s0, fa5, fs0
 ; RV32IZFH-NEXT:    fmv.s fa0, fs0
-; RV32IZFH-NEXT:    call __fixsfdi@plt
+; RV32IZFH-NEXT:    call __fixsfdi
 ; RV32IZFH-NEXT:    lui a4, 524288
 ; RV32IZFH-NEXT:    lui a2, 524288
 ; RV32IZFH-NEXT:    beqz s0, .LBB17_4
@@ -3021,7 +3021,7 @@ define i64 @test_roundeven_si64(half %x) nounwind {
 ; RV32IZHINX-NEXT:    fle.s s1, a0, s0
 ; RV32IZHINX-NEXT:    neg s2, s1
 ; RV32IZHINX-NEXT:    mv a0, s0
-; RV32IZHINX-NEXT:    call __fixsfdi@plt
+; RV32IZHINX-NEXT:    call __fixsfdi
 ; RV32IZHINX-NEXT:    lui a2, %hi(.LCPI17_1)
 ; RV32IZHINX-NEXT:    lw a2, %lo(.LCPI17_1)(a2)
 ; RV32IZHINX-NEXT:    and a0, s2, a0
@@ -3091,7 +3091,7 @@ define i64 @test_roundeven_si64(half %x) nounwind {
 ; RV32IZFHMIN-NEXT:    fmv.w.x fa5, a0
 ; RV32IZFHMIN-NEXT:    fle.s s0, fa5, fs0
 ; RV32IZFHMIN-NEXT:    fmv.s fa0, fs0
-; RV32IZFHMIN-NEXT:    call __fixsfdi@plt
+; RV32IZFHMIN-NEXT:    call __fixsfdi
 ; RV32IZFHMIN-NEXT:    lui a4, 524288
 ; RV32IZFHMIN-NEXT:    lui a2, 524288
 ; RV32IZFHMIN-NEXT:    beqz s0, .LBB17_4
@@ -3164,7 +3164,7 @@ define i64 @test_roundeven_si64(half %x) nounwind {
 ; RV32IZHINXMIN-NEXT:    fle.s s1, a0, s0
 ; RV32IZHINXMIN-NEXT:    neg s2, s1
 ; RV32IZHINXMIN-NEXT:    mv a0, s0
-; RV32IZHINXMIN-NEXT:    call __fixsfdi@plt
+; RV32IZHINXMIN-NEXT:    call __fixsfdi
 ; RV32IZHINXMIN-NEXT:    lui a2, %hi(.LCPI17_0)
 ; RV32IZHINXMIN-NEXT:    lw a2, %lo(.LCPI17_0)(a2)
 ; RV32IZHINXMIN-NEXT:    and a0, s2, a0
@@ -3377,7 +3377,7 @@ define i64 @test_roundeven_ui64(half %x) nounwind {
 ; RV32IZFH-NEXT:    fle.s a0, fa5, fs0
 ; RV32IZFH-NEXT:    neg s0, a0
 ; RV32IZFH-NEXT:    fmv.s fa0, fs0
-; RV32IZFH-NEXT:    call __fixunssfdi@plt
+; RV32IZFH-NEXT:    call __fixunssfdi
 ; RV32IZFH-NEXT:    lui a2, %hi(.LCPI19_1)
 ; RV32IZFH-NEXT:    flw fa5, %lo(.LCPI19_1)(a2)
 ; RV32IZFH-NEXT:    and a0, s0, a0
@@ -3421,7 +3421,7 @@ define i64 @test_roundeven_ui64(half %x) nounwind {
 ; RV32IZHINX-NEXT:    fle.s a0, zero, s0
 ; RV32IZHINX-NEXT:    neg s1, a0
 ; RV32IZHINX-NEXT:    mv a0, s0
-; RV32IZHINX-NEXT:    call __fixunssfdi@plt
+; RV32IZHINX-NEXT:    call __fixunssfdi
 ; RV32IZHINX-NEXT:    lui a2, %hi(.LCPI19_1)
 ; RV32IZHINX-NEXT:    lw a2, %lo(.LCPI19_1)(a2)
 ; RV32IZHINX-NEXT:    and a0, s1, a0
@@ -3478,7 +3478,7 @@ define i64 @test_roundeven_ui64(half %x) nounwind {
 ; RV32IZFHMIN-NEXT:    fle.s a0, fa5, fs0
 ; RV32IZFHMIN-NEXT:    neg s0, a0
 ; RV32IZFHMIN-NEXT:    fmv.s fa0, fs0
-; RV32IZFHMIN-NEXT:    call __fixunssfdi@plt
+; RV32IZFHMIN-NEXT:    call __fixunssfdi
 ; RV32IZFHMIN-NEXT:    lui a2, %hi(.LCPI19_0)
 ; RV32IZFHMIN-NEXT:    flw fa5, %lo(.LCPI19_0)(a2)
 ; RV32IZFHMIN-NEXT:    and a0, s0, a0
@@ -3536,7 +3536,7 @@ define i64 @test_roundeven_ui64(half %x) nounwind {
 ; RV32IZHINXMIN-NEXT:    fle.s a0, zero, s0
 ; RV32IZHINXMIN-NEXT:    neg s1, a0
 ; RV32IZHINXMIN-NEXT:    mv a0, s0
-; RV32IZHINXMIN-NEXT:    call __fixunssfdi@plt
+; RV32IZHINXMIN-NEXT:    call __fixunssfdi
 ; RV32IZHINXMIN-NEXT:    lui a2, %hi(.LCPI19_0)
 ; RV32IZHINXMIN-NEXT:    lw a2, %lo(.LCPI19_0)(a2)
 ; RV32IZHINXMIN-NEXT:    and a0, s1, a0
@@ -3674,7 +3674,7 @@ define i64 @test_rint_si64(half %x) nounwind {
 ; RV32IZFH-NEXT:    fmv.w.x fa5, a0
 ; RV32IZFH-NEXT:    fle.s s0, fa5, fs0
 ; RV32IZFH-NEXT:    fmv.s fa0, fs0
-; RV32IZFH-NEXT:    call __fixsfdi@plt
+; RV32IZFH-NEXT:    call __fixsfdi
 ; RV32IZFH-NEXT:    lui a4, 524288
 ; RV32IZFH-NEXT:    lui a2, 524288
 ; RV32IZFH-NEXT:    beqz s0, .LBB21_4
@@ -3733,7 +3733,7 @@ define i64 @test_rint_si64(half %x) nounwind {
 ; RV32IZHINX-NEXT:    fle.s s1, a0, s0
 ; RV32IZHINX-NEXT:    neg s2, s1
 ; RV32IZHINX-NEXT:    mv a0, s0
-; RV32IZHINX-NEXT:    call __fixsfdi@plt
+; RV32IZHINX-NEXT:    call __fixsfdi
 ; RV32IZHINX-NEXT:    lui a2, %hi(.LCPI21_1)
 ; RV32IZHINX-NEXT:    lw a2, %lo(.LCPI21_1)(a2)
 ; RV32IZHINX-NEXT:    and a0, s2, a0
@@ -3803,7 +3803,7 @@ define i64 @test_rint_si64(half %x) nounwind {
 ; RV32IZFHMIN-NEXT:    fmv.w.x fa5, a0
 ; RV32IZFHMIN-NEXT:    fle.s s0, fa5, fs0
 ; RV32IZFHMIN-NEXT:    fmv.s fa0, fs0
-; RV32IZFHMIN-NEXT:    call __fixsfdi@plt
+; RV32IZFHMIN-NEXT:    call __fixsfdi
 ; RV32IZFHMIN-NEXT:    lui a4, 524288
 ; RV32IZFHMIN-NEXT:    lui a2, 524288
 ; RV32IZFHMIN-NEXT:    beqz s0, .LBB21_4
@@ -3876,7 +3876,7 @@ define i64 @test_rint_si64(half %x) nounwind {
 ; RV32IZHINXMIN-NEXT:    fle.s s1, a0, s0
 ; RV32IZHINXMIN-NEXT:    neg s2, s1
 ; RV32IZHINXMIN-NEXT:    mv a0, s0
-; RV32IZHINXMIN-NEXT:    call __fixsfdi@plt
+; RV32IZHINXMIN-NEXT:    call __fixsfdi
 ; RV32IZHINXMIN-NEXT:    lui a2, %hi(.LCPI21_0)
 ; RV32IZHINXMIN-NEXT:    lw a2, %lo(.LCPI21_0)(a2)
 ; RV32IZHINXMIN-NEXT:    and a0, s2, a0
@@ -4089,7 +4089,7 @@ define i64 @test_rint_ui64(half %x) nounwind {
 ; RV32IZFH-NEXT:    fle.s a0, fa5, fs0
 ; RV32IZFH-NEXT:    neg s0, a0
 ; RV32IZFH-NEXT:    fmv.s fa0, fs0
-; RV32IZFH-NEXT:    call __fixunssfdi@plt
+; RV32IZFH-NEXT:    call __fixunssfdi
 ; RV32IZFH-NEXT:    lui a2, %hi(.LCPI23_1)
 ; RV32IZFH-NEXT:    flw fa5, %lo(.LCPI23_1)(a2)
 ; RV32IZFH-NEXT:    and a0, s0, a0
@@ -4133,7 +4133,7 @@ define i64 @test_rint_ui64(half %x) nounwind {
 ; RV32IZHINX-NEXT:    fle.s a0, zero, s0
 ; RV32IZHINX-NEXT:    neg s1, a0
 ; RV32IZHINX-NEXT:    mv a0, s0
-; RV32IZHINX-NEXT:    call __fixunssfdi@plt
+; RV32IZHINX-NEXT:    call __fixunssfdi
 ; RV32IZHINX-NEXT:    lui a2, %hi(.LCPI23_1)
 ; RV32IZHINX-NEXT:    lw a2, %lo(.LCPI23_1)(a2)
 ; RV32IZHINX-NEXT:    and a0, s1, a0
@@ -4190,7 +4190,7 @@ define i64 @test_rint_ui64(half %x) nounwind {
 ; RV32IZFHMIN-NEXT:    fle.s a0, fa5, fs0
 ; RV32IZFHMIN-NEXT:    neg s0, a0
 ; RV32IZFHMIN-NEXT:    fmv.s fa0, fs0
-; RV32IZFHMIN-NEXT:    call __fixunssfdi@plt
+; RV32IZFHMIN-NEXT:    call __fixunssfdi
 ; RV32IZFHMIN-NEXT:    lui a2, %hi(.LCPI23_0)
 ; RV32IZFHMIN-NEXT:    flw fa5, %lo(.LCPI23_0)(a2)
 ; RV32IZFHMIN-NEXT:    and a0, s0, a0
@@ -4248,7 +4248,7 @@ define i64 @test_rint_ui64(half %x) nounwind {
 ; RV32IZHINXMIN-NEXT:    fle.s a0, zero, s0
 ; RV32IZHINXMIN-NEXT:    neg s1, a0
 ; RV32IZHINXMIN-NEXT:    mv a0, s0
-; RV32IZHINXMIN-NEXT:    call __fixunssfdi@plt
+; RV32IZHINXMIN-NEXT:    call __fixunssfdi
 ; RV32IZHINXMIN-NEXT:    lui a2, %hi(.LCPI23_0)
 ; RV32IZHINXMIN-NEXT:    lw a2, %lo(.LCPI23_0)(a2)
 ; RV32IZHINXMIN-NEXT:    and a0, s1, a0
diff --git a/llvm/test/CodeGen/RISCV/half-round-conv.ll b/llvm/test/CodeGen/RISCV/half-round-conv.ll
index 84ba49684fc643..173164db5b7807 100644
--- a/llvm/test/CodeGen/RISCV/half-round-conv.ll
+++ b/llvm/test/CodeGen/RISCV/half-round-conv.ll
@@ -323,7 +323,7 @@ define i64 @test_floor_si64(half %x) {
 ; RV32IZFH-NEXT:    .cfi_def_cfa_offset 16
 ; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFH-NEXT:    .cfi_offset ra, -4
-; RV32IZFH-NEXT:    call __fixhfdi@plt
+; RV32IZFH-NEXT:    call __fixhfdi
 ; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFH-NEXT:    addi sp, sp, 16
 ; RV32IZFH-NEXT:    ret
@@ -349,7 +349,7 @@ define i64 @test_floor_si64(half %x) {
 ; RV32IZHINX-NEXT:    .cfi_def_cfa_offset 16
 ; RV32IZHINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZHINX-NEXT:    .cfi_offset ra, -4
-; RV32IZHINX-NEXT:    call __fixhfdi@plt
+; RV32IZHINX-NEXT:    call __fixhfdi
 ; RV32IZHINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZHINX-NEXT:    addi sp, sp, 16
 ; RV32IZHINX-NEXT:    ret
@@ -387,7 +387,7 @@ define i64 @test_floor_si64(half %x) {
 ; RV32IZFHMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFHMIN-NEXT:    .cfi_offset ra, -4
 ; RV32IZFHMIN-NEXT:    fcvt.h.s fa0, fa5
-; RV32IZFHMIN-NEXT:    call __fixhfdi@plt
+; RV32IZFHMIN-NEXT:    call __fixhfdi
 ; RV32IZFHMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFHMIN-NEXT:    addi sp, sp, 16
 ; RV32IZFHMIN-NEXT:    ret
@@ -427,7 +427,7 @@ define i64 @test_floor_si64(half %x) {
 ; RV32IZHINXMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZHINXMIN-NEXT:    .cfi_offset ra, -4
 ; RV32IZHINXMIN-NEXT:    fcvt.h.s a0, a0
-; RV32IZHINXMIN-NEXT:    call __fixhfdi@plt
+; RV32IZHINXMIN-NEXT:    call __fixhfdi
 ; RV32IZHINXMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZHINXMIN-NEXT:    addi sp, sp, 16
 ; RV32IZHINXMIN-NEXT:    ret
@@ -760,7 +760,7 @@ define i64 @test_floor_ui64(half %x) {
 ; RV32IZFH-NEXT:    .cfi_def_cfa_offset 16
 ; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFH-NEXT:    .cfi_offset ra, -4
-; RV32IZFH-NEXT:    call __fixunshfdi@plt
+; RV32IZFH-NEXT:    call __fixunshfdi
 ; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFH-NEXT:    addi sp, sp, 16
 ; RV32IZFH-NEXT:    ret
@@ -786,7 +786,7 @@ define i64 @test_floor_ui64(half %x) {
 ; RV32IZHINX-NEXT:    .cfi_def_cfa_offset 16
 ; RV32IZHINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZHINX-NEXT:    .cfi_offset ra, -4
-; RV32IZHINX-NEXT:    call __fixunshfdi@plt
+; RV32IZHINX-NEXT:    call __fixunshfdi
 ; RV32IZHINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZHINX-NEXT:    addi sp, sp, 16
 ; RV32IZHINX-NEXT:    ret
@@ -824,7 +824,7 @@ define i64 @test_floor_ui64(half %x) {
 ; RV32IZFHMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFHMIN-NEXT:    .cfi_offset ra, -4
 ; RV32IZFHMIN-NEXT:    fcvt.h.s fa0, fa5
-; RV32IZFHMIN-NEXT:    call __fixunshfdi@plt
+; RV32IZFHMIN-NEXT:    call __fixunshfdi
 ; RV32IZFHMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFHMIN-NEXT:    addi sp, sp, 16
 ; RV32IZFHMIN-NEXT:    ret
@@ -864,7 +864,7 @@ define i64 @test_floor_ui64(half %x) {
 ; RV32IZHINXMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZHINXMIN-NEXT:    .cfi_offset ra, -4
 ; RV32IZHINXMIN-NEXT:    fcvt.h.s a0, a0
-; RV32IZHINXMIN-NEXT:    call __fixunshfdi@plt
+; RV32IZHINXMIN-NEXT:    call __fixunshfdi
 ; RV32IZHINXMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZHINXMIN-NEXT:    addi sp, sp, 16
 ; RV32IZHINXMIN-NEXT:    ret
@@ -1197,7 +1197,7 @@ define i64 @test_ceil_si64(half %x) {
 ; RV32IZFH-NEXT:    .cfi_def_cfa_offset 16
 ; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFH-NEXT:    .cfi_offset ra, -4
-; RV32IZFH-NEXT:    call __fixhfdi@plt
+; RV32IZFH-NEXT:    call __fixhfdi
 ; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFH-NEXT:    addi sp, sp, 16
 ; RV32IZFH-NEXT:    ret
@@ -1223,7 +1223,7 @@ define i64 @test_ceil_si64(half %x) {
 ; RV32IZHINX-NEXT:    .cfi_def_cfa_offset 16
 ; RV32IZHINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZHINX-NEXT:    .cfi_offset ra, -4
-; RV32IZHINX-NEXT:    call __fixhfdi@plt
+; RV32IZHINX-NEXT:    call __fixhfdi
 ; RV32IZHINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZHINX-NEXT:    addi sp, sp, 16
 ; RV32IZHINX-NEXT:    ret
@@ -1261,7 +1261,7 @@ define i64 @test_ceil_si64(half %x) {
 ; RV32IZFHMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFHMIN-NEXT:    .cfi_offset ra, -4
 ; RV32IZFHMIN-NEXT:    fcvt.h.s fa0, fa5
-; RV32IZFHMIN-NEXT:    call __fixhfdi@plt
+; RV32IZFHMIN-NEXT:    call __fixhfdi
 ; RV32IZFHMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFHMIN-NEXT:    addi sp, sp, 16
 ; RV32IZFHMIN-NEXT:    ret
@@ -1301,7 +1301,7 @@ define i64 @test_ceil_si64(half %x) {
 ; RV32IZHINXMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZHINXMIN-NEXT:    .cfi_offset ra, -4
 ; RV32IZHINXMIN-NEXT:    fcvt.h.s a0, a0
-; RV32IZHINXMIN-NEXT:    call __fixhfdi@plt
+; RV32IZHINXMIN-NEXT:    call __fixhfdi
 ; RV32IZHINXMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZHINXMIN-NEXT:    addi sp, sp, 16
 ; RV32IZHINXMIN-NEXT:    ret
@@ -1634,7 +1634,7 @@ define i64 @test_ceil_ui64(half %x) {
 ; RV32IZFH-NEXT:    .cfi_def_cfa_offset 16
 ; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFH-NEXT:    .cfi_offset ra, -4
-; RV32IZFH-NEXT:    call __fixunshfdi@plt
+; RV32IZFH-NEXT:    call __fixunshfdi
 ; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFH-NEXT:    addi sp, sp, 16
 ; RV32IZFH-NEXT:    ret
@@ -1660,7 +1660,7 @@ define i64 @test_ceil_ui64(half %x) {
 ; RV32IZHINX-NEXT:    .cfi_def_cfa_offset 16
 ; RV32IZHINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZHINX-NEXT:    .cfi_offset ra, -4
-; RV32IZHINX-NEXT:    call __fixunshfdi@plt
+; RV32IZHINX-NEXT:    call __fixunshfdi
 ; RV32IZHINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZHINX-NEXT:    addi sp, sp, 16
 ; RV32IZHINX-NEXT:    ret
@@ -1698,7 +1698,7 @@ define i64 @test_ceil_ui64(half %x) {
 ; RV32IZFHMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFHMIN-NEXT:    .cfi_offset ra, -4
 ; RV32IZFHMIN-NEXT:    fcvt.h.s fa0, fa5
-; RV32IZFHMIN-NEXT:    call __fixunshfdi@plt
+; RV32IZFHMIN-NEXT:    call __fixunshfdi
 ; RV32IZFHMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFHMIN-NEXT:    addi sp, sp, 16
 ; RV32IZFHMIN-NEXT:    ret
@@ -1738,7 +1738,7 @@ define i64 @test_ceil_ui64(half %x) {
 ; RV32IZHINXMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZHINXMIN-NEXT:    .cfi_offset ra, -4
 ; RV32IZHINXMIN-NEXT:    fcvt.h.s a0, a0
-; RV32IZHINXMIN-NEXT:    call __fixunshfdi@plt
+; RV32IZHINXMIN-NEXT:    call __fixunshfdi
 ; RV32IZHINXMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZHINXMIN-NEXT:    addi sp, sp, 16
 ; RV32IZHINXMIN-NEXT:    ret
@@ -2071,7 +2071,7 @@ define i64 @test_trunc_si64(half %x) {
 ; RV32IZFH-NEXT:    .cfi_def_cfa_offset 16
 ; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFH-NEXT:    .cfi_offset ra, -4
-; RV32IZFH-NEXT:    call __fixhfdi@plt
+; RV32IZFH-NEXT:    call __fixhfdi
 ; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFH-NEXT:    addi sp, sp, 16
 ; RV32IZFH-NEXT:    ret
@@ -2097,7 +2097,7 @@ define i64 @test_trunc_si64(half %x) {
 ; RV32IZHINX-NEXT:    .cfi_def_cfa_offset 16
 ; RV32IZHINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZHINX-NEXT:    .cfi_offset ra, -4
-; RV32IZHINX-NEXT:    call __fixhfdi@plt
+; RV32IZHINX-NEXT:    call __fixhfdi
 ; RV32IZHINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZHINX-NEXT:    addi sp, sp, 16
 ; RV32IZHINX-NEXT:    ret
@@ -2135,7 +2135,7 @@ define i64 @test_trunc_si64(half %x) {
 ; RV32IZFHMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFHMIN-NEXT:    .cfi_offset ra, -4
 ; RV32IZFHMIN-NEXT:    fcvt.h.s fa0, fa5
-; RV32IZFHMIN-NEXT:    call __fixhfdi@plt
+; RV32IZFHMIN-NEXT:    call __fixhfdi
 ; RV32IZFHMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFHMIN-NEXT:    addi sp, sp, 16
 ; RV32IZFHMIN-NEXT:    ret
@@ -2175,7 +2175,7 @@ define i64 @test_trunc_si64(half %x) {
 ; RV32IZHINXMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZHINXMIN-NEXT:    .cfi_offset ra, -4
 ; RV32IZHINXMIN-NEXT:    fcvt.h.s a0, a0
-; RV32IZHINXMIN-NEXT:    call __fixhfdi@plt
+; RV32IZHINXMIN-NEXT:    call __fixhfdi
 ; RV32IZHINXMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZHINXMIN-NEXT:    addi sp, sp, 16
 ; RV32IZHINXMIN-NEXT:    ret
@@ -2508,7 +2508,7 @@ define i64 @test_trunc_ui64(half %x) {
 ; RV32IZFH-NEXT:    .cfi_def_cfa_offset 16
 ; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFH-NEXT:    .cfi_offset ra, -4
-; RV32IZFH-NEXT:    call __fixunshfdi@plt
+; RV32IZFH-NEXT:    call __fixunshfdi
 ; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFH-NEXT:    addi sp, sp, 16
 ; RV32IZFH-NEXT:    ret
@@ -2534,7 +2534,7 @@ define i64 @test_trunc_ui64(half %x) {
 ; RV32IZHINX-NEXT:    .cfi_def_cfa_offset 16
 ; RV32IZHINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZHINX-NEXT:    .cfi_offset ra, -4
-; RV32IZHINX-NEXT:    call __fixunshfdi@plt
+; RV32IZHINX-NEXT:    call __fixunshfdi
 ; RV32IZHINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZHINX-NEXT:    addi sp, sp, 16
 ; RV32IZHINX-NEXT:    ret
@@ -2572,7 +2572,7 @@ define i64 @test_trunc_ui64(half %x) {
 ; RV32IZFHMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFHMIN-NEXT:    .cfi_offset ra, -4
 ; RV32IZFHMIN-NEXT:    fcvt.h.s fa0, fa5
-; RV32IZFHMIN-NEXT:    call __fixunshfdi@plt
+; RV32IZFHMIN-NEXT:    call __fixunshfdi
 ; RV32IZFHMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFHMIN-NEXT:    addi sp, sp, 16
 ; RV32IZFHMIN-NEXT:    ret
@@ -2612,7 +2612,7 @@ define i64 @test_trunc_ui64(half %x) {
 ; RV32IZHINXMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZHINXMIN-NEXT:    .cfi_offset ra, -4
 ; RV32IZHINXMIN-NEXT:    fcvt.h.s a0, a0
-; RV32IZHINXMIN-NEXT:    call __fixunshfdi@plt
+; RV32IZHINXMIN-NEXT:    call __fixunshfdi
 ; RV32IZHINXMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZHINXMIN-NEXT:    addi sp, sp, 16
 ; RV32IZHINXMIN-NEXT:    ret
@@ -2945,7 +2945,7 @@ define i64 @test_round_si64(half %x) {
 ; RV32IZFH-NEXT:    .cfi_def_cfa_offset 16
 ; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFH-NEXT:    .cfi_offset ra, -4
-; RV32IZFH-NEXT:    call __fixhfdi@plt
+; RV32IZFH-NEXT:    call __fixhfdi
 ; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFH-NEXT:    addi sp, sp, 16
 ; RV32IZFH-NEXT:    ret
@@ -2971,7 +2971,7 @@ define i64 @test_round_si64(half %x) {
 ; RV32IZHINX-NEXT:    .cfi_def_cfa_offset 16
 ; RV32IZHINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZHINX-NEXT:    .cfi_offset ra, -4
-; RV32IZHINX-NEXT:    call __fixhfdi@plt
+; RV32IZHINX-NEXT:    call __fixhfdi
 ; RV32IZHINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZHINX-NEXT:    addi sp, sp, 16
 ; RV32IZHINX-NEXT:    ret
@@ -3009,7 +3009,7 @@ define i64 @test_round_si64(half %x) {
 ; RV32IZFHMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFHMIN-NEXT:    .cfi_offset ra, -4
 ; RV32IZFHMIN-NEXT:    fcvt.h.s fa0, fa5
-; RV32IZFHMIN-NEXT:    call __fixhfdi@plt
+; RV32IZFHMIN-NEXT:    call __fixhfdi
 ; RV32IZFHMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFHMIN-NEXT:    addi sp, sp, 16
 ; RV32IZFHMIN-NEXT:    ret
@@ -3049,7 +3049,7 @@ define i64 @test_round_si64(half %x) {
 ; RV32IZHINXMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZHINXMIN-NEXT:    .cfi_offset ra, -4
 ; RV32IZHINXMIN-NEXT:    fcvt.h.s a0, a0
-; RV32IZHINXMIN-NEXT:    call __fixhfdi@plt
+; RV32IZHINXMIN-NEXT:    call __fixhfdi
 ; RV32IZHINXMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZHINXMIN-NEXT:    addi sp, sp, 16
 ; RV32IZHINXMIN-NEXT:    ret
@@ -3382,7 +3382,7 @@ define i64 @test_round_ui64(half %x) {
 ; RV32IZFH-NEXT:    .cfi_def_cfa_offset 16
 ; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFH-NEXT:    .cfi_offset ra, -4
-; RV32IZFH-NEXT:    call __fixunshfdi@plt
+; RV32IZFH-NEXT:    call __fixunshfdi
 ; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFH-NEXT:    addi sp, sp, 16
 ; RV32IZFH-NEXT:    ret
@@ -3408,7 +3408,7 @@ define i64 @test_round_ui64(half %x) {
 ; RV32IZHINX-NEXT:    .cfi_def_cfa_offset 16
 ; RV32IZHINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZHINX-NEXT:    .cfi_offset ra, -4
-; RV32IZHINX-NEXT:    call __fixunshfdi@plt
+; RV32IZHINX-NEXT:    call __fixunshfdi
 ; RV32IZHINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZHINX-NEXT:    addi sp, sp, 16
 ; RV32IZHINX-NEXT:    ret
@@ -3446,7 +3446,7 @@ define i64 @test_round_ui64(half %x) {
 ; RV32IZFHMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFHMIN-NEXT:    .cfi_offset ra, -4
 ; RV32IZFHMIN-NEXT:    fcvt.h.s fa0, fa5
-; RV32IZFHMIN-NEXT:    call __fixunshfdi@plt
+; RV32IZFHMIN-NEXT:    call __fixunshfdi
 ; RV32IZFHMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFHMIN-NEXT:    addi sp, sp, 16
 ; RV32IZFHMIN-NEXT:    ret
@@ -3486,7 +3486,7 @@ define i64 @test_round_ui64(half %x) {
 ; RV32IZHINXMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZHINXMIN-NEXT:    .cfi_offset ra, -4
 ; RV32IZHINXMIN-NEXT:    fcvt.h.s a0, a0
-; RV32IZHINXMIN-NEXT:    call __fixunshfdi@plt
+; RV32IZHINXMIN-NEXT:    call __fixunshfdi
 ; RV32IZHINXMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZHINXMIN-NEXT:    addi sp, sp, 16
 ; RV32IZHINXMIN-NEXT:    ret
@@ -3819,7 +3819,7 @@ define i64 @test_roundeven_si64(half %x) {
 ; RV32IZFH-NEXT:    .cfi_def_cfa_offset 16
 ; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFH-NEXT:    .cfi_offset ra, -4
-; RV32IZFH-NEXT:    call __fixhfdi@plt
+; RV32IZFH-NEXT:    call __fixhfdi
 ; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFH-NEXT:    addi sp, sp, 16
 ; RV32IZFH-NEXT:    ret
@@ -3845,7 +3845,7 @@ define i64 @test_roundeven_si64(half %x) {
 ; RV32IZHINX-NEXT:    .cfi_def_cfa_offset 16
 ; RV32IZHINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZHINX-NEXT:    .cfi_offset ra, -4
-; RV32IZHINX-NEXT:    call __fixhfdi@plt
+; RV32IZHINX-NEXT:    call __fixhfdi
 ; RV32IZHINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZHINX-NEXT:    addi sp, sp, 16
 ; RV32IZHINX-NEXT:    ret
@@ -3883,7 +3883,7 @@ define i64 @test_roundeven_si64(half %x) {
 ; RV32IZFHMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFHMIN-NEXT:    .cfi_offset ra, -4
 ; RV32IZFHMIN-NEXT:    fcvt.h.s fa0, fa5
-; RV32IZFHMIN-NEXT:    call __fixhfdi@plt
+; RV32IZFHMIN-NEXT:    call __fixhfdi
 ; RV32IZFHMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFHMIN-NEXT:    addi sp, sp, 16
 ; RV32IZFHMIN-NEXT:    ret
@@ -3923,7 +3923,7 @@ define i64 @test_roundeven_si64(half %x) {
 ; RV32IZHINXMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZHINXMIN-NEXT:    .cfi_offset ra, -4
 ; RV32IZHINXMIN-NEXT:    fcvt.h.s a0, a0
-; RV32IZHINXMIN-NEXT:    call __fixhfdi@plt
+; RV32IZHINXMIN-NEXT:    call __fixhfdi
 ; RV32IZHINXMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZHINXMIN-NEXT:    addi sp, sp, 16
 ; RV32IZHINXMIN-NEXT:    ret
@@ -4256,7 +4256,7 @@ define i64 @test_roundeven_ui64(half %x) {
 ; RV32IZFH-NEXT:    .cfi_def_cfa_offset 16
 ; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFH-NEXT:    .cfi_offset ra, -4
-; RV32IZFH-NEXT:    call __fixunshfdi@plt
+; RV32IZFH-NEXT:    call __fixunshfdi
 ; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFH-NEXT:    addi sp, sp, 16
 ; RV32IZFH-NEXT:    ret
@@ -4282,7 +4282,7 @@ define i64 @test_roundeven_ui64(half %x) {
 ; RV32IZHINX-NEXT:    .cfi_def_cfa_offset 16
 ; RV32IZHINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZHINX-NEXT:    .cfi_offset ra, -4
-; RV32IZHINX-NEXT:    call __fixunshfdi@plt
+; RV32IZHINX-NEXT:    call __fixunshfdi
 ; RV32IZHINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZHINX-NEXT:    addi sp, sp, 16
 ; RV32IZHINX-NEXT:    ret
@@ -4320,7 +4320,7 @@ define i64 @test_roundeven_ui64(half %x) {
 ; RV32IZFHMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFHMIN-NEXT:    .cfi_offset ra, -4
 ; RV32IZFHMIN-NEXT:    fcvt.h.s fa0, fa5
-; RV32IZFHMIN-NEXT:    call __fixunshfdi@plt
+; RV32IZFHMIN-NEXT:    call __fixunshfdi
 ; RV32IZFHMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFHMIN-NEXT:    addi sp, sp, 16
 ; RV32IZFHMIN-NEXT:    ret
@@ -4360,7 +4360,7 @@ define i64 @test_roundeven_ui64(half %x) {
 ; RV32IZHINXMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZHINXMIN-NEXT:    .cfi_offset ra, -4
 ; RV32IZHINXMIN-NEXT:    fcvt.h.s a0, a0
-; RV32IZHINXMIN-NEXT:    call __fixunshfdi@plt
+; RV32IZHINXMIN-NEXT:    call __fixunshfdi
 ; RV32IZHINXMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZHINXMIN-NEXT:    addi sp, sp, 16
 ; RV32IZHINXMIN-NEXT:    ret
diff --git a/llvm/test/CodeGen/RISCV/hoist-global-addr-base.ll b/llvm/test/CodeGen/RISCV/hoist-global-addr-base.ll
index 55c30046366d76..5f9866f08c821d 100644
--- a/llvm/test/CodeGen/RISCV/hoist-global-addr-base.ll
+++ b/llvm/test/CodeGen/RISCV/hoist-global-addr-base.ll
@@ -149,7 +149,7 @@ define dso_local i32 @load_half() nounwind {
 ; RV32-NEXT:  .LBB8_2: # %if.then
 ; RV32-NEXT:    addi sp, sp, -16
 ; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32-NEXT:    call abort@plt
+; RV32-NEXT:    call abort
 ;
 ; RV64-LABEL: load_half:
 ; RV64:       # %bb.0: # %entry
@@ -163,7 +163,7 @@ define dso_local i32 @load_half() nounwind {
 ; RV64-NEXT:  .LBB8_2: # %if.then
 ; RV64-NEXT:    addi sp, sp, -16
 ; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64-NEXT:    call abort@plt
+; RV64-NEXT:    call abort
 entry:
   %0 = load i16, ptr getelementptr inbounds ([6 x i16], ptr @foo, i32 0, i32 4), align 2
   %cmp = icmp eq i16 %0, 140
diff --git a/llvm/test/CodeGen/RISCV/interrupt-attr-callee.ll b/llvm/test/CodeGen/RISCV/interrupt-attr-callee.ll
index 0ead223d87ca3f..0c9528f291c85d 100644
--- a/llvm/test/CodeGen/RISCV/interrupt-attr-callee.ll
+++ b/llvm/test/CodeGen/RISCV/interrupt-attr-callee.ll
@@ -18,14 +18,14 @@ define dso_local void @handler() nounwind {
 ; CHECK-RV32-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; CHECK-RV32-NEXT:    lui a0, 2
 ; CHECK-RV32-NEXT:    addi a0, a0, 4
-; CHECK-RV32-NEXT:    call read@plt
+; CHECK-RV32-NEXT:    call read
 ; CHECK-RV32-NEXT:    mv s0, a0
-; CHECK-RV32-NEXT:    call callee@plt
+; CHECK-RV32-NEXT:    call callee
 ; CHECK-RV32-NEXT:    mv a0, s0
 ; CHECK-RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; CHECK-RV32-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; CHECK-RV32-NEXT:    addi sp, sp, 16
-; CHECK-RV32-NEXT:    tail write@plt
+; CHECK-RV32-NEXT:    tail write
 ;
 ; CHECK-RV32-F-LABEL: handler:
 ; CHECK-RV32-F:       # %bb.0: # %entry
@@ -34,14 +34,14 @@ define dso_local void @handler() nounwind {
 ; CHECK-RV32-F-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; CHECK-RV32-F-NEXT:    lui a0, 2
 ; CHECK-RV32-F-NEXT:    addi a0, a0, 4
-; CHECK-RV32-F-NEXT:    call read@plt
+; CHECK-RV32-F-NEXT:    call read
 ; CHECK-RV32-F-NEXT:    mv s0, a0
-; CHECK-RV32-F-NEXT:    call callee@plt
+; CHECK-RV32-F-NEXT:    call callee
 ; CHECK-RV32-F-NEXT:    mv a0, s0
 ; CHECK-RV32-F-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; CHECK-RV32-F-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; CHECK-RV32-F-NEXT:    addi sp, sp, 16
-; CHECK-RV32-F-NEXT:    tail write@plt
+; CHECK-RV32-F-NEXT:    tail write
 ;
 ; CHECK-RV32-FD-LABEL: handler:
 ; CHECK-RV32-FD:       # %bb.0: # %entry
@@ -50,14 +50,14 @@ define dso_local void @handler() nounwind {
 ; CHECK-RV32-FD-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; CHECK-RV32-FD-NEXT:    lui a0, 2
 ; CHECK-RV32-FD-NEXT:    addi a0, a0, 4
-; CHECK-RV32-FD-NEXT:    call read@plt
+; CHECK-RV32-FD-NEXT:    call read
 ; CHECK-RV32-FD-NEXT:    mv s0, a0
-; CHECK-RV32-FD-NEXT:    call callee@plt
+; CHECK-RV32-FD-NEXT:    call callee
 ; CHECK-RV32-FD-NEXT:    mv a0, s0
 ; CHECK-RV32-FD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; CHECK-RV32-FD-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; CHECK-RV32-FD-NEXT:    addi sp, sp, 16
-; CHECK-RV32-FD-NEXT:    tail write@plt
+; CHECK-RV32-FD-NEXT:    tail write
 entry:
   %call = tail call i32 @read(i32 8196)
   tail call void @callee()
diff --git a/llvm/test/CodeGen/RISCV/interrupt-attr-nocall.ll b/llvm/test/CodeGen/RISCV/interrupt-attr-nocall.ll
index 7d80c34eabcbc7..263743d39a8e68 100644
--- a/llvm/test/CodeGen/RISCV/interrupt-attr-nocall.ll
+++ b/llvm/test/CodeGen/RISCV/interrupt-attr-nocall.ll
@@ -185,7 +185,7 @@ define void @foo_float() nounwind #0 {
 ; CHECK-RV32-NEXT:    lw a0, %lo(e)(a0)
 ; CHECK-RV32-NEXT:    lui a1, %hi(f)
 ; CHECK-RV32-NEXT:    lw a1, %lo(f)(a1)
-; CHECK-RV32-NEXT:    call __addsf3@plt
+; CHECK-RV32-NEXT:    call __addsf3
 ; CHECK-RV32-NEXT:    lui a1, %hi(d)
 ; CHECK-RV32-NEXT:    sw a0, %lo(d)(a1)
 ; CHECK-RV32-NEXT:    lw ra, 60(sp) # 4-byte Folded Reload
@@ -280,7 +280,7 @@ define void @foo_fp_float() nounwind #1 {
 ; CHECK-RV32-NEXT:    lw a0, %lo(e)(a0)
 ; CHECK-RV32-NEXT:    lui a1, %hi(f)
 ; CHECK-RV32-NEXT:    lw a1, %lo(f)(a1)
-; CHECK-RV32-NEXT:    call __addsf3@plt
+; CHECK-RV32-NEXT:    call __addsf3
 ; CHECK-RV32-NEXT:    lui a1, %hi(d)
 ; CHECK-RV32-NEXT:    sw a0, %lo(d)(a1)
 ; CHECK-RV32-NEXT:    lw ra, 76(sp) # 4-byte Folded Reload
@@ -387,7 +387,7 @@ define void @foo_double() nounwind #0 {
 ; CHECK-RV32-NEXT:    lui a3, %hi(i)
 ; CHECK-RV32-NEXT:    lw a2, %lo(i)(a3)
 ; CHECK-RV32-NEXT:    lw a3, %lo(i+4)(a3)
-; CHECK-RV32-NEXT:    call __adddf3@plt
+; CHECK-RV32-NEXT:    call __adddf3
 ; CHECK-RV32-NEXT:    lui a2, %hi(g)
 ; CHECK-RV32-NEXT:    sw a1, %lo(g+4)(a2)
 ; CHECK-RV32-NEXT:    sw a0, %lo(g)(a2)
@@ -467,7 +467,7 @@ define void @foo_double() nounwind #0 {
 ; CHECK-RV32IF-NEXT:    lui a3, %hi(i)
 ; CHECK-RV32IF-NEXT:    lw a2, %lo(i)(a3)
 ; CHECK-RV32IF-NEXT:    lw a3, %lo(i+4)(a3)
-; CHECK-RV32IF-NEXT:    call __adddf3@plt
+; CHECK-RV32IF-NEXT:    call __adddf3
 ; CHECK-RV32IF-NEXT:    lui a2, %hi(g)
 ; CHECK-RV32IF-NEXT:    sw a1, %lo(g+4)(a2)
 ; CHECK-RV32IF-NEXT:    sw a0, %lo(g)(a2)
@@ -578,7 +578,7 @@ define void @foo_fp_double() nounwind #1 {
 ; CHECK-RV32-NEXT:    lui a3, %hi(i)
 ; CHECK-RV32-NEXT:    lw a2, %lo(i)(a3)
 ; CHECK-RV32-NEXT:    lw a3, %lo(i+4)(a3)
-; CHECK-RV32-NEXT:    call __adddf3@plt
+; CHECK-RV32-NEXT:    call __adddf3
 ; CHECK-RV32-NEXT:    lui a2, %hi(g)
 ; CHECK-RV32-NEXT:    sw a1, %lo(g+4)(a2)
 ; CHECK-RV32-NEXT:    sw a0, %lo(g)(a2)
@@ -661,7 +661,7 @@ define void @foo_fp_double() nounwind #1 {
 ; CHECK-RV32IF-NEXT:    lui a3, %hi(i)
 ; CHECK-RV32IF-NEXT:    lw a2, %lo(i)(a3)
 ; CHECK-RV32IF-NEXT:    lw a3, %lo(i+4)(a3)
-; CHECK-RV32IF-NEXT:    call __adddf3@plt
+; CHECK-RV32IF-NEXT:    call __adddf3
 ; CHECK-RV32IF-NEXT:    lui a2, %hi(g)
 ; CHECK-RV32IF-NEXT:    sw a1, %lo(g+4)(a2)
 ; CHECK-RV32IF-NEXT:    sw a0, %lo(g)(a2)
diff --git a/llvm/test/CodeGen/RISCV/interrupt-attr.ll b/llvm/test/CodeGen/RISCV/interrupt-attr.ll
index 5b269014b8f8fc..5887968042cb00 100644
--- a/llvm/test/CodeGen/RISCV/interrupt-attr.ll
+++ b/llvm/test/CodeGen/RISCV/interrupt-attr.ll
@@ -63,7 +63,7 @@ define void @foo_with_call() #1 {
 ; CHECK-RV32-NEXT:    sw t4, 8(sp) # 4-byte Folded Spill
 ; CHECK-RV32-NEXT:    sw t5, 4(sp) # 4-byte Folded Spill
 ; CHECK-RV32-NEXT:    sw t6, 0(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    call otherfoo@plt
+; CHECK-RV32-NEXT:    call otherfoo
 ; CHECK-RV32-NEXT:    lw ra, 60(sp) # 4-byte Folded Reload
 ; CHECK-RV32-NEXT:    lw t0, 56(sp) # 4-byte Folded Reload
 ; CHECK-RV32-NEXT:    lw t1, 52(sp) # 4-byte Folded Reload
@@ -134,7 +134,7 @@ define void @foo_with_call() #1 {
 ; CHECK-RV32-F-NEXT:    fsw ft9, 8(sp) # 4-byte Folded Spill
 ; CHECK-RV32-F-NEXT:    fsw ft10, 4(sp) # 4-byte Folded Spill
 ; CHECK-RV32-F-NEXT:    fsw ft11, 0(sp) # 4-byte Folded Spill
-; CHECK-RV32-F-NEXT:    call otherfoo@plt
+; CHECK-RV32-F-NEXT:    call otherfoo
 ; CHECK-RV32-F-NEXT:    lw ra, 188(sp) # 4-byte Folded Reload
 ; CHECK-RV32-F-NEXT:    lw t0, 184(sp) # 4-byte Folded Reload
 ; CHECK-RV32-F-NEXT:    lw t1, 180(sp) # 4-byte Folded Reload
@@ -237,7 +237,7 @@ define void @foo_with_call() #1 {
 ; CHECK-RV32-FD-NEXT:    fsd ft9, 16(sp) # 8-byte Folded Spill
 ; CHECK-RV32-FD-NEXT:    fsd ft10, 8(sp) # 8-byte Folded Spill
 ; CHECK-RV32-FD-NEXT:    fsd ft11, 0(sp) # 8-byte Folded Spill
-; CHECK-RV32-FD-NEXT:    call otherfoo@plt
+; CHECK-RV32-FD-NEXT:    call otherfoo
 ; CHECK-RV32-FD-NEXT:    lw ra, 316(sp) # 4-byte Folded Reload
 ; CHECK-RV32-FD-NEXT:    lw t0, 312(sp) # 4-byte Folded Reload
 ; CHECK-RV32-FD-NEXT:    lw t1, 308(sp) # 4-byte Folded Reload
@@ -308,7 +308,7 @@ define void @foo_with_call() #1 {
 ; CHECK-RV64-NEXT:    sd t4, 16(sp) # 8-byte Folded Spill
 ; CHECK-RV64-NEXT:    sd t5, 8(sp) # 8-byte Folded Spill
 ; CHECK-RV64-NEXT:    sd t6, 0(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    call otherfoo@plt
+; CHECK-RV64-NEXT:    call otherfoo
 ; CHECK-RV64-NEXT:    ld ra, 120(sp) # 8-byte Folded Reload
 ; CHECK-RV64-NEXT:    ld t0, 112(sp) # 8-byte Folded Reload
 ; CHECK-RV64-NEXT:    ld t1, 104(sp) # 8-byte Folded Reload
@@ -379,7 +379,7 @@ define void @foo_with_call() #1 {
 ; CHECK-RV64-F-NEXT:    fsw ft9, 8(sp) # 4-byte Folded Spill
 ; CHECK-RV64-F-NEXT:    fsw ft10, 4(sp) # 4-byte Folded Spill
 ; CHECK-RV64-F-NEXT:    fsw ft11, 0(sp) # 4-byte Folded Spill
-; CHECK-RV64-F-NEXT:    call otherfoo@plt
+; CHECK-RV64-F-NEXT:    call otherfoo
 ; CHECK-RV64-F-NEXT:    ld ra, 248(sp) # 8-byte Folded Reload
 ; CHECK-RV64-F-NEXT:    ld t0, 240(sp) # 8-byte Folded Reload
 ; CHECK-RV64-F-NEXT:    ld t1, 232(sp) # 8-byte Folded Reload
@@ -482,7 +482,7 @@ define void @foo_with_call() #1 {
 ; CHECK-RV64-FD-NEXT:    fsd ft9, 16(sp) # 8-byte Folded Spill
 ; CHECK-RV64-FD-NEXT:    fsd ft10, 8(sp) # 8-byte Folded Spill
 ; CHECK-RV64-FD-NEXT:    fsd ft11, 0(sp) # 8-byte Folded Spill
-; CHECK-RV64-FD-NEXT:    call otherfoo@plt
+; CHECK-RV64-FD-NEXT:    call otherfoo
 ; CHECK-RV64-FD-NEXT:    ld ra, 376(sp) # 8-byte Folded Reload
 ; CHECK-RV64-FD-NEXT:    ld t0, 368(sp) # 8-byte Folded Reload
 ; CHECK-RV64-FD-NEXT:    ld t1, 360(sp) # 8-byte Folded Reload
@@ -563,7 +563,7 @@ define void @foo_fp_with_call() #2 {
 ; CHECK-RV32-NEXT:    sw t5, 16(sp) # 4-byte Folded Spill
 ; CHECK-RV32-NEXT:    sw t6, 12(sp) # 4-byte Folded Spill
 ; CHECK-RV32-NEXT:    addi s0, sp, 80
-; CHECK-RV32-NEXT:    call otherfoo@plt
+; CHECK-RV32-NEXT:    call otherfoo
 ; CHECK-RV32-NEXT:    lw ra, 76(sp) # 4-byte Folded Reload
 ; CHECK-RV32-NEXT:    lw t0, 72(sp) # 4-byte Folded Reload
 ; CHECK-RV32-NEXT:    lw t1, 68(sp) # 4-byte Folded Reload
@@ -637,7 +637,7 @@ define void @foo_fp_with_call() #2 {
 ; CHECK-RV32-F-NEXT:    fsw ft10, 16(sp) # 4-byte Folded Spill
 ; CHECK-RV32-F-NEXT:    fsw ft11, 12(sp) # 4-byte Folded Spill
 ; CHECK-RV32-F-NEXT:    addi s0, sp, 208
-; CHECK-RV32-F-NEXT:    call otherfoo@plt
+; CHECK-RV32-F-NEXT:    call otherfoo
 ; CHECK-RV32-F-NEXT:    lw ra, 204(sp) # 4-byte Folded Reload
 ; CHECK-RV32-F-NEXT:    lw t0, 200(sp) # 4-byte Folded Reload
 ; CHECK-RV32-F-NEXT:    lw t1, 196(sp) # 4-byte Folded Reload
@@ -743,7 +743,7 @@ define void @foo_fp_with_call() #2 {
 ; CHECK-RV32-FD-NEXT:    fsd ft10, 16(sp) # 8-byte Folded Spill
 ; CHECK-RV32-FD-NEXT:    fsd ft11, 8(sp) # 8-byte Folded Spill
 ; CHECK-RV32-FD-NEXT:    addi s0, sp, 336
-; CHECK-RV32-FD-NEXT:    call otherfoo@plt
+; CHECK-RV32-FD-NEXT:    call otherfoo
 ; CHECK-RV32-FD-NEXT:    lw ra, 332(sp) # 4-byte Folded Reload
 ; CHECK-RV32-FD-NEXT:    lw t0, 328(sp) # 4-byte Folded Reload
 ; CHECK-RV32-FD-NEXT:    lw t1, 324(sp) # 4-byte Folded Reload
@@ -817,7 +817,7 @@ define void @foo_fp_with_call() #2 {
 ; CHECK-RV64-NEXT:    sd t5, 16(sp) # 8-byte Folded Spill
 ; CHECK-RV64-NEXT:    sd t6, 8(sp) # 8-byte Folded Spill
 ; CHECK-RV64-NEXT:    addi s0, sp, 144
-; CHECK-RV64-NEXT:    call otherfoo@plt
+; CHECK-RV64-NEXT:    call otherfoo
 ; CHECK-RV64-NEXT:    ld ra, 136(sp) # 8-byte Folded Reload
 ; CHECK-RV64-NEXT:    ld t0, 128(sp) # 8-byte Folded Reload
 ; CHECK-RV64-NEXT:    ld t1, 120(sp) # 8-byte Folded Reload
@@ -891,7 +891,7 @@ define void @foo_fp_with_call() #2 {
 ; CHECK-RV64-F-NEXT:    fsw ft10, 12(sp) # 4-byte Folded Spill
 ; CHECK-RV64-F-NEXT:    fsw ft11, 8(sp) # 4-byte Folded Spill
 ; CHECK-RV64-F-NEXT:    addi s0, sp, 272
-; CHECK-RV64-F-NEXT:    call otherfoo@plt
+; CHECK-RV64-F-NEXT:    call otherfoo
 ; CHECK-RV64-F-NEXT:    ld ra, 264(sp) # 8-byte Folded Reload
 ; CHECK-RV64-F-NEXT:    ld t0, 256(sp) # 8-byte Folded Reload
 ; CHECK-RV64-F-NEXT:    ld t1, 248(sp) # 8-byte Folded Reload
@@ -997,7 +997,7 @@ define void @foo_fp_with_call() #2 {
 ; CHECK-RV64-FD-NEXT:    fsd ft10, 16(sp) # 8-byte Folded Spill
 ; CHECK-RV64-FD-NEXT:    fsd ft11, 8(sp) # 8-byte Folded Spill
 ; CHECK-RV64-FD-NEXT:    addi s0, sp, 400
-; CHECK-RV64-FD-NEXT:    call otherfoo@plt
+; CHECK-RV64-FD-NEXT:    call otherfoo
 ; CHECK-RV64-FD-NEXT:    ld ra, 392(sp) # 8-byte Folded Reload
 ; CHECK-RV64-FD-NEXT:    ld t0, 384(sp) # 8-byte Folded Reload
 ; CHECK-RV64-FD-NEXT:    ld t1, 376(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/intrinsic-cttz-elts-vscale.ll b/llvm/test/CodeGen/RISCV/intrinsic-cttz-elts-vscale.ll
index 60f72c41e83643..bafa92e06834ac 100644
--- a/llvm/test/CodeGen/RISCV/intrinsic-cttz-elts-vscale.ll
+++ b/llvm/test/CodeGen/RISCV/intrinsic-cttz-elts-vscale.ll
@@ -74,7 +74,7 @@ define i64 @ctz_nxv8i1_no_range(<vscale x 8 x i16> %a) {
 ; RV32-NEXT:    li a2, 8
 ; RV32-NEXT:    li a1, 0
 ; RV32-NEXT:    li a3, 0
-; RV32-NEXT:    call __muldi3@plt
+; RV32-NEXT:    call __muldi3
 ; RV32-NEXT:    sw a1, 20(sp)
 ; RV32-NEXT:    sw a0, 16(sp)
 ; RV32-NEXT:    addi a2, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/libcall-tail-calls.ll b/llvm/test/CodeGen/RISCV/libcall-tail-calls.ll
index 043d48918e3b9d..541fb377425709 100644
--- a/llvm/test/CodeGen/RISCV/libcall-tail-calls.ll
+++ b/llvm/test/CodeGen/RISCV/libcall-tail-calls.ll
@@ -28,7 +28,7 @@ define zeroext i8 @udiv8(i8 zeroext %a, i8 zeroext %b) nounwind {
 ; RV32-ALL:       # %bb.0:
 ; RV32-ALL-NEXT:    addi sp, sp, -16
 ; RV32-ALL-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32-ALL-NEXT:    call __udivsi3@plt
+; RV32-ALL-NEXT:    call __udivsi3
 ; RV32-ALL-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-ALL-NEXT:    addi sp, sp, 16
 ; RV32-ALL-NEXT:    ret
@@ -37,7 +37,7 @@ define zeroext i8 @udiv8(i8 zeroext %a, i8 zeroext %b) nounwind {
 ; RV64-ALL:       # %bb.0:
 ; RV64-ALL-NEXT:    addi sp, sp, -16
 ; RV64-ALL-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64-ALL-NEXT:    call __udivdi3@plt
+; RV64-ALL-NEXT:    call __udivdi3
 ; RV64-ALL-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-ALL-NEXT:    addi sp, sp, 16
 ; RV64-ALL-NEXT:    ret
@@ -50,7 +50,7 @@ define signext i16 @sdiv16(i16 signext %a, i16 signext %b) nounwind {
 ; RV32-ALL:       # %bb.0:
 ; RV32-ALL-NEXT:    addi sp, sp, -16
 ; RV32-ALL-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32-ALL-NEXT:    call __divsi3@plt
+; RV32-ALL-NEXT:    call __divsi3
 ; RV32-ALL-NEXT:    slli a0, a0, 16
 ; RV32-ALL-NEXT:    srai a0, a0, 16
 ; RV32-ALL-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -61,7 +61,7 @@ define signext i16 @sdiv16(i16 signext %a, i16 signext %b) nounwind {
 ; RV64-ALL:       # %bb.0:
 ; RV64-ALL-NEXT:    addi sp, sp, -16
 ; RV64-ALL-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64-ALL-NEXT:    call __divdi3@plt
+; RV64-ALL-NEXT:    call __divdi3
 ; RV64-ALL-NEXT:    slli a0, a0, 48
 ; RV64-ALL-NEXT:    srai a0, a0, 48
 ; RV64-ALL-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -76,7 +76,7 @@ define signext i32 @mul32(i32 %a, i32 %b) nounwind {
 ; RV32-ALL:       # %bb.0:
 ; RV32-ALL-NEXT:    addi sp, sp, -16
 ; RV32-ALL-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32-ALL-NEXT:    call __mulsi3@plt
+; RV32-ALL-NEXT:    call __mulsi3
 ; RV32-ALL-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-ALL-NEXT:    addi sp, sp, 16
 ; RV32-ALL-NEXT:    ret
@@ -85,7 +85,7 @@ define signext i32 @mul32(i32 %a, i32 %b) nounwind {
 ; RV64-ALL:       # %bb.0:
 ; RV64-ALL-NEXT:    addi sp, sp, -16
 ; RV64-ALL-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64-ALL-NEXT:    call __muldi3@plt
+; RV64-ALL-NEXT:    call __muldi3
 ; RV64-ALL-NEXT:    sext.w a0, a0
 ; RV64-ALL-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-ALL-NEXT:    addi sp, sp, 16
@@ -99,14 +99,14 @@ define i64 @mul64(i64 %a, i64 %b) nounwind {
 ; RV32-ALL:       # %bb.0:
 ; RV32-ALL-NEXT:    addi sp, sp, -16
 ; RV32-ALL-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32-ALL-NEXT:    call __muldi3@plt
+; RV32-ALL-NEXT:    call __muldi3
 ; RV32-ALL-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-ALL-NEXT:    addi sp, sp, 16
 ; RV32-ALL-NEXT:    ret
 ;
 ; RV64-ALL-LABEL: mul64:
 ; RV64-ALL:       # %bb.0:
-; RV64-ALL-NEXT:    tail __muldi3@plt
+; RV64-ALL-NEXT:    tail __muldi3
   %1 = mul i64 %a, %b
   ret i64 %1
 }
@@ -120,9 +120,9 @@ define half @sin_f16(half %a) nounwind {
 ; RV32IFD-ILP32D:       # %bb.0:
 ; RV32IFD-ILP32D-NEXT:    addi sp, sp, -16
 ; RV32IFD-ILP32D-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-ILP32D-NEXT:    call __extendhfsf2@plt
-; RV32IFD-ILP32D-NEXT:    call sinf@plt
-; RV32IFD-ILP32D-NEXT:    call __truncsfhf2@plt
+; RV32IFD-ILP32D-NEXT:    call __extendhfsf2
+; RV32IFD-ILP32D-NEXT:    call sinf
+; RV32IFD-ILP32D-NEXT:    call __truncsfhf2
 ; RV32IFD-ILP32D-NEXT:    fmv.x.w a0, fa0
 ; RV32IFD-ILP32D-NEXT:    lui a1, 1048560
 ; RV32IFD-ILP32D-NEXT:    or a0, a0, a1
@@ -135,9 +135,9 @@ define half @sin_f16(half %a) nounwind {
 ; RV32IF-ILP32F:       # %bb.0:
 ; RV32IF-ILP32F-NEXT:    addi sp, sp, -16
 ; RV32IF-ILP32F-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-ILP32F-NEXT:    call __extendhfsf2@plt
-; RV32IF-ILP32F-NEXT:    call sinf@plt
-; RV32IF-ILP32F-NEXT:    call __truncsfhf2@plt
+; RV32IF-ILP32F-NEXT:    call __extendhfsf2
+; RV32IF-ILP32F-NEXT:    call sinf
+; RV32IF-ILP32F-NEXT:    call __truncsfhf2
 ; RV32IF-ILP32F-NEXT:    fmv.x.w a0, fa0
 ; RV32IF-ILP32F-NEXT:    lui a1, 1048560
 ; RV32IF-ILP32F-NEXT:    or a0, a0, a1
@@ -150,9 +150,9 @@ define half @sin_f16(half %a) nounwind {
 ; RV32IFD-ILP32:       # %bb.0:
 ; RV32IFD-ILP32-NEXT:    addi sp, sp, -16
 ; RV32IFD-ILP32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-ILP32-NEXT:    call __extendhfsf2@plt
-; RV32IFD-ILP32-NEXT:    call sinf@plt
-; RV32IFD-ILP32-NEXT:    call __truncsfhf2@plt
+; RV32IFD-ILP32-NEXT:    call __extendhfsf2
+; RV32IFD-ILP32-NEXT:    call sinf
+; RV32IFD-ILP32-NEXT:    call __truncsfhf2
 ; RV32IFD-ILP32-NEXT:    lui a1, 1048560
 ; RV32IFD-ILP32-NEXT:    or a0, a0, a1
 ; RV32IFD-ILP32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -165,9 +165,9 @@ define half @sin_f16(half %a) nounwind {
 ; RV32I-ILP32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-ILP32-NEXT:    slli a0, a0, 16
 ; RV32I-ILP32-NEXT:    srli a0, a0, 16
-; RV32I-ILP32-NEXT:    call __extendhfsf2@plt
-; RV32I-ILP32-NEXT:    call sinf@plt
-; RV32I-ILP32-NEXT:    call __truncsfhf2@plt
+; RV32I-ILP32-NEXT:    call __extendhfsf2
+; RV32I-ILP32-NEXT:    call sinf
+; RV32I-ILP32-NEXT:    call __truncsfhf2
 ; RV32I-ILP32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-ILP32-NEXT:    addi sp, sp, 16
 ; RV32I-ILP32-NEXT:    ret
@@ -176,9 +176,9 @@ define half @sin_f16(half %a) nounwind {
 ; RV64IFD-LP64D:       # %bb.0:
 ; RV64IFD-LP64D-NEXT:    addi sp, sp, -16
 ; RV64IFD-LP64D-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-LP64D-NEXT:    call __extendhfsf2@plt
-; RV64IFD-LP64D-NEXT:    call sinf@plt
-; RV64IFD-LP64D-NEXT:    call __truncsfhf2@plt
+; RV64IFD-LP64D-NEXT:    call __extendhfsf2
+; RV64IFD-LP64D-NEXT:    call sinf
+; RV64IFD-LP64D-NEXT:    call __truncsfhf2
 ; RV64IFD-LP64D-NEXT:    fmv.x.w a0, fa0
 ; RV64IFD-LP64D-NEXT:    lui a1, 1048560
 ; RV64IFD-LP64D-NEXT:    or a0, a0, a1
@@ -191,9 +191,9 @@ define half @sin_f16(half %a) nounwind {
 ; RV64IF-LP64F:       # %bb.0:
 ; RV64IF-LP64F-NEXT:    addi sp, sp, -16
 ; RV64IF-LP64F-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-LP64F-NEXT:    call __extendhfsf2@plt
-; RV64IF-LP64F-NEXT:    call sinf@plt
-; RV64IF-LP64F-NEXT:    call __truncsfhf2@plt
+; RV64IF-LP64F-NEXT:    call __extendhfsf2
+; RV64IF-LP64F-NEXT:    call sinf
+; RV64IF-LP64F-NEXT:    call __truncsfhf2
 ; RV64IF-LP64F-NEXT:    fmv.x.w a0, fa0
 ; RV64IF-LP64F-NEXT:    lui a1, 1048560
 ; RV64IF-LP64F-NEXT:    or a0, a0, a1
@@ -206,9 +206,9 @@ define half @sin_f16(half %a) nounwind {
 ; RV64IFD-LP64:       # %bb.0:
 ; RV64IFD-LP64-NEXT:    addi sp, sp, -16
 ; RV64IFD-LP64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-LP64-NEXT:    call __extendhfsf2@plt
-; RV64IFD-LP64-NEXT:    call sinf@plt
-; RV64IFD-LP64-NEXT:    call __truncsfhf2@plt
+; RV64IFD-LP64-NEXT:    call __extendhfsf2
+; RV64IFD-LP64-NEXT:    call sinf
+; RV64IFD-LP64-NEXT:    call __truncsfhf2
 ; RV64IFD-LP64-NEXT:    lui a1, 1048560
 ; RV64IFD-LP64-NEXT:    or a0, a0, a1
 ; RV64IFD-LP64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -221,9 +221,9 @@ define half @sin_f16(half %a) nounwind {
 ; RV64I-LP64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-LP64-NEXT:    slli a0, a0, 48
 ; RV64I-LP64-NEXT:    srli a0, a0, 48
-; RV64I-LP64-NEXT:    call __extendhfsf2@plt
-; RV64I-LP64-NEXT:    call sinf@plt
-; RV64I-LP64-NEXT:    call __truncsfhf2@plt
+; RV64I-LP64-NEXT:    call __extendhfsf2
+; RV64I-LP64-NEXT:    call sinf
+; RV64I-LP64-NEXT:    call __truncsfhf2
 ; RV64I-LP64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-LP64-NEXT:    addi sp, sp, 16
 ; RV64I-LP64-NEXT:    ret
@@ -238,17 +238,17 @@ declare float @llvm.sin.f32(float)
 define float @sin_f32(float %a) nounwind {
 ; F-ABI-ALL-LABEL: sin_f32:
 ; F-ABI-ALL:       # %bb.0:
-; F-ABI-ALL-NEXT:    tail sinf@plt
+; F-ABI-ALL-NEXT:    tail sinf
 ;
 ; RV32IFD-ILP32-LABEL: sin_f32:
 ; RV32IFD-ILP32:       # %bb.0:
-; RV32IFD-ILP32-NEXT:    tail sinf@plt
+; RV32IFD-ILP32-NEXT:    tail sinf
 ;
 ; RV32I-ILP32-LABEL: sin_f32:
 ; RV32I-ILP32:       # %bb.0:
 ; RV32I-ILP32-NEXT:    addi sp, sp, -16
 ; RV32I-ILP32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-ILP32-NEXT:    call sinf@plt
+; RV32I-ILP32-NEXT:    call sinf
 ; RV32I-ILP32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-ILP32-NEXT:    addi sp, sp, 16
 ; RV32I-ILP32-NEXT:    ret
@@ -257,7 +257,7 @@ define float @sin_f32(float %a) nounwind {
 ; RV64-LP64-ALL:       # %bb.0:
 ; RV64-LP64-ALL-NEXT:    addi sp, sp, -16
 ; RV64-LP64-ALL-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64-LP64-ALL-NEXT:    call sinf@plt
+; RV64-LP64-ALL-NEXT:    call sinf
 ; RV64-LP64-ALL-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-LP64-ALL-NEXT:    addi sp, sp, 16
 ; RV64-LP64-ALL-NEXT:    ret
@@ -270,21 +270,21 @@ declare float @llvm.powi.f32.i32(float, i32)
 define float @powi_f32(float %a, i32 %b) nounwind {
 ; RV32IFD-ILP32D-LABEL: powi_f32:
 ; RV32IFD-ILP32D:       # %bb.0:
-; RV32IFD-ILP32D-NEXT:    tail __powisf2@plt
+; RV32IFD-ILP32D-NEXT:    tail __powisf2
 ;
 ; RV32IF-ILP32F-LABEL: powi_f32:
 ; RV32IF-ILP32F:       # %bb.0:
-; RV32IF-ILP32F-NEXT:    tail __powisf2@plt
+; RV32IF-ILP32F-NEXT:    tail __powisf2
 ;
 ; RV32IFD-ILP32-LABEL: powi_f32:
 ; RV32IFD-ILP32:       # %bb.0:
-; RV32IFD-ILP32-NEXT:    tail __powisf2@plt
+; RV32IFD-ILP32-NEXT:    tail __powisf2
 ;
 ; RV32I-ILP32-LABEL: powi_f32:
 ; RV32I-ILP32:       # %bb.0:
 ; RV32I-ILP32-NEXT:    addi sp, sp, -16
 ; RV32I-ILP32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-ILP32-NEXT:    call __powisf2@plt
+; RV32I-ILP32-NEXT:    call __powisf2
 ; RV32I-ILP32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-ILP32-NEXT:    addi sp, sp, 16
 ; RV32I-ILP32-NEXT:    ret
@@ -294,7 +294,7 @@ define float @powi_f32(float %a, i32 %b) nounwind {
 ; RV64IFD-LP64D-NEXT:    addi sp, sp, -16
 ; RV64IFD-LP64D-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IFD-LP64D-NEXT:    sext.w a0, a0
-; RV64IFD-LP64D-NEXT:    call __powisf2@plt
+; RV64IFD-LP64D-NEXT:    call __powisf2
 ; RV64IFD-LP64D-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-LP64D-NEXT:    addi sp, sp, 16
 ; RV64IFD-LP64D-NEXT:    ret
@@ -304,7 +304,7 @@ define float @powi_f32(float %a, i32 %b) nounwind {
 ; RV64IF-LP64F-NEXT:    addi sp, sp, -16
 ; RV64IF-LP64F-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IF-LP64F-NEXT:    sext.w a0, a0
-; RV64IF-LP64F-NEXT:    call __powisf2@plt
+; RV64IF-LP64F-NEXT:    call __powisf2
 ; RV64IF-LP64F-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IF-LP64F-NEXT:    addi sp, sp, 16
 ; RV64IF-LP64F-NEXT:    ret
@@ -314,7 +314,7 @@ define float @powi_f32(float %a, i32 %b) nounwind {
 ; RV64-LP64-ALL-NEXT:    addi sp, sp, -16
 ; RV64-LP64-ALL-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-LP64-ALL-NEXT:    sext.w a1, a1
-; RV64-LP64-ALL-NEXT:    call __powisf2@plt
+; RV64-LP64-ALL-NEXT:    call __powisf2
 ; RV64-LP64-ALL-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-LP64-ALL-NEXT:    addi sp, sp, 16
 ; RV64-LP64-ALL-NEXT:    ret
@@ -329,7 +329,7 @@ define i64 @llround_f32(float %a) nounwind {
 ; RV32-ALL:       # %bb.0:
 ; RV32-ALL-NEXT:    addi sp, sp, -16
 ; RV32-ALL-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32-ALL-NEXT:    call llroundf@plt
+; RV32-ALL-NEXT:    call llroundf
 ; RV32-ALL-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-ALL-NEXT:    addi sp, sp, 16
 ; RV32-ALL-NEXT:    ret
@@ -354,7 +354,7 @@ define i64 @llround_f32(float %a) nounwind {
 ; RV64I-LP64:       # %bb.0:
 ; RV64I-LP64-NEXT:    addi sp, sp, -16
 ; RV64I-LP64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-LP64-NEXT:    call llroundf@plt
+; RV64I-LP64-NEXT:    call llroundf
 ; RV64I-LP64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-LP64-NEXT:    addi sp, sp, 16
 ; RV64I-LP64-NEXT:    ret
@@ -369,13 +369,13 @@ declare double @llvm.sin.f64(double)
 define double @sin_f64(double %a) nounwind {
 ; D-ABI-ALL-LABEL: sin_f64:
 ; D-ABI-ALL:       # %bb.0:
-; D-ABI-ALL-NEXT:    tail sin@plt
+; D-ABI-ALL-NEXT:    tail sin
 ;
 ; RV32IF-ILP32F-LABEL: sin_f64:
 ; RV32IF-ILP32F:       # %bb.0:
 ; RV32IF-ILP32F-NEXT:    addi sp, sp, -16
 ; RV32IF-ILP32F-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-ILP32F-NEXT:    call sin@plt
+; RV32IF-ILP32F-NEXT:    call sin
 ; RV32IF-ILP32F-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-ILP32F-NEXT:    addi sp, sp, 16
 ; RV32IF-ILP32F-NEXT:    ret
@@ -384,7 +384,7 @@ define double @sin_f64(double %a) nounwind {
 ; RV32-ILP32-ALL:       # %bb.0:
 ; RV32-ILP32-ALL-NEXT:    addi sp, sp, -16
 ; RV32-ILP32-ALL-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32-ILP32-ALL-NEXT:    call sin@plt
+; RV32-ILP32-ALL-NEXT:    call sin
 ; RV32-ILP32-ALL-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-ILP32-ALL-NEXT:    addi sp, sp, 16
 ; RV32-ILP32-ALL-NEXT:    ret
@@ -393,20 +393,20 @@ define double @sin_f64(double %a) nounwind {
 ; RV64IF-LP64F:       # %bb.0:
 ; RV64IF-LP64F-NEXT:    addi sp, sp, -16
 ; RV64IF-LP64F-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-LP64F-NEXT:    call sin@plt
+; RV64IF-LP64F-NEXT:    call sin
 ; RV64IF-LP64F-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IF-LP64F-NEXT:    addi sp, sp, 16
 ; RV64IF-LP64F-NEXT:    ret
 ;
 ; RV64IFD-LP64-LABEL: sin_f64:
 ; RV64IFD-LP64:       # %bb.0:
-; RV64IFD-LP64-NEXT:    tail sin@plt
+; RV64IFD-LP64-NEXT:    tail sin
 ;
 ; RV64I-LP64-LABEL: sin_f64:
 ; RV64I-LP64:       # %bb.0:
 ; RV64I-LP64-NEXT:    addi sp, sp, -16
 ; RV64I-LP64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-LP64-NEXT:    call sin@plt
+; RV64I-LP64-NEXT:    call sin
 ; RV64I-LP64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-LP64-NEXT:    addi sp, sp, 16
 ; RV64I-LP64-NEXT:    ret
@@ -419,13 +419,13 @@ declare double @llvm.powi.f64.i32(double, i32)
 define double @powi_f64(double %a, i32 %b) nounwind {
 ; RV32IFD-ILP32D-LABEL: powi_f64:
 ; RV32IFD-ILP32D:       # %bb.0:
-; RV32IFD-ILP32D-NEXT:    tail __powidf2@plt
+; RV32IFD-ILP32D-NEXT:    tail __powidf2
 ;
 ; RV32IF-ILP32F-LABEL: powi_f64:
 ; RV32IF-ILP32F:       # %bb.0:
 ; RV32IF-ILP32F-NEXT:    addi sp, sp, -16
 ; RV32IF-ILP32F-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-ILP32F-NEXT:    call __powidf2@plt
+; RV32IF-ILP32F-NEXT:    call __powidf2
 ; RV32IF-ILP32F-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-ILP32F-NEXT:    addi sp, sp, 16
 ; RV32IF-ILP32F-NEXT:    ret
@@ -434,7 +434,7 @@ define double @powi_f64(double %a, i32 %b) nounwind {
 ; RV32-ILP32-ALL:       # %bb.0:
 ; RV32-ILP32-ALL-NEXT:    addi sp, sp, -16
 ; RV32-ILP32-ALL-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32-ILP32-ALL-NEXT:    call __powidf2@plt
+; RV32-ILP32-ALL-NEXT:    call __powidf2
 ; RV32-ILP32-ALL-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-ILP32-ALL-NEXT:    addi sp, sp, 16
 ; RV32-ILP32-ALL-NEXT:    ret
@@ -444,7 +444,7 @@ define double @powi_f64(double %a, i32 %b) nounwind {
 ; RV64IFD-LP64D-NEXT:    addi sp, sp, -16
 ; RV64IFD-LP64D-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IFD-LP64D-NEXT:    sext.w a0, a0
-; RV64IFD-LP64D-NEXT:    call __powidf2@plt
+; RV64IFD-LP64D-NEXT:    call __powidf2
 ; RV64IFD-LP64D-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-LP64D-NEXT:    addi sp, sp, 16
 ; RV64IFD-LP64D-NEXT:    ret
@@ -454,7 +454,7 @@ define double @powi_f64(double %a, i32 %b) nounwind {
 ; RV64IF-LP64F-NEXT:    addi sp, sp, -16
 ; RV64IF-LP64F-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IF-LP64F-NEXT:    sext.w a1, a1
-; RV64IF-LP64F-NEXT:    call __powidf2@plt
+; RV64IF-LP64F-NEXT:    call __powidf2
 ; RV64IF-LP64F-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IF-LP64F-NEXT:    addi sp, sp, 16
 ; RV64IF-LP64F-NEXT:    ret
@@ -464,7 +464,7 @@ define double @powi_f64(double %a, i32 %b) nounwind {
 ; RV64-LP64-ALL-NEXT:    addi sp, sp, -16
 ; RV64-LP64-ALL-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-LP64-ALL-NEXT:    sext.w a1, a1
-; RV64-LP64-ALL-NEXT:    call __powidf2@plt
+; RV64-LP64-ALL-NEXT:    call __powidf2
 ; RV64-LP64-ALL-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-LP64-ALL-NEXT:    addi sp, sp, 16
 ; RV64-LP64-ALL-NEXT:    ret
@@ -479,7 +479,7 @@ define i64 @llround_f64(double %a) nounwind {
 ; RV32-ALL:       # %bb.0:
 ; RV32-ALL-NEXT:    addi sp, sp, -16
 ; RV32-ALL-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32-ALL-NEXT:    call llround@plt
+; RV32-ALL-NEXT:    call llround
 ; RV32-ALL-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-ALL-NEXT:    addi sp, sp, 16
 ; RV32-ALL-NEXT:    ret
@@ -493,7 +493,7 @@ define i64 @llround_f64(double %a) nounwind {
 ; RV64IF-LP64F:       # %bb.0:
 ; RV64IF-LP64F-NEXT:    addi sp, sp, -16
 ; RV64IF-LP64F-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-LP64F-NEXT:    call llround@plt
+; RV64IF-LP64F-NEXT:    call llround
 ; RV64IF-LP64F-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IF-LP64F-NEXT:    addi sp, sp, 16
 ; RV64IF-LP64F-NEXT:    ret
@@ -508,7 +508,7 @@ define i64 @llround_f64(double %a) nounwind {
 ; RV64I-LP64:       # %bb.0:
 ; RV64I-LP64-NEXT:    addi sp, sp, -16
 ; RV64I-LP64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-LP64-NEXT:    call llround@plt
+; RV64I-LP64-NEXT:    call llround
 ; RV64I-LP64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-LP64-NEXT:    addi sp, sp, 16
 ; RV64I-LP64-NEXT:    ret
@@ -524,7 +524,7 @@ define i8 @atomic_load_i8_unordered(ptr %a) nounwind {
 ; RV32-ALL-NEXT:    addi sp, sp, -16
 ; RV32-ALL-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-ALL-NEXT:    li a1, 0
-; RV32-ALL-NEXT:    call __atomic_load_1@plt
+; RV32-ALL-NEXT:    call __atomic_load_1
 ; RV32-ALL-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-ALL-NEXT:    addi sp, sp, 16
 ; RV32-ALL-NEXT:    ret
@@ -534,7 +534,7 @@ define i8 @atomic_load_i8_unordered(ptr %a) nounwind {
 ; RV64-ALL-NEXT:    addi sp, sp, -16
 ; RV64-ALL-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-ALL-NEXT:    li a1, 0
-; RV64-ALL-NEXT:    call __atomic_load_1@plt
+; RV64-ALL-NEXT:    call __atomic_load_1
 ; RV64-ALL-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-ALL-NEXT:    addi sp, sp, 16
 ; RV64-ALL-NEXT:    ret
@@ -548,7 +548,7 @@ define i16 @atomicrmw_add_i16_release(ptr %a, i16 %b) nounwind {
 ; RV32-ALL-NEXT:    addi sp, sp, -16
 ; RV32-ALL-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-ALL-NEXT:    li a2, 3
-; RV32-ALL-NEXT:    call __atomic_fetch_add_2@plt
+; RV32-ALL-NEXT:    call __atomic_fetch_add_2
 ; RV32-ALL-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-ALL-NEXT:    addi sp, sp, 16
 ; RV32-ALL-NEXT:    ret
@@ -558,7 +558,7 @@ define i16 @atomicrmw_add_i16_release(ptr %a, i16 %b) nounwind {
 ; RV64-ALL-NEXT:    addi sp, sp, -16
 ; RV64-ALL-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-ALL-NEXT:    li a2, 3
-; RV64-ALL-NEXT:    call __atomic_fetch_add_2@plt
+; RV64-ALL-NEXT:    call __atomic_fetch_add_2
 ; RV64-ALL-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-ALL-NEXT:    addi sp, sp, 16
 ; RV64-ALL-NEXT:    ret
@@ -572,7 +572,7 @@ define i32 @atomicrmw_xor_i32_acq_rel(ptr %a, i32 %b) nounwind {
 ; RV32-ALL-NEXT:    addi sp, sp, -16
 ; RV32-ALL-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-ALL-NEXT:    li a2, 4
-; RV32-ALL-NEXT:    call __atomic_fetch_xor_4@plt
+; RV32-ALL-NEXT:    call __atomic_fetch_xor_4
 ; RV32-ALL-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-ALL-NEXT:    addi sp, sp, 16
 ; RV32-ALL-NEXT:    ret
@@ -582,7 +582,7 @@ define i32 @atomicrmw_xor_i32_acq_rel(ptr %a, i32 %b) nounwind {
 ; RV64-ALL-NEXT:    addi sp, sp, -16
 ; RV64-ALL-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-ALL-NEXT:    li a2, 4
-; RV64-ALL-NEXT:    call __atomic_fetch_xor_4@plt
+; RV64-ALL-NEXT:    call __atomic_fetch_xor_4
 ; RV64-ALL-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-ALL-NEXT:    addi sp, sp, 16
 ; RV64-ALL-NEXT:    ret
@@ -596,7 +596,7 @@ define i64 @atomicrmw_nand_i64_seq_cst(ptr %a, i64 %b) nounwind {
 ; RV32-ALL-NEXT:    addi sp, sp, -16
 ; RV32-ALL-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-ALL-NEXT:    li a3, 5
-; RV32-ALL-NEXT:    call __atomic_fetch_nand_8@plt
+; RV32-ALL-NEXT:    call __atomic_fetch_nand_8
 ; RV32-ALL-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-ALL-NEXT:    addi sp, sp, 16
 ; RV32-ALL-NEXT:    ret
@@ -606,7 +606,7 @@ define i64 @atomicrmw_nand_i64_seq_cst(ptr %a, i64 %b) nounwind {
 ; RV64-ALL-NEXT:    addi sp, sp, -16
 ; RV64-ALL-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-ALL-NEXT:    li a2, 5
-; RV64-ALL-NEXT:    call __atomic_fetch_nand_8@plt
+; RV64-ALL-NEXT:    call __atomic_fetch_nand_8
 ; RV64-ALL-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-ALL-NEXT:    addi sp, sp, 16
 ; RV64-ALL-NEXT:    ret
diff --git a/llvm/test/CodeGen/RISCV/llvm.exp10.ll b/llvm/test/CodeGen/RISCV/llvm.exp10.ll
index bfac15e009f00e..6fde86733b07f7 100644
--- a/llvm/test/CodeGen/RISCV/llvm.exp10.ll
+++ b/llvm/test/CodeGen/RISCV/llvm.exp10.ll
@@ -29,9 +29,9 @@ define half @exp10_f16(half %x) {
 ; RV32IFD-NEXT:    .cfi_def_cfa_offset 16
 ; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IFD-NEXT:    .cfi_offset ra, -4
-; RV32IFD-NEXT:    call __extendhfsf2@plt
-; RV32IFD-NEXT:    call exp10f@plt
-; RV32IFD-NEXT:    call __truncsfhf2@plt
+; RV32IFD-NEXT:    call __extendhfsf2
+; RV32IFD-NEXT:    call exp10f
+; RV32IFD-NEXT:    call __truncsfhf2
 ; RV32IFD-NEXT:    fmv.x.w a0, fa0
 ; RV32IFD-NEXT:    lui a1, 1048560
 ; RV32IFD-NEXT:    or a0, a0, a1
@@ -46,9 +46,9 @@ define half @exp10_f16(half %x) {
 ; RV64IFD-NEXT:    .cfi_def_cfa_offset 16
 ; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IFD-NEXT:    .cfi_offset ra, -8
-; RV64IFD-NEXT:    call __extendhfsf2@plt
-; RV64IFD-NEXT:    call exp10f@plt
-; RV64IFD-NEXT:    call __truncsfhf2@plt
+; RV64IFD-NEXT:    call __extendhfsf2
+; RV64IFD-NEXT:    call exp10f
+; RV64IFD-NEXT:    call __truncsfhf2
 ; RV64IFD-NEXT:    fmv.x.w a0, fa0
 ; RV64IFD-NEXT:    lui a1, 1048560
 ; RV64IFD-NEXT:    or a0, a0, a1
@@ -68,9 +68,9 @@ define <1 x half> @exp10_v1f16(<1 x half> %x) {
 ; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IFD-NEXT:    .cfi_offset ra, -4
 ; RV32IFD-NEXT:    fmv.w.x fa0, a0
-; RV32IFD-NEXT:    call __extendhfsf2@plt
-; RV32IFD-NEXT:    call exp10f@plt
-; RV32IFD-NEXT:    call __truncsfhf2@plt
+; RV32IFD-NEXT:    call __extendhfsf2
+; RV32IFD-NEXT:    call exp10f
+; RV32IFD-NEXT:    call __truncsfhf2
 ; RV32IFD-NEXT:    fmv.x.w a0, fa0
 ; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
@@ -83,9 +83,9 @@ define <1 x half> @exp10_v1f16(<1 x half> %x) {
 ; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IFD-NEXT:    .cfi_offset ra, -8
 ; RV64IFD-NEXT:    fmv.w.x fa0, a0
-; RV64IFD-NEXT:    call __extendhfsf2@plt
-; RV64IFD-NEXT:    call exp10f@plt
-; RV64IFD-NEXT:    call __truncsfhf2@plt
+; RV64IFD-NEXT:    call __extendhfsf2
+; RV64IFD-NEXT:    call exp10f
+; RV64IFD-NEXT:    call __truncsfhf2
 ; RV64IFD-NEXT:    fmv.x.w a0, fa0
 ; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
@@ -107,14 +107,14 @@ define <2 x half> @exp10_v2f16(<2 x half> %x) {
 ; RV32IFD-NEXT:    .cfi_offset fs0, -16
 ; RV32IFD-NEXT:    fmv.w.x fs0, a1
 ; RV32IFD-NEXT:    fmv.w.x fa0, a0
-; RV32IFD-NEXT:    call __extendhfsf2@plt
-; RV32IFD-NEXT:    call exp10f@plt
-; RV32IFD-NEXT:    call __truncsfhf2@plt
+; RV32IFD-NEXT:    call __extendhfsf2
+; RV32IFD-NEXT:    call exp10f
+; RV32IFD-NEXT:    call __truncsfhf2
 ; RV32IFD-NEXT:    fmv.x.w s0, fa0
 ; RV32IFD-NEXT:    fmv.s fa0, fs0
-; RV32IFD-NEXT:    call __extendhfsf2@plt
-; RV32IFD-NEXT:    call exp10f@plt
-; RV32IFD-NEXT:    call __truncsfhf2@plt
+; RV32IFD-NEXT:    call __extendhfsf2
+; RV32IFD-NEXT:    call exp10f
+; RV32IFD-NEXT:    call __truncsfhf2
 ; RV32IFD-NEXT:    fmv.x.w a1, fa0
 ; RV32IFD-NEXT:    mv a0, s0
 ; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -135,14 +135,14 @@ define <2 x half> @exp10_v2f16(<2 x half> %x) {
 ; RV64IFD-NEXT:    .cfi_offset s1, -24
 ; RV64IFD-NEXT:    mv s0, a1
 ; RV64IFD-NEXT:    fmv.w.x fa0, a0
-; RV64IFD-NEXT:    call __extendhfsf2@plt
-; RV64IFD-NEXT:    call exp10f@plt
-; RV64IFD-NEXT:    call __truncsfhf2@plt
+; RV64IFD-NEXT:    call __extendhfsf2
+; RV64IFD-NEXT:    call exp10f
+; RV64IFD-NEXT:    call __truncsfhf2
 ; RV64IFD-NEXT:    fmv.x.w s1, fa0
 ; RV64IFD-NEXT:    fmv.w.x fa0, s0
-; RV64IFD-NEXT:    call __extendhfsf2@plt
-; RV64IFD-NEXT:    call exp10f@plt
-; RV64IFD-NEXT:    call __truncsfhf2@plt
+; RV64IFD-NEXT:    call __extendhfsf2
+; RV64IFD-NEXT:    call exp10f
+; RV64IFD-NEXT:    call __truncsfhf2
 ; RV64IFD-NEXT:    fmv.x.w a1, fa0
 ; RV64IFD-NEXT:    mv a0, s1
 ; RV64IFD-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
@@ -178,24 +178,24 @@ define <3 x half> @exp10_v3f16(<3 x half> %x) {
 ; RV32IFD-NEXT:    fmv.w.x fs0, a2
 ; RV32IFD-NEXT:    fmv.w.x fs1, a3
 ; RV32IFD-NEXT:    fmv.w.x fa0, a1
-; RV32IFD-NEXT:    call __extendhfsf2@plt
-; RV32IFD-NEXT:    call exp10f@plt
-; RV32IFD-NEXT:    call __truncsfhf2@plt
+; RV32IFD-NEXT:    call __extendhfsf2
+; RV32IFD-NEXT:    call exp10f
+; RV32IFD-NEXT:    call __truncsfhf2
 ; RV32IFD-NEXT:    fmv.s fs2, fa0
 ; RV32IFD-NEXT:    fmv.s fa0, fs1
-; RV32IFD-NEXT:    call __extendhfsf2@plt
-; RV32IFD-NEXT:    call exp10f@plt
+; RV32IFD-NEXT:    call __extendhfsf2
+; RV32IFD-NEXT:    call exp10f
 ; RV32IFD-NEXT:    fmv.x.w a0, fs2
 ; RV32IFD-NEXT:    slli s1, a0, 16
-; RV32IFD-NEXT:    call __truncsfhf2@plt
+; RV32IFD-NEXT:    call __truncsfhf2
 ; RV32IFD-NEXT:    fmv.x.w a0, fa0
 ; RV32IFD-NEXT:    slli a0, a0, 16
 ; RV32IFD-NEXT:    srli a0, a0, 16
 ; RV32IFD-NEXT:    or s1, a0, s1
 ; RV32IFD-NEXT:    fmv.s fa0, fs0
-; RV32IFD-NEXT:    call __extendhfsf2@plt
-; RV32IFD-NEXT:    call exp10f@plt
-; RV32IFD-NEXT:    call __truncsfhf2@plt
+; RV32IFD-NEXT:    call __extendhfsf2
+; RV32IFD-NEXT:    call exp10f
+; RV32IFD-NEXT:    call __truncsfhf2
 ; RV32IFD-NEXT:    fmv.x.w a0, fa0
 ; RV32IFD-NEXT:    sh a0, 4(s0)
 ; RV32IFD-NEXT:    sw s1, 0(s0)
@@ -227,24 +227,24 @@ define <3 x half> @exp10_v3f16(<3 x half> %x) {
 ; RV64IFD-NEXT:    lhu a1, 8(a1)
 ; RV64IFD-NEXT:    mv s0, a0
 ; RV64IFD-NEXT:    fmv.w.x fa0, a1
-; RV64IFD-NEXT:    call __extendhfsf2@plt
-; RV64IFD-NEXT:    call exp10f@plt
-; RV64IFD-NEXT:    call __truncsfhf2@plt
+; RV64IFD-NEXT:    call __extendhfsf2
+; RV64IFD-NEXT:    call exp10f
+; RV64IFD-NEXT:    call __truncsfhf2
 ; RV64IFD-NEXT:    fmv.s fs0, fa0
 ; RV64IFD-NEXT:    fmv.w.x fa0, s2
-; RV64IFD-NEXT:    call __extendhfsf2@plt
-; RV64IFD-NEXT:    call exp10f@plt
+; RV64IFD-NEXT:    call __extendhfsf2
+; RV64IFD-NEXT:    call exp10f
 ; RV64IFD-NEXT:    fmv.x.w a0, fs0
 ; RV64IFD-NEXT:    slli s2, a0, 16
-; RV64IFD-NEXT:    call __truncsfhf2@plt
+; RV64IFD-NEXT:    call __truncsfhf2
 ; RV64IFD-NEXT:    fmv.x.w a0, fa0
 ; RV64IFD-NEXT:    slli a0, a0, 48
 ; RV64IFD-NEXT:    srli a0, a0, 48
 ; RV64IFD-NEXT:    or s2, a0, s2
 ; RV64IFD-NEXT:    fmv.w.x fa0, s1
-; RV64IFD-NEXT:    call __extendhfsf2@plt
-; RV64IFD-NEXT:    call exp10f@plt
-; RV64IFD-NEXT:    call __truncsfhf2@plt
+; RV64IFD-NEXT:    call __extendhfsf2
+; RV64IFD-NEXT:    call exp10f
+; RV64IFD-NEXT:    call __truncsfhf2
 ; RV64IFD-NEXT:    fmv.x.w a0, fa0
 ; RV64IFD-NEXT:    sh a0, 4(s0)
 ; RV64IFD-NEXT:    sw s2, 0(s0)
@@ -291,27 +291,27 @@ define <4 x half> @exp10_v4f16(<4 x half> %x) {
 ; RV32IFD-NEXT:    fmv.w.x fs1, a2
 ; RV32IFD-NEXT:    fmv.w.x fs2, a3
 ; RV32IFD-NEXT:    fmv.w.x fa0, a1
-; RV32IFD-NEXT:    call __extendhfsf2@plt
-; RV32IFD-NEXT:    call exp10f@plt
-; RV32IFD-NEXT:    call __truncsfhf2@plt
+; RV32IFD-NEXT:    call __extendhfsf2
+; RV32IFD-NEXT:    call exp10f
+; RV32IFD-NEXT:    call __truncsfhf2
 ; RV32IFD-NEXT:    fmv.s fs3, fa0
 ; RV32IFD-NEXT:    fmv.s fa0, fs2
-; RV32IFD-NEXT:    call __extendhfsf2@plt
-; RV32IFD-NEXT:    call exp10f@plt
-; RV32IFD-NEXT:    call __truncsfhf2@plt
+; RV32IFD-NEXT:    call __extendhfsf2
+; RV32IFD-NEXT:    call exp10f
+; RV32IFD-NEXT:    call __truncsfhf2
 ; RV32IFD-NEXT:    fmv.s fs2, fa0
 ; RV32IFD-NEXT:    fmv.s fa0, fs1
-; RV32IFD-NEXT:    call __extendhfsf2@plt
-; RV32IFD-NEXT:    call exp10f@plt
-; RV32IFD-NEXT:    call __truncsfhf2@plt
+; RV32IFD-NEXT:    call __extendhfsf2
+; RV32IFD-NEXT:    call exp10f
+; RV32IFD-NEXT:    call __truncsfhf2
 ; RV32IFD-NEXT:    fmv.s fs1, fa0
 ; RV32IFD-NEXT:    fmv.s fa0, fs0
-; RV32IFD-NEXT:    call __extendhfsf2@plt
-; RV32IFD-NEXT:    call exp10f@plt
+; RV32IFD-NEXT:    call __extendhfsf2
+; RV32IFD-NEXT:    call exp10f
 ; RV32IFD-NEXT:    fmv.x.w s1, fs1
 ; RV32IFD-NEXT:    fmv.x.w s2, fs2
 ; RV32IFD-NEXT:    fmv.x.w s3, fs3
-; RV32IFD-NEXT:    call __truncsfhf2@plt
+; RV32IFD-NEXT:    call __truncsfhf2
 ; RV32IFD-NEXT:    fmv.x.w a0, fa0
 ; RV32IFD-NEXT:    sh a0, 6(s0)
 ; RV32IFD-NEXT:    sh s3, 4(s0)
@@ -355,27 +355,27 @@ define <4 x half> @exp10_v4f16(<4 x half> %x) {
 ; RV64IFD-NEXT:    lhu a1, 16(a1)
 ; RV64IFD-NEXT:    mv s0, a0
 ; RV64IFD-NEXT:    fmv.w.x fa0, a1
-; RV64IFD-NEXT:    call __extendhfsf2@plt
-; RV64IFD-NEXT:    call exp10f@plt
-; RV64IFD-NEXT:    call __truncsfhf2@plt
+; RV64IFD-NEXT:    call __extendhfsf2
+; RV64IFD-NEXT:    call exp10f
+; RV64IFD-NEXT:    call __truncsfhf2
 ; RV64IFD-NEXT:    fmv.s fs0, fa0
 ; RV64IFD-NEXT:    fmv.w.x fa0, s3
-; RV64IFD-NEXT:    call __extendhfsf2@plt
-; RV64IFD-NEXT:    call exp10f@plt
-; RV64IFD-NEXT:    call __truncsfhf2@plt
+; RV64IFD-NEXT:    call __extendhfsf2
+; RV64IFD-NEXT:    call exp10f
+; RV64IFD-NEXT:    call __truncsfhf2
 ; RV64IFD-NEXT:    fmv.s fs1, fa0
 ; RV64IFD-NEXT:    fmv.w.x fa0, s2
-; RV64IFD-NEXT:    call __extendhfsf2@plt
-; RV64IFD-NEXT:    call exp10f@plt
-; RV64IFD-NEXT:    call __truncsfhf2@plt
+; RV64IFD-NEXT:    call __extendhfsf2
+; RV64IFD-NEXT:    call exp10f
+; RV64IFD-NEXT:    call __truncsfhf2
 ; RV64IFD-NEXT:    fmv.s fs2, fa0
 ; RV64IFD-NEXT:    fmv.w.x fa0, s1
-; RV64IFD-NEXT:    call __extendhfsf2@plt
-; RV64IFD-NEXT:    call exp10f@plt
+; RV64IFD-NEXT:    call __extendhfsf2
+; RV64IFD-NEXT:    call exp10f
 ; RV64IFD-NEXT:    fmv.x.w s1, fs2
 ; RV64IFD-NEXT:    fmv.x.w s2, fs1
 ; RV64IFD-NEXT:    fmv.x.w s3, fs0
-; RV64IFD-NEXT:    call __truncsfhf2@plt
+; RV64IFD-NEXT:    call __truncsfhf2
 ; RV64IFD-NEXT:    fmv.x.w a0, fa0
 ; RV64IFD-NEXT:    sh a0, 6(s0)
 ; RV64IFD-NEXT:    sh s3, 4(s0)
@@ -398,7 +398,7 @@ define <4 x half> @exp10_v4f16(<4 x half> %x) {
 define float @exp10_f32(float %x) {
 ; CHECK-LABEL: exp10_f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    tail exp10f@plt
+; CHECK-NEXT:    tail exp10f
   %r = call float @llvm.exp10.f32(float %x)
   ret float %r
 }
@@ -410,7 +410,7 @@ define <1 x float> @exp10_v1f32(<1 x float> %x) {
 ; RV32IFD-NEXT:    .cfi_def_cfa_offset 16
 ; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IFD-NEXT:    .cfi_offset ra, -4
-; RV32IFD-NEXT:    call exp10f@plt
+; RV32IFD-NEXT:    call exp10f
 ; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
@@ -421,7 +421,7 @@ define <1 x float> @exp10_v1f32(<1 x float> %x) {
 ; RV64IFD-NEXT:    .cfi_def_cfa_offset 16
 ; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IFD-NEXT:    .cfi_offset ra, -8
-; RV64IFD-NEXT:    call exp10f@plt
+; RV64IFD-NEXT:    call exp10f
 ; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
@@ -441,10 +441,10 @@ define <2 x float> @exp10_v2f32(<2 x float> %x) {
 ; RV32IFD-NEXT:    .cfi_offset fs0, -16
 ; RV32IFD-NEXT:    .cfi_offset fs1, -24
 ; RV32IFD-NEXT:    fmv.s fs0, fa1
-; RV32IFD-NEXT:    call exp10f@plt
+; RV32IFD-NEXT:    call exp10f
 ; RV32IFD-NEXT:    fmv.s fs1, fa0
 ; RV32IFD-NEXT:    fmv.s fa0, fs0
-; RV32IFD-NEXT:    call exp10f@plt
+; RV32IFD-NEXT:    call exp10f
 ; RV32IFD-NEXT:    fmv.s fa1, fa0
 ; RV32IFD-NEXT:    fmv.s fa0, fs1
 ; RV32IFD-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
@@ -464,10 +464,10 @@ define <2 x float> @exp10_v2f32(<2 x float> %x) {
 ; RV64IFD-NEXT:    .cfi_offset fs0, -16
 ; RV64IFD-NEXT:    .cfi_offset fs1, -24
 ; RV64IFD-NEXT:    fmv.s fs0, fa1
-; RV64IFD-NEXT:    call exp10f@plt
+; RV64IFD-NEXT:    call exp10f
 ; RV64IFD-NEXT:    fmv.s fs1, fa0
 ; RV64IFD-NEXT:    fmv.s fa0, fs0
-; RV64IFD-NEXT:    call exp10f@plt
+; RV64IFD-NEXT:    call exp10f
 ; RV64IFD-NEXT:    fmv.s fa1, fa0
 ; RV64IFD-NEXT:    fmv.s fa0, fs1
 ; RV64IFD-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
@@ -497,13 +497,13 @@ define <3 x float> @exp10_v3f32(<3 x float> %x) {
 ; RV32IFD-NEXT:    fmv.s fs0, fa2
 ; RV32IFD-NEXT:    fmv.s fs1, fa1
 ; RV32IFD-NEXT:    mv s0, a0
-; RV32IFD-NEXT:    call exp10f@plt
+; RV32IFD-NEXT:    call exp10f
 ; RV32IFD-NEXT:    fmv.s fs2, fa0
 ; RV32IFD-NEXT:    fmv.s fa0, fs1
-; RV32IFD-NEXT:    call exp10f@plt
+; RV32IFD-NEXT:    call exp10f
 ; RV32IFD-NEXT:    fmv.s fs1, fa0
 ; RV32IFD-NEXT:    fmv.s fa0, fs0
-; RV32IFD-NEXT:    call exp10f@plt
+; RV32IFD-NEXT:    call exp10f
 ; RV32IFD-NEXT:    fsw fa0, 8(s0)
 ; RV32IFD-NEXT:    fsw fs1, 4(s0)
 ; RV32IFD-NEXT:    fsw fs2, 0(s0)
@@ -533,17 +533,17 @@ define <3 x float> @exp10_v3f32(<3 x float> %x) {
 ; RV64IFD-NEXT:    fmv.s fs1, fa0
 ; RV64IFD-NEXT:    mv s0, a0
 ; RV64IFD-NEXT:    fmv.s fa0, fa1
-; RV64IFD-NEXT:    call exp10f@plt
+; RV64IFD-NEXT:    call exp10f
 ; RV64IFD-NEXT:    fmv.x.w a0, fa0
 ; RV64IFD-NEXT:    slli s1, a0, 32
 ; RV64IFD-NEXT:    fmv.s fa0, fs1
-; RV64IFD-NEXT:    call exp10f@plt
+; RV64IFD-NEXT:    call exp10f
 ; RV64IFD-NEXT:    fmv.x.w a0, fa0
 ; RV64IFD-NEXT:    slli a0, a0, 32
 ; RV64IFD-NEXT:    srli a0, a0, 32
 ; RV64IFD-NEXT:    or s1, a0, s1
 ; RV64IFD-NEXT:    fmv.s fa0, fs0
-; RV64IFD-NEXT:    call exp10f@plt
+; RV64IFD-NEXT:    call exp10f
 ; RV64IFD-NEXT:    fsw fa0, 8(s0)
 ; RV64IFD-NEXT:    sd s1, 0(s0)
 ; RV64IFD-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
@@ -578,16 +578,16 @@ define <4 x float> @exp10_v4f32(<4 x float> %x) {
 ; RV32IFD-NEXT:    fmv.s fs1, fa2
 ; RV32IFD-NEXT:    fmv.s fs2, fa1
 ; RV32IFD-NEXT:    mv s0, a0
-; RV32IFD-NEXT:    call exp10f@plt
+; RV32IFD-NEXT:    call exp10f
 ; RV32IFD-NEXT:    fmv.s fs3, fa0
 ; RV32IFD-NEXT:    fmv.s fa0, fs2
-; RV32IFD-NEXT:    call exp10f@plt
+; RV32IFD-NEXT:    call exp10f
 ; RV32IFD-NEXT:    fmv.s fs2, fa0
 ; RV32IFD-NEXT:    fmv.s fa0, fs1
-; RV32IFD-NEXT:    call exp10f@plt
+; RV32IFD-NEXT:    call exp10f
 ; RV32IFD-NEXT:    fmv.s fs1, fa0
 ; RV32IFD-NEXT:    fmv.s fa0, fs0
-; RV32IFD-NEXT:    call exp10f@plt
+; RV32IFD-NEXT:    call exp10f
 ; RV32IFD-NEXT:    fsw fa0, 12(s0)
 ; RV32IFD-NEXT:    fsw fs1, 8(s0)
 ; RV32IFD-NEXT:    fsw fs2, 4(s0)
@@ -621,16 +621,16 @@ define <4 x float> @exp10_v4f32(<4 x float> %x) {
 ; RV64IFD-NEXT:    fmv.s fs1, fa2
 ; RV64IFD-NEXT:    fmv.s fs2, fa1
 ; RV64IFD-NEXT:    mv s0, a0
-; RV64IFD-NEXT:    call exp10f@plt
+; RV64IFD-NEXT:    call exp10f
 ; RV64IFD-NEXT:    fmv.s fs3, fa0
 ; RV64IFD-NEXT:    fmv.s fa0, fs2
-; RV64IFD-NEXT:    call exp10f@plt
+; RV64IFD-NEXT:    call exp10f
 ; RV64IFD-NEXT:    fmv.s fs2, fa0
 ; RV64IFD-NEXT:    fmv.s fa0, fs1
-; RV64IFD-NEXT:    call exp10f@plt
+; RV64IFD-NEXT:    call exp10f
 ; RV64IFD-NEXT:    fmv.s fs1, fa0
 ; RV64IFD-NEXT:    fmv.s fa0, fs0
-; RV64IFD-NEXT:    call exp10f@plt
+; RV64IFD-NEXT:    call exp10f
 ; RV64IFD-NEXT:    fsw fa0, 12(s0)
 ; RV64IFD-NEXT:    fsw fs1, 8(s0)
 ; RV64IFD-NEXT:    fsw fs2, 4(s0)
@@ -650,7 +650,7 @@ define <4 x float> @exp10_v4f32(<4 x float> %x) {
 define double @exp10_f64(double %x) {
 ; CHECK-LABEL: exp10_f64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    tail exp10@plt
+; CHECK-NEXT:    tail exp10
   %r = call double @llvm.exp10.f64(double %x)
   ret double %r
 }
@@ -673,10 +673,10 @@ define <2 x double> @exp10_v2f64(<2 x double> %x) {
 ; RV32IFD-NEXT:    .cfi_offset fs0, -16
 ; RV32IFD-NEXT:    .cfi_offset fs1, -24
 ; RV32IFD-NEXT:    fmv.d fs0, fa1
-; RV32IFD-NEXT:    call exp10@plt
+; RV32IFD-NEXT:    call exp10
 ; RV32IFD-NEXT:    fmv.d fs1, fa0
 ; RV32IFD-NEXT:    fmv.d fa0, fs0
-; RV32IFD-NEXT:    call exp10@plt
+; RV32IFD-NEXT:    call exp10
 ; RV32IFD-NEXT:    fmv.d fa1, fa0
 ; RV32IFD-NEXT:    fmv.d fa0, fs1
 ; RV32IFD-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
@@ -696,10 +696,10 @@ define <2 x double> @exp10_v2f64(<2 x double> %x) {
 ; RV64IFD-NEXT:    .cfi_offset fs0, -16
 ; RV64IFD-NEXT:    .cfi_offset fs1, -24
 ; RV64IFD-NEXT:    fmv.d fs0, fa1
-; RV64IFD-NEXT:    call exp10@plt
+; RV64IFD-NEXT:    call exp10
 ; RV64IFD-NEXT:    fmv.d fs1, fa0
 ; RV64IFD-NEXT:    fmv.d fa0, fs0
-; RV64IFD-NEXT:    call exp10@plt
+; RV64IFD-NEXT:    call exp10
 ; RV64IFD-NEXT:    fmv.d fa1, fa0
 ; RV64IFD-NEXT:    fmv.d fa0, fs1
 ; RV64IFD-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
@@ -729,13 +729,13 @@ define <3 x double> @exp10_v3f64(<3 x double> %x) {
 ; RV32IFD-NEXT:    fmv.d fs0, fa2
 ; RV32IFD-NEXT:    fmv.d fs1, fa1
 ; RV32IFD-NEXT:    mv s0, a0
-; RV32IFD-NEXT:    call exp10@plt
+; RV32IFD-NEXT:    call exp10
 ; RV32IFD-NEXT:    fmv.d fs2, fa0
 ; RV32IFD-NEXT:    fmv.d fa0, fs1
-; RV32IFD-NEXT:    call exp10@plt
+; RV32IFD-NEXT:    call exp10
 ; RV32IFD-NEXT:    fmv.d fs1, fa0
 ; RV32IFD-NEXT:    fmv.d fa0, fs0
-; RV32IFD-NEXT:    call exp10@plt
+; RV32IFD-NEXT:    call exp10
 ; RV32IFD-NEXT:    fsd fa0, 16(s0)
 ; RV32IFD-NEXT:    fsd fs1, 8(s0)
 ; RV32IFD-NEXT:    fsd fs2, 0(s0)
@@ -764,13 +764,13 @@ define <3 x double> @exp10_v3f64(<3 x double> %x) {
 ; RV64IFD-NEXT:    fmv.d fs0, fa2
 ; RV64IFD-NEXT:    fmv.d fs1, fa1
 ; RV64IFD-NEXT:    mv s0, a0
-; RV64IFD-NEXT:    call exp10@plt
+; RV64IFD-NEXT:    call exp10
 ; RV64IFD-NEXT:    fmv.d fs2, fa0
 ; RV64IFD-NEXT:    fmv.d fa0, fs1
-; RV64IFD-NEXT:    call exp10@plt
+; RV64IFD-NEXT:    call exp10
 ; RV64IFD-NEXT:    fmv.d fs1, fa0
 ; RV64IFD-NEXT:    fmv.d fa0, fs0
-; RV64IFD-NEXT:    call exp10@plt
+; RV64IFD-NEXT:    call exp10
 ; RV64IFD-NEXT:    fsd fa0, 16(s0)
 ; RV64IFD-NEXT:    fsd fs1, 8(s0)
 ; RV64IFD-NEXT:    fsd fs2, 0(s0)
@@ -806,16 +806,16 @@ define <4 x double> @exp10_v4f64(<4 x double> %x) {
 ; RV32IFD-NEXT:    fmv.d fs1, fa2
 ; RV32IFD-NEXT:    fmv.d fs2, fa1
 ; RV32IFD-NEXT:    mv s0, a0
-; RV32IFD-NEXT:    call exp10@plt
+; RV32IFD-NEXT:    call exp10
 ; RV32IFD-NEXT:    fmv.d fs3, fa0
 ; RV32IFD-NEXT:    fmv.d fa0, fs2
-; RV32IFD-NEXT:    call exp10@plt
+; RV32IFD-NEXT:    call exp10
 ; RV32IFD-NEXT:    fmv.d fs2, fa0
 ; RV32IFD-NEXT:    fmv.d fa0, fs1
-; RV32IFD-NEXT:    call exp10@plt
+; RV32IFD-NEXT:    call exp10
 ; RV32IFD-NEXT:    fmv.d fs1, fa0
 ; RV32IFD-NEXT:    fmv.d fa0, fs0
-; RV32IFD-NEXT:    call exp10@plt
+; RV32IFD-NEXT:    call exp10
 ; RV32IFD-NEXT:    fsd fa0, 24(s0)
 ; RV32IFD-NEXT:    fsd fs1, 16(s0)
 ; RV32IFD-NEXT:    fsd fs2, 8(s0)
@@ -849,16 +849,16 @@ define <4 x double> @exp10_v4f64(<4 x double> %x) {
 ; RV64IFD-NEXT:    fmv.d fs1, fa2
 ; RV64IFD-NEXT:    fmv.d fs2, fa1
 ; RV64IFD-NEXT:    mv s0, a0
-; RV64IFD-NEXT:    call exp10@plt
+; RV64IFD-NEXT:    call exp10
 ; RV64IFD-NEXT:    fmv.d fs3, fa0
 ; RV64IFD-NEXT:    fmv.d fa0, fs2
-; RV64IFD-NEXT:    call exp10@plt
+; RV64IFD-NEXT:    call exp10
 ; RV64IFD-NEXT:    fmv.d fs2, fa0
 ; RV64IFD-NEXT:    fmv.d fa0, fs1
-; RV64IFD-NEXT:    call exp10@plt
+; RV64IFD-NEXT:    call exp10
 ; RV64IFD-NEXT:    fmv.d fs1, fa0
 ; RV64IFD-NEXT:    fmv.d fa0, fs0
-; RV64IFD-NEXT:    call exp10@plt
+; RV64IFD-NEXT:    call exp10
 ; RV64IFD-NEXT:    fsd fa0, 24(s0)
 ; RV64IFD-NEXT:    fsd fs1, 16(s0)
 ; RV64IFD-NEXT:    fsd fs2, 8(s0)
diff --git a/llvm/test/CodeGen/RISCV/llvm.frexp.ll b/llvm/test/CodeGen/RISCV/llvm.frexp.ll
index 94b9444dfaf8c5..30f9dd1e516585 100644
--- a/llvm/test/CodeGen/RISCV/llvm.frexp.ll
+++ b/llvm/test/CodeGen/RISCV/llvm.frexp.ll
@@ -23,10 +23,10 @@ define { half, i32 } @test_frexp_f16_i32(half %a) nounwind {
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
 ; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    call __extendhfsf2@plt
+; RV32IFD-NEXT:    call __extendhfsf2
 ; RV32IFD-NEXT:    addi a0, sp, 8
-; RV32IFD-NEXT:    call frexpf@plt
-; RV32IFD-NEXT:    call __truncsfhf2@plt
+; RV32IFD-NEXT:    call frexpf
+; RV32IFD-NEXT:    call __truncsfhf2
 ; RV32IFD-NEXT:    fmv.x.w a1, fa0
 ; RV32IFD-NEXT:    lw a0, 8(sp)
 ; RV32IFD-NEXT:    lui a2, 1048560
@@ -40,10 +40,10 @@ define { half, i32 } @test_frexp_f16_i32(half %a) nounwind {
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    addi sp, sp, -16
 ; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT:    call __extendhfsf2@plt
+; RV64IFD-NEXT:    call __extendhfsf2
 ; RV64IFD-NEXT:    mv a0, sp
-; RV64IFD-NEXT:    call frexpf@plt
-; RV64IFD-NEXT:    call __truncsfhf2@plt
+; RV64IFD-NEXT:    call frexpf
+; RV64IFD-NEXT:    call __truncsfhf2
 ; RV64IFD-NEXT:    fmv.x.w a1, fa0
 ; RV64IFD-NEXT:    ld a0, 0(sp)
 ; RV64IFD-NEXT:    lui a2, 1048560
@@ -57,10 +57,10 @@ define { half, i32 } @test_frexp_f16_i32(half %a) nounwind {
 ; RV32IZFINXZDINX:       # %bb.0:
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT:    call __extendhfsf2@plt
+; RV32IZFINXZDINX-NEXT:    call __extendhfsf2
 ; RV32IZFINXZDINX-NEXT:    addi a1, sp, 8
-; RV32IZFINXZDINX-NEXT:    call frexpf@plt
-; RV32IZFINXZDINX-NEXT:    call __truncsfhf2@plt
+; RV32IZFINXZDINX-NEXT:    call frexpf
+; RV32IZFINXZDINX-NEXT:    call __truncsfhf2
 ; RV32IZFINXZDINX-NEXT:    lw a1, 8(sp)
 ; RV32IZFINXZDINX-NEXT:    lui a2, 1048560
 ; RV32IZFINXZDINX-NEXT:    or a0, a0, a2
@@ -72,10 +72,10 @@ define { half, i32 } @test_frexp_f16_i32(half %a) nounwind {
 ; RV64IZFINXZDINX:       # %bb.0:
 ; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINXZDINX-NEXT:    call __extendhfsf2@plt
+; RV64IZFINXZDINX-NEXT:    call __extendhfsf2
 ; RV64IZFINXZDINX-NEXT:    mv a1, sp
-; RV64IZFINXZDINX-NEXT:    call frexpf@plt
-; RV64IZFINXZDINX-NEXT:    call __truncsfhf2@plt
+; RV64IZFINXZDINX-NEXT:    call frexpf
+; RV64IZFINXZDINX-NEXT:    call __truncsfhf2
 ; RV64IZFINXZDINX-NEXT:    ld a1, 0(sp)
 ; RV64IZFINXZDINX-NEXT:    lui a2, 1048560
 ; RV64IZFINXZDINX-NEXT:    or a0, a0, a2
@@ -89,10 +89,10 @@ define { half, i32 } @test_frexp_f16_i32(half %a) nounwind {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    slli a0, a0, 16
 ; RV32I-NEXT:    srli a0, a0, 16
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    addi a1, sp, 8
-; RV32I-NEXT:    call frexpf@plt
-; RV32I-NEXT:    call __truncsfhf2@plt
+; RV32I-NEXT:    call frexpf
+; RV32I-NEXT:    call __truncsfhf2
 ; RV32I-NEXT:    lw a1, 8(sp)
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -104,10 +104,10 @@ define { half, i32 } @test_frexp_f16_i32(half %a) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    slli a0, a0, 48
 ; RV64I-NEXT:    srli a0, a0, 48
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    addi a1, sp, 4
-; RV64I-NEXT:    call frexpf@plt
-; RV64I-NEXT:    call __truncsfhf2@plt
+; RV64I-NEXT:    call frexpf
+; RV64I-NEXT:    call __truncsfhf2
 ; RV64I-NEXT:    lw a1, 4(sp)
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -121,10 +121,10 @@ define half @test_frexp_f16_i32_only_use_fract(half %a) nounwind {
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
 ; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    call __extendhfsf2@plt
+; RV32IFD-NEXT:    call __extendhfsf2
 ; RV32IFD-NEXT:    addi a0, sp, 8
-; RV32IFD-NEXT:    call frexpf@plt
-; RV32IFD-NEXT:    call __truncsfhf2@plt
+; RV32IFD-NEXT:    call frexpf
+; RV32IFD-NEXT:    call __truncsfhf2
 ; RV32IFD-NEXT:    fmv.x.w a0, fa0
 ; RV32IFD-NEXT:    lui a1, 1048560
 ; RV32IFD-NEXT:    or a0, a0, a1
@@ -137,10 +137,10 @@ define half @test_frexp_f16_i32_only_use_fract(half %a) nounwind {
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    addi sp, sp, -16
 ; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT:    call __extendhfsf2@plt
+; RV64IFD-NEXT:    call __extendhfsf2
 ; RV64IFD-NEXT:    mv a0, sp
-; RV64IFD-NEXT:    call frexpf@plt
-; RV64IFD-NEXT:    call __truncsfhf2@plt
+; RV64IFD-NEXT:    call frexpf
+; RV64IFD-NEXT:    call __truncsfhf2
 ; RV64IFD-NEXT:    fmv.x.w a0, fa0
 ; RV64IFD-NEXT:    lui a1, 1048560
 ; RV64IFD-NEXT:    or a0, a0, a1
@@ -153,10 +153,10 @@ define half @test_frexp_f16_i32_only_use_fract(half %a) nounwind {
 ; RV32IZFINXZDINX:       # %bb.0:
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT:    call __extendhfsf2@plt
+; RV32IZFINXZDINX-NEXT:    call __extendhfsf2
 ; RV32IZFINXZDINX-NEXT:    addi a1, sp, 8
-; RV32IZFINXZDINX-NEXT:    call frexpf@plt
-; RV32IZFINXZDINX-NEXT:    call __truncsfhf2@plt
+; RV32IZFINXZDINX-NEXT:    call frexpf
+; RV32IZFINXZDINX-NEXT:    call __truncsfhf2
 ; RV32IZFINXZDINX-NEXT:    lui a1, 1048560
 ; RV32IZFINXZDINX-NEXT:    or a0, a0, a1
 ; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -167,10 +167,10 @@ define half @test_frexp_f16_i32_only_use_fract(half %a) nounwind {
 ; RV64IZFINXZDINX:       # %bb.0:
 ; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINXZDINX-NEXT:    call __extendhfsf2@plt
+; RV64IZFINXZDINX-NEXT:    call __extendhfsf2
 ; RV64IZFINXZDINX-NEXT:    mv a1, sp
-; RV64IZFINXZDINX-NEXT:    call frexpf@plt
-; RV64IZFINXZDINX-NEXT:    call __truncsfhf2@plt
+; RV64IZFINXZDINX-NEXT:    call frexpf
+; RV64IZFINXZDINX-NEXT:    call __truncsfhf2
 ; RV64IZFINXZDINX-NEXT:    lui a1, 1048560
 ; RV64IZFINXZDINX-NEXT:    or a0, a0, a1
 ; RV64IZFINXZDINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -183,10 +183,10 @@ define half @test_frexp_f16_i32_only_use_fract(half %a) nounwind {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    slli a0, a0, 16
 ; RV32I-NEXT:    srli a0, a0, 16
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    addi a1, sp, 8
-; RV32I-NEXT:    call frexpf@plt
-; RV32I-NEXT:    call __truncsfhf2@plt
+; RV32I-NEXT:    call frexpf
+; RV32I-NEXT:    call __truncsfhf2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -197,10 +197,10 @@ define half @test_frexp_f16_i32_only_use_fract(half %a) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    slli a0, a0, 48
 ; RV64I-NEXT:    srli a0, a0, 48
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    addi a1, sp, 4
-; RV64I-NEXT:    call frexpf@plt
-; RV64I-NEXT:    call __truncsfhf2@plt
+; RV64I-NEXT:    call frexpf
+; RV64I-NEXT:    call __truncsfhf2
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -214,9 +214,9 @@ define i32 @test_frexp_f16_i32_only_use_exp(half %a) nounwind {
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
 ; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    call __extendhfsf2@plt
+; RV32IFD-NEXT:    call __extendhfsf2
 ; RV32IFD-NEXT:    addi a0, sp, 8
-; RV32IFD-NEXT:    call frexpf@plt
+; RV32IFD-NEXT:    call frexpf
 ; RV32IFD-NEXT:    lw a0, 8(sp)
 ; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
@@ -226,9 +226,9 @@ define i32 @test_frexp_f16_i32_only_use_exp(half %a) nounwind {
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    addi sp, sp, -16
 ; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT:    call __extendhfsf2@plt
+; RV64IFD-NEXT:    call __extendhfsf2
 ; RV64IFD-NEXT:    mv a0, sp
-; RV64IFD-NEXT:    call frexpf@plt
+; RV64IFD-NEXT:    call frexpf
 ; RV64IFD-NEXT:    ld a0, 0(sp)
 ; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
@@ -238,9 +238,9 @@ define i32 @test_frexp_f16_i32_only_use_exp(half %a) nounwind {
 ; RV32IZFINXZDINX:       # %bb.0:
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT:    call __extendhfsf2@plt
+; RV32IZFINXZDINX-NEXT:    call __extendhfsf2
 ; RV32IZFINXZDINX-NEXT:    addi a1, sp, 8
-; RV32IZFINXZDINX-NEXT:    call frexpf@plt
+; RV32IZFINXZDINX-NEXT:    call frexpf
 ; RV32IZFINXZDINX-NEXT:    lw a0, 8(sp)
 ; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
@@ -250,9 +250,9 @@ define i32 @test_frexp_f16_i32_only_use_exp(half %a) nounwind {
 ; RV64IZFINXZDINX:       # %bb.0:
 ; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINXZDINX-NEXT:    call __extendhfsf2@plt
+; RV64IZFINXZDINX-NEXT:    call __extendhfsf2
 ; RV64IZFINXZDINX-NEXT:    mv a1, sp
-; RV64IZFINXZDINX-NEXT:    call frexpf@plt
+; RV64IZFINXZDINX-NEXT:    call frexpf
 ; RV64IZFINXZDINX-NEXT:    ld a0, 0(sp)
 ; RV64IZFINXZDINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFINXZDINX-NEXT:    addi sp, sp, 16
@@ -264,9 +264,9 @@ define i32 @test_frexp_f16_i32_only_use_exp(half %a) nounwind {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    slli a0, a0, 16
 ; RV32I-NEXT:    srli a0, a0, 16
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    addi a1, sp, 8
-; RV32I-NEXT:    call frexpf@plt
+; RV32I-NEXT:    call frexpf
 ; RV32I-NEXT:    lw a0, 8(sp)
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -278,9 +278,9 @@ define i32 @test_frexp_f16_i32_only_use_exp(half %a) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    slli a0, a0, 48
 ; RV64I-NEXT:    srli a0, a0, 48
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    addi a1, sp, 4
-; RV64I-NEXT:    call frexpf@plt
+; RV64I-NEXT:    call frexpf
 ; RV64I-NEXT:    lw a0, 4(sp)
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -314,7 +314,7 @@ define { float, i32 } @test_frexp_f32_i32(float %a) nounwind {
 ; RV32IFD-NEXT:    addi sp, sp, -16
 ; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IFD-NEXT:    addi a0, sp, 8
-; RV32IFD-NEXT:    call frexpf@plt
+; RV32IFD-NEXT:    call frexpf
 ; RV32IFD-NEXT:    lw a0, 8(sp)
 ; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
@@ -325,7 +325,7 @@ define { float, i32 } @test_frexp_f32_i32(float %a) nounwind {
 ; RV64IFD-NEXT:    addi sp, sp, -16
 ; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IFD-NEXT:    mv a0, sp
-; RV64IFD-NEXT:    call frexpf@plt
+; RV64IFD-NEXT:    call frexpf
 ; RV64IFD-NEXT:    ld a0, 0(sp)
 ; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
@@ -336,7 +336,7 @@ define { float, i32 } @test_frexp_f32_i32(float %a) nounwind {
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFINXZDINX-NEXT:    addi a1, sp, 8
-; RV32IZFINXZDINX-NEXT:    call frexpf@plt
+; RV32IZFINXZDINX-NEXT:    call frexpf
 ; RV32IZFINXZDINX-NEXT:    lw a1, 8(sp)
 ; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
@@ -347,7 +347,7 @@ define { float, i32 } @test_frexp_f32_i32(float %a) nounwind {
 ; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZFINXZDINX-NEXT:    mv a1, sp
-; RV64IZFINXZDINX-NEXT:    call frexpf@plt
+; RV64IZFINXZDINX-NEXT:    call frexpf
 ; RV64IZFINXZDINX-NEXT:    ld a1, 0(sp)
 ; RV64IZFINXZDINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFINXZDINX-NEXT:    addi sp, sp, 16
@@ -358,7 +358,7 @@ define { float, i32 } @test_frexp_f32_i32(float %a) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a1, sp, 8
-; RV32I-NEXT:    call frexpf@plt
+; RV32I-NEXT:    call frexpf
 ; RV32I-NEXT:    lw a1, 8(sp)
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -369,7 +369,7 @@ define { float, i32 } @test_frexp_f32_i32(float %a) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a1, sp, 4
-; RV64I-NEXT:    call frexpf@plt
+; RV64I-NEXT:    call frexpf
 ; RV64I-NEXT:    lw a1, 4(sp)
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -384,7 +384,7 @@ define float @test_frexp_f32_i32_only_use_fract(float %a) nounwind {
 ; RV32IFD-NEXT:    addi sp, sp, -16
 ; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IFD-NEXT:    addi a0, sp, 8
-; RV32IFD-NEXT:    call frexpf@plt
+; RV32IFD-NEXT:    call frexpf
 ; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
@@ -394,7 +394,7 @@ define float @test_frexp_f32_i32_only_use_fract(float %a) nounwind {
 ; RV64IFD-NEXT:    addi sp, sp, -16
 ; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IFD-NEXT:    mv a0, sp
-; RV64IFD-NEXT:    call frexpf@plt
+; RV64IFD-NEXT:    call frexpf
 ; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
@@ -404,7 +404,7 @@ define float @test_frexp_f32_i32_only_use_fract(float %a) nounwind {
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFINXZDINX-NEXT:    addi a1, sp, 8
-; RV32IZFINXZDINX-NEXT:    call frexpf@plt
+; RV32IZFINXZDINX-NEXT:    call frexpf
 ; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINXZDINX-NEXT:    ret
@@ -414,7 +414,7 @@ define float @test_frexp_f32_i32_only_use_fract(float %a) nounwind {
 ; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZFINXZDINX-NEXT:    mv a1, sp
-; RV64IZFINXZDINX-NEXT:    call frexpf@plt
+; RV64IZFINXZDINX-NEXT:    call frexpf
 ; RV64IZFINXZDINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV64IZFINXZDINX-NEXT:    ret
@@ -424,7 +424,7 @@ define float @test_frexp_f32_i32_only_use_fract(float %a) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a1, sp, 8
-; RV32I-NEXT:    call frexpf@plt
+; RV32I-NEXT:    call frexpf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -434,7 +434,7 @@ define float @test_frexp_f32_i32_only_use_fract(float %a) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a1, sp, 4
-; RV64I-NEXT:    call frexpf@plt
+; RV64I-NEXT:    call frexpf
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -449,7 +449,7 @@ define i32 @test_frexp_f32_i32_only_use_exp(float %a) nounwind {
 ; RV32IFD-NEXT:    addi sp, sp, -16
 ; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IFD-NEXT:    addi a0, sp, 8
-; RV32IFD-NEXT:    call frexpf@plt
+; RV32IFD-NEXT:    call frexpf
 ; RV32IFD-NEXT:    lw a0, 8(sp)
 ; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
@@ -460,7 +460,7 @@ define i32 @test_frexp_f32_i32_only_use_exp(float %a) nounwind {
 ; RV64IFD-NEXT:    addi sp, sp, -16
 ; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IFD-NEXT:    mv a0, sp
-; RV64IFD-NEXT:    call frexpf@plt
+; RV64IFD-NEXT:    call frexpf
 ; RV64IFD-NEXT:    ld a0, 0(sp)
 ; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
@@ -471,7 +471,7 @@ define i32 @test_frexp_f32_i32_only_use_exp(float %a) nounwind {
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFINXZDINX-NEXT:    addi a1, sp, 8
-; RV32IZFINXZDINX-NEXT:    call frexpf@plt
+; RV32IZFINXZDINX-NEXT:    call frexpf
 ; RV32IZFINXZDINX-NEXT:    lw a0, 8(sp)
 ; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
@@ -482,7 +482,7 @@ define i32 @test_frexp_f32_i32_only_use_exp(float %a) nounwind {
 ; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZFINXZDINX-NEXT:    mv a1, sp
-; RV64IZFINXZDINX-NEXT:    call frexpf@plt
+; RV64IZFINXZDINX-NEXT:    call frexpf
 ; RV64IZFINXZDINX-NEXT:    ld a0, 0(sp)
 ; RV64IZFINXZDINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFINXZDINX-NEXT:    addi sp, sp, 16
@@ -493,7 +493,7 @@ define i32 @test_frexp_f32_i32_only_use_exp(float %a) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a1, sp, 8
-; RV32I-NEXT:    call frexpf@plt
+; RV32I-NEXT:    call frexpf
 ; RV32I-NEXT:    lw a0, 8(sp)
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -504,7 +504,7 @@ define i32 @test_frexp_f32_i32_only_use_exp(float %a) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a1, sp, 4
-; RV64I-NEXT:    call frexpf@plt
+; RV64I-NEXT:    call frexpf
 ; RV64I-NEXT:    lw a0, 4(sp)
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -547,19 +547,19 @@ define { <4 x float>, <4 x i32> } @test_frexp_v4f32_v4i32(<4 x float> %a) nounwi
 ; RV32IFD-NEXT:    fmv.s fs2, fa1
 ; RV32IFD-NEXT:    mv s0, a0
 ; RV32IFD-NEXT:    addi a0, sp, 8
-; RV32IFD-NEXT:    call frexpf@plt
+; RV32IFD-NEXT:    call frexpf
 ; RV32IFD-NEXT:    fmv.s fs3, fa0
 ; RV32IFD-NEXT:    addi a0, sp, 12
 ; RV32IFD-NEXT:    fmv.s fa0, fs2
-; RV32IFD-NEXT:    call frexpf@plt
+; RV32IFD-NEXT:    call frexpf
 ; RV32IFD-NEXT:    fmv.s fs2, fa0
 ; RV32IFD-NEXT:    addi a0, sp, 16
 ; RV32IFD-NEXT:    fmv.s fa0, fs1
-; RV32IFD-NEXT:    call frexpf@plt
+; RV32IFD-NEXT:    call frexpf
 ; RV32IFD-NEXT:    fmv.s fs1, fa0
 ; RV32IFD-NEXT:    addi a0, sp, 20
 ; RV32IFD-NEXT:    fmv.s fa0, fs0
-; RV32IFD-NEXT:    call frexpf@plt
+; RV32IFD-NEXT:    call frexpf
 ; RV32IFD-NEXT:    lw a0, 20(sp)
 ; RV32IFD-NEXT:    lw a1, 16(sp)
 ; RV32IFD-NEXT:    lw a2, 12(sp)
@@ -595,19 +595,19 @@ define { <4 x float>, <4 x i32> } @test_frexp_v4f32_v4i32(<4 x float> %a) nounwi
 ; RV64IFD-NEXT:    fmv.s fs2, fa1
 ; RV64IFD-NEXT:    mv s0, a0
 ; RV64IFD-NEXT:    mv a0, sp
-; RV64IFD-NEXT:    call frexpf@plt
+; RV64IFD-NEXT:    call frexpf
 ; RV64IFD-NEXT:    fmv.s fs3, fa0
 ; RV64IFD-NEXT:    addi a0, sp, 8
 ; RV64IFD-NEXT:    fmv.s fa0, fs2
-; RV64IFD-NEXT:    call frexpf@plt
+; RV64IFD-NEXT:    call frexpf
 ; RV64IFD-NEXT:    fmv.s fs2, fa0
 ; RV64IFD-NEXT:    addi a0, sp, 16
 ; RV64IFD-NEXT:    fmv.s fa0, fs1
-; RV64IFD-NEXT:    call frexpf@plt
+; RV64IFD-NEXT:    call frexpf
 ; RV64IFD-NEXT:    fmv.s fs1, fa0
 ; RV64IFD-NEXT:    addi a0, sp, 24
 ; RV64IFD-NEXT:    fmv.s fa0, fs0
-; RV64IFD-NEXT:    call frexpf@plt
+; RV64IFD-NEXT:    call frexpf
 ; RV64IFD-NEXT:    ld a0, 24(sp)
 ; RV64IFD-NEXT:    ld a1, 16(sp)
 ; RV64IFD-NEXT:    ld a2, 8(sp)
@@ -645,19 +645,19 @@ define { <4 x float>, <4 x i32> } @test_frexp_v4f32_v4i32(<4 x float> %a) nounwi
 ; RV32IZFINXZDINX-NEXT:    mv s3, a0
 ; RV32IZFINXZDINX-NEXT:    addi a1, sp, 8
 ; RV32IZFINXZDINX-NEXT:    mv a0, a2
-; RV32IZFINXZDINX-NEXT:    call frexpf@plt
+; RV32IZFINXZDINX-NEXT:    call frexpf
 ; RV32IZFINXZDINX-NEXT:    mv s4, a0
 ; RV32IZFINXZDINX-NEXT:    addi a1, sp, 12
 ; RV32IZFINXZDINX-NEXT:    mv a0, s2
-; RV32IZFINXZDINX-NEXT:    call frexpf@plt
+; RV32IZFINXZDINX-NEXT:    call frexpf
 ; RV32IZFINXZDINX-NEXT:    mv s2, a0
 ; RV32IZFINXZDINX-NEXT:    addi a1, sp, 16
 ; RV32IZFINXZDINX-NEXT:    mv a0, s1
-; RV32IZFINXZDINX-NEXT:    call frexpf@plt
+; RV32IZFINXZDINX-NEXT:    call frexpf
 ; RV32IZFINXZDINX-NEXT:    mv s1, a0
 ; RV32IZFINXZDINX-NEXT:    addi a1, sp, 20
 ; RV32IZFINXZDINX-NEXT:    mv a0, s0
-; RV32IZFINXZDINX-NEXT:    call frexpf@plt
+; RV32IZFINXZDINX-NEXT:    call frexpf
 ; RV32IZFINXZDINX-NEXT:    lw a1, 20(sp)
 ; RV32IZFINXZDINX-NEXT:    lw a2, 16(sp)
 ; RV32IZFINXZDINX-NEXT:    lw a3, 12(sp)
@@ -695,19 +695,19 @@ define { <4 x float>, <4 x i32> } @test_frexp_v4f32_v4i32(<4 x float> %a) nounwi
 ; RV64IZFINXZDINX-NEXT:    mv s3, a0
 ; RV64IZFINXZDINX-NEXT:    mv a1, sp
 ; RV64IZFINXZDINX-NEXT:    mv a0, a2
-; RV64IZFINXZDINX-NEXT:    call frexpf@plt
+; RV64IZFINXZDINX-NEXT:    call frexpf
 ; RV64IZFINXZDINX-NEXT:    mv s4, a0
 ; RV64IZFINXZDINX-NEXT:    addi a1, sp, 8
 ; RV64IZFINXZDINX-NEXT:    mv a0, s2
-; RV64IZFINXZDINX-NEXT:    call frexpf@plt
+; RV64IZFINXZDINX-NEXT:    call frexpf
 ; RV64IZFINXZDINX-NEXT:    mv s2, a0
 ; RV64IZFINXZDINX-NEXT:    addi a1, sp, 16
 ; RV64IZFINXZDINX-NEXT:    mv a0, s1
-; RV64IZFINXZDINX-NEXT:    call frexpf@plt
+; RV64IZFINXZDINX-NEXT:    call frexpf
 ; RV64IZFINXZDINX-NEXT:    mv s1, a0
 ; RV64IZFINXZDINX-NEXT:    addi a1, sp, 24
 ; RV64IZFINXZDINX-NEXT:    mv a0, s0
-; RV64IZFINXZDINX-NEXT:    call frexpf@plt
+; RV64IZFINXZDINX-NEXT:    call frexpf
 ; RV64IZFINXZDINX-NEXT:    ld a1, 24(sp)
 ; RV64IZFINXZDINX-NEXT:    ld a2, 16(sp)
 ; RV64IZFINXZDINX-NEXT:    ld a3, 8(sp)
@@ -745,19 +745,19 @@ define { <4 x float>, <4 x i32> } @test_frexp_v4f32_v4i32(<4 x float> %a) nounwi
 ; RV32I-NEXT:    mv s3, a0
 ; RV32I-NEXT:    addi a1, sp, 8
 ; RV32I-NEXT:    mv a0, a2
-; RV32I-NEXT:    call frexpf@plt
+; RV32I-NEXT:    call frexpf
 ; RV32I-NEXT:    mv s4, a0
 ; RV32I-NEXT:    addi a1, sp, 12
 ; RV32I-NEXT:    mv a0, s2
-; RV32I-NEXT:    call frexpf@plt
+; RV32I-NEXT:    call frexpf
 ; RV32I-NEXT:    mv s2, a0
 ; RV32I-NEXT:    addi a1, sp, 16
 ; RV32I-NEXT:    mv a0, s1
-; RV32I-NEXT:    call frexpf@plt
+; RV32I-NEXT:    call frexpf
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    addi a1, sp, 20
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call frexpf@plt
+; RV32I-NEXT:    call frexpf
 ; RV32I-NEXT:    lw a1, 8(sp)
 ; RV32I-NEXT:    lw a2, 12(sp)
 ; RV32I-NEXT:    lw a3, 16(sp)
@@ -795,19 +795,19 @@ define { <4 x float>, <4 x i32> } @test_frexp_v4f32_v4i32(<4 x float> %a) nounwi
 ; RV64I-NEXT:    mv s3, a0
 ; RV64I-NEXT:    mv a1, sp
 ; RV64I-NEXT:    mv a0, a2
-; RV64I-NEXT:    call frexpf@plt
+; RV64I-NEXT:    call frexpf
 ; RV64I-NEXT:    mv s4, a0
 ; RV64I-NEXT:    addi a1, sp, 4
 ; RV64I-NEXT:    mv a0, s2
-; RV64I-NEXT:    call frexpf@plt
+; RV64I-NEXT:    call frexpf
 ; RV64I-NEXT:    mv s2, a0
 ; RV64I-NEXT:    addi a1, sp, 8
 ; RV64I-NEXT:    mv a0, s1
-; RV64I-NEXT:    call frexpf@plt
+; RV64I-NEXT:    call frexpf
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    addi a1, sp, 12
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call frexpf@plt
+; RV64I-NEXT:    call frexpf
 ; RV64I-NEXT:    lw a1, 0(sp)
 ; RV64I-NEXT:    lw a2, 4(sp)
 ; RV64I-NEXT:    lw a3, 8(sp)
@@ -847,19 +847,19 @@ define <4 x float> @test_frexp_v4f32_v4i32_only_use_fract(<4 x float> %a) nounwi
 ; RV32IFD-NEXT:    fmv.s fs2, fa1
 ; RV32IFD-NEXT:    mv s0, a0
 ; RV32IFD-NEXT:    addi a0, sp, 8
-; RV32IFD-NEXT:    call frexpf@plt
+; RV32IFD-NEXT:    call frexpf
 ; RV32IFD-NEXT:    fmv.s fs3, fa0
 ; RV32IFD-NEXT:    addi a0, sp, 12
 ; RV32IFD-NEXT:    fmv.s fa0, fs2
-; RV32IFD-NEXT:    call frexpf@plt
+; RV32IFD-NEXT:    call frexpf
 ; RV32IFD-NEXT:    fmv.s fs2, fa0
 ; RV32IFD-NEXT:    addi a0, sp, 16
 ; RV32IFD-NEXT:    fmv.s fa0, fs1
-; RV32IFD-NEXT:    call frexpf@plt
+; RV32IFD-NEXT:    call frexpf
 ; RV32IFD-NEXT:    fmv.s fs1, fa0
 ; RV32IFD-NEXT:    addi a0, sp, 20
 ; RV32IFD-NEXT:    fmv.s fa0, fs0
-; RV32IFD-NEXT:    call frexpf@plt
+; RV32IFD-NEXT:    call frexpf
 ; RV32IFD-NEXT:    fsw fa0, 12(s0)
 ; RV32IFD-NEXT:    fsw fs1, 8(s0)
 ; RV32IFD-NEXT:    fsw fs2, 4(s0)
@@ -887,19 +887,19 @@ define <4 x float> @test_frexp_v4f32_v4i32_only_use_fract(<4 x float> %a) nounwi
 ; RV64IFD-NEXT:    fmv.s fs2, fa1
 ; RV64IFD-NEXT:    mv s0, a0
 ; RV64IFD-NEXT:    mv a0, sp
-; RV64IFD-NEXT:    call frexpf@plt
+; RV64IFD-NEXT:    call frexpf
 ; RV64IFD-NEXT:    fmv.s fs3, fa0
 ; RV64IFD-NEXT:    addi a0, sp, 8
 ; RV64IFD-NEXT:    fmv.s fa0, fs2
-; RV64IFD-NEXT:    call frexpf@plt
+; RV64IFD-NEXT:    call frexpf
 ; RV64IFD-NEXT:    fmv.s fs2, fa0
 ; RV64IFD-NEXT:    addi a0, sp, 16
 ; RV64IFD-NEXT:    fmv.s fa0, fs1
-; RV64IFD-NEXT:    call frexpf@plt
+; RV64IFD-NEXT:    call frexpf
 ; RV64IFD-NEXT:    fmv.s fs1, fa0
 ; RV64IFD-NEXT:    addi a0, sp, 24
 ; RV64IFD-NEXT:    fmv.s fa0, fs0
-; RV64IFD-NEXT:    call frexpf@plt
+; RV64IFD-NEXT:    call frexpf
 ; RV64IFD-NEXT:    fsw fa0, 12(s0)
 ; RV64IFD-NEXT:    fsw fs1, 8(s0)
 ; RV64IFD-NEXT:    fsw fs2, 4(s0)
@@ -929,19 +929,19 @@ define <4 x float> @test_frexp_v4f32_v4i32_only_use_fract(<4 x float> %a) nounwi
 ; RV32IZFINXZDINX-NEXT:    mv s3, a0
 ; RV32IZFINXZDINX-NEXT:    addi a1, sp, 8
 ; RV32IZFINXZDINX-NEXT:    mv a0, a2
-; RV32IZFINXZDINX-NEXT:    call frexpf@plt
+; RV32IZFINXZDINX-NEXT:    call frexpf
 ; RV32IZFINXZDINX-NEXT:    mv s4, a0
 ; RV32IZFINXZDINX-NEXT:    addi a1, sp, 12
 ; RV32IZFINXZDINX-NEXT:    mv a0, s2
-; RV32IZFINXZDINX-NEXT:    call frexpf@plt
+; RV32IZFINXZDINX-NEXT:    call frexpf
 ; RV32IZFINXZDINX-NEXT:    mv s2, a0
 ; RV32IZFINXZDINX-NEXT:    addi a1, sp, 16
 ; RV32IZFINXZDINX-NEXT:    mv a0, s1
-; RV32IZFINXZDINX-NEXT:    call frexpf@plt
+; RV32IZFINXZDINX-NEXT:    call frexpf
 ; RV32IZFINXZDINX-NEXT:    mv s1, a0
 ; RV32IZFINXZDINX-NEXT:    addi a1, sp, 20
 ; RV32IZFINXZDINX-NEXT:    mv a0, s0
-; RV32IZFINXZDINX-NEXT:    call frexpf@plt
+; RV32IZFINXZDINX-NEXT:    call frexpf
 ; RV32IZFINXZDINX-NEXT:    sw a0, 12(s3)
 ; RV32IZFINXZDINX-NEXT:    sw s1, 8(s3)
 ; RV32IZFINXZDINX-NEXT:    sw s2, 4(s3)
@@ -971,19 +971,19 @@ define <4 x float> @test_frexp_v4f32_v4i32_only_use_fract(<4 x float> %a) nounwi
 ; RV64IZFINXZDINX-NEXT:    mv s3, a0
 ; RV64IZFINXZDINX-NEXT:    mv a1, sp
 ; RV64IZFINXZDINX-NEXT:    mv a0, a2
-; RV64IZFINXZDINX-NEXT:    call frexpf@plt
+; RV64IZFINXZDINX-NEXT:    call frexpf
 ; RV64IZFINXZDINX-NEXT:    mv s4, a0
 ; RV64IZFINXZDINX-NEXT:    addi a1, sp, 8
 ; RV64IZFINXZDINX-NEXT:    mv a0, s2
-; RV64IZFINXZDINX-NEXT:    call frexpf@plt
+; RV64IZFINXZDINX-NEXT:    call frexpf
 ; RV64IZFINXZDINX-NEXT:    mv s2, a0
 ; RV64IZFINXZDINX-NEXT:    addi a1, sp, 16
 ; RV64IZFINXZDINX-NEXT:    mv a0, s1
-; RV64IZFINXZDINX-NEXT:    call frexpf@plt
+; RV64IZFINXZDINX-NEXT:    call frexpf
 ; RV64IZFINXZDINX-NEXT:    mv s1, a0
 ; RV64IZFINXZDINX-NEXT:    addi a1, sp, 24
 ; RV64IZFINXZDINX-NEXT:    mv a0, s0
-; RV64IZFINXZDINX-NEXT:    call frexpf@plt
+; RV64IZFINXZDINX-NEXT:    call frexpf
 ; RV64IZFINXZDINX-NEXT:    sw a0, 12(s3)
 ; RV64IZFINXZDINX-NEXT:    sw s1, 8(s3)
 ; RV64IZFINXZDINX-NEXT:    sw s2, 4(s3)
@@ -1013,19 +1013,19 @@ define <4 x float> @test_frexp_v4f32_v4i32_only_use_fract(<4 x float> %a) nounwi
 ; RV32I-NEXT:    mv s3, a0
 ; RV32I-NEXT:    addi a1, sp, 8
 ; RV32I-NEXT:    mv a0, a2
-; RV32I-NEXT:    call frexpf@plt
+; RV32I-NEXT:    call frexpf
 ; RV32I-NEXT:    mv s4, a0
 ; RV32I-NEXT:    addi a1, sp, 12
 ; RV32I-NEXT:    mv a0, s2
-; RV32I-NEXT:    call frexpf@plt
+; RV32I-NEXT:    call frexpf
 ; RV32I-NEXT:    mv s2, a0
 ; RV32I-NEXT:    addi a1, sp, 16
 ; RV32I-NEXT:    mv a0, s1
-; RV32I-NEXT:    call frexpf@plt
+; RV32I-NEXT:    call frexpf
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    addi a1, sp, 20
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call frexpf@plt
+; RV32I-NEXT:    call frexpf
 ; RV32I-NEXT:    sw a0, 12(s3)
 ; RV32I-NEXT:    sw s1, 8(s3)
 ; RV32I-NEXT:    sw s2, 4(s3)
@@ -1055,19 +1055,19 @@ define <4 x float> @test_frexp_v4f32_v4i32_only_use_fract(<4 x float> %a) nounwi
 ; RV64I-NEXT:    mv s3, a0
 ; RV64I-NEXT:    mv a1, sp
 ; RV64I-NEXT:    mv a0, a2
-; RV64I-NEXT:    call frexpf@plt
+; RV64I-NEXT:    call frexpf
 ; RV64I-NEXT:    mv s4, a0
 ; RV64I-NEXT:    addi a1, sp, 4
 ; RV64I-NEXT:    mv a0, s2
-; RV64I-NEXT:    call frexpf@plt
+; RV64I-NEXT:    call frexpf
 ; RV64I-NEXT:    mv s2, a0
 ; RV64I-NEXT:    addi a1, sp, 8
 ; RV64I-NEXT:    mv a0, s1
-; RV64I-NEXT:    call frexpf@plt
+; RV64I-NEXT:    call frexpf
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    addi a1, sp, 12
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call frexpf@plt
+; RV64I-NEXT:    call frexpf
 ; RV64I-NEXT:    sw a0, 12(s3)
 ; RV64I-NEXT:    sw s1, 8(s3)
 ; RV64I-NEXT:    sw s2, 4(s3)
@@ -1099,16 +1099,16 @@ define <4 x i32> @test_frexp_v4f32_v4i32_only_use_exp(<4 x float> %a) nounwind {
 ; RV32IFD-NEXT:    fmv.s fs2, fa1
 ; RV32IFD-NEXT:    mv s0, a0
 ; RV32IFD-NEXT:    mv a0, sp
-; RV32IFD-NEXT:    call frexpf@plt
+; RV32IFD-NEXT:    call frexpf
 ; RV32IFD-NEXT:    addi a0, sp, 4
 ; RV32IFD-NEXT:    fmv.s fa0, fs2
-; RV32IFD-NEXT:    call frexpf@plt
+; RV32IFD-NEXT:    call frexpf
 ; RV32IFD-NEXT:    addi a0, sp, 8
 ; RV32IFD-NEXT:    fmv.s fa0, fs1
-; RV32IFD-NEXT:    call frexpf@plt
+; RV32IFD-NEXT:    call frexpf
 ; RV32IFD-NEXT:    addi a0, sp, 12
 ; RV32IFD-NEXT:    fmv.s fa0, fs0
-; RV32IFD-NEXT:    call frexpf@plt
+; RV32IFD-NEXT:    call frexpf
 ; RV32IFD-NEXT:    lw a0, 12(sp)
 ; RV32IFD-NEXT:    lw a1, 8(sp)
 ; RV32IFD-NEXT:    lw a2, 4(sp)
@@ -1138,16 +1138,16 @@ define <4 x i32> @test_frexp_v4f32_v4i32_only_use_exp(<4 x float> %a) nounwind {
 ; RV64IFD-NEXT:    fmv.s fs2, fa1
 ; RV64IFD-NEXT:    mv s0, a0
 ; RV64IFD-NEXT:    addi a0, sp, 8
-; RV64IFD-NEXT:    call frexpf@plt
+; RV64IFD-NEXT:    call frexpf
 ; RV64IFD-NEXT:    addi a0, sp, 16
 ; RV64IFD-NEXT:    fmv.s fa0, fs2
-; RV64IFD-NEXT:    call frexpf@plt
+; RV64IFD-NEXT:    call frexpf
 ; RV64IFD-NEXT:    addi a0, sp, 24
 ; RV64IFD-NEXT:    fmv.s fa0, fs1
-; RV64IFD-NEXT:    call frexpf@plt
+; RV64IFD-NEXT:    call frexpf
 ; RV64IFD-NEXT:    addi a0, sp, 32
 ; RV64IFD-NEXT:    fmv.s fa0, fs0
-; RV64IFD-NEXT:    call frexpf@plt
+; RV64IFD-NEXT:    call frexpf
 ; RV64IFD-NEXT:    ld a0, 32(sp)
 ; RV64IFD-NEXT:    ld a1, 24(sp)
 ; RV64IFD-NEXT:    ld a2, 16(sp)
@@ -1179,16 +1179,16 @@ define <4 x i32> @test_frexp_v4f32_v4i32_only_use_exp(<4 x float> %a) nounwind {
 ; RV32IZFINXZDINX-NEXT:    mv s3, a0
 ; RV32IZFINXZDINX-NEXT:    addi a1, sp, 12
 ; RV32IZFINXZDINX-NEXT:    mv a0, a2
-; RV32IZFINXZDINX-NEXT:    call frexpf@plt
+; RV32IZFINXZDINX-NEXT:    call frexpf
 ; RV32IZFINXZDINX-NEXT:    addi a1, sp, 16
 ; RV32IZFINXZDINX-NEXT:    mv a0, s2
-; RV32IZFINXZDINX-NEXT:    call frexpf@plt
+; RV32IZFINXZDINX-NEXT:    call frexpf
 ; RV32IZFINXZDINX-NEXT:    addi a1, sp, 20
 ; RV32IZFINXZDINX-NEXT:    mv a0, s1
-; RV32IZFINXZDINX-NEXT:    call frexpf@plt
+; RV32IZFINXZDINX-NEXT:    call frexpf
 ; RV32IZFINXZDINX-NEXT:    addi a1, sp, 24
 ; RV32IZFINXZDINX-NEXT:    mv a0, s0
-; RV32IZFINXZDINX-NEXT:    call frexpf@plt
+; RV32IZFINXZDINX-NEXT:    call frexpf
 ; RV32IZFINXZDINX-NEXT:    lw a0, 24(sp)
 ; RV32IZFINXZDINX-NEXT:    lw a1, 20(sp)
 ; RV32IZFINXZDINX-NEXT:    lw a2, 16(sp)
@@ -1220,16 +1220,16 @@ define <4 x i32> @test_frexp_v4f32_v4i32_only_use_exp(<4 x float> %a) nounwind {
 ; RV64IZFINXZDINX-NEXT:    mv s3, a0
 ; RV64IZFINXZDINX-NEXT:    addi a1, sp, 8
 ; RV64IZFINXZDINX-NEXT:    mv a0, a2
-; RV64IZFINXZDINX-NEXT:    call frexpf@plt
+; RV64IZFINXZDINX-NEXT:    call frexpf
 ; RV64IZFINXZDINX-NEXT:    addi a1, sp, 16
 ; RV64IZFINXZDINX-NEXT:    mv a0, s2
-; RV64IZFINXZDINX-NEXT:    call frexpf@plt
+; RV64IZFINXZDINX-NEXT:    call frexpf
 ; RV64IZFINXZDINX-NEXT:    addi a1, sp, 24
 ; RV64IZFINXZDINX-NEXT:    mv a0, s1
-; RV64IZFINXZDINX-NEXT:    call frexpf@plt
+; RV64IZFINXZDINX-NEXT:    call frexpf
 ; RV64IZFINXZDINX-NEXT:    addi a1, sp, 32
 ; RV64IZFINXZDINX-NEXT:    mv a0, s0
-; RV64IZFINXZDINX-NEXT:    call frexpf@plt
+; RV64IZFINXZDINX-NEXT:    call frexpf
 ; RV64IZFINXZDINX-NEXT:    ld a0, 32(sp)
 ; RV64IZFINXZDINX-NEXT:    ld a1, 24(sp)
 ; RV64IZFINXZDINX-NEXT:    ld a2, 16(sp)
@@ -1261,16 +1261,16 @@ define <4 x i32> @test_frexp_v4f32_v4i32_only_use_exp(<4 x float> %a) nounwind {
 ; RV32I-NEXT:    mv s3, a0
 ; RV32I-NEXT:    addi a1, sp, 12
 ; RV32I-NEXT:    mv a0, a2
-; RV32I-NEXT:    call frexpf@plt
+; RV32I-NEXT:    call frexpf
 ; RV32I-NEXT:    addi a1, sp, 16
 ; RV32I-NEXT:    mv a0, s2
-; RV32I-NEXT:    call frexpf@plt
+; RV32I-NEXT:    call frexpf
 ; RV32I-NEXT:    addi a1, sp, 20
 ; RV32I-NEXT:    mv a0, s1
-; RV32I-NEXT:    call frexpf@plt
+; RV32I-NEXT:    call frexpf
 ; RV32I-NEXT:    addi a1, sp, 24
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call frexpf@plt
+; RV32I-NEXT:    call frexpf
 ; RV32I-NEXT:    lw a0, 24(sp)
 ; RV32I-NEXT:    lw a1, 20(sp)
 ; RV32I-NEXT:    lw a2, 16(sp)
@@ -1302,16 +1302,16 @@ define <4 x i32> @test_frexp_v4f32_v4i32_only_use_exp(<4 x float> %a) nounwind {
 ; RV64I-NEXT:    mv s3, a0
 ; RV64I-NEXT:    addi a1, sp, 8
 ; RV64I-NEXT:    mv a0, a2
-; RV64I-NEXT:    call frexpf@plt
+; RV64I-NEXT:    call frexpf
 ; RV64I-NEXT:    addi a1, sp, 12
 ; RV64I-NEXT:    mv a0, s2
-; RV64I-NEXT:    call frexpf@plt
+; RV64I-NEXT:    call frexpf
 ; RV64I-NEXT:    addi a1, sp, 16
 ; RV64I-NEXT:    mv a0, s1
-; RV64I-NEXT:    call frexpf@plt
+; RV64I-NEXT:    call frexpf
 ; RV64I-NEXT:    addi a1, sp, 20
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call frexpf@plt
+; RV64I-NEXT:    call frexpf
 ; RV64I-NEXT:    lw a0, 20(sp)
 ; RV64I-NEXT:    lw a1, 16(sp)
 ; RV64I-NEXT:    lw a2, 12(sp)
@@ -1338,7 +1338,7 @@ define { double, i32 } @test_frexp_f64_i32(double %a) nounwind {
 ; RV32IFD-NEXT:    addi sp, sp, -16
 ; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IFD-NEXT:    addi a0, sp, 8
-; RV32IFD-NEXT:    call frexp@plt
+; RV32IFD-NEXT:    call frexp
 ; RV32IFD-NEXT:    lw a0, 8(sp)
 ; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
@@ -1349,7 +1349,7 @@ define { double, i32 } @test_frexp_f64_i32(double %a) nounwind {
 ; RV64IFD-NEXT:    addi sp, sp, -16
 ; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IFD-NEXT:    mv a0, sp
-; RV64IFD-NEXT:    call frexp@plt
+; RV64IFD-NEXT:    call frexp
 ; RV64IFD-NEXT:    ld a0, 0(sp)
 ; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
@@ -1360,7 +1360,7 @@ define { double, i32 } @test_frexp_f64_i32(double %a) nounwind {
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFINXZDINX-NEXT:    addi a2, sp, 8
-; RV32IZFINXZDINX-NEXT:    call frexp@plt
+; RV32IZFINXZDINX-NEXT:    call frexp
 ; RV32IZFINXZDINX-NEXT:    lw a2, 8(sp)
 ; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
@@ -1371,7 +1371,7 @@ define { double, i32 } @test_frexp_f64_i32(double %a) nounwind {
 ; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZFINXZDINX-NEXT:    mv a1, sp
-; RV64IZFINXZDINX-NEXT:    call frexp@plt
+; RV64IZFINXZDINX-NEXT:    call frexp
 ; RV64IZFINXZDINX-NEXT:    ld a1, 0(sp)
 ; RV64IZFINXZDINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFINXZDINX-NEXT:    addi sp, sp, 16
@@ -1387,7 +1387,7 @@ define { double, i32 } @test_frexp_f64_i32(double %a) nounwind {
 ; RV32I-NEXT:    addi a2, sp, 4
 ; RV32I-NEXT:    mv a0, a1
 ; RV32I-NEXT:    mv a1, a3
-; RV32I-NEXT:    call frexp@plt
+; RV32I-NEXT:    call frexp
 ; RV32I-NEXT:    lw a2, 4(sp)
 ; RV32I-NEXT:    sw a1, 4(s0)
 ; RV32I-NEXT:    sw a0, 0(s0)
@@ -1402,7 +1402,7 @@ define { double, i32 } @test_frexp_f64_i32(double %a) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a1, sp, 4
-; RV64I-NEXT:    call frexp@plt
+; RV64I-NEXT:    call frexp
 ; RV64I-NEXT:    lw a1, 4(sp)
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -1417,7 +1417,7 @@ define double @test_frexp_f64_i32_only_use_fract(double %a) nounwind {
 ; RV32IFD-NEXT:    addi sp, sp, -16
 ; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IFD-NEXT:    addi a0, sp, 8
-; RV32IFD-NEXT:    call frexp@plt
+; RV32IFD-NEXT:    call frexp
 ; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
@@ -1427,7 +1427,7 @@ define double @test_frexp_f64_i32_only_use_fract(double %a) nounwind {
 ; RV64IFD-NEXT:    addi sp, sp, -16
 ; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IFD-NEXT:    mv a0, sp
-; RV64IFD-NEXT:    call frexp@plt
+; RV64IFD-NEXT:    call frexp
 ; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
@@ -1437,7 +1437,7 @@ define double @test_frexp_f64_i32_only_use_fract(double %a) nounwind {
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFINXZDINX-NEXT:    addi a2, sp, 8
-; RV32IZFINXZDINX-NEXT:    call frexp@plt
+; RV32IZFINXZDINX-NEXT:    call frexp
 ; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV32IZFINXZDINX-NEXT:    ret
@@ -1447,7 +1447,7 @@ define double @test_frexp_f64_i32_only_use_fract(double %a) nounwind {
 ; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZFINXZDINX-NEXT:    mv a1, sp
-; RV64IZFINXZDINX-NEXT:    call frexp@plt
+; RV64IZFINXZDINX-NEXT:    call frexp
 ; RV64IZFINXZDINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV64IZFINXZDINX-NEXT:    ret
@@ -1457,7 +1457,7 @@ define double @test_frexp_f64_i32_only_use_fract(double %a) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a2, sp, 8
-; RV32I-NEXT:    call frexp@plt
+; RV32I-NEXT:    call frexp
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1467,7 +1467,7 @@ define double @test_frexp_f64_i32_only_use_fract(double %a) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a1, sp, 4
-; RV64I-NEXT:    call frexp@plt
+; RV64I-NEXT:    call frexp
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1482,7 +1482,7 @@ define i32 @test_frexp_f64_i32_only_use_exp(double %a) nounwind {
 ; RV32IFD-NEXT:    addi sp, sp, -16
 ; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IFD-NEXT:    addi a0, sp, 8
-; RV32IFD-NEXT:    call frexp@plt
+; RV32IFD-NEXT:    call frexp
 ; RV32IFD-NEXT:    lw a0, 8(sp)
 ; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
@@ -1493,7 +1493,7 @@ define i32 @test_frexp_f64_i32_only_use_exp(double %a) nounwind {
 ; RV64IFD-NEXT:    addi sp, sp, -16
 ; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IFD-NEXT:    mv a0, sp
-; RV64IFD-NEXT:    call frexp@plt
+; RV64IFD-NEXT:    call frexp
 ; RV64IFD-NEXT:    ld a0, 0(sp)
 ; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
@@ -1504,7 +1504,7 @@ define i32 @test_frexp_f64_i32_only_use_exp(double %a) nounwind {
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFINXZDINX-NEXT:    addi a2, sp, 8
-; RV32IZFINXZDINX-NEXT:    call frexp@plt
+; RV32IZFINXZDINX-NEXT:    call frexp
 ; RV32IZFINXZDINX-NEXT:    lw a0, 8(sp)
 ; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
@@ -1515,7 +1515,7 @@ define i32 @test_frexp_f64_i32_only_use_exp(double %a) nounwind {
 ; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZFINXZDINX-NEXT:    mv a1, sp
-; RV64IZFINXZDINX-NEXT:    call frexp@plt
+; RV64IZFINXZDINX-NEXT:    call frexp
 ; RV64IZFINXZDINX-NEXT:    ld a0, 0(sp)
 ; RV64IZFINXZDINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFINXZDINX-NEXT:    addi sp, sp, 16
@@ -1526,7 +1526,7 @@ define i32 @test_frexp_f64_i32_only_use_exp(double %a) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a2, sp, 8
-; RV32I-NEXT:    call frexp@plt
+; RV32I-NEXT:    call frexp
 ; RV32I-NEXT:    lw a0, 8(sp)
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -1537,7 +1537,7 @@ define i32 @test_frexp_f64_i32_only_use_exp(double %a) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a1, sp, 4
-; RV64I-NEXT:    call frexp@plt
+; RV64I-NEXT:    call frexp
 ; RV64I-NEXT:    lw a0, 4(sp)
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -1583,7 +1583,7 @@ define { fp128, i32 } @test_frexp_f128_i32(fp128 %a) nounwind {
 ; RV32IFD-NEXT:    mv a1, sp
 ; RV32IFD-NEXT:    addi a2, sp, 36
 ; RV32IFD-NEXT:    sw a3, 0(sp)
-; RV32IFD-NEXT:    call frexpl@plt
+; RV32IFD-NEXT:    call frexpl
 ; RV32IFD-NEXT:    lw a0, 36(sp)
 ; RV32IFD-NEXT:    lw a1, 28(sp)
 ; RV32IFD-NEXT:    lw a2, 24(sp)
@@ -1609,7 +1609,7 @@ define { fp128, i32 } @test_frexp_f128_i32(fp128 %a) nounwind {
 ; RV64IFD-NEXT:    addi a2, sp, 12
 ; RV64IFD-NEXT:    mv a0, a1
 ; RV64IFD-NEXT:    mv a1, a3
-; RV64IFD-NEXT:    call frexpl@plt
+; RV64IFD-NEXT:    call frexpl
 ; RV64IFD-NEXT:    lw a2, 12(sp)
 ; RV64IFD-NEXT:    sd a1, 8(s0)
 ; RV64IFD-NEXT:    sd a0, 0(s0)
@@ -1636,7 +1636,7 @@ define { fp128, i32 } @test_frexp_f128_i32(fp128 %a) nounwind {
 ; RV32IZFINXZDINX-NEXT:    mv a1, sp
 ; RV32IZFINXZDINX-NEXT:    addi a2, sp, 36
 ; RV32IZFINXZDINX-NEXT:    sw a3, 0(sp)
-; RV32IZFINXZDINX-NEXT:    call frexpl@plt
+; RV32IZFINXZDINX-NEXT:    call frexpl
 ; RV32IZFINXZDINX-NEXT:    lw a0, 36(sp)
 ; RV32IZFINXZDINX-NEXT:    lw a1, 28(sp)
 ; RV32IZFINXZDINX-NEXT:    lw a2, 24(sp)
@@ -1662,7 +1662,7 @@ define { fp128, i32 } @test_frexp_f128_i32(fp128 %a) nounwind {
 ; RV64IZFINXZDINX-NEXT:    addi a2, sp, 12
 ; RV64IZFINXZDINX-NEXT:    mv a0, a1
 ; RV64IZFINXZDINX-NEXT:    mv a1, a3
-; RV64IZFINXZDINX-NEXT:    call frexpl@plt
+; RV64IZFINXZDINX-NEXT:    call frexpl
 ; RV64IZFINXZDINX-NEXT:    lw a2, 12(sp)
 ; RV64IZFINXZDINX-NEXT:    sd a1, 8(s0)
 ; RV64IZFINXZDINX-NEXT:    sd a0, 0(s0)
@@ -1689,7 +1689,7 @@ define { fp128, i32 } @test_frexp_f128_i32(fp128 %a) nounwind {
 ; RV32I-NEXT:    mv a1, sp
 ; RV32I-NEXT:    addi a2, sp, 36
 ; RV32I-NEXT:    sw a3, 0(sp)
-; RV32I-NEXT:    call frexpl@plt
+; RV32I-NEXT:    call frexpl
 ; RV32I-NEXT:    lw a0, 36(sp)
 ; RV32I-NEXT:    lw a1, 28(sp)
 ; RV32I-NEXT:    lw a2, 24(sp)
@@ -1715,7 +1715,7 @@ define { fp128, i32 } @test_frexp_f128_i32(fp128 %a) nounwind {
 ; RV64I-NEXT:    addi a2, sp, 12
 ; RV64I-NEXT:    mv a0, a1
 ; RV64I-NEXT:    mv a1, a3
-; RV64I-NEXT:    call frexpl@plt
+; RV64I-NEXT:    call frexpl
 ; RV64I-NEXT:    lw a2, 12(sp)
 ; RV64I-NEXT:    sd a1, 8(s0)
 ; RV64I-NEXT:    sd a0, 0(s0)
@@ -1746,7 +1746,7 @@ define fp128 @test_frexp_f128_i32_only_use_fract(fp128 %a) nounwind {
 ; RV32IFD-NEXT:    mv a1, sp
 ; RV32IFD-NEXT:    addi a2, sp, 36
 ; RV32IFD-NEXT:    sw a3, 0(sp)
-; RV32IFD-NEXT:    call frexpl@plt
+; RV32IFD-NEXT:    call frexpl
 ; RV32IFD-NEXT:    lw a0, 28(sp)
 ; RV32IFD-NEXT:    lw a1, 24(sp)
 ; RV32IFD-NEXT:    lw a2, 20(sp)
@@ -1765,7 +1765,7 @@ define fp128 @test_frexp_f128_i32_only_use_fract(fp128 %a) nounwind {
 ; RV64IFD-NEXT:    addi sp, sp, -16
 ; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IFD-NEXT:    addi a2, sp, 4
-; RV64IFD-NEXT:    call frexpl@plt
+; RV64IFD-NEXT:    call frexpl
 ; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
@@ -1787,7 +1787,7 @@ define fp128 @test_frexp_f128_i32_only_use_fract(fp128 %a) nounwind {
 ; RV32IZFINXZDINX-NEXT:    mv a1, sp
 ; RV32IZFINXZDINX-NEXT:    addi a2, sp, 36
 ; RV32IZFINXZDINX-NEXT:    sw a3, 0(sp)
-; RV32IZFINXZDINX-NEXT:    call frexpl@plt
+; RV32IZFINXZDINX-NEXT:    call frexpl
 ; RV32IZFINXZDINX-NEXT:    lw a0, 28(sp)
 ; RV32IZFINXZDINX-NEXT:    lw a1, 24(sp)
 ; RV32IZFINXZDINX-NEXT:    lw a2, 20(sp)
@@ -1806,7 +1806,7 @@ define fp128 @test_frexp_f128_i32_only_use_fract(fp128 %a) nounwind {
 ; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZFINXZDINX-NEXT:    addi a2, sp, 4
-; RV64IZFINXZDINX-NEXT:    call frexpl@plt
+; RV64IZFINXZDINX-NEXT:    call frexpl
 ; RV64IZFINXZDINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFINXZDINX-NEXT:    addi sp, sp, 16
 ; RV64IZFINXZDINX-NEXT:    ret
@@ -1828,7 +1828,7 @@ define fp128 @test_frexp_f128_i32_only_use_fract(fp128 %a) nounwind {
 ; RV32I-NEXT:    mv a1, sp
 ; RV32I-NEXT:    addi a2, sp, 36
 ; RV32I-NEXT:    sw a3, 0(sp)
-; RV32I-NEXT:    call frexpl@plt
+; RV32I-NEXT:    call frexpl
 ; RV32I-NEXT:    lw a0, 28(sp)
 ; RV32I-NEXT:    lw a1, 24(sp)
 ; RV32I-NEXT:    lw a2, 20(sp)
@@ -1847,7 +1847,7 @@ define fp128 @test_frexp_f128_i32_only_use_fract(fp128 %a) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, sp, 4
-; RV64I-NEXT:    call frexpl@plt
+; RV64I-NEXT:    call frexpl
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1872,7 +1872,7 @@ define i32 @test_frexp_f128_i32_only_use_exp(fp128 %a) nounwind {
 ; RV32IFD-NEXT:    addi a1, sp, 8
 ; RV32IFD-NEXT:    addi a2, sp, 40
 ; RV32IFD-NEXT:    sw a3, 8(sp)
-; RV32IFD-NEXT:    call frexpl@plt
+; RV32IFD-NEXT:    call frexpl
 ; RV32IFD-NEXT:    lw a0, 40(sp)
 ; RV32IFD-NEXT:    lw ra, 44(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 48
@@ -1883,7 +1883,7 @@ define i32 @test_frexp_f128_i32_only_use_exp(fp128 %a) nounwind {
 ; RV64IFD-NEXT:    addi sp, sp, -16
 ; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IFD-NEXT:    addi a2, sp, 4
-; RV64IFD-NEXT:    call frexpl@plt
+; RV64IFD-NEXT:    call frexpl
 ; RV64IFD-NEXT:    lw a0, 4(sp)
 ; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
@@ -1904,7 +1904,7 @@ define i32 @test_frexp_f128_i32_only_use_exp(fp128 %a) nounwind {
 ; RV32IZFINXZDINX-NEXT:    addi a1, sp, 8
 ; RV32IZFINXZDINX-NEXT:    addi a2, sp, 40
 ; RV32IZFINXZDINX-NEXT:    sw a3, 8(sp)
-; RV32IZFINXZDINX-NEXT:    call frexpl@plt
+; RV32IZFINXZDINX-NEXT:    call frexpl
 ; RV32IZFINXZDINX-NEXT:    lw a0, 40(sp)
 ; RV32IZFINXZDINX-NEXT:    lw ra, 44(sp) # 4-byte Folded Reload
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 48
@@ -1915,7 +1915,7 @@ define i32 @test_frexp_f128_i32_only_use_exp(fp128 %a) nounwind {
 ; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
 ; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZFINXZDINX-NEXT:    addi a2, sp, 4
-; RV64IZFINXZDINX-NEXT:    call frexpl@plt
+; RV64IZFINXZDINX-NEXT:    call frexpl
 ; RV64IZFINXZDINX-NEXT:    lw a0, 4(sp)
 ; RV64IZFINXZDINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFINXZDINX-NEXT:    addi sp, sp, 16
@@ -1936,7 +1936,7 @@ define i32 @test_frexp_f128_i32_only_use_exp(fp128 %a) nounwind {
 ; RV32I-NEXT:    addi a1, sp, 8
 ; RV32I-NEXT:    addi a2, sp, 40
 ; RV32I-NEXT:    sw a3, 8(sp)
-; RV32I-NEXT:    call frexpl@plt
+; RV32I-NEXT:    call frexpl
 ; RV32I-NEXT:    lw a0, 40(sp)
 ; RV32I-NEXT:    lw ra, 44(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 48
@@ -1947,7 +1947,7 @@ define i32 @test_frexp_f128_i32_only_use_exp(fp128 %a) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, sp, 4
-; RV64I-NEXT:    call frexpl@plt
+; RV64I-NEXT:    call frexpl
 ; RV64I-NEXT:    lw a0, 4(sp)
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/machine-outliner-and-machine-copy-propagation.ll b/llvm/test/CodeGen/RISCV/machine-outliner-and-machine-copy-propagation.ll
index 45c582a6133088..a1a6fa1f62a549 100644
--- a/llvm/test/CodeGen/RISCV/machine-outliner-and-machine-copy-propagation.ll
+++ b/llvm/test/CodeGen/RISCV/machine-outliner-and-machine-copy-propagation.ll
@@ -147,47 +147,47 @@ declare void @exit(i32 signext) noreturn
 ; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    call t0, OUTLINED_FUNCTION_0
-; RV64I-NEXT:    call nge@plt
+; RV64I-NEXT:    call nge
 ; RV64I-NEXT:    bnez a0, .LBB4_9
 ; RV64I-NEXT:  # %bb.1: # %if.end
 ; RV64I-NEXT:    lui a1, 524288
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call nge@plt
+; RV64I-NEXT:    call nge
 ; RV64I-NEXT:    li a1, -1
 ; RV64I-NEXT:    bne a0, a1, .LBB4_9
 ; RV64I-NEXT:  # %bb.2: # %if.end4
 ; RV64I-NEXT:    call t0, OUTLINED_FUNCTION_0
-; RV64I-NEXT:    call ngt@plt
+; RV64I-NEXT:    call ngt
 ; RV64I-NEXT:    bnez a0, .LBB4_9
 ; RV64I-NEXT:  # %bb.3: # %if.end8
 ; RV64I-NEXT:    lui a1, 524288
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call ngt@plt
+; RV64I-NEXT:    call ngt
 ; RV64I-NEXT:    li s1, -1
 ; RV64I-NEXT:    bne a0, s1, .LBB4_9
 ; RV64I-NEXT:  # %bb.4: # %if.end12
 ; RV64I-NEXT:    call t0, OUTLINED_FUNCTION_0
-; RV64I-NEXT:    call nle@plt
+; RV64I-NEXT:    call nle
 ; RV64I-NEXT:    bne a0, s1, .LBB4_9
 ; RV64I-NEXT:  # %bb.5: # %if.end16
 ; RV64I-NEXT:    lui a1, 524288
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call nle@plt
+; RV64I-NEXT:    call nle
 ; RV64I-NEXT:    bnez a0, .LBB4_9
 ; RV64I-NEXT:  # %bb.6: # %if.end20
 ; RV64I-NEXT:    call t0, OUTLINED_FUNCTION_0
-; RV64I-NEXT:    call nlt@plt
+; RV64I-NEXT:    call nlt
 ; RV64I-NEXT:    li a1, -1
 ; RV64I-NEXT:    bne a0, a1, .LBB4_9
 ; RV64I-NEXT:  # %bb.7: # %if.end24
 ; RV64I-NEXT:    lui a1, 524288
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call nlt@plt
+; RV64I-NEXT:    call nlt
 ; RV64I-NEXT:    bnez a0, .LBB4_9
 ; RV64I-NEXT:  # %bb.8: # %if.end28
-; RV64I-NEXT:    call exit@plt
+; RV64I-NEXT:    call exit
 ; RV64I-NEXT:  .LBB4_9: # %if.then
-; RV64I-NEXT:    call abort@plt
+; RV64I-NEXT:    call abort
 ;
 ; RV64I-LABEL: OUTLINED_FUNCTION_0:
 ; RV64I:       # %bb.0:
diff --git a/llvm/test/CodeGen/RISCV/machine-outliner-throw.ll b/llvm/test/CodeGen/RISCV/machine-outliner-throw.ll
index 21254b6302038a..2de29fe2fa68a9 100644
--- a/llvm/test/CodeGen/RISCV/machine-outliner-throw.ll
+++ b/llvm/test/CodeGen/RISCV/machine-outliner-throw.ll
@@ -15,12 +15,12 @@ define i32 @func1(i32 %x) #0 {
 ; CHECK-NEXT:    mul a0, a0, a0
 ; CHECK-NEXT:    addi s0, a0, 1
 ; CHECK-NEXT:    li a0, 4
-; CHECK-NEXT:    call __cxa_allocate_exception@plt
+; CHECK-NEXT:    call __cxa_allocate_exception
 ; CHECK-NEXT:    sw s0, 0(a0)
 ; CHECK-NEXT:    lui a1, %hi(_ZTIi)
 ; CHECK-NEXT:    addi a1, a1, %lo(_ZTIi)
 ; CHECK-NEXT:    li a2, 0
-; CHECK-NEXT:    call __cxa_throw@plt
+; CHECK-NEXT:    call __cxa_throw
 entry:
   %mul = mul i32 %x, %x
   %add = add i32 %mul, 1
@@ -42,12 +42,12 @@ define i32 @func2(i32 %x) #0 {
 ; CHECK-NEXT:    mul a0, a0, a0
 ; CHECK-NEXT:    addi s0, a0, 1
 ; CHECK-NEXT:    li a0, 4
-; CHECK-NEXT:    call __cxa_allocate_exception@plt
+; CHECK-NEXT:    call __cxa_allocate_exception
 ; CHECK-NEXT:    sw s0, 0(a0)
 ; CHECK-NEXT:    lui a1, %hi(_ZTIi)
 ; CHECK-NEXT:    addi a1, a1, %lo(_ZTIi)
 ; CHECK-NEXT:    li a2, 0
-; CHECK-NEXT:    call __cxa_throw@plt
+; CHECK-NEXT:    call __cxa_throw
 entry:
   %mul = mul i32 %x, %x
   %add = add i32 %mul, 1
diff --git a/llvm/test/CodeGen/RISCV/machinelicm-address-pseudos.ll b/llvm/test/CodeGen/RISCV/machinelicm-address-pseudos.ll
index 17167e7f51b4c7..27297c97871832 100644
--- a/llvm/test/CodeGen/RISCV/machinelicm-address-pseudos.ll
+++ b/llvm/test/CodeGen/RISCV/machinelicm-address-pseudos.ll
@@ -156,7 +156,7 @@ define void @test_la_tls_gd(i32 signext %n) nounwind {
 ; RV32I-NEXT:  .LBB3_1: # %loop
 ; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    mv a0, s1
-; RV32I-NEXT:    call __tls_get_addr@plt
+; RV32I-NEXT:    call __tls_get_addr
 ; RV32I-NEXT:    lw zero, 0(a0)
 ; RV32I-NEXT:    addi s2, s2, 1
 ; RV32I-NEXT:    blt s2, s0, .LBB3_1
@@ -183,7 +183,7 @@ define void @test_la_tls_gd(i32 signext %n) nounwind {
 ; RV64I-NEXT:  .LBB3_1: # %loop
 ; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    mv a0, s1
-; RV64I-NEXT:    call __tls_get_addr@plt
+; RV64I-NEXT:    call __tls_get_addr
 ; RV64I-NEXT:    lw zero, 0(a0)
 ; RV64I-NEXT:    addiw s2, s2, 1
 ; RV64I-NEXT:    blt s2, s0, .LBB3_1
diff --git a/llvm/test/CodeGen/RISCV/macro-fusion-lui-addi.ll b/llvm/test/CodeGen/RISCV/macro-fusion-lui-addi.ll
index 498e6cf23ba349..b45365e7a8b631 100644
--- a/llvm/test/CodeGen/RISCV/macro-fusion-lui-addi.ll
+++ b/llvm/test/CodeGen/RISCV/macro-fusion-lui-addi.ll
@@ -14,21 +14,21 @@ define void @foo(i32 signext %0, i32 signext %1) {
 ; NOFUSION-NEXT:    lui a0, %hi(.L.str)
 ; NOFUSION-NEXT:    fcvt.s.w fa0, a1
 ; NOFUSION-NEXT:    addi a0, a0, %lo(.L.str)
-; NOFUSION-NEXT:    tail bar@plt
+; NOFUSION-NEXT:    tail bar
 ;
 ; FUSION-LABEL: foo:
 ; FUSION:       # %bb.0:
 ; FUSION-NEXT:    fcvt.s.w fa0, a1
 ; FUSION-NEXT:    lui a0, %hi(.L.str)
 ; FUSION-NEXT:    addi a0, a0, %lo(.L.str)
-; FUSION-NEXT:    tail bar@plt
+; FUSION-NEXT:    tail bar
 ;
 ; FUSION-POSTRA-LABEL: foo:
 ; FUSION-POSTRA:       # %bb.0:
 ; FUSION-POSTRA-NEXT:    fcvt.s.w fa0, a1
 ; FUSION-POSTRA-NEXT:    lui a0, %hi(.L.str)
 ; FUSION-POSTRA-NEXT:    addi a0, a0, %lo(.L.str)
-; FUSION-POSTRA-NEXT:    tail bar@plt
+; FUSION-POSTRA-NEXT:    tail bar
   %3 = sitofp i32 %1 to float
   tail call void @bar(ptr @.str, float %3)
   ret void
diff --git a/llvm/test/CodeGen/RISCV/mem.ll b/llvm/test/CodeGen/RISCV/mem.ll
index 7c98d4ae1b3f24..a9cb80cb66349c 100644
--- a/llvm/test/CodeGen/RISCV/mem.ll
+++ b/llvm/test/CodeGen/RISCV/mem.ll
@@ -324,7 +324,7 @@ define void @addi_fold_crash(i32 %arg) nounwind {
 ; RV32I-NEXT:    add a0, a1, a0
 ; RV32I-NEXT:    sb zero, 0(a0)
 ; RV32I-NEXT:    mv a0, a1
-; RV32I-NEXT:    call snork@plt
+; RV32I-NEXT:    call snork
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
diff --git a/llvm/test/CodeGen/RISCV/mem64.ll b/llvm/test/CodeGen/RISCV/mem64.ll
index 09b04535498c5e..248964146325a8 100644
--- a/llvm/test/CodeGen/RISCV/mem64.ll
+++ b/llvm/test/CodeGen/RISCV/mem64.ll
@@ -363,7 +363,7 @@ define void @addi_fold_crash(i64 %arg) nounwind {
 ; RV64I-NEXT:    add a0, a1, a0
 ; RV64I-NEXT:    sb zero, 0(a0)
 ; RV64I-NEXT:    mv a0, a1
-; RV64I-NEXT:    call snork@plt
+; RV64I-NEXT:    call snork
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
diff --git a/llvm/test/CodeGen/RISCV/memcpy.ll b/llvm/test/CodeGen/RISCV/memcpy.ll
index 26ad872509194f..12ec0881b20d9f 100644
--- a/llvm/test/CodeGen/RISCV/memcpy.ll
+++ b/llvm/test/CodeGen/RISCV/memcpy.ll
@@ -91,14 +91,14 @@ define void @t1(ptr nocapture %C) nounwind {
 ; RV32-NEXT:    lui a1, %hi(.L.str1)
 ; RV32-NEXT:    addi a1, a1, %lo(.L.str1)
 ; RV32-NEXT:    li a2, 31
-; RV32-NEXT:    tail memcpy@plt
+; RV32-NEXT:    tail memcpy
 ;
 ; RV64-LABEL: t1:
 ; RV64:       # %bb.0: # %entry
 ; RV64-NEXT:    lui a1, %hi(.L.str1)
 ; RV64-NEXT:    addi a1, a1, %lo(.L.str1)
 ; RV64-NEXT:    li a2, 31
-; RV64-NEXT:    tail memcpy@plt
+; RV64-NEXT:    tail memcpy
 ;
 ; RV32-FAST-LABEL: t1:
 ; RV32-FAST:       # %bb.0: # %entry
@@ -152,14 +152,14 @@ define void @t2(ptr nocapture %C) nounwind {
 ; RV32-BOTH-NEXT:    lui a1, %hi(.L.str2)
 ; RV32-BOTH-NEXT:    addi a1, a1, %lo(.L.str2)
 ; RV32-BOTH-NEXT:    li a2, 36
-; RV32-BOTH-NEXT:    tail memcpy@plt
+; RV32-BOTH-NEXT:    tail memcpy
 ;
 ; RV64-LABEL: t2:
 ; RV64:       # %bb.0: # %entry
 ; RV64-NEXT:    lui a1, %hi(.L.str2)
 ; RV64-NEXT:    addi a1, a1, %lo(.L.str2)
 ; RV64-NEXT:    li a2, 36
-; RV64-NEXT:    tail memcpy@plt
+; RV64-NEXT:    tail memcpy
 ;
 ; RV64-FAST-LABEL: t2:
 ; RV64-FAST:       # %bb.0: # %entry
@@ -188,14 +188,14 @@ define void @t3(ptr nocapture %C) nounwind {
 ; RV32-NEXT:    lui a1, %hi(.L.str3)
 ; RV32-NEXT:    addi a1, a1, %lo(.L.str3)
 ; RV32-NEXT:    li a2, 24
-; RV32-NEXT:    tail memcpy@plt
+; RV32-NEXT:    tail memcpy
 ;
 ; RV64-LABEL: t3:
 ; RV64:       # %bb.0: # %entry
 ; RV64-NEXT:    lui a1, %hi(.L.str3)
 ; RV64-NEXT:    addi a1, a1, %lo(.L.str3)
 ; RV64-NEXT:    li a2, 24
-; RV64-NEXT:    tail memcpy@plt
+; RV64-NEXT:    tail memcpy
 ;
 ; RV32-FAST-LABEL: t3:
 ; RV32-FAST:       # %bb.0: # %entry
@@ -241,14 +241,14 @@ define void @t4(ptr nocapture %C) nounwind {
 ; RV32-NEXT:    lui a1, %hi(.L.str4)
 ; RV32-NEXT:    addi a1, a1, %lo(.L.str4)
 ; RV32-NEXT:    li a2, 18
-; RV32-NEXT:    tail memcpy@plt
+; RV32-NEXT:    tail memcpy
 ;
 ; RV64-LABEL: t4:
 ; RV64:       # %bb.0: # %entry
 ; RV64-NEXT:    lui a1, %hi(.L.str4)
 ; RV64-NEXT:    addi a1, a1, %lo(.L.str4)
 ; RV64-NEXT:    li a2, 18
-; RV64-NEXT:    tail memcpy@plt
+; RV64-NEXT:    tail memcpy
 ;
 ; RV32-FAST-LABEL: t4:
 ; RV32-FAST:       # %bb.0: # %entry
@@ -353,7 +353,7 @@ define void @t6() nounwind {
 ; RV32-NEXT:    lui a1, %hi(.L.str6)
 ; RV32-NEXT:    addi a1, a1, %lo(.L.str6)
 ; RV32-NEXT:    li a2, 14
-; RV32-NEXT:    call memcpy@plt
+; RV32-NEXT:    call memcpy
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
@@ -367,7 +367,7 @@ define void @t6() nounwind {
 ; RV64-NEXT:    lui a1, %hi(.L.str6)
 ; RV64-NEXT:    addi a1, a1, %lo(.L.str6)
 ; RV64-NEXT:    li a2, 14
-; RV64-NEXT:    call memcpy@plt
+; RV64-NEXT:    call memcpy
 ; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    addi sp, sp, 16
 ; RV64-NEXT:    ret
diff --git a/llvm/test/CodeGen/RISCV/miss-sp-restore-eh.ll b/llvm/test/CodeGen/RISCV/miss-sp-restore-eh.ll
index b7c0a9f292738d..45db5078f150c4 100644
--- a/llvm/test/CodeGen/RISCV/miss-sp-restore-eh.ll
+++ b/llvm/test/CodeGen/RISCV/miss-sp-restore-eh.ll
@@ -34,7 +34,7 @@ define signext i32 @foo() #1 personality ptr @__gxx_personality_v0 {
 ; CHECK-NEXT:    li a5, 0
 ; CHECK-NEXT:    li a6, 0
 ; CHECK-NEXT:    li a7, 0
-; CHECK-NEXT:    call _Z3fooiiiiiiiiiiPi@plt
+; CHECK-NEXT:    call _Z3fooiiiiiiiiiiPi
 ; CHECK-NEXT:    addi sp, sp, 32
 ; CHECK-NEXT:  .Ltmp1:
 ; CHECK-NEXT:  # %bb.1: # %try.cont.unreachable
@@ -44,9 +44,9 @@ define signext i32 @foo() #1 personality ptr @__gxx_personality_v0 {
 ; CHECK-NEXT:    li a2, 1
 ; CHECK-NEXT:    bne a1, a2, .LBB0_4
 ; CHECK-NEXT:  # %bb.3: # %catch
-; CHECK-NEXT:    call __cxa_begin_catch@plt
+; CHECK-NEXT:    call __cxa_begin_catch
 ; CHECK-NEXT:    lw s1, 0(a0)
-; CHECK-NEXT:    call __cxa_end_catch@plt
+; CHECK-NEXT:    call __cxa_end_catch
 ; CHECK-NEXT:    mv a0, s1
 ; CHECK-NEXT:    addi sp, s0, -32
 ; CHECK-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
@@ -55,7 +55,7 @@ define signext i32 @foo() #1 personality ptr @__gxx_personality_v0 {
 ; CHECK-NEXT:    addi sp, sp, 32
 ; CHECK-NEXT:    ret
 ; CHECK-NEXT:  .LBB0_4: # %ehcleanup
-; CHECK-NEXT:    call _Unwind_Resume@plt
+; CHECK-NEXT:    call _Unwind_Resume
 entry:
   invoke void @_Z3fooiiiiiiiiiiPi(i32 signext poison, i32 signext poison, i32 signext poison, i32 signext poison, i32 signext poison, i32 signext poison, i32 signext poison, i32 signext poison, i32 poison, i32 poison, i32 poison)
           to label %try.cont.unreachable unwind label %lpad
diff --git a/llvm/test/CodeGen/RISCV/mul.ll b/llvm/test/CodeGen/RISCV/mul.ll
index f2b7e8d26328d5..af341dbaadeabd 100644
--- a/llvm/test/CodeGen/RISCV/mul.ll
+++ b/llvm/test/CodeGen/RISCV/mul.ll
@@ -14,7 +14,7 @@ define signext i32 @square(i32 %a) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv a1, a0
-; RV32I-NEXT:    call __mulsi3@plt
+; RV32I-NEXT:    call __mulsi3
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -29,7 +29,7 @@ define signext i32 @square(i32 %a) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv a1, a0
-; RV64I-NEXT:    call __muldi3@plt
+; RV64I-NEXT:    call __muldi3
 ; RV64I-NEXT:    sext.w a0, a0
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -48,7 +48,7 @@ define signext i32 @mul(i32 %a, i32 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __mulsi3@plt
+; RV32I-NEXT:    call __mulsi3
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -62,7 +62,7 @@ define signext i32 @mul(i32 %a, i32 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __muldi3@plt
+; RV64I-NEXT:    call __muldi3
 ; RV64I-NEXT:    sext.w a0, a0
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -133,7 +133,7 @@ define i64 @mul64(i64 %a, i64 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __muldi3@plt
+; RV32I-NEXT:    call __muldi3
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -150,7 +150,7 @@ define i64 @mul64(i64 %a, i64 %b) nounwind {
 ;
 ; RV64I-LABEL: mul64:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    tail __muldi3@plt
+; RV64I-NEXT:    tail __muldi3
 ;
 ; RV64IM-LABEL: mul64:
 ; RV64IM:       # %bb.0:
@@ -208,7 +208,7 @@ define i32 @mulhs(i32 %a, i32 %b) nounwind {
 ; RV32I-NEXT:    mv a2, a1
 ; RV32I-NEXT:    srai a1, a0, 31
 ; RV32I-NEXT:    srai a3, a2, 31
-; RV32I-NEXT:    call __muldi3@plt
+; RV32I-NEXT:    call __muldi3
 ; RV32I-NEXT:    mv a0, a1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -225,7 +225,7 @@ define i32 @mulhs(i32 %a, i32 %b) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sext.w a0, a0
 ; RV64I-NEXT:    sext.w a1, a1
-; RV64I-NEXT:    call __muldi3@plt
+; RV64I-NEXT:    call __muldi3
 ; RV64I-NEXT:    srli a0, a0, 32
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -343,7 +343,7 @@ define zeroext i32 @mulhu(i32 zeroext %a, i32 zeroext %b) nounwind {
 ; RV32I-NEXT:    mv a2, a1
 ; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    li a3, 0
-; RV32I-NEXT:    call __muldi3@plt
+; RV32I-NEXT:    call __muldi3
 ; RV32I-NEXT:    mv a0, a1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -358,7 +358,7 @@ define zeroext i32 @mulhu(i32 zeroext %a, i32 zeroext %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __muldi3@plt
+; RV64I-NEXT:    call __muldi3
 ; RV64I-NEXT:    srli a0, a0, 32
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -385,7 +385,7 @@ define i32 @mulhsu(i32 %a, i32 %b) nounwind {
 ; RV32I-NEXT:    mv a2, a1
 ; RV32I-NEXT:    srai a3, a1, 31
 ; RV32I-NEXT:    li a1, 0
-; RV32I-NEXT:    call __muldi3@plt
+; RV32I-NEXT:    call __muldi3
 ; RV32I-NEXT:    mv a0, a1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -403,7 +403,7 @@ define i32 @mulhsu(i32 %a, i32 %b) nounwind {
 ; RV64I-NEXT:    slli a0, a0, 32
 ; RV64I-NEXT:    srli a0, a0, 32
 ; RV64I-NEXT:    sext.w a1, a1
-; RV64I-NEXT:    call __muldi3@plt
+; RV64I-NEXT:    call __muldi3
 ; RV64I-NEXT:    srli a0, a0, 32
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -751,7 +751,7 @@ define i32 @muli32_p384(i32 %a) nounwind {
 ; RV32I-LABEL: muli32_p384:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    li a1, 384
-; RV32I-NEXT:    tail __mulsi3@plt
+; RV32I-NEXT:    tail __mulsi3
 ;
 ; RV32IM-LABEL: muli32_p384:
 ; RV32IM:       # %bb.0:
@@ -764,7 +764,7 @@ define i32 @muli32_p384(i32 %a) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a1, 384
-; RV64I-NEXT:    call __muldi3@plt
+; RV64I-NEXT:    call __muldi3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -782,7 +782,7 @@ define i32 @muli32_p12288(i32 %a) nounwind {
 ; RV32I-LABEL: muli32_p12288:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    lui a1, 3
-; RV32I-NEXT:    tail __mulsi3@plt
+; RV32I-NEXT:    tail __mulsi3
 ;
 ; RV32IM-LABEL: muli32_p12288:
 ; RV32IM:       # %bb.0:
@@ -795,7 +795,7 @@ define i32 @muli32_p12288(i32 %a) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    lui a1, 3
-; RV64I-NEXT:    call __muldi3@plt
+; RV64I-NEXT:    call __muldi3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -910,7 +910,7 @@ define i32 @muli32_m4352(i32 %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    li a1, -17
 ; RV32I-NEXT:    slli a1, a1, 8
-; RV32I-NEXT:    tail __mulsi3@plt
+; RV32I-NEXT:    tail __mulsi3
 ;
 ; RV32IM-LABEL: muli32_m4352:
 ; RV32IM:       # %bb.0:
@@ -925,7 +925,7 @@ define i32 @muli32_m4352(i32 %a) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a1, -17
 ; RV64I-NEXT:    slli a1, a1, 8
-; RV64I-NEXT:    call __muldi3@plt
+; RV64I-NEXT:    call __muldi3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1036,7 +1036,7 @@ define i64 @muli64_m4352(i64 %a) nounwind {
 ; RV32I-NEXT:    li a2, -17
 ; RV32I-NEXT:    slli a2, a2, 8
 ; RV32I-NEXT:    li a3, -1
-; RV32I-NEXT:    call __muldi3@plt
+; RV32I-NEXT:    call __muldi3
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1056,7 +1056,7 @@ define i64 @muli64_m4352(i64 %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    li a1, -17
 ; RV64I-NEXT:    slli a1, a1, 8
-; RV64I-NEXT:    tail __muldi3@plt
+; RV64I-NEXT:    tail __muldi3
 ;
 ; RV64IM-LABEL: muli64_m4352:
 ; RV64IM:       # %bb.0:
@@ -1395,13 +1395,13 @@ define i64 @mulhsu_i64(i64 %a, i64 %b) nounwind {
 ; RV32I-NEXT:    srai s4, a3, 31
 ; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    li a3, 0
-; RV32I-NEXT:    call __muldi3@plt
+; RV32I-NEXT:    call __muldi3
 ; RV32I-NEXT:    mv s5, a1
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    mv a2, s3
 ; RV32I-NEXT:    li a3, 0
-; RV32I-NEXT:    call __muldi3@plt
+; RV32I-NEXT:    call __muldi3
 ; RV32I-NEXT:    add s5, a0, s5
 ; RV32I-NEXT:    sltu a0, s5, a0
 ; RV32I-NEXT:    add s7, a1, a0
@@ -1409,7 +1409,7 @@ define i64 @mulhsu_i64(i64 %a, i64 %b) nounwind {
 ; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    mv a2, s2
 ; RV32I-NEXT:    li a3, 0
-; RV32I-NEXT:    call __muldi3@plt
+; RV32I-NEXT:    call __muldi3
 ; RV32I-NEXT:    add s5, a0, s5
 ; RV32I-NEXT:    sltu a0, s5, a0
 ; RV32I-NEXT:    add a0, a1, a0
@@ -1418,7 +1418,7 @@ define i64 @mulhsu_i64(i64 %a, i64 %b) nounwind {
 ; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    mv a2, s2
 ; RV32I-NEXT:    li a3, 0
-; RV32I-NEXT:    call __muldi3@plt
+; RV32I-NEXT:    call __muldi3
 ; RV32I-NEXT:    mv s5, a0
 ; RV32I-NEXT:    mv s6, a1
 ; RV32I-NEXT:    add s9, a0, s8
@@ -1426,14 +1426,14 @@ define i64 @mulhsu_i64(i64 %a, i64 %b) nounwind {
 ; RV32I-NEXT:    mv a1, s2
 ; RV32I-NEXT:    li a2, 0
 ; RV32I-NEXT:    li a3, 0
-; RV32I-NEXT:    call __muldi3@plt
+; RV32I-NEXT:    call __muldi3
 ; RV32I-NEXT:    mv s2, a0
 ; RV32I-NEXT:    mv s3, a1
 ; RV32I-NEXT:    mv a0, s4
 ; RV32I-NEXT:    mv a1, s4
 ; RV32I-NEXT:    mv a2, s1
 ; RV32I-NEXT:    mv a3, s0
-; RV32I-NEXT:    call __muldi3@plt
+; RV32I-NEXT:    call __muldi3
 ; RV32I-NEXT:    add s2, a0, s2
 ; RV32I-NEXT:    add a2, s9, s2
 ; RV32I-NEXT:    sltu a3, a2, s9
@@ -1502,7 +1502,7 @@ define i64 @mulhsu_i64(i64 %a, i64 %b) nounwind {
 ; RV64I-NEXT:    mv a2, a1
 ; RV64I-NEXT:    srai a3, a1, 63
 ; RV64I-NEXT:    li a1, 0
-; RV64I-NEXT:    call __multi3@plt
+; RV64I-NEXT:    call __multi3
 ; RV64I-NEXT:    mv a0, a1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/nest-register.ll b/llvm/test/CodeGen/RISCV/nest-register.ll
index 97704ebae4cb23..e222beee45783b 100644
--- a/llvm/test/CodeGen/RISCV/nest-register.ll
+++ b/llvm/test/CodeGen/RISCV/nest-register.ll
@@ -17,7 +17,6 @@ define ptr @nest_receiver(ptr nest %arg) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    mv a0, t2
 ; RV64I-NEXT:    ret
-;
   ret ptr %arg
 }
 
@@ -27,7 +26,7 @@ define ptr @nest_caller(ptr %arg) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv t2, a0
-; RV32I-NEXT:    call nest_receiver@plt
+; RV32I-NEXT:    call nest_receiver
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -37,11 +36,10 @@ define ptr @nest_caller(ptr %arg) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv t2, a0
-; RV64I-NEXT:    call nest_receiver@plt
+; RV64I-NEXT:    call nest_receiver
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
-;
   %result = call ptr @nest_receiver(ptr nest %arg)
   ret ptr %result
 }
diff --git a/llvm/test/CodeGen/RISCV/nomerge.ll b/llvm/test/CodeGen/RISCV/nomerge.ll
index f4e50b6697aef0..8e77adfc16e27d 100644
--- a/llvm/test/CodeGen/RISCV/nomerge.ll
+++ b/llvm/test/CodeGen/RISCV/nomerge.ll
@@ -13,14 +13,14 @@ define void @foo(i32 %i) nounwind {
 ; CHECK-NEXT:    li a1, 5
 ; CHECK-NEXT:    bne a0, a1, .LBB0_4
 ; CHECK-NEXT:  # %bb.2: # %if.then
-; CHECK-NEXT:    call bar@plt
+; CHECK-NEXT:    call bar
 ; CHECK-NEXT:    j .LBB0_4
 ; CHECK-NEXT:  .LBB0_3: # %if.then2
-; CHECK-NEXT:    call bar@plt
+; CHECK-NEXT:    call bar
 ; CHECK-NEXT:  .LBB0_4: # %if.end3
 ; CHECK-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    tail bar@plt
+; CHECK-NEXT:    tail bar
 entry:
   switch i32 %i, label %if.end3 [
     i32 5, label %if.then
@@ -46,9 +46,9 @@ define void @foo_tail(i1 %i) nounwind {
 ; CHECK-NEXT:    andi a0, a0, 1
 ; CHECK-NEXT:    beqz a0, .LBB1_2
 ; CHECK-NEXT:  # %bb.1: # %if.then
-; CHECK-NEXT:    tail bar@plt
+; CHECK-NEXT:    tail bar
 ; CHECK-NEXT:  .LBB1_2: # %if.else
-; CHECK-NEXT:    tail bar@plt
+; CHECK-NEXT:    tail bar
 entry:
   br i1 %i, label %if.then, label %if.else
 
diff --git a/llvm/test/CodeGen/RISCV/out-of-reach-emergency-slot.mir b/llvm/test/CodeGen/RISCV/out-of-reach-emergency-slot.mir
index ca80abc546012a..19ad7b16e386ea 100644
--- a/llvm/test/CodeGen/RISCV/out-of-reach-emergency-slot.mir
+++ b/llvm/test/CodeGen/RISCV/out-of-reach-emergency-slot.mir
@@ -40,7 +40,7 @@
   ; CHECK-NEXT:    add a1, sp, a1
   ; CHECK-NEXT:    sd a0, -8(a1)
   ; CHECK-NEXT:    ld a1, 0(sp)
-  ; CHECK-NEXT:    call foo@plt
+  ; CHECK-NEXT:    call foo
   ; CHECK-NEXT:    lui a0, 2
   ; CHECK-NEXT:    sub sp, s0, a0
   ; CHECK-NEXT:    addiw a0, a0, -2032
@@ -80,3 +80,5 @@ body:             |
     PseudoRET
 
 ...
+## NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+# DEBUG: {{.*}}
diff --git a/llvm/test/CodeGen/RISCV/overflow-intrinsics.ll b/llvm/test/CodeGen/RISCV/overflow-intrinsics.ll
index 7c3294fa81dcfe..4bb65f376218f1 100644
--- a/llvm/test/CodeGen/RISCV/overflow-intrinsics.ll
+++ b/llvm/test/CodeGen/RISCV/overflow-intrinsics.ll
@@ -451,7 +451,7 @@ define i64 @uaddo6_xor_multi_use(i64 %a, i64 %b) {
 ; RV32-NEXT:  .LBB10_4:
 ; RV32-NEXT:    neg s1, a2
 ; RV32-NEXT:    and s1, s1, a3
-; RV32-NEXT:    call use@plt
+; RV32-NEXT:    call use
 ; RV32-NEXT:    mv a0, s0
 ; RV32-NEXT:    mv a1, s1
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -474,7 +474,7 @@ define i64 @uaddo6_xor_multi_use(i64 %a, i64 %b) {
 ; RV64-NEXT:  # %bb.1:
 ; RV64-NEXT:    li s0, 42
 ; RV64-NEXT:  .LBB10_2:
-; RV64-NEXT:    call use@plt
+; RV64-NEXT:    call use
 ; RV64-NEXT:    mv a0, s0
 ; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
@@ -1091,7 +1091,7 @@ define i1 @usubo_ult_cmp_dominates_i64(i64 %x, i64 %y, ptr %p, i1 %cond) {
 ; RV32-NEXT:    sltu s6, s4, s1
 ; RV32-NEXT:  .LBB32_4: # %t
 ; RV32-NEXT:    mv a0, s6
-; RV32-NEXT:    call call@plt
+; RV32-NEXT:    call call
 ; RV32-NEXT:    beqz s6, .LBB32_8
 ; RV32-NEXT:  # %bb.5: # %end
 ; RV32-NEXT:    sltu a1, s4, s1
@@ -1145,7 +1145,7 @@ define i1 @usubo_ult_cmp_dominates_i64(i64 %x, i64 %y, ptr %p, i1 %cond) {
 ; RV64-NEXT:    mv s3, a0
 ; RV64-NEXT:    sltu s4, a0, a1
 ; RV64-NEXT:    mv a0, s4
-; RV64-NEXT:    call call@plt
+; RV64-NEXT:    call call
 ; RV64-NEXT:    bgeu s3, s2, .LBB32_3
 ; RV64-NEXT:  # %bb.2: # %end
 ; RV64-NEXT:    sub a0, s3, s2
diff --git a/llvm/test/CodeGen/RISCV/pr51206.ll b/llvm/test/CodeGen/RISCV/pr51206.ll
index b83903e7c55cd4..f54031af0de5e6 100644
--- a/llvm/test/CodeGen/RISCV/pr51206.ll
+++ b/llvm/test/CodeGen/RISCV/pr51206.ll
@@ -31,7 +31,7 @@ define signext i32 @wobble() nounwind {
 ; CHECK-NEXT:  # %bb.1: # %bb10
 ; CHECK-NEXT:    addi sp, sp, -16
 ; CHECK-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; CHECK-NEXT:    call quux@plt
+; CHECK-NEXT:    call quux
 ; CHECK-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; CHECK-NEXT:    addi sp, sp, 16
 ; CHECK-NEXT:  .LBB0_2: # %bb12
diff --git a/llvm/test/CodeGen/RISCV/pr63816.ll b/llvm/test/CodeGen/RISCV/pr63816.ll
index 6eaec08abb909e..2e33a0e994996a 100644
--- a/llvm/test/CodeGen/RISCV/pr63816.ll
+++ b/llvm/test/CodeGen/RISCV/pr63816.ll
@@ -19,31 +19,31 @@ define void @test(ptr %0, ptr %1) nounwind {
 ; CHECK-NEXT:    mv s1, a0
 ; CHECK-NEXT:    lhu a0, 12(a0)
 ; CHECK-NEXT:    fmv.w.x fa0, a0
-; CHECK-NEXT:    call __extendhfsf2@plt
+; CHECK-NEXT:    call __extendhfsf2
 ; CHECK-NEXT:    fmv.s fs0, fa0
 ; CHECK-NEXT:    lhu a0, 10(s1)
 ; CHECK-NEXT:    fmv.w.x fa0, a0
-; CHECK-NEXT:    call __extendhfsf2@plt
+; CHECK-NEXT:    call __extendhfsf2
 ; CHECK-NEXT:    fmv.s fs1, fa0
 ; CHECK-NEXT:    lhu a0, 8(s1)
 ; CHECK-NEXT:    fmv.w.x fa0, a0
-; CHECK-NEXT:    call __extendhfsf2@plt
+; CHECK-NEXT:    call __extendhfsf2
 ; CHECK-NEXT:    fmv.s fs2, fa0
 ; CHECK-NEXT:    lhu a0, 6(s1)
 ; CHECK-NEXT:    fmv.w.x fa0, a0
-; CHECK-NEXT:    call __extendhfsf2@plt
+; CHECK-NEXT:    call __extendhfsf2
 ; CHECK-NEXT:    fmv.s fs3, fa0
 ; CHECK-NEXT:    lhu a0, 4(s1)
 ; CHECK-NEXT:    fmv.w.x fa0, a0
-; CHECK-NEXT:    call __extendhfsf2@plt
+; CHECK-NEXT:    call __extendhfsf2
 ; CHECK-NEXT:    fmv.s fs4, fa0
 ; CHECK-NEXT:    lhu a0, 2(s1)
 ; CHECK-NEXT:    fmv.w.x fa0, a0
-; CHECK-NEXT:    call __extendhfsf2@plt
+; CHECK-NEXT:    call __extendhfsf2
 ; CHECK-NEXT:    fmv.s fs5, fa0
 ; CHECK-NEXT:    lhu a0, 0(s1)
 ; CHECK-NEXT:    fmv.w.x fa0, a0
-; CHECK-NEXT:    call __extendhfsf2@plt
+; CHECK-NEXT:    call __extendhfsf2
 ; CHECK-NEXT:    fcvt.d.s fs6, fa0
 ; CHECK-NEXT:    fcvt.d.s fs5, fs5
 ; CHECK-NEXT:    fcvt.d.s fs4, fs4
@@ -53,7 +53,7 @@ define void @test(ptr %0, ptr %1) nounwind {
 ; CHECK-NEXT:    fcvt.d.s fs1, fs1
 ; CHECK-NEXT:    fmv.w.x fa0, a0
 ; CHECK-NEXT:    fcvt.d.s fs0, fs0
-; CHECK-NEXT:    call __extendhfsf2@plt
+; CHECK-NEXT:    call __extendhfsf2
 ; CHECK-NEXT:    fcvt.d.s fa5, fa0
 ; CHECK-NEXT:    fsd fa5, 56(s0)
 ; CHECK-NEXT:    fsd fs0, 48(s0)
diff --git a/llvm/test/CodeGen/RISCV/push-pop-popret.ll b/llvm/test/CodeGen/RISCV/push-pop-popret.ll
index 9ff4235746caf0..945e7b46f8c9ff 100644
--- a/llvm/test/CodeGen/RISCV/push-pop-popret.ll
+++ b/llvm/test/CodeGen/RISCV/push-pop-popret.ll
@@ -24,7 +24,7 @@ define i32 @foo() {
 ; RV32IZCMP-NEXT:    .cfi_def_cfa_offset 528
 ; RV32IZCMP-NEXT:    .cfi_offset ra, -4
 ; RV32IZCMP-NEXT:    mv a0, sp
-; RV32IZCMP-NEXT:    call test@plt
+; RV32IZCMP-NEXT:    call test
 ; RV32IZCMP-NEXT:    addi sp, sp, 464
 ; RV32IZCMP-NEXT:    cm.popretz {ra}, 64
 ;
@@ -35,7 +35,7 @@ define i32 @foo() {
 ; RV64IZCMP-NEXT:    .cfi_def_cfa_offset 528
 ; RV64IZCMP-NEXT:    .cfi_offset ra, -8
 ; RV64IZCMP-NEXT:    mv a0, sp
-; RV64IZCMP-NEXT:    call test@plt
+; RV64IZCMP-NEXT:    call test
 ; RV64IZCMP-NEXT:    addi sp, sp, 464
 ; RV64IZCMP-NEXT:    cm.popretz {ra}, 64
 ;
@@ -46,7 +46,7 @@ define i32 @foo() {
 ; RV32IZCMP-SR-NEXT:    .cfi_def_cfa_offset 528
 ; RV32IZCMP-SR-NEXT:    .cfi_offset ra, -4
 ; RV32IZCMP-SR-NEXT:    mv a0, sp
-; RV32IZCMP-SR-NEXT:    call test@plt
+; RV32IZCMP-SR-NEXT:    call test
 ; RV32IZCMP-SR-NEXT:    addi sp, sp, 464
 ; RV32IZCMP-SR-NEXT:    cm.popretz {ra}, 64
 ;
@@ -57,7 +57,7 @@ define i32 @foo() {
 ; RV64IZCMP-SR-NEXT:    .cfi_def_cfa_offset 528
 ; RV64IZCMP-SR-NEXT:    .cfi_offset ra, -8
 ; RV64IZCMP-SR-NEXT:    mv a0, sp
-; RV64IZCMP-SR-NEXT:    call test@plt
+; RV64IZCMP-SR-NEXT:    call test
 ; RV64IZCMP-SR-NEXT:    addi sp, sp, 464
 ; RV64IZCMP-SR-NEXT:    cm.popretz {ra}, 64
 ;
@@ -68,7 +68,7 @@ define i32 @foo() {
 ; RV32I-NEXT:    sw ra, 524(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    .cfi_offset ra, -4
 ; RV32I-NEXT:    addi a0, sp, 12
-; RV32I-NEXT:    call test@plt
+; RV32I-NEXT:    call test
 ; RV32I-NEXT:    li a0, 0
 ; RV32I-NEXT:    lw ra, 524(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 528
@@ -81,7 +81,7 @@ define i32 @foo() {
 ; RV64I-NEXT:    sd ra, 520(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    .cfi_offset ra, -8
 ; RV64I-NEXT:    addi a0, sp, 8
-; RV64I-NEXT:    call test@plt
+; RV64I-NEXT:    call test
 ; RV64I-NEXT:    li a0, 0
 ; RV64I-NEXT:    ld ra, 520(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 528
@@ -105,7 +105,7 @@ define i32 @pushpopret0(i32 signext %size){
 ; RV32IZCMP-NEXT:    andi a0, a0, -16
 ; RV32IZCMP-NEXT:    sub a0, sp, a0
 ; RV32IZCMP-NEXT:    mv sp, a0
-; RV32IZCMP-NEXT:    call callee_void@plt
+; RV32IZCMP-NEXT:    call callee_void
 ; RV32IZCMP-NEXT:    addi sp, s0, -16
 ; RV32IZCMP-NEXT:    cm.popretz {ra, s0}, 16
 ;
@@ -123,7 +123,7 @@ define i32 @pushpopret0(i32 signext %size){
 ; RV64IZCMP-NEXT:    andi a0, a0, -16
 ; RV64IZCMP-NEXT:    sub a0, sp, a0
 ; RV64IZCMP-NEXT:    mv sp, a0
-; RV64IZCMP-NEXT:    call callee_void@plt
+; RV64IZCMP-NEXT:    call callee_void
 ; RV64IZCMP-NEXT:    addi sp, s0, -16
 ; RV64IZCMP-NEXT:    cm.popretz {ra, s0}, 16
 ;
@@ -139,7 +139,7 @@ define i32 @pushpopret0(i32 signext %size){
 ; RV32IZCMP-SR-NEXT:    andi a0, a0, -16
 ; RV32IZCMP-SR-NEXT:    sub a0, sp, a0
 ; RV32IZCMP-SR-NEXT:    mv sp, a0
-; RV32IZCMP-SR-NEXT:    call callee_void@plt
+; RV32IZCMP-SR-NEXT:    call callee_void
 ; RV32IZCMP-SR-NEXT:    addi sp, s0, -16
 ; RV32IZCMP-SR-NEXT:    cm.popretz {ra, s0}, 16
 ;
@@ -157,7 +157,7 @@ define i32 @pushpopret0(i32 signext %size){
 ; RV64IZCMP-SR-NEXT:    andi a0, a0, -16
 ; RV64IZCMP-SR-NEXT:    sub a0, sp, a0
 ; RV64IZCMP-SR-NEXT:    mv sp, a0
-; RV64IZCMP-SR-NEXT:    call callee_void@plt
+; RV64IZCMP-SR-NEXT:    call callee_void
 ; RV64IZCMP-SR-NEXT:    addi sp, s0, -16
 ; RV64IZCMP-SR-NEXT:    cm.popretz {ra, s0}, 16
 ;
@@ -175,7 +175,7 @@ define i32 @pushpopret0(i32 signext %size){
 ; RV32I-NEXT:    andi a0, a0, -16
 ; RV32I-NEXT:    sub a0, sp, a0
 ; RV32I-NEXT:    mv sp, a0
-; RV32I-NEXT:    call callee_void@plt
+; RV32I-NEXT:    call callee_void
 ; RV32I-NEXT:    li a0, 0
 ; RV32I-NEXT:    addi sp, s0, -16
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -199,7 +199,7 @@ define i32 @pushpopret0(i32 signext %size){
 ; RV64I-NEXT:    andi a0, a0, -16
 ; RV64I-NEXT:    sub a0, sp, a0
 ; RV64I-NEXT:    mv sp, a0
-; RV64I-NEXT:    call callee_void@plt
+; RV64I-NEXT:    call callee_void
 ; RV64I-NEXT:    li a0, 0
 ; RV64I-NEXT:    addi sp, s0, -16
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -225,7 +225,7 @@ define i32 @pushpopret1(i32 signext %size) {
 ; RV32IZCMP-NEXT:    andi a0, a0, -16
 ; RV32IZCMP-NEXT:    sub a0, sp, a0
 ; RV32IZCMP-NEXT:    mv sp, a0
-; RV32IZCMP-NEXT:    call callee_void@plt
+; RV32IZCMP-NEXT:    call callee_void
 ; RV32IZCMP-NEXT:    li a0, 1
 ; RV32IZCMP-NEXT:    addi sp, s0, -16
 ; RV32IZCMP-NEXT:    cm.popret {ra, s0}, 16
@@ -244,7 +244,7 @@ define i32 @pushpopret1(i32 signext %size) {
 ; RV64IZCMP-NEXT:    andi a0, a0, -16
 ; RV64IZCMP-NEXT:    sub a0, sp, a0
 ; RV64IZCMP-NEXT:    mv sp, a0
-; RV64IZCMP-NEXT:    call callee_void@plt
+; RV64IZCMP-NEXT:    call callee_void
 ; RV64IZCMP-NEXT:    li a0, 1
 ; RV64IZCMP-NEXT:    addi sp, s0, -16
 ; RV64IZCMP-NEXT:    cm.popret {ra, s0}, 16
@@ -261,7 +261,7 @@ define i32 @pushpopret1(i32 signext %size) {
 ; RV32IZCMP-SR-NEXT:    andi a0, a0, -16
 ; RV32IZCMP-SR-NEXT:    sub a0, sp, a0
 ; RV32IZCMP-SR-NEXT:    mv sp, a0
-; RV32IZCMP-SR-NEXT:    call callee_void@plt
+; RV32IZCMP-SR-NEXT:    call callee_void
 ; RV32IZCMP-SR-NEXT:    li a0, 1
 ; RV32IZCMP-SR-NEXT:    addi sp, s0, -16
 ; RV32IZCMP-SR-NEXT:    cm.popret {ra, s0}, 16
@@ -280,7 +280,7 @@ define i32 @pushpopret1(i32 signext %size) {
 ; RV64IZCMP-SR-NEXT:    andi a0, a0, -16
 ; RV64IZCMP-SR-NEXT:    sub a0, sp, a0
 ; RV64IZCMP-SR-NEXT:    mv sp, a0
-; RV64IZCMP-SR-NEXT:    call callee_void@plt
+; RV64IZCMP-SR-NEXT:    call callee_void
 ; RV64IZCMP-SR-NEXT:    li a0, 1
 ; RV64IZCMP-SR-NEXT:    addi sp, s0, -16
 ; RV64IZCMP-SR-NEXT:    cm.popret {ra, s0}, 16
@@ -299,7 +299,7 @@ define i32 @pushpopret1(i32 signext %size) {
 ; RV32I-NEXT:    andi a0, a0, -16
 ; RV32I-NEXT:    sub a0, sp, a0
 ; RV32I-NEXT:    mv sp, a0
-; RV32I-NEXT:    call callee_void@plt
+; RV32I-NEXT:    call callee_void
 ; RV32I-NEXT:    li a0, 1
 ; RV32I-NEXT:    addi sp, s0, -16
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -323,7 +323,7 @@ define i32 @pushpopret1(i32 signext %size) {
 ; RV64I-NEXT:    andi a0, a0, -16
 ; RV64I-NEXT:    sub a0, sp, a0
 ; RV64I-NEXT:    mv sp, a0
-; RV64I-NEXT:    call callee_void@plt
+; RV64I-NEXT:    call callee_void
 ; RV64I-NEXT:    li a0, 1
 ; RV64I-NEXT:    addi sp, s0, -16
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -349,7 +349,7 @@ define i32 @pushpopretneg1(i32 signext %size) {
 ; RV32IZCMP-NEXT:    andi a0, a0, -16
 ; RV32IZCMP-NEXT:    sub a0, sp, a0
 ; RV32IZCMP-NEXT:    mv sp, a0
-; RV32IZCMP-NEXT:    call callee_void@plt
+; RV32IZCMP-NEXT:    call callee_void
 ; RV32IZCMP-NEXT:    li a0, -1
 ; RV32IZCMP-NEXT:    addi sp, s0, -16
 ; RV32IZCMP-NEXT:    cm.popret {ra, s0}, 16
@@ -368,7 +368,7 @@ define i32 @pushpopretneg1(i32 signext %size) {
 ; RV64IZCMP-NEXT:    andi a0, a0, -16
 ; RV64IZCMP-NEXT:    sub a0, sp, a0
 ; RV64IZCMP-NEXT:    mv sp, a0
-; RV64IZCMP-NEXT:    call callee_void@plt
+; RV64IZCMP-NEXT:    call callee_void
 ; RV64IZCMP-NEXT:    li a0, -1
 ; RV64IZCMP-NEXT:    addi sp, s0, -16
 ; RV64IZCMP-NEXT:    cm.popret {ra, s0}, 16
@@ -385,7 +385,7 @@ define i32 @pushpopretneg1(i32 signext %size) {
 ; RV32IZCMP-SR-NEXT:    andi a0, a0, -16
 ; RV32IZCMP-SR-NEXT:    sub a0, sp, a0
 ; RV32IZCMP-SR-NEXT:    mv sp, a0
-; RV32IZCMP-SR-NEXT:    call callee_void@plt
+; RV32IZCMP-SR-NEXT:    call callee_void
 ; RV32IZCMP-SR-NEXT:    li a0, -1
 ; RV32IZCMP-SR-NEXT:    addi sp, s0, -16
 ; RV32IZCMP-SR-NEXT:    cm.popret {ra, s0}, 16
@@ -404,7 +404,7 @@ define i32 @pushpopretneg1(i32 signext %size) {
 ; RV64IZCMP-SR-NEXT:    andi a0, a0, -16
 ; RV64IZCMP-SR-NEXT:    sub a0, sp, a0
 ; RV64IZCMP-SR-NEXT:    mv sp, a0
-; RV64IZCMP-SR-NEXT:    call callee_void@plt
+; RV64IZCMP-SR-NEXT:    call callee_void
 ; RV64IZCMP-SR-NEXT:    li a0, -1
 ; RV64IZCMP-SR-NEXT:    addi sp, s0, -16
 ; RV64IZCMP-SR-NEXT:    cm.popret {ra, s0}, 16
@@ -423,7 +423,7 @@ define i32 @pushpopretneg1(i32 signext %size) {
 ; RV32I-NEXT:    andi a0, a0, -16
 ; RV32I-NEXT:    sub a0, sp, a0
 ; RV32I-NEXT:    mv sp, a0
-; RV32I-NEXT:    call callee_void@plt
+; RV32I-NEXT:    call callee_void
 ; RV32I-NEXT:    li a0, -1
 ; RV32I-NEXT:    addi sp, s0, -16
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -447,7 +447,7 @@ define i32 @pushpopretneg1(i32 signext %size) {
 ; RV64I-NEXT:    andi a0, a0, -16
 ; RV64I-NEXT:    sub a0, sp, a0
 ; RV64I-NEXT:    mv sp, a0
-; RV64I-NEXT:    call callee_void@plt
+; RV64I-NEXT:    call callee_void
 ; RV64I-NEXT:    li a0, -1
 ; RV64I-NEXT:    addi sp, s0, -16
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -473,7 +473,7 @@ define i32 @pushpopret2(i32 signext %size) {
 ; RV32IZCMP-NEXT:    andi a0, a0, -16
 ; RV32IZCMP-NEXT:    sub a0, sp, a0
 ; RV32IZCMP-NEXT:    mv sp, a0
-; RV32IZCMP-NEXT:    call callee_void@plt
+; RV32IZCMP-NEXT:    call callee_void
 ; RV32IZCMP-NEXT:    li a0, 2
 ; RV32IZCMP-NEXT:    addi sp, s0, -16
 ; RV32IZCMP-NEXT:    cm.popret {ra, s0}, 16
@@ -492,7 +492,7 @@ define i32 @pushpopret2(i32 signext %size) {
 ; RV64IZCMP-NEXT:    andi a0, a0, -16
 ; RV64IZCMP-NEXT:    sub a0, sp, a0
 ; RV64IZCMP-NEXT:    mv sp, a0
-; RV64IZCMP-NEXT:    call callee_void@plt
+; RV64IZCMP-NEXT:    call callee_void
 ; RV64IZCMP-NEXT:    li a0, 2
 ; RV64IZCMP-NEXT:    addi sp, s0, -16
 ; RV64IZCMP-NEXT:    cm.popret {ra, s0}, 16
@@ -509,7 +509,7 @@ define i32 @pushpopret2(i32 signext %size) {
 ; RV32IZCMP-SR-NEXT:    andi a0, a0, -16
 ; RV32IZCMP-SR-NEXT:    sub a0, sp, a0
 ; RV32IZCMP-SR-NEXT:    mv sp, a0
-; RV32IZCMP-SR-NEXT:    call callee_void@plt
+; RV32IZCMP-SR-NEXT:    call callee_void
 ; RV32IZCMP-SR-NEXT:    li a0, 2
 ; RV32IZCMP-SR-NEXT:    addi sp, s0, -16
 ; RV32IZCMP-SR-NEXT:    cm.popret {ra, s0}, 16
@@ -528,7 +528,7 @@ define i32 @pushpopret2(i32 signext %size) {
 ; RV64IZCMP-SR-NEXT:    andi a0, a0, -16
 ; RV64IZCMP-SR-NEXT:    sub a0, sp, a0
 ; RV64IZCMP-SR-NEXT:    mv sp, a0
-; RV64IZCMP-SR-NEXT:    call callee_void@plt
+; RV64IZCMP-SR-NEXT:    call callee_void
 ; RV64IZCMP-SR-NEXT:    li a0, 2
 ; RV64IZCMP-SR-NEXT:    addi sp, s0, -16
 ; RV64IZCMP-SR-NEXT:    cm.popret {ra, s0}, 16
@@ -547,7 +547,7 @@ define i32 @pushpopret2(i32 signext %size) {
 ; RV32I-NEXT:    andi a0, a0, -16
 ; RV32I-NEXT:    sub a0, sp, a0
 ; RV32I-NEXT:    mv sp, a0
-; RV32I-NEXT:    call callee_void@plt
+; RV32I-NEXT:    call callee_void
 ; RV32I-NEXT:    li a0, 2
 ; RV32I-NEXT:    addi sp, s0, -16
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -571,7 +571,7 @@ define i32 @pushpopret2(i32 signext %size) {
 ; RV64I-NEXT:    andi a0, a0, -16
 ; RV64I-NEXT:    sub a0, sp, a0
 ; RV64I-NEXT:    mv sp, a0
-; RV64I-NEXT:    call callee_void@plt
+; RV64I-NEXT:    call callee_void
 ; RV64I-NEXT:    li a0, 2
 ; RV64I-NEXT:    addi sp, s0, -16
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -599,7 +599,7 @@ define dso_local i32 @tailcall(i32 signext %size) local_unnamed_addr #0 {
 ; RV32IZCMP-NEXT:    mv sp, a0
 ; RV32IZCMP-NEXT:    addi sp, s0, -16
 ; RV32IZCMP-NEXT:    cm.pop {ra, s0}, 16
-; RV32IZCMP-NEXT:    tail callee@plt
+; RV32IZCMP-NEXT:    tail callee
 ;
 ; RV64IZCMP-LABEL: tailcall:
 ; RV64IZCMP:       # %bb.0: # %entry
@@ -617,7 +617,7 @@ define dso_local i32 @tailcall(i32 signext %size) local_unnamed_addr #0 {
 ; RV64IZCMP-NEXT:    mv sp, a0
 ; RV64IZCMP-NEXT:    addi sp, s0, -16
 ; RV64IZCMP-NEXT:    cm.pop {ra, s0}, 16
-; RV64IZCMP-NEXT:    tail callee@plt
+; RV64IZCMP-NEXT:    tail callee
 ;
 ; RV32IZCMP-SR-LABEL: tailcall:
 ; RV32IZCMP-SR:       # %bb.0: # %entry
@@ -633,7 +633,7 @@ define dso_local i32 @tailcall(i32 signext %size) local_unnamed_addr #0 {
 ; RV32IZCMP-SR-NEXT:    mv sp, a0
 ; RV32IZCMP-SR-NEXT:    addi sp, s0, -16
 ; RV32IZCMP-SR-NEXT:    cm.pop {ra, s0}, 16
-; RV32IZCMP-SR-NEXT:    tail callee@plt
+; RV32IZCMP-SR-NEXT:    tail callee
 ;
 ; RV64IZCMP-SR-LABEL: tailcall:
 ; RV64IZCMP-SR:       # %bb.0: # %entry
@@ -651,7 +651,7 @@ define dso_local i32 @tailcall(i32 signext %size) local_unnamed_addr #0 {
 ; RV64IZCMP-SR-NEXT:    mv sp, a0
 ; RV64IZCMP-SR-NEXT:    addi sp, s0, -16
 ; RV64IZCMP-SR-NEXT:    cm.pop {ra, s0}, 16
-; RV64IZCMP-SR-NEXT:    tail callee@plt
+; RV64IZCMP-SR-NEXT:    tail callee
 ;
 ; RV32I-LABEL: tailcall:
 ; RV32I:       # %bb.0: # %entry
@@ -671,7 +671,7 @@ define dso_local i32 @tailcall(i32 signext %size) local_unnamed_addr #0 {
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
-; RV32I-NEXT:    tail callee@plt
+; RV32I-NEXT:    tail callee
 ;
 ; RV64I-LABEL: tailcall:
 ; RV64I:       # %bb.0: # %entry
@@ -693,7 +693,7 @@ define dso_local i32 @tailcall(i32 signext %size) local_unnamed_addr #0 {
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
-; RV64I-NEXT:    tail callee@plt
+; RV64I-NEXT:    tail callee
 entry:
   %0 = alloca i8, i32 %size, align 16
   %1 = tail call i32 @callee(i8* nonnull %0)
@@ -730,7 +730,7 @@ define i32 @nocompress(i32 signext %size) {
 ; RV32IZCMP-NEXT:    addi s7, s1, %lo(var)
 ; RV32IZCMP-NEXT:    lw s8, 16(s7)
 ; RV32IZCMP-NEXT:    mv a0, s2
-; RV32IZCMP-NEXT:    call callee_void@plt
+; RV32IZCMP-NEXT:    call callee_void
 ; RV32IZCMP-NEXT:    sw s8, 16(s7)
 ; RV32IZCMP-NEXT:    sw s6, %lo(var+12)(s1)
 ; RV32IZCMP-NEXT:    sw s5, %lo(var+8)(s1)
@@ -739,7 +739,7 @@ define i32 @nocompress(i32 signext %size) {
 ; RV32IZCMP-NEXT:    mv a0, s2
 ; RV32IZCMP-NEXT:    addi sp, s0, -48
 ; RV32IZCMP-NEXT:    cm.pop {ra, s0-s8}, 48
-; RV32IZCMP-NEXT:    tail callee@plt
+; RV32IZCMP-NEXT:    tail callee
 ;
 ; RV64IZCMP-LABEL: nocompress:
 ; RV64IZCMP:       # %bb.0: # %entry
@@ -771,7 +771,7 @@ define i32 @nocompress(i32 signext %size) {
 ; RV64IZCMP-NEXT:    addi s7, s1, %lo(var)
 ; RV64IZCMP-NEXT:    lw s8, 16(s7)
 ; RV64IZCMP-NEXT:    mv a0, s2
-; RV64IZCMP-NEXT:    call callee_void@plt
+; RV64IZCMP-NEXT:    call callee_void
 ; RV64IZCMP-NEXT:    sw s8, 16(s7)
 ; RV64IZCMP-NEXT:    sw s6, %lo(var+12)(s1)
 ; RV64IZCMP-NEXT:    sw s5, %lo(var+8)(s1)
@@ -780,7 +780,7 @@ define i32 @nocompress(i32 signext %size) {
 ; RV64IZCMP-NEXT:    mv a0, s2
 ; RV64IZCMP-NEXT:    addi sp, s0, -80
 ; RV64IZCMP-NEXT:    cm.pop {ra, s0-s8}, 80
-; RV64IZCMP-NEXT:    tail callee@plt
+; RV64IZCMP-NEXT:    tail callee
 ;
 ; RV32IZCMP-SR-LABEL: nocompress:
 ; RV32IZCMP-SR:       # %bb.0: # %entry
@@ -810,7 +810,7 @@ define i32 @nocompress(i32 signext %size) {
 ; RV32IZCMP-SR-NEXT:    addi s7, s1, %lo(var)
 ; RV32IZCMP-SR-NEXT:    lw s8, 16(s7)
 ; RV32IZCMP-SR-NEXT:    mv a0, s2
-; RV32IZCMP-SR-NEXT:    call callee_void@plt
+; RV32IZCMP-SR-NEXT:    call callee_void
 ; RV32IZCMP-SR-NEXT:    sw s8, 16(s7)
 ; RV32IZCMP-SR-NEXT:    sw s6, %lo(var+12)(s1)
 ; RV32IZCMP-SR-NEXT:    sw s5, %lo(var+8)(s1)
@@ -819,7 +819,7 @@ define i32 @nocompress(i32 signext %size) {
 ; RV32IZCMP-SR-NEXT:    mv a0, s2
 ; RV32IZCMP-SR-NEXT:    addi sp, s0, -48
 ; RV32IZCMP-SR-NEXT:    cm.pop {ra, s0-s8}, 48
-; RV32IZCMP-SR-NEXT:    tail callee@plt
+; RV32IZCMP-SR-NEXT:    tail callee
 ;
 ; RV64IZCMP-SR-LABEL: nocompress:
 ; RV64IZCMP-SR:       # %bb.0: # %entry
@@ -851,7 +851,7 @@ define i32 @nocompress(i32 signext %size) {
 ; RV64IZCMP-SR-NEXT:    addi s7, s1, %lo(var)
 ; RV64IZCMP-SR-NEXT:    lw s8, 16(s7)
 ; RV64IZCMP-SR-NEXT:    mv a0, s2
-; RV64IZCMP-SR-NEXT:    call callee_void@plt
+; RV64IZCMP-SR-NEXT:    call callee_void
 ; RV64IZCMP-SR-NEXT:    sw s8, 16(s7)
 ; RV64IZCMP-SR-NEXT:    sw s6, %lo(var+12)(s1)
 ; RV64IZCMP-SR-NEXT:    sw s5, %lo(var+8)(s1)
@@ -860,7 +860,7 @@ define i32 @nocompress(i32 signext %size) {
 ; RV64IZCMP-SR-NEXT:    mv a0, s2
 ; RV64IZCMP-SR-NEXT:    addi sp, s0, -80
 ; RV64IZCMP-SR-NEXT:    cm.pop {ra, s0-s8}, 80
-; RV64IZCMP-SR-NEXT:    tail callee@plt
+; RV64IZCMP-SR-NEXT:    tail callee
 ;
 ; RV32I-LABEL: nocompress:
 ; RV32I:       # %bb.0: # %entry
@@ -900,7 +900,7 @@ define i32 @nocompress(i32 signext %size) {
 ; RV32I-NEXT:    addi s7, s2, %lo(var)
 ; RV32I-NEXT:    lw s8, 16(s7)
 ; RV32I-NEXT:    mv a0, s1
-; RV32I-NEXT:    call callee_void@plt
+; RV32I-NEXT:    call callee_void
 ; RV32I-NEXT:    sw s8, 16(s7)
 ; RV32I-NEXT:    sw s6, %lo(var+12)(s2)
 ; RV32I-NEXT:    sw s5, %lo(var+8)(s2)
@@ -919,7 +919,7 @@ define i32 @nocompress(i32 signext %size) {
 ; RV32I-NEXT:    lw s7, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s8, 8(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 48
-; RV32I-NEXT:    tail callee@plt
+; RV32I-NEXT:    tail callee
 ;
 ; RV64I-LABEL: nocompress:
 ; RV64I:       # %bb.0: # %entry
@@ -961,7 +961,7 @@ define i32 @nocompress(i32 signext %size) {
 ; RV64I-NEXT:    addi s7, s2, %lo(var)
 ; RV64I-NEXT:    lw s8, 16(s7)
 ; RV64I-NEXT:    mv a0, s1
-; RV64I-NEXT:    call callee_void@plt
+; RV64I-NEXT:    call callee_void
 ; RV64I-NEXT:    sw s8, 16(s7)
 ; RV64I-NEXT:    sw s6, %lo(var+12)(s2)
 ; RV64I-NEXT:    sw s5, %lo(var+8)(s2)
@@ -980,7 +980,7 @@ define i32 @nocompress(i32 signext %size) {
 ; RV64I-NEXT:    ld s7, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s8, 0(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 80
-; RV64I-NEXT:    tail callee@plt
+; RV64I-NEXT:    tail callee
 entry:
   %0 = alloca i8, i32 %size, align 16
   %val = load [5 x i32], [5 x i32]* @var
@@ -1405,7 +1405,7 @@ define void @alloca(i32 %n) nounwind {
 ; RV32IZCMP-NEXT:    andi a0, a0, -16
 ; RV32IZCMP-NEXT:    sub a0, sp, a0
 ; RV32IZCMP-NEXT:    mv sp, a0
-; RV32IZCMP-NEXT:    call notdead@plt
+; RV32IZCMP-NEXT:    call notdead
 ; RV32IZCMP-NEXT:    mv sp, s1
 ; RV32IZCMP-NEXT:    addi sp, s0, -16
 ; RV32IZCMP-NEXT:    cm.popret {ra, s0-s1}, 16
@@ -1421,7 +1421,7 @@ define void @alloca(i32 %n) nounwind {
 ; RV64IZCMP-NEXT:    andi a0, a0, -16
 ; RV64IZCMP-NEXT:    sub a0, sp, a0
 ; RV64IZCMP-NEXT:    mv sp, a0
-; RV64IZCMP-NEXT:    call notdead@plt
+; RV64IZCMP-NEXT:    call notdead
 ; RV64IZCMP-NEXT:    mv sp, s1
 ; RV64IZCMP-NEXT:    addi sp, s0, -32
 ; RV64IZCMP-NEXT:    cm.popret {ra, s0-s1}, 32
@@ -1435,7 +1435,7 @@ define void @alloca(i32 %n) nounwind {
 ; RV32IZCMP-SR-NEXT:    andi a0, a0, -16
 ; RV32IZCMP-SR-NEXT:    sub a0, sp, a0
 ; RV32IZCMP-SR-NEXT:    mv sp, a0
-; RV32IZCMP-SR-NEXT:    call notdead@plt
+; RV32IZCMP-SR-NEXT:    call notdead
 ; RV32IZCMP-SR-NEXT:    mv sp, s1
 ; RV32IZCMP-SR-NEXT:    addi sp, s0, -16
 ; RV32IZCMP-SR-NEXT:    cm.popret {ra, s0-s1}, 16
@@ -1451,7 +1451,7 @@ define void @alloca(i32 %n) nounwind {
 ; RV64IZCMP-SR-NEXT:    andi a0, a0, -16
 ; RV64IZCMP-SR-NEXT:    sub a0, sp, a0
 ; RV64IZCMP-SR-NEXT:    mv sp, a0
-; RV64IZCMP-SR-NEXT:    call notdead@plt
+; RV64IZCMP-SR-NEXT:    call notdead
 ; RV64IZCMP-SR-NEXT:    mv sp, s1
 ; RV64IZCMP-SR-NEXT:    addi sp, s0, -32
 ; RV64IZCMP-SR-NEXT:    cm.popret {ra, s0-s1}, 32
@@ -1468,7 +1468,7 @@ define void @alloca(i32 %n) nounwind {
 ; RV32I-NEXT:    andi a0, a0, -16
 ; RV32I-NEXT:    sub a0, sp, a0
 ; RV32I-NEXT:    mv sp, a0
-; RV32I-NEXT:    call notdead@plt
+; RV32I-NEXT:    call notdead
 ; RV32I-NEXT:    mv sp, s1
 ; RV32I-NEXT:    addi sp, s0, -16
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -1491,7 +1491,7 @@ define void @alloca(i32 %n) nounwind {
 ; RV64I-NEXT:    andi a0, a0, -16
 ; RV64I-NEXT:    sub a0, sp, a0
 ; RV64I-NEXT:    mv sp, a0
-; RV64I-NEXT:    call notdead@plt
+; RV64I-NEXT:    call notdead
 ; RV64I-NEXT:    mv sp, s1
 ; RV64I-NEXT:    addi sp, s0, -32
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
@@ -1529,7 +1529,7 @@ define void @foo_with_irq() nounwind "interrupt"="user" {
 ; RV32IZCMP-NEXT:    sw t4, 12(sp) # 4-byte Folded Spill
 ; RV32IZCMP-NEXT:    sw t5, 8(sp) # 4-byte Folded Spill
 ; RV32IZCMP-NEXT:    sw t6, 4(sp) # 4-byte Folded Spill
-; RV32IZCMP-NEXT:    call foo_test_irq@plt
+; RV32IZCMP-NEXT:    call foo_test_irq
 ; RV32IZCMP-NEXT:    lw t0, 60(sp) # 4-byte Folded Reload
 ; RV32IZCMP-NEXT:    lw t1, 56(sp) # 4-byte Folded Reload
 ; RV32IZCMP-NEXT:    lw t2, 52(sp) # 4-byte Folded Reload
@@ -1568,7 +1568,7 @@ define void @foo_with_irq() nounwind "interrupt"="user" {
 ; RV64IZCMP-NEXT:    sd t4, 24(sp) # 8-byte Folded Spill
 ; RV64IZCMP-NEXT:    sd t5, 16(sp) # 8-byte Folded Spill
 ; RV64IZCMP-NEXT:    sd t6, 8(sp) # 8-byte Folded Spill
-; RV64IZCMP-NEXT:    call foo_test_irq@plt
+; RV64IZCMP-NEXT:    call foo_test_irq
 ; RV64IZCMP-NEXT:    ld t0, 120(sp) # 8-byte Folded Reload
 ; RV64IZCMP-NEXT:    ld t1, 112(sp) # 8-byte Folded Reload
 ; RV64IZCMP-NEXT:    ld t2, 104(sp) # 8-byte Folded Reload
@@ -1607,7 +1607,7 @@ define void @foo_with_irq() nounwind "interrupt"="user" {
 ; RV32IZCMP-SR-NEXT:    sw t4, 12(sp) # 4-byte Folded Spill
 ; RV32IZCMP-SR-NEXT:    sw t5, 8(sp) # 4-byte Folded Spill
 ; RV32IZCMP-SR-NEXT:    sw t6, 4(sp) # 4-byte Folded Spill
-; RV32IZCMP-SR-NEXT:    call foo_test_irq@plt
+; RV32IZCMP-SR-NEXT:    call foo_test_irq
 ; RV32IZCMP-SR-NEXT:    lw t0, 60(sp) # 4-byte Folded Reload
 ; RV32IZCMP-SR-NEXT:    lw t1, 56(sp) # 4-byte Folded Reload
 ; RV32IZCMP-SR-NEXT:    lw t2, 52(sp) # 4-byte Folded Reload
@@ -1646,7 +1646,7 @@ define void @foo_with_irq() nounwind "interrupt"="user" {
 ; RV64IZCMP-SR-NEXT:    sd t4, 24(sp) # 8-byte Folded Spill
 ; RV64IZCMP-SR-NEXT:    sd t5, 16(sp) # 8-byte Folded Spill
 ; RV64IZCMP-SR-NEXT:    sd t6, 8(sp) # 8-byte Folded Spill
-; RV64IZCMP-SR-NEXT:    call foo_test_irq@plt
+; RV64IZCMP-SR-NEXT:    call foo_test_irq
 ; RV64IZCMP-SR-NEXT:    ld t0, 120(sp) # 8-byte Folded Reload
 ; RV64IZCMP-SR-NEXT:    ld t1, 112(sp) # 8-byte Folded Reload
 ; RV64IZCMP-SR-NEXT:    ld t2, 104(sp) # 8-byte Folded Reload
@@ -1685,7 +1685,7 @@ define void @foo_with_irq() nounwind "interrupt"="user" {
 ; RV32I-NEXT:    sw t4, 8(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sw t5, 4(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sw t6, 0(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call foo_test_irq@plt
+; RV32I-NEXT:    call foo_test_irq
 ; RV32I-NEXT:    lw ra, 60(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw t0, 56(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw t1, 52(sp) # 4-byte Folded Reload
@@ -1724,7 +1724,7 @@ define void @foo_with_irq() nounwind "interrupt"="user" {
 ; RV64I-NEXT:    sd t4, 16(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd t5, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd t6, 0(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call foo_test_irq@plt
+; RV64I-NEXT:    call foo_test_irq
 ; RV64I-NEXT:    ld ra, 120(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld t0, 112(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld t1, 104(sp) # 8-byte Folded Reload
@@ -1751,32 +1751,32 @@ define void @foo_no_irq() nounwind{
 ; RV32IZCMP-LABEL: foo_no_irq:
 ; RV32IZCMP:       # %bb.0:
 ; RV32IZCMP-NEXT:    cm.push {ra}, -16
-; RV32IZCMP-NEXT:    call foo_test_irq@plt
+; RV32IZCMP-NEXT:    call foo_test_irq
 ; RV32IZCMP-NEXT:    cm.popret {ra}, 16
 ;
 ; RV64IZCMP-LABEL: foo_no_irq:
 ; RV64IZCMP:       # %bb.0:
 ; RV64IZCMP-NEXT:    cm.push {ra}, -16
-; RV64IZCMP-NEXT:    call foo_test_irq@plt
+; RV64IZCMP-NEXT:    call foo_test_irq
 ; RV64IZCMP-NEXT:    cm.popret {ra}, 16
 ;
 ; RV32IZCMP-SR-LABEL: foo_no_irq:
 ; RV32IZCMP-SR:       # %bb.0:
 ; RV32IZCMP-SR-NEXT:    cm.push {ra}, -16
-; RV32IZCMP-SR-NEXT:    call foo_test_irq@plt
+; RV32IZCMP-SR-NEXT:    call foo_test_irq
 ; RV32IZCMP-SR-NEXT:    cm.popret {ra}, 16
 ;
 ; RV64IZCMP-SR-LABEL: foo_no_irq:
 ; RV64IZCMP-SR:       # %bb.0:
 ; RV64IZCMP-SR-NEXT:    cm.push {ra}, -16
-; RV64IZCMP-SR-NEXT:    call foo_test_irq@plt
+; RV64IZCMP-SR-NEXT:    call foo_test_irq
 ; RV64IZCMP-SR-NEXT:    cm.popret {ra}, 16
 ;
 ; RV32I-LABEL: foo_no_irq:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call foo_test_irq@plt
+; RV32I-NEXT:    call foo_test_irq
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1785,7 +1785,7 @@ define void @foo_no_irq() nounwind{
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call foo_test_irq@plt
+; RV64I-NEXT:    call foo_test_irq
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -3115,7 +3115,7 @@ define i32 @use_fp(i32 %x) {
 ; RV32IZCMP-NEXT:    mv s1, a0
 ; RV32IZCMP-NEXT:    addi a1, s0, -20
 ; RV32IZCMP-NEXT:    mv a0, s0
-; RV32IZCMP-NEXT:    call bar@plt
+; RV32IZCMP-NEXT:    call bar
 ; RV32IZCMP-NEXT:    mv a0, s1
 ; RV32IZCMP-NEXT:    cm.popret {ra, s0-s1}, 32
 ;
@@ -3131,7 +3131,7 @@ define i32 @use_fp(i32 %x) {
 ; RV64IZCMP-NEXT:    mv s1, a0
 ; RV64IZCMP-NEXT:    addi a1, s0, -36
 ; RV64IZCMP-NEXT:    mv a0, s0
-; RV64IZCMP-NEXT:    call bar@plt
+; RV64IZCMP-NEXT:    call bar
 ; RV64IZCMP-NEXT:    mv a0, s1
 ; RV64IZCMP-NEXT:    cm.popret {ra, s0-s1}, 48
 ;
@@ -3147,7 +3147,7 @@ define i32 @use_fp(i32 %x) {
 ; RV32IZCMP-SR-NEXT:    mv s1, a0
 ; RV32IZCMP-SR-NEXT:    addi a1, s0, -20
 ; RV32IZCMP-SR-NEXT:    mv a0, s0
-; RV32IZCMP-SR-NEXT:    call bar@plt
+; RV32IZCMP-SR-NEXT:    call bar
 ; RV32IZCMP-SR-NEXT:    mv a0, s1
 ; RV32IZCMP-SR-NEXT:    cm.popret {ra, s0-s1}, 32
 ;
@@ -3163,7 +3163,7 @@ define i32 @use_fp(i32 %x) {
 ; RV64IZCMP-SR-NEXT:    mv s1, a0
 ; RV64IZCMP-SR-NEXT:    addi a1, s0, -36
 ; RV64IZCMP-SR-NEXT:    mv a0, s0
-; RV64IZCMP-SR-NEXT:    call bar@plt
+; RV64IZCMP-SR-NEXT:    call bar
 ; RV64IZCMP-SR-NEXT:    mv a0, s1
 ; RV64IZCMP-SR-NEXT:    cm.popret {ra, s0-s1}, 48
 ;
@@ -3182,7 +3182,7 @@ define i32 @use_fp(i32 %x) {
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    addi a1, s0, -16
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call bar@plt
+; RV32I-NEXT:    call bar
 ; RV32I-NEXT:    mv a0, s1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
@@ -3205,7 +3205,7 @@ define i32 @use_fp(i32 %x) {
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    addi a1, s0, -28
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call bar@plt
+; RV64I-NEXT:    call bar
 ; RV64I-NEXT:    mv a0, s1
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/reduce-unnecessary-extension.ll b/llvm/test/CodeGen/RISCV/reduce-unnecessary-extension.ll
index bb65f408c77a57..351408a7f085cb 100644
--- a/llvm/test/CodeGen/RISCV/reduce-unnecessary-extension.ll
+++ b/llvm/test/CodeGen/RISCV/reduce-unnecessary-extension.ll
@@ -21,15 +21,15 @@ define signext i32 @test() nounwind {
 ; RV64I-NEXT:    beqz s0, .LBB0_2
 ; RV64I-NEXT:  # %bb.1:
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call test1@plt
+; RV64I-NEXT:    call test1
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call test2@plt
+; RV64I-NEXT:    call test2
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call test3@plt
+; RV64I-NEXT:    call test3
 ; RV64I-NEXT:    j .LBB0_3
 ; RV64I-NEXT:  .LBB0_2:
 ; RV64I-NEXT:    li a0, 0
-; RV64I-NEXT:    call test2@plt
+; RV64I-NEXT:    call test2
 ; RV64I-NEXT:  .LBB0_3:
 ; RV64I-NEXT:    li a0, 0
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -68,7 +68,7 @@ define signext i32 @test_loop() nounwind {
 ; RV64I-NEXT:    j .LBB1_2
 ; RV64I-NEXT:  .LBB1_1: # in Loop: Header=BB1_2 Depth=1
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call test2@plt
+; RV64I-NEXT:    call test2
 ; RV64I-NEXT:    addiw s1, s1, 1
 ; RV64I-NEXT:    beqz s1, .LBB1_4
 ; RV64I-NEXT:  .LBB1_2: # =>This Inner Loop Header: Depth=1
@@ -76,11 +76,11 @@ define signext i32 @test_loop() nounwind {
 ; RV64I-NEXT:    beqz s0, .LBB1_1
 ; RV64I-NEXT:  # %bb.3: # in Loop: Header=BB1_2 Depth=1
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call test1@plt
+; RV64I-NEXT:    call test1
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call test2@plt
+; RV64I-NEXT:    call test2
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call test3@plt
+; RV64I-NEXT:    call test3
 ; RV64I-NEXT:    addiw s1, s1, 1
 ; RV64I-NEXT:    bnez s1, .LBB1_2
 ; RV64I-NEXT:  .LBB1_4:
diff --git a/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll b/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll
index f017d8dff2bde3..804bc053728d82 100644
--- a/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll
+++ b/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll
@@ -42,7 +42,7 @@ define void @last_chance_recoloring_failure() {
 ; CHECK-NEXT:    add a0, sp, a0
 ; CHECK-NEXT:    addi a0, a0, 16
 ; CHECK-NEXT:    vs8r.v v16, (a0) # Unknown-size Folded Spill
-; CHECK-NEXT:    call func@plt
+; CHECK-NEXT:    call func
 ; CHECK-NEXT:    li a0, 32
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vrgather.vv v16, v8, v12, v0.t
@@ -108,7 +108,7 @@ define void @last_chance_recoloring_failure() {
 ; SUBREGLIVENESS-NEXT:    vfwadd.vv v16, v8, v12, v0.t
 ; SUBREGLIVENESS-NEXT:    addi a0, sp, 16
 ; SUBREGLIVENESS-NEXT:    vs8r.v v16, (a0) # Unknown-size Folded Spill
-; SUBREGLIVENESS-NEXT:    call func@plt
+; SUBREGLIVENESS-NEXT:    call func
 ; SUBREGLIVENESS-NEXT:    li a0, 32
 ; SUBREGLIVENESS-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; SUBREGLIVENESS-NEXT:    vrgather.vv v16, v8, v12, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rem.ll b/llvm/test/CodeGen/RISCV/rem.ll
index feece1f54ffc65..5b27c4129df6ad 100644
--- a/llvm/test/CodeGen/RISCV/rem.ll
+++ b/llvm/test/CodeGen/RISCV/rem.ll
@@ -11,7 +11,7 @@
 define i32 @urem(i32 %a, i32 %b) nounwind {
 ; RV32I-LABEL: urem:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    tail __umodsi3@plt
+; RV32I-NEXT:    tail __umodsi3
 ;
 ; RV32IM-LABEL: urem:
 ; RV32IM:       # %bb.0:
@@ -26,7 +26,7 @@ define i32 @urem(i32 %a, i32 %b) nounwind {
 ; RV64I-NEXT:    srli a0, a0, 32
 ; RV64I-NEXT:    slli a1, a1, 32
 ; RV64I-NEXT:    srli a1, a1, 32
-; RV64I-NEXT:    call __umoddi3@plt
+; RV64I-NEXT:    call __umoddi3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -44,7 +44,7 @@ define i32 @urem_constant_lhs(i32 %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    mv a1, a0
 ; RV32I-NEXT:    li a0, 10
-; RV32I-NEXT:    tail __umodsi3@plt
+; RV32I-NEXT:    tail __umodsi3
 ;
 ; RV32IM-LABEL: urem_constant_lhs:
 ; RV32IM:       # %bb.0:
@@ -59,7 +59,7 @@ define i32 @urem_constant_lhs(i32 %a) nounwind {
 ; RV64I-NEXT:    slli a0, a0, 32
 ; RV64I-NEXT:    srli a1, a0, 32
 ; RV64I-NEXT:    li a0, 10
-; RV64I-NEXT:    call __umoddi3@plt
+; RV64I-NEXT:    call __umoddi3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -76,7 +76,7 @@ define i32 @urem_constant_lhs(i32 %a) nounwind {
 define i32 @srem(i32 %a, i32 %b) nounwind {
 ; RV32I-LABEL: srem:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    tail __modsi3@plt
+; RV32I-NEXT:    tail __modsi3
 ;
 ; RV32IM-LABEL: srem:
 ; RV32IM:       # %bb.0:
@@ -89,7 +89,7 @@ define i32 @srem(i32 %a, i32 %b) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sext.w a0, a0
 ; RV64I-NEXT:    sext.w a1, a1
-; RV64I-NEXT:    call __moddi3@plt
+; RV64I-NEXT:    call __moddi3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -191,7 +191,7 @@ define i32 @srem_constant_lhs(i32 %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    mv a1, a0
 ; RV32I-NEXT:    li a0, -10
-; RV32I-NEXT:    tail __modsi3@plt
+; RV32I-NEXT:    tail __modsi3
 ;
 ; RV32IM-LABEL: srem_constant_lhs:
 ; RV32IM:       # %bb.0:
@@ -205,7 +205,7 @@ define i32 @srem_constant_lhs(i32 %a) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sext.w a1, a0
 ; RV64I-NEXT:    li a0, -10
-; RV64I-NEXT:    call __moddi3@plt
+; RV64I-NEXT:    call __moddi3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -224,7 +224,7 @@ define i64 @urem64(i64 %a, i64 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __umoddi3@plt
+; RV32I-NEXT:    call __umoddi3
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -233,14 +233,14 @@ define i64 @urem64(i64 %a, i64 %b) nounwind {
 ; RV32IM:       # %bb.0:
 ; RV32IM-NEXT:    addi sp, sp, -16
 ; RV32IM-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IM-NEXT:    call __umoddi3@plt
+; RV32IM-NEXT:    call __umoddi3
 ; RV32IM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IM-NEXT:    addi sp, sp, 16
 ; RV32IM-NEXT:    ret
 ;
 ; RV64I-LABEL: urem64:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    tail __umoddi3@plt
+; RV64I-NEXT:    tail __umoddi3
 ;
 ; RV64IM-LABEL: urem64:
 ; RV64IM:       # %bb.0:
@@ -259,7 +259,7 @@ define i64 @urem64_constant_lhs(i64 %a) nounwind {
 ; RV32I-NEXT:    mv a2, a0
 ; RV32I-NEXT:    li a0, 10
 ; RV32I-NEXT:    li a1, 0
-; RV32I-NEXT:    call __umoddi3@plt
+; RV32I-NEXT:    call __umoddi3
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -272,7 +272,7 @@ define i64 @urem64_constant_lhs(i64 %a) nounwind {
 ; RV32IM-NEXT:    mv a2, a0
 ; RV32IM-NEXT:    li a0, 10
 ; RV32IM-NEXT:    li a1, 0
-; RV32IM-NEXT:    call __umoddi3@plt
+; RV32IM-NEXT:    call __umoddi3
 ; RV32IM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IM-NEXT:    addi sp, sp, 16
 ; RV32IM-NEXT:    ret
@@ -281,7 +281,7 @@ define i64 @urem64_constant_lhs(i64 %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    mv a1, a0
 ; RV64I-NEXT:    li a0, 10
-; RV64I-NEXT:    tail __umoddi3@plt
+; RV64I-NEXT:    tail __umoddi3
 ;
 ; RV64IM-LABEL: urem64_constant_lhs:
 ; RV64IM:       # %bb.0:
@@ -297,7 +297,7 @@ define i64 @srem64(i64 %a, i64 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __moddi3@plt
+; RV32I-NEXT:    call __moddi3
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -306,14 +306,14 @@ define i64 @srem64(i64 %a, i64 %b) nounwind {
 ; RV32IM:       # %bb.0:
 ; RV32IM-NEXT:    addi sp, sp, -16
 ; RV32IM-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IM-NEXT:    call __moddi3@plt
+; RV32IM-NEXT:    call __moddi3
 ; RV32IM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IM-NEXT:    addi sp, sp, 16
 ; RV32IM-NEXT:    ret
 ;
 ; RV64I-LABEL: srem64:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    tail __moddi3@plt
+; RV64I-NEXT:    tail __moddi3
 ;
 ; RV64IM-LABEL: srem64:
 ; RV64IM:       # %bb.0:
@@ -332,7 +332,7 @@ define i64 @srem64_constant_lhs(i64 %a) nounwind {
 ; RV32I-NEXT:    mv a2, a0
 ; RV32I-NEXT:    li a0, -10
 ; RV32I-NEXT:    li a1, -1
-; RV32I-NEXT:    call __moddi3@plt
+; RV32I-NEXT:    call __moddi3
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -345,7 +345,7 @@ define i64 @srem64_constant_lhs(i64 %a) nounwind {
 ; RV32IM-NEXT:    mv a2, a0
 ; RV32IM-NEXT:    li a0, -10
 ; RV32IM-NEXT:    li a1, -1
-; RV32IM-NEXT:    call __moddi3@plt
+; RV32IM-NEXT:    call __moddi3
 ; RV32IM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IM-NEXT:    addi sp, sp, 16
 ; RV32IM-NEXT:    ret
@@ -354,7 +354,7 @@ define i64 @srem64_constant_lhs(i64 %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    mv a1, a0
 ; RV64I-NEXT:    li a0, -10
-; RV64I-NEXT:    tail __moddi3@plt
+; RV64I-NEXT:    tail __moddi3
 ;
 ; RV64IM-LABEL: srem64_constant_lhs:
 ; RV64IM:       # %bb.0:
@@ -372,7 +372,7 @@ define i8 @urem8(i8 %a, i8 %b) nounwind {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    andi a0, a0, 255
 ; RV32I-NEXT:    andi a1, a1, 255
-; RV32I-NEXT:    call __umodsi3@plt
+; RV32I-NEXT:    call __umodsi3
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -390,7 +390,7 @@ define i8 @urem8(i8 %a, i8 %b) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    andi a0, a0, 255
 ; RV64I-NEXT:    andi a1, a1, 255
-; RV64I-NEXT:    call __umoddi3@plt
+; RV64I-NEXT:    call __umoddi3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -412,7 +412,7 @@ define i8 @urem8_constant_lhs(i8 %a) nounwind {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    andi a1, a0, 255
 ; RV32I-NEXT:    li a0, 10
-; RV32I-NEXT:    call __umodsi3@plt
+; RV32I-NEXT:    call __umodsi3
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -430,7 +430,7 @@ define i8 @urem8_constant_lhs(i8 %a) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    andi a1, a0, 255
 ; RV64I-NEXT:    li a0, 10
-; RV64I-NEXT:    call __umoddi3@plt
+; RV64I-NEXT:    call __umoddi3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -455,7 +455,7 @@ define i8 @srem8(i8 %a, i8 %b) nounwind {
 ; RV32I-NEXT:    srai a0, a0, 24
 ; RV32I-NEXT:    slli a1, a1, 24
 ; RV32I-NEXT:    srai a1, a1, 24
-; RV32I-NEXT:    call __modsi3@plt
+; RV32I-NEXT:    call __modsi3
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -477,7 +477,7 @@ define i8 @srem8(i8 %a, i8 %b) nounwind {
 ; RV64I-NEXT:    srai a0, a0, 56
 ; RV64I-NEXT:    slli a1, a1, 56
 ; RV64I-NEXT:    srai a1, a1, 56
-; RV64I-NEXT:    call __moddi3@plt
+; RV64I-NEXT:    call __moddi3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -502,7 +502,7 @@ define i8 @srem8_constant_lhs(i8 %a) nounwind {
 ; RV32I-NEXT:    slli a0, a0, 24
 ; RV32I-NEXT:    srai a1, a0, 24
 ; RV32I-NEXT:    li a0, -10
-; RV32I-NEXT:    call __modsi3@plt
+; RV32I-NEXT:    call __modsi3
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -522,7 +522,7 @@ define i8 @srem8_constant_lhs(i8 %a) nounwind {
 ; RV64I-NEXT:    slli a0, a0, 56
 ; RV64I-NEXT:    srai a1, a0, 56
 ; RV64I-NEXT:    li a0, -10
-; RV64I-NEXT:    call __moddi3@plt
+; RV64I-NEXT:    call __moddi3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -548,7 +548,7 @@ define i16 @urem16(i16 %a, i16 %b) nounwind {
 ; RV32I-NEXT:    addi a2, a2, -1
 ; RV32I-NEXT:    and a0, a0, a2
 ; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    call __umodsi3@plt
+; RV32I-NEXT:    call __umodsi3
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -570,7 +570,7 @@ define i16 @urem16(i16 %a, i16 %b) nounwind {
 ; RV64I-NEXT:    addiw a2, a2, -1
 ; RV64I-NEXT:    and a0, a0, a2
 ; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    call __umoddi3@plt
+; RV64I-NEXT:    call __umoddi3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -595,7 +595,7 @@ define i16 @urem16_constant_lhs(i16 %a) nounwind {
 ; RV32I-NEXT:    slli a0, a0, 16
 ; RV32I-NEXT:    srli a1, a0, 16
 ; RV32I-NEXT:    li a0, 10
-; RV32I-NEXT:    call __umodsi3@plt
+; RV32I-NEXT:    call __umodsi3
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -615,7 +615,7 @@ define i16 @urem16_constant_lhs(i16 %a) nounwind {
 ; RV64I-NEXT:    slli a0, a0, 48
 ; RV64I-NEXT:    srli a1, a0, 48
 ; RV64I-NEXT:    li a0, 10
-; RV64I-NEXT:    call __umoddi3@plt
+; RV64I-NEXT:    call __umoddi3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -640,7 +640,7 @@ define i16 @srem16(i16 %a, i16 %b) nounwind {
 ; RV32I-NEXT:    srai a0, a0, 16
 ; RV32I-NEXT:    slli a1, a1, 16
 ; RV32I-NEXT:    srai a1, a1, 16
-; RV32I-NEXT:    call __modsi3@plt
+; RV32I-NEXT:    call __modsi3
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -662,7 +662,7 @@ define i16 @srem16(i16 %a, i16 %b) nounwind {
 ; RV64I-NEXT:    srai a0, a0, 48
 ; RV64I-NEXT:    slli a1, a1, 48
 ; RV64I-NEXT:    srai a1, a1, 48
-; RV64I-NEXT:    call __moddi3@plt
+; RV64I-NEXT:    call __moddi3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -687,7 +687,7 @@ define i16 @srem16_constant_lhs(i16 %a) nounwind {
 ; RV32I-NEXT:    slli a0, a0, 16
 ; RV32I-NEXT:    srai a1, a0, 16
 ; RV32I-NEXT:    li a0, -10
-; RV32I-NEXT:    call __modsi3@plt
+; RV32I-NEXT:    call __modsi3
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -707,7 +707,7 @@ define i16 @srem16_constant_lhs(i16 %a) nounwind {
 ; RV64I-NEXT:    slli a0, a0, 48
 ; RV64I-NEXT:    srai a1, a0, 48
 ; RV64I-NEXT:    li a0, -10
-; RV64I-NEXT:    call __moddi3@plt
+; RV64I-NEXT:    call __moddi3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
diff --git a/llvm/test/CodeGen/RISCV/remat.ll b/llvm/test/CodeGen/RISCV/remat.ll
index e1c825fc8f26bf..92ae85f560cd4d 100644
--- a/llvm/test/CodeGen/RISCV/remat.ll
+++ b/llvm/test/CodeGen/RISCV/remat.ll
@@ -70,7 +70,7 @@ define i32 @test() nounwind {
 ; RV32I-NEXT:    lw a3, %lo(d)(s5)
 ; RV32I-NEXT:    lw a4, %lo(e)(s6)
 ; RV32I-NEXT:    li a5, 32
-; RV32I-NEXT:    call foo@plt
+; RV32I-NEXT:    call foo
 ; RV32I-NEXT:  .LBB0_5: # %if.end
 ; RV32I-NEXT:    # in Loop: Header=BB0_3 Depth=1
 ; RV32I-NEXT:    lw a0, %lo(k)(s2)
@@ -83,7 +83,7 @@ define i32 @test() nounwind {
 ; RV32I-NEXT:    lw a3, %lo(e)(s6)
 ; RV32I-NEXT:    lw a4, %lo(f)(s7)
 ; RV32I-NEXT:    li a5, 64
-; RV32I-NEXT:    call foo@plt
+; RV32I-NEXT:    call foo
 ; RV32I-NEXT:  .LBB0_7: # %if.end5
 ; RV32I-NEXT:    # in Loop: Header=BB0_3 Depth=1
 ; RV32I-NEXT:    lw a0, %lo(j)(s3)
@@ -96,7 +96,7 @@ define i32 @test() nounwind {
 ; RV32I-NEXT:    lw a3, %lo(f)(s7)
 ; RV32I-NEXT:    lw a4, %lo(g)(s8)
 ; RV32I-NEXT:    li a5, 32
-; RV32I-NEXT:    call foo@plt
+; RV32I-NEXT:    call foo
 ; RV32I-NEXT:  .LBB0_9: # %if.end9
 ; RV32I-NEXT:    # in Loop: Header=BB0_3 Depth=1
 ; RV32I-NEXT:    lw a0, %lo(i)(s4)
@@ -109,7 +109,7 @@ define i32 @test() nounwind {
 ; RV32I-NEXT:    lw a3, %lo(g)(s8)
 ; RV32I-NEXT:    lw a4, %lo(h)(s9)
 ; RV32I-NEXT:    li a5, 32
-; RV32I-NEXT:    call foo@plt
+; RV32I-NEXT:    call foo
 ; RV32I-NEXT:    j .LBB0_2
 ; RV32I-NEXT:  .LBB0_11: # %for.end
 ; RV32I-NEXT:    li a0, 1
diff --git a/llvm/test/CodeGen/RISCV/rv32i-rv64i-float-double.ll b/llvm/test/CodeGen/RISCV/rv32i-rv64i-float-double.ll
index e0369641946738..cd7bce868eae0c 100644
--- a/llvm/test/CodeGen/RISCV/rv32i-rv64i-float-double.ll
+++ b/llvm/test/CodeGen/RISCV/rv32i-rv64i-float-double.ll
@@ -18,9 +18,9 @@ define float @float_test(float %a, float %b) nounwind {
 ; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    mv s0, a1
-; RV32IF-NEXT:    call __addsf3@plt
+; RV32IF-NEXT:    call __addsf3
 ; RV32IF-NEXT:    mv a1, s0
-; RV32IF-NEXT:    call __divsf3@plt
+; RV32IF-NEXT:    call __divsf3
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
@@ -32,9 +32,9 @@ define float @float_test(float %a, float %b) nounwind {
 ; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IF-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
 ; RV64IF-NEXT:    mv s0, a1
-; RV64IF-NEXT:    call __addsf3@plt
+; RV64IF-NEXT:    call __addsf3
 ; RV64IF-NEXT:    mv a1, s0
-; RV64IF-NEXT:    call __divsf3@plt
+; RV64IF-NEXT:    call __divsf3
 ; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IF-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
@@ -53,10 +53,10 @@ define double @double_test(double %a, double %b) nounwind {
 ; RV32IF-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    mv s0, a3
 ; RV32IF-NEXT:    mv s1, a2
-; RV32IF-NEXT:    call __adddf3@plt
+; RV32IF-NEXT:    call __adddf3
 ; RV32IF-NEXT:    mv a2, s1
 ; RV32IF-NEXT:    mv a3, s0
-; RV32IF-NEXT:    call __divdf3@plt
+; RV32IF-NEXT:    call __divdf3
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
@@ -69,9 +69,9 @@ define double @double_test(double %a, double %b) nounwind {
 ; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IF-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
 ; RV64IF-NEXT:    mv s0, a1
-; RV64IF-NEXT:    call __adddf3@plt
+; RV64IF-NEXT:    call __adddf3
 ; RV64IF-NEXT:    mv a1, s0
-; RV64IF-NEXT:    call __divdf3@plt
+; RV64IF-NEXT:    call __divdf3
 ; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IF-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/rv32i-rv64i-half.ll b/llvm/test/CodeGen/RISCV/rv32i-rv64i-half.ll
index 0269bbe9ee330d..99b111b10f66f5 100644
--- a/llvm/test/CodeGen/RISCV/rv32i-rv64i-half.ll
+++ b/llvm/test/CodeGen/RISCV/rv32i-rv64i-half.ll
@@ -21,20 +21,20 @@ define half @half_test(half %a, half %b) nounwind {
 ; RV32I-NEXT:    lui a1, 16
 ; RV32I-NEXT:    addi s2, a1, -1
 ; RV32I-NEXT:    and a0, a0, s2
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    and a0, s0, s2
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    mv a0, s1
 ; RV32I-NEXT:    mv a1, s0
-; RV32I-NEXT:    call __addsf3@plt
-; RV32I-NEXT:    call __truncsfhf2@plt
+; RV32I-NEXT:    call __addsf3
+; RV32I-NEXT:    call __truncsfhf2
 ; RV32I-NEXT:    and a0, a0, s2
-; RV32I-NEXT:    call __extendhfsf2@plt
+; RV32I-NEXT:    call __extendhfsf2
 ; RV32I-NEXT:    mv a1, s0
-; RV32I-NEXT:    call __divsf3@plt
-; RV32I-NEXT:    call __truncsfhf2@plt
+; RV32I-NEXT:    call __divsf3
+; RV32I-NEXT:    call __truncsfhf2
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
@@ -53,20 +53,20 @@ define half @half_test(half %a, half %b) nounwind {
 ; RV64I-NEXT:    lui a1, 16
 ; RV64I-NEXT:    addiw s2, a1, -1
 ; RV64I-NEXT:    and a0, a0, s2
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    and a0, s0, s2
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    mv a0, s1
 ; RV64I-NEXT:    mv a1, s0
-; RV64I-NEXT:    call __addsf3@plt
-; RV64I-NEXT:    call __truncsfhf2@plt
+; RV64I-NEXT:    call __addsf3
+; RV64I-NEXT:    call __truncsfhf2
 ; RV64I-NEXT:    and a0, a0, s2
-; RV64I-NEXT:    call __extendhfsf2@plt
+; RV64I-NEXT:    call __extendhfsf2
 ; RV64I-NEXT:    mv a1, s0
-; RV64I-NEXT:    call __divsf3@plt
-; RV64I-NEXT:    call __truncsfhf2@plt
+; RV64I-NEXT:    call __divsf3
+; RV64I-NEXT:    call __truncsfhf2
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/rv32xtheadbb.ll b/llvm/test/CodeGen/RISCV/rv32xtheadbb.ll
index 321d9c612336b9..3731b9719445ea 100644
--- a/llvm/test/CodeGen/RISCV/rv32xtheadbb.ll
+++ b/llvm/test/CodeGen/RISCV/rv32xtheadbb.ll
@@ -42,7 +42,7 @@ define i32 @ctlz_i32(i32 %a) nounwind {
 ; RV32I-NEXT:    and a0, a0, a1
 ; RV32I-NEXT:    lui a1, 4112
 ; RV32I-NEXT:    addi a1, a1, 257
-; RV32I-NEXT:    call __mulsi3@plt
+; RV32I-NEXT:    call __mulsi3
 ; RV32I-NEXT:    srli a0, a0, 24
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -105,7 +105,7 @@ define i64 @ctlz_i64(i64 %a) nounwind {
 ; RV32I-NEXT:    lui a1, 4112
 ; RV32I-NEXT:    addi s3, a1, 257
 ; RV32I-NEXT:    mv a1, s3
-; RV32I-NEXT:    call __mulsi3@plt
+; RV32I-NEXT:    call __mulsi3
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    srli a0, s2, 1
 ; RV32I-NEXT:    or a0, s2, a0
@@ -129,7 +129,7 @@ define i64 @ctlz_i64(i64 %a) nounwind {
 ; RV32I-NEXT:    add a0, a0, a1
 ; RV32I-NEXT:    and a0, a0, s6
 ; RV32I-NEXT:    mv a1, s3
-; RV32I-NEXT:    call __mulsi3@plt
+; RV32I-NEXT:    call __mulsi3
 ; RV32I-NEXT:    bnez s0, .LBB1_2
 ; RV32I-NEXT:  # %bb.1:
 ; RV32I-NEXT:    srli a0, a0, 24
@@ -179,7 +179,7 @@ define i32 @cttz_i32(i32 %a) nounwind {
 ; RV32I-NEXT:    and a0, a0, a1
 ; RV32I-NEXT:    lui a1, 30667
 ; RV32I-NEXT:    addi a1, a1, 1329
-; RV32I-NEXT:    call __mulsi3@plt
+; RV32I-NEXT:    call __mulsi3
 ; RV32I-NEXT:    srli a0, a0, 27
 ; RV32I-NEXT:    lui a1, %hi(.LCPI2_0)
 ; RV32I-NEXT:    addi a1, a1, %lo(.LCPI2_0)
@@ -229,14 +229,14 @@ define i64 @cttz_i64(i64 %a) nounwind {
 ; RV32I-NEXT:    lui a1, 30667
 ; RV32I-NEXT:    addi s3, a1, 1329
 ; RV32I-NEXT:    mv a1, s3
-; RV32I-NEXT:    call __mulsi3@plt
+; RV32I-NEXT:    call __mulsi3
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    lui a0, %hi(.LCPI3_0)
 ; RV32I-NEXT:    addi s4, a0, %lo(.LCPI3_0)
 ; RV32I-NEXT:    neg a0, s2
 ; RV32I-NEXT:    and a0, s2, a0
 ; RV32I-NEXT:    mv a1, s3
-; RV32I-NEXT:    call __mulsi3@plt
+; RV32I-NEXT:    call __mulsi3
 ; RV32I-NEXT:    bnez s2, .LBB3_3
 ; RV32I-NEXT:  # %bb.1:
 ; RV32I-NEXT:    li a0, 32
diff --git a/llvm/test/CodeGen/RISCV/rv32zbb.ll b/llvm/test/CodeGen/RISCV/rv32zbb.ll
index 5f9ca503bcb053..36c107061795c4 100644
--- a/llvm/test/CodeGen/RISCV/rv32zbb.ll
+++ b/llvm/test/CodeGen/RISCV/rv32zbb.ll
@@ -42,7 +42,7 @@ define i32 @ctlz_i32(i32 %a) nounwind {
 ; RV32I-NEXT:    and a0, a0, a1
 ; RV32I-NEXT:    lui a1, 4112
 ; RV32I-NEXT:    addi a1, a1, 257
-; RV32I-NEXT:    call __mulsi3@plt
+; RV32I-NEXT:    call __mulsi3
 ; RV32I-NEXT:    srli a0, a0, 24
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -105,7 +105,7 @@ define i64 @ctlz_i64(i64 %a) nounwind {
 ; RV32I-NEXT:    lui a1, 4112
 ; RV32I-NEXT:    addi s3, a1, 257
 ; RV32I-NEXT:    mv a1, s3
-; RV32I-NEXT:    call __mulsi3@plt
+; RV32I-NEXT:    call __mulsi3
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    srli a0, s2, 1
 ; RV32I-NEXT:    or a0, s2, a0
@@ -129,7 +129,7 @@ define i64 @ctlz_i64(i64 %a) nounwind {
 ; RV32I-NEXT:    add a0, a0, a1
 ; RV32I-NEXT:    and a0, a0, s6
 ; RV32I-NEXT:    mv a1, s3
-; RV32I-NEXT:    call __mulsi3@plt
+; RV32I-NEXT:    call __mulsi3
 ; RV32I-NEXT:    bnez s0, .LBB1_2
 ; RV32I-NEXT:  # %bb.1:
 ; RV32I-NEXT:    srli a0, a0, 24
@@ -179,7 +179,7 @@ define i32 @cttz_i32(i32 %a) nounwind {
 ; RV32I-NEXT:    and a0, a0, a1
 ; RV32I-NEXT:    lui a1, 30667
 ; RV32I-NEXT:    addi a1, a1, 1329
-; RV32I-NEXT:    call __mulsi3@plt
+; RV32I-NEXT:    call __mulsi3
 ; RV32I-NEXT:    srli a0, a0, 27
 ; RV32I-NEXT:    lui a1, %hi(.LCPI2_0)
 ; RV32I-NEXT:    addi a1, a1, %lo(.LCPI2_0)
@@ -219,14 +219,14 @@ define i64 @cttz_i64(i64 %a) nounwind {
 ; RV32I-NEXT:    lui a1, 30667
 ; RV32I-NEXT:    addi s3, a1, 1329
 ; RV32I-NEXT:    mv a1, s3
-; RV32I-NEXT:    call __mulsi3@plt
+; RV32I-NEXT:    call __mulsi3
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    lui a0, %hi(.LCPI3_0)
 ; RV32I-NEXT:    addi s4, a0, %lo(.LCPI3_0)
 ; RV32I-NEXT:    neg a0, s2
 ; RV32I-NEXT:    and a0, s2, a0
 ; RV32I-NEXT:    mv a1, s3
-; RV32I-NEXT:    call __mulsi3@plt
+; RV32I-NEXT:    call __mulsi3
 ; RV32I-NEXT:    bnez s2, .LBB3_3
 ; RV32I-NEXT:  # %bb.1:
 ; RV32I-NEXT:    li a0, 32
@@ -295,7 +295,7 @@ define i32 @ctpop_i32(i32 %a) nounwind {
 ; RV32I-NEXT:    and a0, a0, a1
 ; RV32I-NEXT:    lui a1, 4112
 ; RV32I-NEXT:    addi a1, a1, 257
-; RV32I-NEXT:    call __mulsi3@plt
+; RV32I-NEXT:    call __mulsi3
 ; RV32I-NEXT:    srli a0, a0, 24
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -418,7 +418,7 @@ define <2 x i32> @ctpop_v2i32(<2 x i32> %a) nounwind {
 ; RV32I-NEXT:    lui a1, 4112
 ; RV32I-NEXT:    addi s1, a1, 257
 ; RV32I-NEXT:    mv a1, s1
-; RV32I-NEXT:    call __mulsi3@plt
+; RV32I-NEXT:    call __mulsi3
 ; RV32I-NEXT:    srli s2, a0, 24
 ; RV32I-NEXT:    srli a0, s0, 1
 ; RV32I-NEXT:    and a0, a0, s3
@@ -431,7 +431,7 @@ define <2 x i32> @ctpop_v2i32(<2 x i32> %a) nounwind {
 ; RV32I-NEXT:    add a0, a0, a1
 ; RV32I-NEXT:    and a0, a0, s5
 ; RV32I-NEXT:    mv a1, s1
-; RV32I-NEXT:    call __mulsi3@plt
+; RV32I-NEXT:    call __mulsi3
 ; RV32I-NEXT:    srli a1, a0, 24
 ; RV32I-NEXT:    mv a0, s2
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
@@ -586,7 +586,7 @@ define i64 @ctpop_i64(i64 %a) nounwind {
 ; RV32I-NEXT:    lui a1, 4112
 ; RV32I-NEXT:    addi s1, a1, 257
 ; RV32I-NEXT:    mv a1, s1
-; RV32I-NEXT:    call __mulsi3@plt
+; RV32I-NEXT:    call __mulsi3
 ; RV32I-NEXT:    srli s5, a0, 24
 ; RV32I-NEXT:    srli a0, s0, 1
 ; RV32I-NEXT:    and a0, a0, s2
@@ -599,7 +599,7 @@ define i64 @ctpop_i64(i64 %a) nounwind {
 ; RV32I-NEXT:    add a0, a0, a1
 ; RV32I-NEXT:    and a0, a0, s4
 ; RV32I-NEXT:    mv a1, s1
-; RV32I-NEXT:    call __mulsi3@plt
+; RV32I-NEXT:    call __mulsi3
 ; RV32I-NEXT:    srli a0, a0, 24
 ; RV32I-NEXT:    add a0, a0, s5
 ; RV32I-NEXT:    li a1, 0
@@ -773,7 +773,7 @@ define <2 x i64> @ctpop_v2i64(<2 x i64> %a) nounwind {
 ; RV32I-NEXT:    lui a1, 4112
 ; RV32I-NEXT:    addi s1, a1, 257
 ; RV32I-NEXT:    mv a1, s1
-; RV32I-NEXT:    call __mulsi3@plt
+; RV32I-NEXT:    call __mulsi3
 ; RV32I-NEXT:    srli s8, a0, 24
 ; RV32I-NEXT:    srli a0, s6, 1
 ; RV32I-NEXT:    and a0, a0, s3
@@ -786,7 +786,7 @@ define <2 x i64> @ctpop_v2i64(<2 x i64> %a) nounwind {
 ; RV32I-NEXT:    add a0, a0, a1
 ; RV32I-NEXT:    and a0, a0, s7
 ; RV32I-NEXT:    mv a1, s1
-; RV32I-NEXT:    call __mulsi3@plt
+; RV32I-NEXT:    call __mulsi3
 ; RV32I-NEXT:    srli a0, a0, 24
 ; RV32I-NEXT:    add s8, a0, s8
 ; RV32I-NEXT:    srli a0, s5, 1
@@ -800,7 +800,7 @@ define <2 x i64> @ctpop_v2i64(<2 x i64> %a) nounwind {
 ; RV32I-NEXT:    add a0, a0, a1
 ; RV32I-NEXT:    and a0, a0, s7
 ; RV32I-NEXT:    mv a1, s1
-; RV32I-NEXT:    call __mulsi3@plt
+; RV32I-NEXT:    call __mulsi3
 ; RV32I-NEXT:    srli s5, a0, 24
 ; RV32I-NEXT:    srli a0, s2, 1
 ; RV32I-NEXT:    and a0, a0, s3
@@ -813,7 +813,7 @@ define <2 x i64> @ctpop_v2i64(<2 x i64> %a) nounwind {
 ; RV32I-NEXT:    add a0, a0, a1
 ; RV32I-NEXT:    and a0, a0, s7
 ; RV32I-NEXT:    mv a1, s1
-; RV32I-NEXT:    call __mulsi3@plt
+; RV32I-NEXT:    call __mulsi3
 ; RV32I-NEXT:    srli a0, a0, 24
 ; RV32I-NEXT:    add a0, a0, s5
 ; RV32I-NEXT:    sw zero, 12(s0)
diff --git a/llvm/test/CodeGen/RISCV/rv64-large-stack.ll b/llvm/test/CodeGen/RISCV/rv64-large-stack.ll
index 535550e83eeb22..8bd99c0b639aaf 100644
--- a/llvm/test/CodeGen/RISCV/rv64-large-stack.ll
+++ b/llvm/test/CodeGen/RISCV/rv64-large-stack.ll
@@ -14,7 +14,7 @@ define void @foo() nounwind {
 ; CHECK-NEXT:    addi a0, a0, -2000
 ; CHECK-NEXT:    sub sp, sp, a0
 ; CHECK-NEXT:    addi a0, sp, 16
-; CHECK-NEXT:    call baz@plt
+; CHECK-NEXT:    call baz
 ; CHECK-NEXT:    lui a0, 390625
 ; CHECK-NEXT:    slli a0, a0, 1
 ; CHECK-NEXT:    addi a0, a0, -2000
diff --git a/llvm/test/CodeGen/RISCV/rv64-legal-i32/div.ll b/llvm/test/CodeGen/RISCV/rv64-legal-i32/div.ll
index 1ae2c1cfad6888..17d9e9cefe117e 100644
--- a/llvm/test/CodeGen/RISCV/rv64-legal-i32/div.ll
+++ b/llvm/test/CodeGen/RISCV/rv64-legal-i32/div.ll
@@ -13,7 +13,7 @@ define i32 @udiv(i32 %a, i32 %b) nounwind {
 ; RV64I-NEXT:    srli a0, a0, 32
 ; RV64I-NEXT:    slli a1, a1, 32
 ; RV64I-NEXT:    srli a1, a1, 32
-; RV64I-NEXT:    call __udivdi3@plt
+; RV64I-NEXT:    call __udivdi3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -34,7 +34,7 @@ define i32 @udiv_constant(i32 %a) nounwind {
 ; RV64I-NEXT:    slli a0, a0, 32
 ; RV64I-NEXT:    srli a0, a0, 32
 ; RV64I-NEXT:    li a1, 5
-; RV64I-NEXT:    call __udivdi3@plt
+; RV64I-NEXT:    call __udivdi3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -74,7 +74,7 @@ define i32 @udiv_constant_lhs(i32 %a) nounwind {
 ; RV64I-NEXT:    slli a0, a0, 32
 ; RV64I-NEXT:    srli a1, a0, 32
 ; RV64I-NEXT:    li a0, 10
-; RV64I-NEXT:    call __udivdi3@plt
+; RV64I-NEXT:    call __udivdi3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -91,7 +91,7 @@ define i32 @udiv_constant_lhs(i32 %a) nounwind {
 define i64 @udiv64(i64 %a, i64 %b) nounwind {
 ; RV64I-LABEL: udiv64:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    tail __udivdi3@plt
+; RV64I-NEXT:    tail __udivdi3
 ;
 ; RV64IM-LABEL: udiv64:
 ; RV64IM:       # %bb.0:
@@ -105,7 +105,7 @@ define i64 @udiv64_constant(i64 %a) nounwind {
 ; RV64I-LABEL: udiv64_constant:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    li a1, 5
-; RV64I-NEXT:    tail __udivdi3@plt
+; RV64I-NEXT:    tail __udivdi3
 ;
 ; RV64IM-LABEL: udiv64_constant:
 ; RV64IM:       # %bb.0:
@@ -125,7 +125,7 @@ define i64 @udiv64_constant_lhs(i64 %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    mv a1, a0
 ; RV64I-NEXT:    li a0, 10
-; RV64I-NEXT:    tail __udivdi3@plt
+; RV64I-NEXT:    tail __udivdi3
 ;
 ; RV64IM-LABEL: udiv64_constant_lhs:
 ; RV64IM:       # %bb.0:
@@ -143,7 +143,7 @@ define i8 @udiv8(i8 %a, i8 %b) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    andi a0, a0, 255
 ; RV64I-NEXT:    andi a1, a1, 255
-; RV64I-NEXT:    call __udivdi3@plt
+; RV64I-NEXT:    call __udivdi3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -165,7 +165,7 @@ define i8 @udiv8_constant(i8 %a) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    andi a0, a0, 255
 ; RV64I-NEXT:    li a1, 5
-; RV64I-NEXT:    call __udivdi3@plt
+; RV64I-NEXT:    call __udivdi3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -204,7 +204,7 @@ define i8 @udiv8_constant_lhs(i8 %a) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    andi a1, a0, 255
 ; RV64I-NEXT:    li a0, 10
-; RV64I-NEXT:    call __udivdi3@plt
+; RV64I-NEXT:    call __udivdi3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -228,7 +228,7 @@ define i16 @udiv16(i16 %a, i16 %b) nounwind {
 ; RV64I-NEXT:    addiw a2, a2, -1
 ; RV64I-NEXT:    and a0, a0, a2
 ; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    call __udivdi3@plt
+; RV64I-NEXT:    call __udivdi3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -253,7 +253,7 @@ define i16 @udiv16_constant(i16 %a) nounwind {
 ; RV64I-NEXT:    slli a0, a0, 48
 ; RV64I-NEXT:    srli a0, a0, 48
 ; RV64I-NEXT:    li a1, 5
-; RV64I-NEXT:    call __udivdi3@plt
+; RV64I-NEXT:    call __udivdi3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -294,7 +294,7 @@ define i16 @udiv16_constant_lhs(i16 %a) nounwind {
 ; RV64I-NEXT:    slli a0, a0, 48
 ; RV64I-NEXT:    srli a1, a0, 48
 ; RV64I-NEXT:    li a0, 10
-; RV64I-NEXT:    call __udivdi3@plt
+; RV64I-NEXT:    call __udivdi3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -317,7 +317,7 @@ define i32 @sdiv(i32 %a, i32 %b) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sext.w a0, a0
 ; RV64I-NEXT:    sext.w a1, a1
-; RV64I-NEXT:    call __divdi3@plt
+; RV64I-NEXT:    call __divdi3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -337,7 +337,7 @@ define i32 @sdiv_constant(i32 %a) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sext.w a0, a0
 ; RV64I-NEXT:    li a1, 5
-; RV64I-NEXT:    call __divdi3@plt
+; RV64I-NEXT:    call __divdi3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -403,7 +403,7 @@ define i32 @sdiv_constant_lhs(i32 %a) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sext.w a1, a0
 ; RV64I-NEXT:    li a0, -10
-; RV64I-NEXT:    call __divdi3@plt
+; RV64I-NEXT:    call __divdi3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -420,7 +420,7 @@ define i32 @sdiv_constant_lhs(i32 %a) nounwind {
 define i64 @sdiv64(i64 %a, i64 %b) nounwind {
 ; RV64I-LABEL: sdiv64:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    tail __divdi3@plt
+; RV64I-NEXT:    tail __divdi3
 ;
 ; RV64IM-LABEL: sdiv64:
 ; RV64IM:       # %bb.0:
@@ -434,7 +434,7 @@ define i64 @sdiv64_constant(i64 %a) nounwind {
 ; RV64I-LABEL: sdiv64_constant:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    li a1, 5
-; RV64I-NEXT:    tail __divdi3@plt
+; RV64I-NEXT:    tail __divdi3
 ;
 ; RV64IM-LABEL: sdiv64_constant:
 ; RV64IM:       # %bb.0:
@@ -454,7 +454,7 @@ define i64 @sdiv64_constant_lhs(i64 %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    mv a1, a0
 ; RV64I-NEXT:    li a0, 10
-; RV64I-NEXT:    tail __divdi3@plt
+; RV64I-NEXT:    tail __divdi3
 ;
 ; RV64IM-LABEL: sdiv64_constant_lhs:
 ; RV64IM:       # %bb.0:
@@ -473,7 +473,7 @@ define i64 @sdiv64_sext_operands(i32 %a, i32 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    sext.w a0, a0
 ; RV64I-NEXT:    sext.w a1, a1
-; RV64I-NEXT:    tail __divdi3@plt
+; RV64I-NEXT:    tail __divdi3
 ;
 ; RV64IM-LABEL: sdiv64_sext_operands:
 ; RV64IM:       # %bb.0:
@@ -496,7 +496,7 @@ define i8 @sdiv8(i8 %a, i8 %b) nounwind {
 ; RV64I-NEXT:    sraiw a1, a1, 24
 ; RV64I-NEXT:    slli a0, a0, 24
 ; RV64I-NEXT:    sraiw a0, a0, 24
-; RV64I-NEXT:    call __divdi3@plt
+; RV64I-NEXT:    call __divdi3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -521,7 +521,7 @@ define i8 @sdiv8_constant(i8 %a) nounwind {
 ; RV64I-NEXT:    slli a0, a0, 24
 ; RV64I-NEXT:    sraiw a0, a0, 24
 ; RV64I-NEXT:    li a1, 5
-; RV64I-NEXT:    call __divdi3@plt
+; RV64I-NEXT:    call __divdi3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -575,7 +575,7 @@ define i8 @sdiv8_constant_lhs(i8 %a) nounwind {
 ; RV64I-NEXT:    slli a0, a0, 24
 ; RV64I-NEXT:    sraiw a1, a0, 24
 ; RV64I-NEXT:    li a0, -10
-; RV64I-NEXT:    call __divdi3@plt
+; RV64I-NEXT:    call __divdi3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -600,7 +600,7 @@ define i16 @sdiv16(i16 %a, i16 %b) nounwind {
 ; RV64I-NEXT:    sraiw a1, a1, 16
 ; RV64I-NEXT:    slli a0, a0, 16
 ; RV64I-NEXT:    sraiw a0, a0, 16
-; RV64I-NEXT:    call __divdi3@plt
+; RV64I-NEXT:    call __divdi3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -625,7 +625,7 @@ define i16 @sdiv16_constant(i16 %a) nounwind {
 ; RV64I-NEXT:    slli a0, a0, 16
 ; RV64I-NEXT:    sraiw a0, a0, 16
 ; RV64I-NEXT:    li a1, 5
-; RV64I-NEXT:    call __divdi3@plt
+; RV64I-NEXT:    call __divdi3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -679,7 +679,7 @@ define i16 @sdiv16_constant_lhs(i16 %a) nounwind {
 ; RV64I-NEXT:    slli a0, a0, 16
 ; RV64I-NEXT:    sraiw a1, a0, 16
 ; RV64I-NEXT:    li a0, -10
-; RV64I-NEXT:    call __divdi3@plt
+; RV64I-NEXT:    call __divdi3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
diff --git a/llvm/test/CodeGen/RISCV/rv64-legal-i32/mem64.ll b/llvm/test/CodeGen/RISCV/rv64-legal-i32/mem64.ll
index 76ab0e7d5810e7..de4c21f3246883 100644
--- a/llvm/test/CodeGen/RISCV/rv64-legal-i32/mem64.ll
+++ b/llvm/test/CodeGen/RISCV/rv64-legal-i32/mem64.ll
@@ -325,7 +325,7 @@ define void @addi_fold_crash(i64 %arg) nounwind {
 ; RV64I-NEXT:    add a0, a1, a0
 ; RV64I-NEXT:    sb zero, 0(a0)
 ; RV64I-NEXT:    mv a0, a1
-; RV64I-NEXT:    call snork@plt
+; RV64I-NEXT:    call snork
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
diff --git a/llvm/test/CodeGen/RISCV/rv64-legal-i32/rem.ll b/llvm/test/CodeGen/RISCV/rv64-legal-i32/rem.ll
index 11adbbdd245f1d..9d7b77de03eefa 100644
--- a/llvm/test/CodeGen/RISCV/rv64-legal-i32/rem.ll
+++ b/llvm/test/CodeGen/RISCV/rv64-legal-i32/rem.ll
@@ -13,7 +13,7 @@ define i32 @urem(i32 %a, i32 %b) nounwind {
 ; RV64I-NEXT:    srli a0, a0, 32
 ; RV64I-NEXT:    slli a1, a1, 32
 ; RV64I-NEXT:    srli a1, a1, 32
-; RV64I-NEXT:    call __umoddi3@plt
+; RV64I-NEXT:    call __umoddi3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -34,7 +34,7 @@ define i32 @urem_constant_lhs(i32 %a) nounwind {
 ; RV64I-NEXT:    slli a0, a0, 32
 ; RV64I-NEXT:    srli a1, a0, 32
 ; RV64I-NEXT:    li a0, 10
-; RV64I-NEXT:    call __umoddi3@plt
+; RV64I-NEXT:    call __umoddi3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -55,7 +55,7 @@ define i32 @srem(i32 %a, i32 %b) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sext.w a0, a0
 ; RV64I-NEXT:    sext.w a1, a1
-; RV64I-NEXT:    call __moddi3@plt
+; RV64I-NEXT:    call __moddi3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -121,7 +121,7 @@ define i32 @srem_constant_lhs(i32 %a) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sext.w a1, a0
 ; RV64I-NEXT:    li a0, -10
-; RV64I-NEXT:    call __moddi3@plt
+; RV64I-NEXT:    call __moddi3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -138,7 +138,7 @@ define i32 @srem_constant_lhs(i32 %a) nounwind {
 define i64 @urem64(i64 %a, i64 %b) nounwind {
 ; RV64I-LABEL: urem64:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    tail __umoddi3@plt
+; RV64I-NEXT:    tail __umoddi3
 ;
 ; RV64IM-LABEL: urem64:
 ; RV64IM:       # %bb.0:
@@ -153,7 +153,7 @@ define i64 @urem64_constant_lhs(i64 %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    mv a1, a0
 ; RV64I-NEXT:    li a0, 10
-; RV64I-NEXT:    tail __umoddi3@plt
+; RV64I-NEXT:    tail __umoddi3
 ;
 ; RV64IM-LABEL: urem64_constant_lhs:
 ; RV64IM:       # %bb.0:
@@ -167,7 +167,7 @@ define i64 @urem64_constant_lhs(i64 %a) nounwind {
 define i64 @srem64(i64 %a, i64 %b) nounwind {
 ; RV64I-LABEL: srem64:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    tail __moddi3@plt
+; RV64I-NEXT:    tail __moddi3
 ;
 ; RV64IM-LABEL: srem64:
 ; RV64IM:       # %bb.0:
@@ -182,7 +182,7 @@ define i64 @srem64_constant_lhs(i64 %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    mv a1, a0
 ; RV64I-NEXT:    li a0, -10
-; RV64I-NEXT:    tail __moddi3@plt
+; RV64I-NEXT:    tail __moddi3
 ;
 ; RV64IM-LABEL: srem64_constant_lhs:
 ; RV64IM:       # %bb.0:
@@ -200,7 +200,7 @@ define i8 @urem8(i8 %a, i8 %b) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    andi a0, a0, 255
 ; RV64I-NEXT:    andi a1, a1, 255
-; RV64I-NEXT:    call __umoddi3@plt
+; RV64I-NEXT:    call __umoddi3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -222,7 +222,7 @@ define i8 @urem8_constant_lhs(i8 %a) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    andi a1, a0, 255
 ; RV64I-NEXT:    li a0, 10
-; RV64I-NEXT:    call __umoddi3@plt
+; RV64I-NEXT:    call __umoddi3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -247,7 +247,7 @@ define i8 @srem8(i8 %a, i8 %b) nounwind {
 ; RV64I-NEXT:    sraiw a1, a1, 24
 ; RV64I-NEXT:    slli a0, a0, 24
 ; RV64I-NEXT:    sraiw a0, a0, 24
-; RV64I-NEXT:    call __moddi3@plt
+; RV64I-NEXT:    call __moddi3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -272,7 +272,7 @@ define i8 @srem8_constant_lhs(i8 %a) nounwind {
 ; RV64I-NEXT:    slli a0, a0, 24
 ; RV64I-NEXT:    sraiw a1, a0, 24
 ; RV64I-NEXT:    li a0, -10
-; RV64I-NEXT:    call __moddi3@plt
+; RV64I-NEXT:    call __moddi3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -298,7 +298,7 @@ define i16 @urem16(i16 %a, i16 %b) nounwind {
 ; RV64I-NEXT:    addiw a2, a2, -1
 ; RV64I-NEXT:    and a0, a0, a2
 ; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    call __umoddi3@plt
+; RV64I-NEXT:    call __umoddi3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -323,7 +323,7 @@ define i16 @urem16_constant_lhs(i16 %a) nounwind {
 ; RV64I-NEXT:    slli a0, a0, 48
 ; RV64I-NEXT:    srli a1, a0, 48
 ; RV64I-NEXT:    li a0, 10
-; RV64I-NEXT:    call __umoddi3@plt
+; RV64I-NEXT:    call __umoddi3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -348,7 +348,7 @@ define i16 @srem16(i16 %a, i16 %b) nounwind {
 ; RV64I-NEXT:    sraiw a1, a1, 16
 ; RV64I-NEXT:    slli a0, a0, 16
 ; RV64I-NEXT:    sraiw a0, a0, 16
-; RV64I-NEXT:    call __moddi3@plt
+; RV64I-NEXT:    call __moddi3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -373,7 +373,7 @@ define i16 @srem16_constant_lhs(i16 %a) nounwind {
 ; RV64I-NEXT:    slli a0, a0, 16
 ; RV64I-NEXT:    sraiw a1, a0, 16
 ; RV64I-NEXT:    li a0, -10
-; RV64I-NEXT:    call __moddi3@plt
+; RV64I-NEXT:    call __moddi3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
diff --git a/llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64xtheadbb.ll b/llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64xtheadbb.ll
index 3e2e6ac75af831..4ec7f2660b2a35 100644
--- a/llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64xtheadbb.ll
+++ b/llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64xtheadbb.ll
@@ -42,7 +42,7 @@ define signext i32 @ctlz_i32(i32 signext %a) nounwind {
 ; RV64I-NEXT:    and a0, a0, a1
 ; RV64I-NEXT:    lui a1, 4112
 ; RV64I-NEXT:    addiw a1, a1, 257
-; RV64I-NEXT:    call __muldi3@plt
+; RV64I-NEXT:    call __muldi3
 ; RV64I-NEXT:    srliw a0, a0, 24
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -97,7 +97,7 @@ define signext i32 @log2_i32(i32 signext %a) nounwind {
 ; RV64I-NEXT:    and a0, a0, a1
 ; RV64I-NEXT:    lui a1, 4112
 ; RV64I-NEXT:    addiw a1, a1, 257
-; RV64I-NEXT:    call __muldi3@plt
+; RV64I-NEXT:    call __muldi3
 ; RV64I-NEXT:    srliw a0, a0, 24
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -162,7 +162,7 @@ define signext i32 @log2_ceil_i32(i32 signext %a) nounwind {
 ; RV64I-NEXT:    and a0, a0, a1
 ; RV64I-NEXT:    lui a1, 4112
 ; RV64I-NEXT:    addiw a1, a1, 257
-; RV64I-NEXT:    call __muldi3@plt
+; RV64I-NEXT:    call __muldi3
 ; RV64I-NEXT:    srliw a1, a0, 24
 ; RV64I-NEXT:  .LBB2_2: # %cond.end
 ; RV64I-NEXT:    subw a0, s0, a1
@@ -223,7 +223,7 @@ define signext i32 @findLastSet_i32(i32 signext %a) nounwind {
 ; RV64I-NEXT:    and a0, a0, a1
 ; RV64I-NEXT:    lui a1, 4112
 ; RV64I-NEXT:    addiw a1, a1, 257
-; RV64I-NEXT:    call __muldi3@plt
+; RV64I-NEXT:    call __muldi3
 ; RV64I-NEXT:    srliw a0, a0, 24
 ; RV64I-NEXT:    xori a0, a0, 31
 ; RV64I-NEXT:    snez a1, s0
@@ -290,7 +290,7 @@ define i32 @ctlz_lshr_i32(i32 signext %a) {
 ; RV64I-NEXT:    and a0, a0, a1
 ; RV64I-NEXT:    lui a1, 4112
 ; RV64I-NEXT:    addiw a1, a1, 257
-; RV64I-NEXT:    call __muldi3@plt
+; RV64I-NEXT:    call __muldi3
 ; RV64I-NEXT:    srliw a0, a0, 24
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -358,7 +358,7 @@ define i64 @ctlz_i64(i64 %a) nounwind {
 ; RV64I-NEXT:    addiw a1, a1, 257
 ; RV64I-NEXT:    slli a2, a1, 32
 ; RV64I-NEXT:    add a1, a1, a2
-; RV64I-NEXT:    call __muldi3@plt
+; RV64I-NEXT:    call __muldi3
 ; RV64I-NEXT:    srli a0, a0, 56
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -388,7 +388,7 @@ define signext i32 @cttz_i32(i32 signext %a) nounwind {
 ; RV64I-NEXT:    and a0, a0, a1
 ; RV64I-NEXT:    lui a1, 30667
 ; RV64I-NEXT:    addiw a1, a1, 1329
-; RV64I-NEXT:    call __muldi3@plt
+; RV64I-NEXT:    call __muldi3
 ; RV64I-NEXT:    srliw a0, a0, 27
 ; RV64I-NEXT:    lui a1, %hi(.LCPI6_0)
 ; RV64I-NEXT:    addi a1, a1, %lo(.LCPI6_0)
@@ -411,7 +411,7 @@ define signext i32 @cttz_i32(i32 signext %a) nounwind {
 ; RV64XTHEADBB-NEXT:    and a0, a0, a1
 ; RV64XTHEADBB-NEXT:    lui a1, 30667
 ; RV64XTHEADBB-NEXT:    addiw a1, a1, 1329
-; RV64XTHEADBB-NEXT:    call __muldi3@plt
+; RV64XTHEADBB-NEXT:    call __muldi3
 ; RV64XTHEADBB-NEXT:    srliw a0, a0, 27
 ; RV64XTHEADBB-NEXT:    lui a1, %hi(.LCPI6_0)
 ; RV64XTHEADBB-NEXT:    addi a1, a1, %lo(.LCPI6_0)
@@ -440,7 +440,7 @@ define signext i32 @cttz_zero_undef_i32(i32 signext %a) nounwind {
 ; RV64I-NEXT:    and a0, a0, a1
 ; RV64I-NEXT:    lui a1, 30667
 ; RV64I-NEXT:    addiw a1, a1, 1329
-; RV64I-NEXT:    call __muldi3@plt
+; RV64I-NEXT:    call __muldi3
 ; RV64I-NEXT:    srliw a0, a0, 27
 ; RV64I-NEXT:    lui a1, %hi(.LCPI7_0)
 ; RV64I-NEXT:    addi a1, a1, %lo(.LCPI7_0)
@@ -458,7 +458,7 @@ define signext i32 @cttz_zero_undef_i32(i32 signext %a) nounwind {
 ; RV64XTHEADBB-NEXT:    and a0, a0, a1
 ; RV64XTHEADBB-NEXT:    lui a1, 30667
 ; RV64XTHEADBB-NEXT:    addiw a1, a1, 1329
-; RV64XTHEADBB-NEXT:    call __muldi3@plt
+; RV64XTHEADBB-NEXT:    call __muldi3
 ; RV64XTHEADBB-NEXT:    srliw a0, a0, 27
 ; RV64XTHEADBB-NEXT:    lui a1, %hi(.LCPI7_0)
 ; RV64XTHEADBB-NEXT:    addi a1, a1, %lo(.LCPI7_0)
@@ -482,7 +482,7 @@ define signext i32 @findFirstSet_i32(i32 signext %a) nounwind {
 ; RV64I-NEXT:    and a0, s0, a0
 ; RV64I-NEXT:    lui a1, 30667
 ; RV64I-NEXT:    addiw a1, a1, 1329
-; RV64I-NEXT:    call __muldi3@plt
+; RV64I-NEXT:    call __muldi3
 ; RV64I-NEXT:    srliw a0, a0, 27
 ; RV64I-NEXT:    lui a1, %hi(.LCPI8_0)
 ; RV64I-NEXT:    addi a1, a1, %lo(.LCPI8_0)
@@ -506,7 +506,7 @@ define signext i32 @findFirstSet_i32(i32 signext %a) nounwind {
 ; RV64XTHEADBB-NEXT:    and a0, s0, a0
 ; RV64XTHEADBB-NEXT:    lui a1, 30667
 ; RV64XTHEADBB-NEXT:    addiw a1, a1, 1329
-; RV64XTHEADBB-NEXT:    call __muldi3@plt
+; RV64XTHEADBB-NEXT:    call __muldi3
 ; RV64XTHEADBB-NEXT:    srliw a0, a0, 27
 ; RV64XTHEADBB-NEXT:    lui a1, %hi(.LCPI8_0)
 ; RV64XTHEADBB-NEXT:    addi a1, a1, %lo(.LCPI8_0)
@@ -536,7 +536,7 @@ define signext i32 @ffs_i32(i32 signext %a) nounwind {
 ; RV64I-NEXT:    and a0, s0, a0
 ; RV64I-NEXT:    lui a1, 30667
 ; RV64I-NEXT:    addiw a1, a1, 1329
-; RV64I-NEXT:    call __muldi3@plt
+; RV64I-NEXT:    call __muldi3
 ; RV64I-NEXT:    srliw a0, a0, 27
 ; RV64I-NEXT:    lui a1, %hi(.LCPI9_0)
 ; RV64I-NEXT:    addi a1, a1, %lo(.LCPI9_0)
@@ -563,7 +563,7 @@ define signext i32 @ffs_i32(i32 signext %a) nounwind {
 ; RV64XTHEADBB-NEXT:    and a0, s0, a0
 ; RV64XTHEADBB-NEXT:    lui a1, 30667
 ; RV64XTHEADBB-NEXT:    addiw a1, a1, 1329
-; RV64XTHEADBB-NEXT:    call __muldi3@plt
+; RV64XTHEADBB-NEXT:    call __muldi3
 ; RV64XTHEADBB-NEXT:    srliw a0, a0, 27
 ; RV64XTHEADBB-NEXT:    lui a1, %hi(.LCPI9_0)
 ; RV64XTHEADBB-NEXT:    addi a1, a1, %lo(.LCPI9_0)
@@ -599,7 +599,7 @@ define i64 @cttz_i64(i64 %a) nounwind {
 ; RV64I-NEXT:    and a0, a0, a1
 ; RV64I-NEXT:    lui a1, %hi(.LCPI10_0)
 ; RV64I-NEXT:    ld a1, %lo(.LCPI10_0)(a1)
-; RV64I-NEXT:    call __muldi3@plt
+; RV64I-NEXT:    call __muldi3
 ; RV64I-NEXT:    srli a0, a0, 58
 ; RV64I-NEXT:    lui a1, %hi(.LCPI10_1)
 ; RV64I-NEXT:    addi a1, a1, %lo(.LCPI10_1)
diff --git a/llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64zbb.ll b/llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64zbb.ll
index 1170a3011b9ba8..9b3f206be4a08f 100644
--- a/llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64zbb.ll
+++ b/llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64zbb.ll
@@ -42,7 +42,7 @@ define signext i32 @ctlz_i32(i32 signext %a) nounwind {
 ; RV64I-NEXT:    and a0, a0, a1
 ; RV64I-NEXT:    lui a1, 4112
 ; RV64I-NEXT:    addiw a1, a1, 257
-; RV64I-NEXT:    call __muldi3@plt
+; RV64I-NEXT:    call __muldi3
 ; RV64I-NEXT:    srliw a0, a0, 24
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -95,7 +95,7 @@ define signext i32 @log2_i32(i32 signext %a) nounwind {
 ; RV64I-NEXT:    and a0, a0, a1
 ; RV64I-NEXT:    lui a1, 4112
 ; RV64I-NEXT:    addiw a1, a1, 257
-; RV64I-NEXT:    call __muldi3@plt
+; RV64I-NEXT:    call __muldi3
 ; RV64I-NEXT:    srliw a0, a0, 24
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -158,7 +158,7 @@ define signext i32 @log2_ceil_i32(i32 signext %a) nounwind {
 ; RV64I-NEXT:    and a0, a0, a1
 ; RV64I-NEXT:    lui a1, 4112
 ; RV64I-NEXT:    addiw a1, a1, 257
-; RV64I-NEXT:    call __muldi3@plt
+; RV64I-NEXT:    call __muldi3
 ; RV64I-NEXT:    srliw a1, a0, 24
 ; RV64I-NEXT:  .LBB2_2: # %cond.end
 ; RV64I-NEXT:    subw a0, s0, a1
@@ -216,7 +216,7 @@ define signext i32 @findLastSet_i32(i32 signext %a) nounwind {
 ; RV64I-NEXT:    and a0, a0, a1
 ; RV64I-NEXT:    lui a1, 4112
 ; RV64I-NEXT:    addiw a1, a1, 257
-; RV64I-NEXT:    call __muldi3@plt
+; RV64I-NEXT:    call __muldi3
 ; RV64I-NEXT:    srliw a0, a0, 24
 ; RV64I-NEXT:    xori a0, a0, 31
 ; RV64I-NEXT:    snez a1, s0
@@ -281,7 +281,7 @@ define i32 @ctlz_lshr_i32(i32 signext %a) {
 ; RV64I-NEXT:    and a0, a0, a1
 ; RV64I-NEXT:    lui a1, 4112
 ; RV64I-NEXT:    addiw a1, a1, 257
-; RV64I-NEXT:    call __muldi3@plt
+; RV64I-NEXT:    call __muldi3
 ; RV64I-NEXT:    srliw a0, a0, 24
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -348,7 +348,7 @@ define i64 @ctlz_i64(i64 %a) nounwind {
 ; RV64I-NEXT:    addiw a1, a1, 257
 ; RV64I-NEXT:    slli a2, a1, 32
 ; RV64I-NEXT:    add a1, a1, a2
-; RV64I-NEXT:    call __muldi3@plt
+; RV64I-NEXT:    call __muldi3
 ; RV64I-NEXT:    srli a0, a0, 56
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -378,7 +378,7 @@ define signext i32 @cttz_i32(i32 signext %a) nounwind {
 ; RV64I-NEXT:    and a0, a0, a1
 ; RV64I-NEXT:    lui a1, 30667
 ; RV64I-NEXT:    addiw a1, a1, 1329
-; RV64I-NEXT:    call __muldi3@plt
+; RV64I-NEXT:    call __muldi3
 ; RV64I-NEXT:    srliw a0, a0, 27
 ; RV64I-NEXT:    lui a1, %hi(.LCPI6_0)
 ; RV64I-NEXT:    addi a1, a1, %lo(.LCPI6_0)
@@ -408,7 +408,7 @@ define signext i32 @cttz_zero_undef_i32(i32 signext %a) nounwind {
 ; RV64I-NEXT:    and a0, a0, a1
 ; RV64I-NEXT:    lui a1, 30667
 ; RV64I-NEXT:    addiw a1, a1, 1329
-; RV64I-NEXT:    call __muldi3@plt
+; RV64I-NEXT:    call __muldi3
 ; RV64I-NEXT:    srliw a0, a0, 27
 ; RV64I-NEXT:    lui a1, %hi(.LCPI7_0)
 ; RV64I-NEXT:    addi a1, a1, %lo(.LCPI7_0)
@@ -437,7 +437,7 @@ define signext i32 @findFirstSet_i32(i32 signext %a) nounwind {
 ; RV64I-NEXT:    and a0, s0, a0
 ; RV64I-NEXT:    lui a1, 30667
 ; RV64I-NEXT:    addiw a1, a1, 1329
-; RV64I-NEXT:    call __muldi3@plt
+; RV64I-NEXT:    call __muldi3
 ; RV64I-NEXT:    srliw a0, a0, 27
 ; RV64I-NEXT:    lui a1, %hi(.LCPI8_0)
 ; RV64I-NEXT:    addi a1, a1, %lo(.LCPI8_0)
@@ -475,7 +475,7 @@ define signext i32 @ffs_i32(i32 signext %a) nounwind {
 ; RV64I-NEXT:    and a0, s0, a0
 ; RV64I-NEXT:    lui a1, 30667
 ; RV64I-NEXT:    addiw a1, a1, 1329
-; RV64I-NEXT:    call __muldi3@plt
+; RV64I-NEXT:    call __muldi3
 ; RV64I-NEXT:    srliw a0, a0, 27
 ; RV64I-NEXT:    lui a1, %hi(.LCPI9_0)
 ; RV64I-NEXT:    addi a1, a1, %lo(.LCPI9_0)
@@ -521,7 +521,7 @@ define i64 @cttz_i64(i64 %a) nounwind {
 ; RV64I-NEXT:    and a0, a0, a1
 ; RV64I-NEXT:    lui a1, %hi(.LCPI10_0)
 ; RV64I-NEXT:    ld a1, %lo(.LCPI10_0)(a1)
-; RV64I-NEXT:    call __muldi3@plt
+; RV64I-NEXT:    call __muldi3
 ; RV64I-NEXT:    srli a0, a0, 58
 ; RV64I-NEXT:    lui a1, %hi(.LCPI10_1)
 ; RV64I-NEXT:    addi a1, a1, %lo(.LCPI10_1)
@@ -567,7 +567,7 @@ define signext i32 @ctpop_i32(i32 signext %a) nounwind {
 ; RV64I-NEXT:    and a0, a0, a1
 ; RV64I-NEXT:    lui a1, 4112
 ; RV64I-NEXT:    addiw a1, a1, 257
-; RV64I-NEXT:    call __muldi3@plt
+; RV64I-NEXT:    call __muldi3
 ; RV64I-NEXT:    srliw a0, a0, 24
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -605,7 +605,7 @@ define signext i32 @ctpop_i32_load(ptr %p) nounwind {
 ; RV64I-NEXT:    and a0, a0, a1
 ; RV64I-NEXT:    lui a1, 4112
 ; RV64I-NEXT:    addiw a1, a1, 257
-; RV64I-NEXT:    call __muldi3@plt
+; RV64I-NEXT:    call __muldi3
 ; RV64I-NEXT:    srliw a0, a0, 24
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -654,7 +654,7 @@ define i64 @ctpop_i64(i64 %a) nounwind {
 ; RV64I-NEXT:    addiw a1, a1, 257
 ; RV64I-NEXT:    slli a2, a1, 32
 ; RV64I-NEXT:    add a1, a1, a2
-; RV64I-NEXT:    call __muldi3@plt
+; RV64I-NEXT:    call __muldi3
 ; RV64I-NEXT:    srli a0, a0, 56
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64zbs.ll b/llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64zbs.ll
index af1eb318cb466f..2db8e2c9b3d1e2 100644
--- a/llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64zbs.ll
+++ b/llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64zbs.ll
@@ -374,7 +374,7 @@ define void @bext_i32_trunc(i32 signext %0, i32 signext %1) {
 ; RV64I-NEXT:  # %bb.1:
 ; RV64I-NEXT:    ret
 ; RV64I-NEXT:  .LBB19_2:
-; RV64I-NEXT:    tail bar@plt
+; RV64I-NEXT:    tail bar
 ;
 ; RV64ZBS-LABEL: bext_i32_trunc:
 ; RV64ZBS:       # %bb.0:
@@ -383,7 +383,7 @@ define void @bext_i32_trunc(i32 signext %0, i32 signext %1) {
 ; RV64ZBS-NEXT:  # %bb.1:
 ; RV64ZBS-NEXT:    ret
 ; RV64ZBS-NEXT:  .LBB19_2:
-; RV64ZBS-NEXT:    tail bar@plt
+; RV64ZBS-NEXT:    tail bar
   %3 = shl i32 1, %1
   %4 = and i32 %3, %0
   %5 = icmp eq i32 %4, 0
diff --git a/llvm/test/CodeGen/RISCV/rv64i-complex-float.ll b/llvm/test/CodeGen/RISCV/rv64i-complex-float.ll
index 690828c7794346..16f4119ef20b40 100644
--- a/llvm/test/CodeGen/RISCV/rv64i-complex-float.ll
+++ b/llvm/test/CodeGen/RISCV/rv64i-complex-float.ll
@@ -15,11 +15,11 @@ define i64 @complex_float_add(i64 %a.coerce, i64 %b.coerce) nounwind {
 ; CHECK-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
 ; CHECK-NEXT:    srli s0, a0, 32
 ; CHECK-NEXT:    srli s1, a1, 32
-; CHECK-NEXT:    call __addsf3@plt
+; CHECK-NEXT:    call __addsf3
 ; CHECK-NEXT:    mv s2, a0
 ; CHECK-NEXT:    mv a0, s0
 ; CHECK-NEXT:    mv a1, s1
-; CHECK-NEXT:    call __addsf3@plt
+; CHECK-NEXT:    call __addsf3
 ; CHECK-NEXT:    slli a0, a0, 32
 ; CHECK-NEXT:    slli s2, s2, 32
 ; CHECK-NEXT:    srli a1, s2, 32
diff --git a/llvm/test/CodeGen/RISCV/rv64i-double-softfloat.ll b/llvm/test/CodeGen/RISCV/rv64i-double-softfloat.ll
index 25278caee64d77..6fdf2a3a939ce8 100644
--- a/llvm/test/CodeGen/RISCV/rv64i-double-softfloat.ll
+++ b/llvm/test/CodeGen/RISCV/rv64i-double-softfloat.ll
@@ -15,7 +15,7 @@ define i32 @strict_fp64_to_ui32(double %a) nounwind strictfp {
 ; RV64I:       # %bb.0: # %entry
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __fixunsdfsi@plt
+; RV64I-NEXT:    call __fixunsdfsi
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -24,7 +24,7 @@ define i32 @strict_fp64_to_ui32(double %a) nounwind strictfp {
 ; RV64IF:       # %bb.0: # %entry
 ; RV64IF-NEXT:    addi sp, sp, -16
 ; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT:    call __fixunsdfsi@plt
+; RV64IF-NEXT:    call __fixunsdfsi
 ; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
 ; RV64IF-NEXT:    ret
@@ -38,7 +38,7 @@ define i32 @strict_fp64_to_si32(double %a) nounwind strictfp {
 ; RV64I:       # %bb.0: # %entry
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __fixdfsi@plt
+; RV64I-NEXT:    call __fixdfsi
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -47,7 +47,7 @@ define i32 @strict_fp64_to_si32(double %a) nounwind strictfp {
 ; RV64IF:       # %bb.0: # %entry
 ; RV64IF-NEXT:    addi sp, sp, -16
 ; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT:    call __fixdfsi@plt
+; RV64IF-NEXT:    call __fixdfsi
 ; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
 ; RV64IF-NEXT:    ret
diff --git a/llvm/test/CodeGen/RISCV/rv64i-single-softfloat.ll b/llvm/test/CodeGen/RISCV/rv64i-single-softfloat.ll
index b7e112bbb7b5fb..b645b621c75c6a 100644
--- a/llvm/test/CodeGen/RISCV/rv64i-single-softfloat.ll
+++ b/llvm/test/CodeGen/RISCV/rv64i-single-softfloat.ll
@@ -13,7 +13,7 @@ define i32 @strict_fp32_to_ui32(float %a) nounwind strictfp {
 ; RV64I:       # %bb.0: # %entry
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __fixunssfsi@plt
+; RV64I-NEXT:    call __fixunssfsi
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -27,7 +27,7 @@ define i32 @strict_fp32_to_si32(float %a) nounwind strictfp {
 ; RV64I:       # %bb.0: # %entry
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __fixsfsi@plt
+; RV64I-NEXT:    call __fixsfsi
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
diff --git a/llvm/test/CodeGen/RISCV/rv64xtheadbb.ll b/llvm/test/CodeGen/RISCV/rv64xtheadbb.ll
index c8cd710fe9ae44..1f62ea9f568191 100644
--- a/llvm/test/CodeGen/RISCV/rv64xtheadbb.ll
+++ b/llvm/test/CodeGen/RISCV/rv64xtheadbb.ll
@@ -42,7 +42,7 @@ define signext i32 @ctlz_i32(i32 signext %a) nounwind {
 ; RV64I-NEXT:    and a0, a0, a1
 ; RV64I-NEXT:    lui a1, 4112
 ; RV64I-NEXT:    addiw a1, a1, 257
-; RV64I-NEXT:    call __muldi3@plt
+; RV64I-NEXT:    call __muldi3
 ; RV64I-NEXT:    srliw a0, a0, 24
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -97,7 +97,7 @@ define signext i32 @log2_i32(i32 signext %a) nounwind {
 ; RV64I-NEXT:    and a0, a0, a1
 ; RV64I-NEXT:    lui a1, 4112
 ; RV64I-NEXT:    addiw a1, a1, 257
-; RV64I-NEXT:    call __muldi3@plt
+; RV64I-NEXT:    call __muldi3
 ; RV64I-NEXT:    srliw a0, a0, 24
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -162,7 +162,7 @@ define signext i32 @log2_ceil_i32(i32 signext %a) nounwind {
 ; RV64I-NEXT:    and a0, a0, a1
 ; RV64I-NEXT:    lui a1, 4112
 ; RV64I-NEXT:    addiw a1, a1, 257
-; RV64I-NEXT:    call __muldi3@plt
+; RV64I-NEXT:    call __muldi3
 ; RV64I-NEXT:    srliw a1, a0, 24
 ; RV64I-NEXT:  .LBB2_2: # %cond.end
 ; RV64I-NEXT:    sub a0, s0, a1
@@ -222,7 +222,7 @@ define signext i32 @findLastSet_i32(i32 signext %a) nounwind {
 ; RV64I-NEXT:    and a0, a0, a1
 ; RV64I-NEXT:    lui a1, 4112
 ; RV64I-NEXT:    addiw a1, a1, 257
-; RV64I-NEXT:    call __muldi3@plt
+; RV64I-NEXT:    call __muldi3
 ; RV64I-NEXT:    srliw a0, a0, 24
 ; RV64I-NEXT:    xori a0, a0, 31
 ; RV64I-NEXT:    snez a1, s0
@@ -289,7 +289,7 @@ define i32 @ctlz_lshr_i32(i32 signext %a) {
 ; RV64I-NEXT:    and a0, a0, a1
 ; RV64I-NEXT:    lui a1, 4112
 ; RV64I-NEXT:    addiw a1, a1, 257
-; RV64I-NEXT:    call __muldi3@plt
+; RV64I-NEXT:    call __muldi3
 ; RV64I-NEXT:    srliw a0, a0, 24
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -358,7 +358,7 @@ define i64 @ctlz_i64(i64 %a) nounwind {
 ; RV64I-NEXT:    addiw a1, a1, 257
 ; RV64I-NEXT:    slli a2, a1, 32
 ; RV64I-NEXT:    add a1, a1, a2
-; RV64I-NEXT:    call __muldi3@plt
+; RV64I-NEXT:    call __muldi3
 ; RV64I-NEXT:    srli a0, a0, 56
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -388,7 +388,7 @@ define signext i32 @cttz_i32(i32 signext %a) nounwind {
 ; RV64I-NEXT:    and a0, a0, a1
 ; RV64I-NEXT:    lui a1, 30667
 ; RV64I-NEXT:    addiw a1, a1, 1329
-; RV64I-NEXT:    call __muldi3@plt
+; RV64I-NEXT:    call __muldi3
 ; RV64I-NEXT:    srliw a0, a0, 27
 ; RV64I-NEXT:    lui a1, %hi(.LCPI6_0)
 ; RV64I-NEXT:    addi a1, a1, %lo(.LCPI6_0)
@@ -428,7 +428,7 @@ define signext i32 @cttz_zero_undef_i32(i32 signext %a) nounwind {
 ; RV64I-NEXT:    and a0, a0, a1
 ; RV64I-NEXT:    lui a1, 30667
 ; RV64I-NEXT:    addiw a1, a1, 1329
-; RV64I-NEXT:    call __muldi3@plt
+; RV64I-NEXT:    call __muldi3
 ; RV64I-NEXT:    srliw a0, a0, 27
 ; RV64I-NEXT:    lui a1, %hi(.LCPI7_0)
 ; RV64I-NEXT:    addi a1, a1, %lo(.LCPI7_0)
@@ -462,7 +462,7 @@ define signext i32 @findFirstSet_i32(i32 signext %a) nounwind {
 ; RV64I-NEXT:    and a0, s0, a0
 ; RV64I-NEXT:    lui a1, 30667
 ; RV64I-NEXT:    addiw a1, a1, 1329
-; RV64I-NEXT:    call __muldi3@plt
+; RV64I-NEXT:    call __muldi3
 ; RV64I-NEXT:    srliw a0, a0, 27
 ; RV64I-NEXT:    lui a1, %hi(.LCPI8_0)
 ; RV64I-NEXT:    addi a1, a1, %lo(.LCPI8_0)
@@ -505,7 +505,7 @@ define signext i32 @ffs_i32(i32 signext %a) nounwind {
 ; RV64I-NEXT:    and a0, s0, a0
 ; RV64I-NEXT:    lui a1, 30667
 ; RV64I-NEXT:    addiw a1, a1, 1329
-; RV64I-NEXT:    call __muldi3@plt
+; RV64I-NEXT:    call __muldi3
 ; RV64I-NEXT:    srliw a0, a0, 27
 ; RV64I-NEXT:    lui a1, %hi(.LCPI9_0)
 ; RV64I-NEXT:    addi a1, a1, %lo(.LCPI9_0)
@@ -552,7 +552,7 @@ define i64 @cttz_i64(i64 %a) nounwind {
 ; RV64I-NEXT:    and a0, a0, a1
 ; RV64I-NEXT:    lui a1, %hi(.LCPI10_0)
 ; RV64I-NEXT:    ld a1, %lo(.LCPI10_0)(a1)
-; RV64I-NEXT:    call __muldi3@plt
+; RV64I-NEXT:    call __muldi3
 ; RV64I-NEXT:    srli a0, a0, 58
 ; RV64I-NEXT:    lui a1, %hi(.LCPI10_1)
 ; RV64I-NEXT:    addi a1, a1, %lo(.LCPI10_1)
diff --git a/llvm/test/CodeGen/RISCV/rv64zbb.ll b/llvm/test/CodeGen/RISCV/rv64zbb.ll
index 8123721ed31615..2269d8d04c9cb0 100644
--- a/llvm/test/CodeGen/RISCV/rv64zbb.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zbb.ll
@@ -42,7 +42,7 @@ define signext i32 @ctlz_i32(i32 signext %a) nounwind {
 ; RV64I-NEXT:    and a0, a0, a1
 ; RV64I-NEXT:    lui a1, 4112
 ; RV64I-NEXT:    addiw a1, a1, 257
-; RV64I-NEXT:    call __muldi3@plt
+; RV64I-NEXT:    call __muldi3
 ; RV64I-NEXT:    srliw a0, a0, 24
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -95,7 +95,7 @@ define signext i32 @log2_i32(i32 signext %a) nounwind {
 ; RV64I-NEXT:    and a0, a0, a1
 ; RV64I-NEXT:    lui a1, 4112
 ; RV64I-NEXT:    addiw a1, a1, 257
-; RV64I-NEXT:    call __muldi3@plt
+; RV64I-NEXT:    call __muldi3
 ; RV64I-NEXT:    srliw a0, a0, 24
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -158,7 +158,7 @@ define signext i32 @log2_ceil_i32(i32 signext %a) nounwind {
 ; RV64I-NEXT:    and a0, a0, a1
 ; RV64I-NEXT:    lui a1, 4112
 ; RV64I-NEXT:    addiw a1, a1, 257
-; RV64I-NEXT:    call __muldi3@plt
+; RV64I-NEXT:    call __muldi3
 ; RV64I-NEXT:    srliw a1, a0, 24
 ; RV64I-NEXT:  .LBB2_2: # %cond.end
 ; RV64I-NEXT:    sub a0, s0, a1
@@ -216,7 +216,7 @@ define signext i32 @findLastSet_i32(i32 signext %a) nounwind {
 ; RV64I-NEXT:    and a0, a0, a1
 ; RV64I-NEXT:    lui a1, 4112
 ; RV64I-NEXT:    addiw a1, a1, 257
-; RV64I-NEXT:    call __muldi3@plt
+; RV64I-NEXT:    call __muldi3
 ; RV64I-NEXT:    srliw a0, a0, 24
 ; RV64I-NEXT:    xori a0, a0, 31
 ; RV64I-NEXT:    snez a1, s0
@@ -281,7 +281,7 @@ define i32 @ctlz_lshr_i32(i32 signext %a) {
 ; RV64I-NEXT:    and a0, a0, a1
 ; RV64I-NEXT:    lui a1, 4112
 ; RV64I-NEXT:    addiw a1, a1, 257
-; RV64I-NEXT:    call __muldi3@plt
+; RV64I-NEXT:    call __muldi3
 ; RV64I-NEXT:    srliw a0, a0, 24
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -348,7 +348,7 @@ define i64 @ctlz_i64(i64 %a) nounwind {
 ; RV64I-NEXT:    addiw a1, a1, 257
 ; RV64I-NEXT:    slli a2, a1, 32
 ; RV64I-NEXT:    add a1, a1, a2
-; RV64I-NEXT:    call __muldi3@plt
+; RV64I-NEXT:    call __muldi3
 ; RV64I-NEXT:    srli a0, a0, 56
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -378,7 +378,7 @@ define signext i32 @cttz_i32(i32 signext %a) nounwind {
 ; RV64I-NEXT:    and a0, a0, a1
 ; RV64I-NEXT:    lui a1, 30667
 ; RV64I-NEXT:    addiw a1, a1, 1329
-; RV64I-NEXT:    call __muldi3@plt
+; RV64I-NEXT:    call __muldi3
 ; RV64I-NEXT:    srliw a0, a0, 27
 ; RV64I-NEXT:    lui a1, %hi(.LCPI6_0)
 ; RV64I-NEXT:    addi a1, a1, %lo(.LCPI6_0)
@@ -408,7 +408,7 @@ define signext i32 @cttz_zero_undef_i32(i32 signext %a) nounwind {
 ; RV64I-NEXT:    and a0, a0, a1
 ; RV64I-NEXT:    lui a1, 30667
 ; RV64I-NEXT:    addiw a1, a1, 1329
-; RV64I-NEXT:    call __muldi3@plt
+; RV64I-NEXT:    call __muldi3
 ; RV64I-NEXT:    srliw a0, a0, 27
 ; RV64I-NEXT:    lui a1, %hi(.LCPI7_0)
 ; RV64I-NEXT:    addi a1, a1, %lo(.LCPI7_0)
@@ -437,7 +437,7 @@ define signext i32 @findFirstSet_i32(i32 signext %a) nounwind {
 ; RV64I-NEXT:    and a0, s0, a0
 ; RV64I-NEXT:    lui a1, 30667
 ; RV64I-NEXT:    addiw a1, a1, 1329
-; RV64I-NEXT:    call __muldi3@plt
+; RV64I-NEXT:    call __muldi3
 ; RV64I-NEXT:    srliw a0, a0, 27
 ; RV64I-NEXT:    lui a1, %hi(.LCPI8_0)
 ; RV64I-NEXT:    addi a1, a1, %lo(.LCPI8_0)
@@ -475,7 +475,7 @@ define signext i32 @ffs_i32(i32 signext %a) nounwind {
 ; RV64I-NEXT:    and a0, s0, a0
 ; RV64I-NEXT:    lui a1, 30667
 ; RV64I-NEXT:    addiw a1, a1, 1329
-; RV64I-NEXT:    call __muldi3@plt
+; RV64I-NEXT:    call __muldi3
 ; RV64I-NEXT:    srliw a0, a0, 27
 ; RV64I-NEXT:    lui a1, %hi(.LCPI9_0)
 ; RV64I-NEXT:    addi a1, a1, %lo(.LCPI9_0)
@@ -518,7 +518,7 @@ define i64 @cttz_i64(i64 %a) nounwind {
 ; RV64I-NEXT:    and a0, a0, a1
 ; RV64I-NEXT:    lui a1, %hi(.LCPI10_0)
 ; RV64I-NEXT:    ld a1, %lo(.LCPI10_0)(a1)
-; RV64I-NEXT:    call __muldi3@plt
+; RV64I-NEXT:    call __muldi3
 ; RV64I-NEXT:    srli a0, a0, 58
 ; RV64I-NEXT:    lui a1, %hi(.LCPI10_1)
 ; RV64I-NEXT:    addi a1, a1, %lo(.LCPI10_1)
@@ -564,7 +564,7 @@ define signext i32 @ctpop_i32(i32 signext %a) nounwind {
 ; RV64I-NEXT:    and a0, a0, a1
 ; RV64I-NEXT:    lui a1, 4112
 ; RV64I-NEXT:    addiw a1, a1, 257
-; RV64I-NEXT:    call __muldi3@plt
+; RV64I-NEXT:    call __muldi3
 ; RV64I-NEXT:    srliw a0, a0, 24
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -678,7 +678,7 @@ define signext i32 @ctpop_i32_load(ptr %p) nounwind {
 ; RV64I-NEXT:    and a0, a0, a1
 ; RV64I-NEXT:    lui a1, 4112
 ; RV64I-NEXT:    addiw a1, a1, 257
-; RV64I-NEXT:    call __muldi3@plt
+; RV64I-NEXT:    call __muldi3
 ; RV64I-NEXT:    srliw a0, a0, 24
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -727,7 +727,7 @@ define <2 x i32> @ctpop_v2i32(<2 x i32> %a) nounwind {
 ; RV64I-NEXT:    lui a1, 4112
 ; RV64I-NEXT:    addiw s1, a1, 257
 ; RV64I-NEXT:    mv a1, s1
-; RV64I-NEXT:    call __muldi3@plt
+; RV64I-NEXT:    call __muldi3
 ; RV64I-NEXT:    srliw s2, a0, 24
 ; RV64I-NEXT:    srli a0, s0, 1
 ; RV64I-NEXT:    and a0, a0, s3
@@ -740,7 +740,7 @@ define <2 x i32> @ctpop_v2i32(<2 x i32> %a) nounwind {
 ; RV64I-NEXT:    add a0, a0, a1
 ; RV64I-NEXT:    and a0, a0, s5
 ; RV64I-NEXT:    mv a1, s1
-; RV64I-NEXT:    call __muldi3@plt
+; RV64I-NEXT:    call __muldi3
 ; RV64I-NEXT:    srliw a1, a0, 24
 ; RV64I-NEXT:    mv a0, s2
 ; RV64I-NEXT:    ld ra, 56(sp) # 8-byte Folded Reload
@@ -903,7 +903,7 @@ define i64 @ctpop_i64(i64 %a) nounwind {
 ; RV64I-NEXT:    addiw a1, a1, 257
 ; RV64I-NEXT:    slli a2, a1, 32
 ; RV64I-NEXT:    add a1, a1, a2
-; RV64I-NEXT:    call __muldi3@plt
+; RV64I-NEXT:    call __muldi3
 ; RV64I-NEXT:    srli a0, a0, 56
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -1034,7 +1034,7 @@ define <2 x i64> @ctpop_v2i64(<2 x i64> %a) nounwind {
 ; RV64I-NEXT:    slli a1, s1, 32
 ; RV64I-NEXT:    add s1, s1, a1
 ; RV64I-NEXT:    mv a1, s1
-; RV64I-NEXT:    call __muldi3@plt
+; RV64I-NEXT:    call __muldi3
 ; RV64I-NEXT:    srli s2, a0, 56
 ; RV64I-NEXT:    srli a0, s0, 1
 ; RV64I-NEXT:    and a0, a0, s3
@@ -1047,7 +1047,7 @@ define <2 x i64> @ctpop_v2i64(<2 x i64> %a) nounwind {
 ; RV64I-NEXT:    add a0, a0, a1
 ; RV64I-NEXT:    and a0, a0, s5
 ; RV64I-NEXT:    mv a1, s1
-; RV64I-NEXT:    call __muldi3@plt
+; RV64I-NEXT:    call __muldi3
 ; RV64I-NEXT:    srli a1, a0, 56
 ; RV64I-NEXT:    mv a0, s2
 ; RV64I-NEXT:    ld ra, 56(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/rv64zbs.ll b/llvm/test/CodeGen/RISCV/rv64zbs.ll
index 016b0924eaf199..d370b1877470c4 100644
--- a/llvm/test/CodeGen/RISCV/rv64zbs.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zbs.ll
@@ -376,7 +376,7 @@ define void @bext_i32_trunc(i32 signext %0, i32 signext %1) {
 ; RV64I-NEXT:  # %bb.1:
 ; RV64I-NEXT:    ret
 ; RV64I-NEXT:  .LBB19_2:
-; RV64I-NEXT:    tail bar@plt
+; RV64I-NEXT:    tail bar
 ;
 ; RV64ZBS-LABEL: bext_i32_trunc:
 ; RV64ZBS:       # %bb.0:
@@ -385,7 +385,7 @@ define void @bext_i32_trunc(i32 signext %0, i32 signext %1) {
 ; RV64ZBS-NEXT:  # %bb.1:
 ; RV64ZBS-NEXT:    ret
 ; RV64ZBS-NEXT:  .LBB19_2:
-; RV64ZBS-NEXT:    tail bar@plt
+; RV64ZBS-NEXT:    tail bar
   %3 = shl i32 1, %1
   %4 = and i32 %3, %0
   %5 = icmp eq i32 %4, 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll b/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll
index 94218455a984ea..661b79141fee53 100644
--- a/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll
@@ -313,7 +313,7 @@ define fastcc <vscale x 32 x i32> @ret_nxv32i32_call_nxv32i32_nxv32i32_i32(<vsca
 ; RV32-NEXT:    vs8r.v v16, (a1)
 ; RV32-NEXT:    vmv8r.v v8, v0
 ; RV32-NEXT:    vmv8r.v v16, v24
-; RV32-NEXT:    call ext2@plt
+; RV32-NEXT:    call ext2
 ; RV32-NEXT:    addi sp, s0, -144
 ; RV32-NEXT:    lw ra, 140(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    lw s0, 136(sp) # 4-byte Folded Reload
@@ -347,7 +347,7 @@ define fastcc <vscale x 32 x i32> @ret_nxv32i32_call_nxv32i32_nxv32i32_i32(<vsca
 ; RV64-NEXT:    vs8r.v v16, (a1)
 ; RV64-NEXT:    vmv8r.v v8, v0
 ; RV64-NEXT:    vmv8r.v v16, v24
-; RV64-NEXT:    call ext2@plt
+; RV64-NEXT:    call ext2
 ; RV64-NEXT:    addi sp, s0, -144
 ; RV64-NEXT:    ld ra, 136(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    ld s0, 128(sp) # 8-byte Folded Reload
@@ -419,7 +419,7 @@ define fastcc <vscale x 32 x i32> @ret_nxv32i32_call_nxv32i32_nxv32i32_nxv32i32_
 ; RV32-NEXT:    add a1, sp, a1
 ; RV32-NEXT:    addi a1, a1, 128
 ; RV32-NEXT:    vl8r.v v16, (a1) # Unknown-size Folded Reload
-; RV32-NEXT:    call ext3@plt
+; RV32-NEXT:    call ext3
 ; RV32-NEXT:    addi sp, s0, -144
 ; RV32-NEXT:    lw ra, 140(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    lw s0, 136(sp) # 4-byte Folded Reload
@@ -487,7 +487,7 @@ define fastcc <vscale x 32 x i32> @ret_nxv32i32_call_nxv32i32_nxv32i32_nxv32i32_
 ; RV64-NEXT:    add a1, sp, a1
 ; RV64-NEXT:    addi a1, a1, 128
 ; RV64-NEXT:    vl8r.v v16, (a1) # Unknown-size Folded Reload
-; RV64-NEXT:    call ext3@plt
+; RV64-NEXT:    call ext3
 ; RV64-NEXT:    addi sp, s0, -144
 ; RV64-NEXT:    ld ra, 136(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    ld s0, 128(sp) # 8-byte Folded Reload
@@ -562,7 +562,7 @@ define fastcc <vscale x 32 x i32> @pass_vector_arg_indirect_stack(<vscale x 32 x
 ; RV32-NEXT:    vs8r.v v8, (a0)
 ; RV32-NEXT:    li a0, 0
 ; RV32-NEXT:    vmv.v.i v16, 0
-; RV32-NEXT:    call vector_arg_indirect_stack@plt
+; RV32-NEXT:    call vector_arg_indirect_stack
 ; RV32-NEXT:    addi sp, s0, -144
 ; RV32-NEXT:    lw ra, 140(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    lw s0, 136(sp) # 4-byte Folded Reload
@@ -613,7 +613,7 @@ define fastcc <vscale x 32 x i32> @pass_vector_arg_indirect_stack(<vscale x 32 x
 ; RV64-NEXT:    vs8r.v v8, (a0)
 ; RV64-NEXT:    li a0, 0
 ; RV64-NEXT:    vmv.v.i v16, 0
-; RV64-NEXT:    call vector_arg_indirect_stack@plt
+; RV64-NEXT:    call vector_arg_indirect_stack
 ; RV64-NEXT:    addi sp, s0, -144
 ; RV64-NEXT:    ld ra, 136(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    ld s0, 128(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll b/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll
index a9f5f857095072..78385a80b47ebd 100644
--- a/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll
@@ -45,7 +45,7 @@ define <vscale x 32 x i32> @caller_scalable_vector_split_indirect(<vscale x 32 x
 ; RV32-NEXT:    vmv.v.i v8, 0
 ; RV32-NEXT:    addi a0, sp, 128
 ; RV32-NEXT:    vmv.v.i v16, 0
-; RV32-NEXT:    call callee_scalable_vector_split_indirect@plt
+; RV32-NEXT:    call callee_scalable_vector_split_indirect
 ; RV32-NEXT:    addi sp, s0, -144
 ; RV32-NEXT:    lw ra, 140(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    lw s0, 136(sp) # 4-byte Folded Reload
@@ -76,7 +76,7 @@ define <vscale x 32 x i32> @caller_scalable_vector_split_indirect(<vscale x 32 x
 ; RV64-NEXT:    vmv.v.i v8, 0
 ; RV64-NEXT:    addi a0, sp, 128
 ; RV64-NEXT:    vmv.v.i v16, 0
-; RV64-NEXT:    call callee_scalable_vector_split_indirect@plt
+; RV64-NEXT:    call callee_scalable_vector_split_indirect
 ; RV64-NEXT:    addi sp, s0, -144
 ; RV64-NEXT:    ld ra, 136(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    ld s0, 128(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv-fastcc.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv-fastcc.ll
index d531ff62a1d517..9ec15e5710f72d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv-fastcc.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv-fastcc.ll
@@ -254,7 +254,7 @@ define fastcc <32 x i32> @ret_v32i32_call_v32i32_v32i32_i32(<32 x i32> %x, <32 x
 ; LMULMAX8-NEXT:    li a1, 2
 ; LMULMAX8-NEXT:    vmv8r.v v8, v16
 ; LMULMAX8-NEXT:    vmv8r.v v16, v24
-; LMULMAX8-NEXT:    call ext2@plt
+; LMULMAX8-NEXT:    call ext2
 ; LMULMAX8-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; LMULMAX8-NEXT:    addi sp, sp, 16
 ; LMULMAX8-NEXT:    ret
@@ -272,7 +272,7 @@ define fastcc <32 x i32> @ret_v32i32_call_v32i32_v32i32_i32(<32 x i32> %x, <32 x
 ; LMULMAX4-NEXT:    vmv4r.v v12, v20
 ; LMULMAX4-NEXT:    vmv4r.v v16, v28
 ; LMULMAX4-NEXT:    vmv4r.v v20, v24
-; LMULMAX4-NEXT:    call ext2@plt
+; LMULMAX4-NEXT:    call ext2
 ; LMULMAX4-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; LMULMAX4-NEXT:    addi sp, sp, 16
 ; LMULMAX4-NEXT:    ret
@@ -300,7 +300,7 @@ define fastcc <32 x i32> @ret_v32i32_call_v32i32_v32i32_v32i32_i32(<32 x i32> %x
 ; LMULMAX8-NEXT:    li a2, 42
 ; LMULMAX8-NEXT:    vse32.v v8, (a3)
 ; LMULMAX8-NEXT:    vmv.v.v v8, v24
-; LMULMAX8-NEXT:    call ext3@plt
+; LMULMAX8-NEXT:    call ext3
 ; LMULMAX8-NEXT:    addi sp, s0, -256
 ; LMULMAX8-NEXT:    ld ra, 248(sp) # 8-byte Folded Reload
 ; LMULMAX8-NEXT:    ld s0, 240(sp) # 8-byte Folded Reload
@@ -330,7 +330,7 @@ define fastcc <32 x i32> @ret_v32i32_call_v32i32_v32i32_v32i32_i32(<32 x i32> %x
 ; LMULMAX4-NEXT:    vse32.v v8, (a1)
 ; LMULMAX4-NEXT:    vmv.v.v v8, v24
 ; LMULMAX4-NEXT:    vmv.v.v v12, v28
-; LMULMAX4-NEXT:    call ext3@plt
+; LMULMAX4-NEXT:    call ext3
 ; LMULMAX4-NEXT:    addi sp, s0, -256
 ; LMULMAX4-NEXT:    ld ra, 248(sp) # 8-byte Folded Reload
 ; LMULMAX4-NEXT:    ld s0, 240(sp) # 8-byte Folded Reload
@@ -394,7 +394,7 @@ define fastcc <32 x i32> @pass_vector_arg_indirect_stack(<32 x i32> %x, <32 x i3
 ; LMULMAX8-NEXT:    vse32.v v8, (a0)
 ; LMULMAX8-NEXT:    li a0, 0
 ; LMULMAX8-NEXT:    vmv.v.i v16, 0
-; LMULMAX8-NEXT:    call vector_arg_indirect_stack@plt
+; LMULMAX8-NEXT:    call vector_arg_indirect_stack
 ; LMULMAX8-NEXT:    addi sp, s0, -256
 ; LMULMAX8-NEXT:    ld ra, 248(sp) # 8-byte Folded Reload
 ; LMULMAX8-NEXT:    ld s0, 240(sp) # 8-byte Folded Reload
@@ -431,7 +431,7 @@ define fastcc <32 x i32> @pass_vector_arg_indirect_stack(<32 x i32> %x, <32 x i3
 ; LMULMAX4-NEXT:    vmv.v.i v12, 0
 ; LMULMAX4-NEXT:    vmv.v.i v16, 0
 ; LMULMAX4-NEXT:    vmv.v.i v20, 0
-; LMULMAX4-NEXT:    call vector_arg_indirect_stack@plt
+; LMULMAX4-NEXT:    call vector_arg_indirect_stack
 ; LMULMAX4-NEXT:    addi sp, s0, -256
 ; LMULMAX4-NEXT:    ld ra, 248(sp) # 8-byte Folded Reload
 ; LMULMAX4-NEXT:    ld s0, 240(sp) # 8-byte Folded Reload
@@ -501,7 +501,7 @@ define fastcc <32 x i32> @pass_vector_arg_direct_stack(<32 x i32> %x, <32 x i32>
 ; LMULMAX8-NEXT:    sd a0, 0(sp)
 ; LMULMAX8-NEXT:    li a0, 0
 ; LMULMAX8-NEXT:    vmv.v.i v16, 0
-; LMULMAX8-NEXT:    call vector_arg_direct_stack@plt
+; LMULMAX8-NEXT:    call vector_arg_direct_stack
 ; LMULMAX8-NEXT:    ld ra, 152(sp) # 8-byte Folded Reload
 ; LMULMAX8-NEXT:    addi sp, sp, 160
 ; LMULMAX8-NEXT:    ret
@@ -538,7 +538,7 @@ define fastcc <32 x i32> @pass_vector_arg_direct_stack(<32 x i32> %x, <32 x i32>
 ; LMULMAX4-NEXT:    vmv.v.i v12, 0
 ; LMULMAX4-NEXT:    vmv.v.i v16, 0
 ; LMULMAX4-NEXT:    vmv.v.i v20, 0
-; LMULMAX4-NEXT:    call vector_arg_direct_stack@plt
+; LMULMAX4-NEXT:    call vector_arg_direct_stack
 ; LMULMAX4-NEXT:    ld ra, 152(sp) # 8-byte Folded Reload
 ; LMULMAX4-NEXT:    addi sp, sp, 160
 ; LMULMAX4-NEXT:    ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll
index 775fd4984b896b..8e3a432b8ac834 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll
@@ -693,7 +693,7 @@ define <32 x i32> @ret_v32i32_call_v32i32_v32i32_i32(<32 x i32> %x, <32 x i32> %
 ; LMULMAX8-NEXT:    li a1, 2
 ; LMULMAX8-NEXT:    vmv8r.v v8, v16
 ; LMULMAX8-NEXT:    vmv8r.v v16, v24
-; LMULMAX8-NEXT:    call ext2@plt
+; LMULMAX8-NEXT:    call ext2
 ; LMULMAX8-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; LMULMAX8-NEXT:    addi sp, sp, 16
 ; LMULMAX8-NEXT:    ret
@@ -711,7 +711,7 @@ define <32 x i32> @ret_v32i32_call_v32i32_v32i32_i32(<32 x i32> %x, <32 x i32> %
 ; LMULMAX4-NEXT:    vmv4r.v v12, v20
 ; LMULMAX4-NEXT:    vmv4r.v v16, v28
 ; LMULMAX4-NEXT:    vmv4r.v v20, v24
-; LMULMAX4-NEXT:    call ext2@plt
+; LMULMAX4-NEXT:    call ext2
 ; LMULMAX4-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; LMULMAX4-NEXT:    addi sp, sp, 16
 ; LMULMAX4-NEXT:    ret
@@ -735,7 +735,7 @@ define <32 x i32> @ret_v32i32_call_v32i32_v32i32_i32(<32 x i32> %x, <32 x i32> %
 ; LMULMAX2-NEXT:    vmv2r.v v18, v28
 ; LMULMAX2-NEXT:    vmv2r.v v20, v26
 ; LMULMAX2-NEXT:    vmv2r.v v22, v24
-; LMULMAX2-NEXT:    call ext2@plt
+; LMULMAX2-NEXT:    call ext2
 ; LMULMAX2-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; LMULMAX2-NEXT:    addi sp, sp, 16
 ; LMULMAX2-NEXT:    ret
@@ -771,7 +771,7 @@ define <32 x i32> @ret_v32i32_call_v32i32_v32i32_i32(<32 x i32> %x, <32 x i32> %
 ; LMULMAX1-NEXT:    vmv1r.v v21, v26
 ; LMULMAX1-NEXT:    vmv1r.v v22, v25
 ; LMULMAX1-NEXT:    vmv1r.v v23, v24
-; LMULMAX1-NEXT:    call ext2@plt
+; LMULMAX1-NEXT:    call ext2
 ; LMULMAX1-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; LMULMAX1-NEXT:    addi sp, sp, 16
 ; LMULMAX1-NEXT:    ret
@@ -799,7 +799,7 @@ define <32 x i32> @ret_v32i32_call_v32i32_v32i32_v32i32_i32(<32 x i32> %x, <32 x
 ; LMULMAX8-NEXT:    li a2, 42
 ; LMULMAX8-NEXT:    vse32.v v8, (a3)
 ; LMULMAX8-NEXT:    vmv.v.v v8, v24
-; LMULMAX8-NEXT:    call ext3@plt
+; LMULMAX8-NEXT:    call ext3
 ; LMULMAX8-NEXT:    addi sp, s0, -256
 ; LMULMAX8-NEXT:    ld ra, 248(sp) # 8-byte Folded Reload
 ; LMULMAX8-NEXT:    ld s0, 240(sp) # 8-byte Folded Reload
@@ -829,7 +829,7 @@ define <32 x i32> @ret_v32i32_call_v32i32_v32i32_v32i32_i32(<32 x i32> %x, <32 x
 ; LMULMAX4-NEXT:    vse32.v v8, (a1)
 ; LMULMAX4-NEXT:    vmv.v.v v8, v24
 ; LMULMAX4-NEXT:    vmv.v.v v12, v28
-; LMULMAX4-NEXT:    call ext3@plt
+; LMULMAX4-NEXT:    call ext3
 ; LMULMAX4-NEXT:    addi sp, s0, -256
 ; LMULMAX4-NEXT:    ld ra, 248(sp) # 8-byte Folded Reload
 ; LMULMAX4-NEXT:    ld s0, 240(sp) # 8-byte Folded Reload
@@ -869,7 +869,7 @@ define <32 x i32> @ret_v32i32_call_v32i32_v32i32_v32i32_i32(<32 x i32> %x, <32 x
 ; LMULMAX2-NEXT:    vmv.v.v v10, v26
 ; LMULMAX2-NEXT:    vmv.v.v v12, v28
 ; LMULMAX2-NEXT:    vmv.v.v v14, v30
-; LMULMAX2-NEXT:    call ext3@plt
+; LMULMAX2-NEXT:    call ext3
 ; LMULMAX2-NEXT:    addi sp, s0, -256
 ; LMULMAX2-NEXT:    ld ra, 248(sp) # 8-byte Folded Reload
 ; LMULMAX2-NEXT:    ld s0, 240(sp) # 8-byte Folded Reload
@@ -936,7 +936,7 @@ define <32 x i32> @ret_v32i32_call_v32i32_v32i32_v32i32_i32(<32 x i32> %x, <32 x
 ; LMULMAX1-NEXT:    vmv.v.v v13, v29
 ; LMULMAX1-NEXT:    vmv.v.v v14, v30
 ; LMULMAX1-NEXT:    vmv.v.v v15, v31
-; LMULMAX1-NEXT:    call ext3@plt
+; LMULMAX1-NEXT:    call ext3
 ; LMULMAX1-NEXT:    addi sp, sp, 16
 ; LMULMAX1-NEXT:    addi sp, s0, -256
 ; LMULMAX1-NEXT:    ld ra, 248(sp) # 8-byte Folded Reload
@@ -1043,7 +1043,7 @@ define <32 x i32> @call_split_vector_args(ptr %pa, ptr %pb) {
 ; LMULMAX8-NEXT:    vmv1r.v v10, v8
 ; LMULMAX8-NEXT:    vmv1r.v v11, v8
 ; LMULMAX8-NEXT:    vmv1r.v v12, v8
-; LMULMAX8-NEXT:    call split_vector_args@plt
+; LMULMAX8-NEXT:    call split_vector_args
 ; LMULMAX8-NEXT:    addi sp, s0, -256
 ; LMULMAX8-NEXT:    ld ra, 248(sp) # 8-byte Folded Reload
 ; LMULMAX8-NEXT:    ld s0, 240(sp) # 8-byte Folded Reload
@@ -1076,7 +1076,7 @@ define <32 x i32> @call_split_vector_args(ptr %pa, ptr %pb) {
 ; LMULMAX4-NEXT:    vmv1r.v v10, v8
 ; LMULMAX4-NEXT:    vmv1r.v v11, v8
 ; LMULMAX4-NEXT:    vmv1r.v v12, v8
-; LMULMAX4-NEXT:    call split_vector_args@plt
+; LMULMAX4-NEXT:    call split_vector_args
 ; LMULMAX4-NEXT:    addi sp, s0, -256
 ; LMULMAX4-NEXT:    ld ra, 248(sp) # 8-byte Folded Reload
 ; LMULMAX4-NEXT:    ld s0, 240(sp) # 8-byte Folded Reload
@@ -1116,7 +1116,7 @@ define <32 x i32> @call_split_vector_args(ptr %pa, ptr %pb) {
 ; LMULMAX2-NEXT:    vmv1r.v v11, v8
 ; LMULMAX2-NEXT:    vmv1r.v v12, v8
 ; LMULMAX2-NEXT:    vmv.v.v v22, v14
-; LMULMAX2-NEXT:    call split_vector_args@plt
+; LMULMAX2-NEXT:    call split_vector_args
 ; LMULMAX2-NEXT:    addi sp, s0, -128
 ; LMULMAX2-NEXT:    ld ra, 120(sp) # 8-byte Folded Reload
 ; LMULMAX2-NEXT:    ld s0, 112(sp) # 8-byte Folded Reload
@@ -1170,7 +1170,7 @@ define <32 x i32> @call_split_vector_args(ptr %pa, ptr %pb) {
 ; LMULMAX1-NEXT:    vmv.v.v v21, v13
 ; LMULMAX1-NEXT:    vmv.v.v v22, v14
 ; LMULMAX1-NEXT:    vmv.v.v v23, v15
-; LMULMAX1-NEXT:    call split_vector_args@plt
+; LMULMAX1-NEXT:    call split_vector_args
 ; LMULMAX1-NEXT:    addi sp, s0, -128
 ; LMULMAX1-NEXT:    ld ra, 120(sp) # 8-byte Folded Reload
 ; LMULMAX1-NEXT:    ld s0, 112(sp) # 8-byte Folded Reload
@@ -1273,7 +1273,7 @@ define <32 x i32> @pass_vector_arg_via_stack(<32 x i32> %x, <32 x i32> %y, <32 x
 ; LMULMAX8-NEXT:    sd a0, 128(sp)
 ; LMULMAX8-NEXT:    li a0, 0
 ; LMULMAX8-NEXT:    vmv.v.i v16, 0
-; LMULMAX8-NEXT:    call vector_arg_via_stack@plt
+; LMULMAX8-NEXT:    call vector_arg_via_stack
 ; LMULMAX8-NEXT:    ld ra, 136(sp) # 8-byte Folded Reload
 ; LMULMAX8-NEXT:    addi sp, sp, 144
 ; LMULMAX8-NEXT:    ret
@@ -1302,7 +1302,7 @@ define <32 x i32> @pass_vector_arg_via_stack(<32 x i32> %x, <32 x i32> %y, <32 x
 ; LMULMAX4-NEXT:    vmv.v.i v12, 0
 ; LMULMAX4-NEXT:    vmv.v.i v16, 0
 ; LMULMAX4-NEXT:    vmv.v.i v20, 0
-; LMULMAX4-NEXT:    call vector_arg_via_stack@plt
+; LMULMAX4-NEXT:    call vector_arg_via_stack
 ; LMULMAX4-NEXT:    ld ra, 136(sp) # 8-byte Folded Reload
 ; LMULMAX4-NEXT:    addi sp, sp, 144
 ; LMULMAX4-NEXT:    ret
@@ -1339,7 +1339,7 @@ define <32 x i32> @pass_vector_arg_via_stack(<32 x i32> %x, <32 x i32> %y, <32 x
 ; LMULMAX2-NEXT:    vmv.v.i v18, 0
 ; LMULMAX2-NEXT:    vmv.v.i v20, 0
 ; LMULMAX2-NEXT:    vmv.v.i v22, 0
-; LMULMAX2-NEXT:    call vector_arg_via_stack@plt
+; LMULMAX2-NEXT:    call vector_arg_via_stack
 ; LMULMAX2-NEXT:    ld ra, 136(sp) # 8-byte Folded Reload
 ; LMULMAX2-NEXT:    addi sp, sp, 144
 ; LMULMAX2-NEXT:    ret
@@ -1392,7 +1392,7 @@ define <32 x i32> @pass_vector_arg_via_stack(<32 x i32> %x, <32 x i32> %y, <32 x
 ; LMULMAX1-NEXT:    vmv.v.i v21, 0
 ; LMULMAX1-NEXT:    vmv.v.i v22, 0
 ; LMULMAX1-NEXT:    vmv.v.i v23, 0
-; LMULMAX1-NEXT:    call vector_arg_via_stack@plt
+; LMULMAX1-NEXT:    call vector_arg_via_stack
 ; LMULMAX1-NEXT:    ld ra, 136(sp) # 8-byte Folded Reload
 ; LMULMAX1-NEXT:    addi sp, sp, 144
 ; LMULMAX1-NEXT:    ret
@@ -1447,7 +1447,7 @@ define <4 x i1> @pass_vector_mask_arg_via_stack(<4 x i1> %v) {
 ; LMULMAX8-NEXT:    li a3, 0
 ; LMULMAX8-NEXT:    li a4, 0
 ; LMULMAX8-NEXT:    vmv8r.v v16, v8
-; LMULMAX8-NEXT:    call vector_mask_arg_via_stack@plt
+; LMULMAX8-NEXT:    call vector_mask_arg_via_stack
 ; LMULMAX8-NEXT:    ld ra, 152(sp) # 8-byte Folded Reload
 ; LMULMAX8-NEXT:    addi sp, sp, 160
 ; LMULMAX8-NEXT:    ret
@@ -1487,7 +1487,7 @@ define <4 x i1> @pass_vector_mask_arg_via_stack(<4 x i1> %v) {
 ; LMULMAX4-NEXT:    vmv4r.v v12, v8
 ; LMULMAX4-NEXT:    vmv4r.v v16, v8
 ; LMULMAX4-NEXT:    vmv4r.v v20, v8
-; LMULMAX4-NEXT:    call vector_mask_arg_via_stack@plt
+; LMULMAX4-NEXT:    call vector_mask_arg_via_stack
 ; LMULMAX4-NEXT:    ld ra, 152(sp) # 8-byte Folded Reload
 ; LMULMAX4-NEXT:    addi sp, sp, 160
 ; LMULMAX4-NEXT:    ret
@@ -1535,7 +1535,7 @@ define <4 x i1> @pass_vector_mask_arg_via_stack(<4 x i1> %v) {
 ; LMULMAX2-NEXT:    vmv2r.v v18, v8
 ; LMULMAX2-NEXT:    vmv2r.v v20, v8
 ; LMULMAX2-NEXT:    vmv2r.v v22, v8
-; LMULMAX2-NEXT:    call vector_mask_arg_via_stack@plt
+; LMULMAX2-NEXT:    call vector_mask_arg_via_stack
 ; LMULMAX2-NEXT:    ld ra, 152(sp) # 8-byte Folded Reload
 ; LMULMAX2-NEXT:    addi sp, sp, 160
 ; LMULMAX2-NEXT:    ret
@@ -1599,7 +1599,7 @@ define <4 x i1> @pass_vector_mask_arg_via_stack(<4 x i1> %v) {
 ; LMULMAX1-NEXT:    vmv1r.v v21, v8
 ; LMULMAX1-NEXT:    vmv1r.v v22, v8
 ; LMULMAX1-NEXT:    vmv1r.v v23, v8
-; LMULMAX1-NEXT:    call vector_mask_arg_via_stack@plt
+; LMULMAX1-NEXT:    call vector_mask_arg_via_stack
 ; LMULMAX1-NEXT:    ld ra, 152(sp) # 8-byte Folded Reload
 ; LMULMAX1-NEXT:    addi sp, sp, 160
 ; LMULMAX1-NEXT:    ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-emergency-slot.mir b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-emergency-slot.mir
index c261019e2e124e..0403ceda8f11c0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-emergency-slot.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-emergency-slot.mir
@@ -18,7 +18,7 @@
   ; CHECK-NEXT:    addi a1, sp, 24
   ; CHECK-NEXT:    vs1r.v v25, (a1) # Unknown-size Folded Spill
   ; CHECK-NEXT:    ld a1, 0(sp)
-  ; CHECK-NEXT:    call fixedlen_vector_spillslot@plt
+  ; CHECK-NEXT:    call fixedlen_vector_spillslot
   ; CHECK-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
   ; CHECK-NEXT:    addi sp, sp, 48
   ; CHECK-NEXT:    ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll
index 06d1ada300a1d2..e969da6fd45b71 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll
@@ -616,7 +616,7 @@ define i32 @extractelt_v32i32_idx(ptr %x, i32 zeroext %idx) nounwind {
 ; RV32NOM-NEXT:    mv s2, a0
 ; RV32NOM-NEXT:    andi a0, a1, 31
 ; RV32NOM-NEXT:    li a1, 4
-; RV32NOM-NEXT:    call __mulsi3@plt
+; RV32NOM-NEXT:    call __mulsi3
 ; RV32NOM-NEXT:    li a1, 32
 ; RV32NOM-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
 ; RV32NOM-NEXT:    vle32.v v8, (s2)
@@ -666,7 +666,7 @@ define i32 @extractelt_v32i32_idx(ptr %x, i32 zeroext %idx) nounwind {
 ; RV64NOM-NEXT:    mv s2, a0
 ; RV64NOM-NEXT:    andi a0, a1, 31
 ; RV64NOM-NEXT:    li a1, 4
-; RV64NOM-NEXT:    call __muldi3@plt
+; RV64NOM-NEXT:    call __muldi3
 ; RV64NOM-NEXT:    li a1, 32
 ; RV64NOM-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
 ; RV64NOM-NEXT:    vle32.v v8, (s2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-llrint.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-llrint.ll
index 7c5047bbdf6352..d55683e653d246 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-llrint.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-llrint.ll
@@ -13,7 +13,7 @@ define <1 x i64> @llrint_v1i64_v1f32(<1 x float> %x) {
 ; RV32-NEXT:    .cfi_offset ra, -4
 ; RV32-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
 ; RV32-NEXT:    vfmv.f.s fa0, v8
-; RV32-NEXT:    call llrintf@plt
+; RV32-NEXT:    call llrintf
 ; RV32-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
 ; RV32-NEXT:    vmv.v.x v8, a0
 ; RV32-NEXT:    vslide1down.vx v8, v8, a1
@@ -49,7 +49,7 @@ define <2 x i64> @llrint_v2i64_v2f32(<2 x float> %x) {
 ; RV32-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
 ; RV32-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
 ; RV32-NEXT:    vfmv.f.s fa0, v8
-; RV32-NEXT:    call llrintf@plt
+; RV32-NEXT:    call llrintf
 ; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
 ; RV32-NEXT:    vmv.v.x v8, a0
 ; RV32-NEXT:    vslide1down.vx v8, v8, a1
@@ -62,7 +62,7 @@ define <2 x i64> @llrint_v2i64_v2f32(<2 x float> %x) {
 ; RV32-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
 ; RV32-NEXT:    vslidedown.vi v8, v8, 1
 ; RV32-NEXT:    vfmv.f.s fa0, v8
-; RV32-NEXT:    call llrintf@plt
+; RV32-NEXT:    call llrintf
 ; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
 ; RV32-NEXT:    csrr a2, vlenb
 ; RV32-NEXT:    add a2, sp, a2
@@ -112,7 +112,7 @@ define <3 x i64> @llrint_v3i64_v3f32(<3 x float> %x) {
 ; RV32-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
 ; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; RV32-NEXT:    vfmv.f.s fa0, v8
-; RV32-NEXT:    call llrintf@plt
+; RV32-NEXT:    call llrintf
 ; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
 ; RV32-NEXT:    vmv.v.x v8, a0
 ; RV32-NEXT:    vslide1down.vx v8, v8, a1
@@ -126,7 +126,7 @@ define <3 x i64> @llrint_v3i64_v3f32(<3 x float> %x) {
 ; RV32-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
 ; RV32-NEXT:    vslidedown.vi v8, v8, 1
 ; RV32-NEXT:    vfmv.f.s fa0, v8
-; RV32-NEXT:    call llrintf@plt
+; RV32-NEXT:    call llrintf
 ; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
 ; RV32-NEXT:    addi a2, sp, 16
 ; RV32-NEXT:    vl2r.v v8, (a2) # Unknown-size Folded Reload
@@ -142,7 +142,7 @@ define <3 x i64> @llrint_v3i64_v3f32(<3 x float> %x) {
 ; RV32-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
 ; RV32-NEXT:    vslidedown.vi v8, v8, 2
 ; RV32-NEXT:    vfmv.f.s fa0, v8
-; RV32-NEXT:    call llrintf@plt
+; RV32-NEXT:    call llrintf
 ; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
 ; RV32-NEXT:    addi a2, sp, 16
 ; RV32-NEXT:    vl2r.v v8, (a2) # Unknown-size Folded Reload
@@ -158,7 +158,7 @@ define <3 x i64> @llrint_v3i64_v3f32(<3 x float> %x) {
 ; RV32-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
 ; RV32-NEXT:    vslidedown.vi v8, v8, 3
 ; RV32-NEXT:    vfmv.f.s fa0, v8
-; RV32-NEXT:    call llrintf@plt
+; RV32-NEXT:    call llrintf
 ; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
 ; RV32-NEXT:    addi a2, sp, 16
 ; RV32-NEXT:    vl2r.v v8, (a2) # Unknown-size Folded Reload
@@ -218,7 +218,7 @@ define <4 x i64> @llrint_v4i64_v4f32(<4 x float> %x) {
 ; RV32-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
 ; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; RV32-NEXT:    vfmv.f.s fa0, v8
-; RV32-NEXT:    call llrintf@plt
+; RV32-NEXT:    call llrintf
 ; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
 ; RV32-NEXT:    vmv.v.x v8, a0
 ; RV32-NEXT:    vslide1down.vx v8, v8, a1
@@ -232,7 +232,7 @@ define <4 x i64> @llrint_v4i64_v4f32(<4 x float> %x) {
 ; RV32-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
 ; RV32-NEXT:    vslidedown.vi v8, v8, 1
 ; RV32-NEXT:    vfmv.f.s fa0, v8
-; RV32-NEXT:    call llrintf@plt
+; RV32-NEXT:    call llrintf
 ; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
 ; RV32-NEXT:    addi a2, sp, 16
 ; RV32-NEXT:    vl2r.v v8, (a2) # Unknown-size Folded Reload
@@ -248,7 +248,7 @@ define <4 x i64> @llrint_v4i64_v4f32(<4 x float> %x) {
 ; RV32-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
 ; RV32-NEXT:    vslidedown.vi v8, v8, 2
 ; RV32-NEXT:    vfmv.f.s fa0, v8
-; RV32-NEXT:    call llrintf@plt
+; RV32-NEXT:    call llrintf
 ; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
 ; RV32-NEXT:    addi a2, sp, 16
 ; RV32-NEXT:    vl2r.v v8, (a2) # Unknown-size Folded Reload
@@ -264,7 +264,7 @@ define <4 x i64> @llrint_v4i64_v4f32(<4 x float> %x) {
 ; RV32-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
 ; RV32-NEXT:    vslidedown.vi v8, v8, 3
 ; RV32-NEXT:    vfmv.f.s fa0, v8
-; RV32-NEXT:    call llrintf@plt
+; RV32-NEXT:    call llrintf
 ; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
 ; RV32-NEXT:    addi a2, sp, 16
 ; RV32-NEXT:    vl2r.v v8, (a2) # Unknown-size Folded Reload
@@ -325,7 +325,7 @@ define <8 x i64> @llrint_v8i64_v8f32(<8 x float> %x) {
 ; RV32-NEXT:    vs2r.v v8, (a0) # Unknown-size Folded Spill
 ; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; RV32-NEXT:    vfmv.f.s fa0, v8
-; RV32-NEXT:    call llrintf@plt
+; RV32-NEXT:    call llrintf
 ; RV32-NEXT:    sw a1, 68(sp)
 ; RV32-NEXT:    sw a0, 64(sp)
 ; RV32-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
@@ -333,7 +333,7 @@ define <8 x i64> @llrint_v8i64_v8f32(<8 x float> %x) {
 ; RV32-NEXT:    vl2r.v v8, (a0) # Unknown-size Folded Reload
 ; RV32-NEXT:    vslidedown.vi v8, v8, 7
 ; RV32-NEXT:    vfmv.f.s fa0, v8
-; RV32-NEXT:    call llrintf@plt
+; RV32-NEXT:    call llrintf
 ; RV32-NEXT:    sw a1, 124(sp)
 ; RV32-NEXT:    sw a0, 120(sp)
 ; RV32-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
@@ -341,7 +341,7 @@ define <8 x i64> @llrint_v8i64_v8f32(<8 x float> %x) {
 ; RV32-NEXT:    vl2r.v v8, (a0) # Unknown-size Folded Reload
 ; RV32-NEXT:    vslidedown.vi v8, v8, 6
 ; RV32-NEXT:    vfmv.f.s fa0, v8
-; RV32-NEXT:    call llrintf@plt
+; RV32-NEXT:    call llrintf
 ; RV32-NEXT:    sw a1, 116(sp)
 ; RV32-NEXT:    sw a0, 112(sp)
 ; RV32-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
@@ -349,7 +349,7 @@ define <8 x i64> @llrint_v8i64_v8f32(<8 x float> %x) {
 ; RV32-NEXT:    vl2r.v v8, (a0) # Unknown-size Folded Reload
 ; RV32-NEXT:    vslidedown.vi v8, v8, 5
 ; RV32-NEXT:    vfmv.f.s fa0, v8
-; RV32-NEXT:    call llrintf@plt
+; RV32-NEXT:    call llrintf
 ; RV32-NEXT:    sw a1, 108(sp)
 ; RV32-NEXT:    sw a0, 104(sp)
 ; RV32-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
@@ -357,7 +357,7 @@ define <8 x i64> @llrint_v8i64_v8f32(<8 x float> %x) {
 ; RV32-NEXT:    vl2r.v v8, (a0) # Unknown-size Folded Reload
 ; RV32-NEXT:    vslidedown.vi v8, v8, 4
 ; RV32-NEXT:    vfmv.f.s fa0, v8
-; RV32-NEXT:    call llrintf@plt
+; RV32-NEXT:    call llrintf
 ; RV32-NEXT:    sw a1, 100(sp)
 ; RV32-NEXT:    sw a0, 96(sp)
 ; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
@@ -365,7 +365,7 @@ define <8 x i64> @llrint_v8i64_v8f32(<8 x float> %x) {
 ; RV32-NEXT:    vl2r.v v8, (a0) # Unknown-size Folded Reload
 ; RV32-NEXT:    vslidedown.vi v8, v8, 3
 ; RV32-NEXT:    vfmv.f.s fa0, v8
-; RV32-NEXT:    call llrintf@plt
+; RV32-NEXT:    call llrintf
 ; RV32-NEXT:    sw a1, 92(sp)
 ; RV32-NEXT:    sw a0, 88(sp)
 ; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
@@ -373,7 +373,7 @@ define <8 x i64> @llrint_v8i64_v8f32(<8 x float> %x) {
 ; RV32-NEXT:    vl2r.v v8, (a0) # Unknown-size Folded Reload
 ; RV32-NEXT:    vslidedown.vi v8, v8, 2
 ; RV32-NEXT:    vfmv.f.s fa0, v8
-; RV32-NEXT:    call llrintf@plt
+; RV32-NEXT:    call llrintf
 ; RV32-NEXT:    sw a1, 84(sp)
 ; RV32-NEXT:    sw a0, 80(sp)
 ; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
@@ -381,7 +381,7 @@ define <8 x i64> @llrint_v8i64_v8f32(<8 x float> %x) {
 ; RV32-NEXT:    vl2r.v v8, (a0) # Unknown-size Folded Reload
 ; RV32-NEXT:    vslidedown.vi v8, v8, 1
 ; RV32-NEXT:    vfmv.f.s fa0, v8
-; RV32-NEXT:    call llrintf@plt
+; RV32-NEXT:    call llrintf
 ; RV32-NEXT:    sw a1, 76(sp)
 ; RV32-NEXT:    sw a0, 72(sp)
 ; RV32-NEXT:    addi a0, sp, 64
@@ -471,42 +471,42 @@ define <16 x i64> @llrint_v16i64_v16f32(<16 x float> %x) {
 ; RV32-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
 ; RV32-NEXT:    vse32.v v8, (a0)
 ; RV32-NEXT:    flw fa0, 124(sp)
-; RV32-NEXT:    call llrintf@plt
+; RV32-NEXT:    call llrintf
 ; RV32-NEXT:    sw a1, 252(sp)
 ; RV32-NEXT:    sw a0, 248(sp)
 ; RV32-NEXT:    flw fa0, 120(sp)
-; RV32-NEXT:    call llrintf@plt
+; RV32-NEXT:    call llrintf
 ; RV32-NEXT:    sw a1, 244(sp)
 ; RV32-NEXT:    sw a0, 240(sp)
 ; RV32-NEXT:    flw fa0, 116(sp)
-; RV32-NEXT:    call llrintf@plt
+; RV32-NEXT:    call llrintf
 ; RV32-NEXT:    sw a1, 236(sp)
 ; RV32-NEXT:    sw a0, 232(sp)
 ; RV32-NEXT:    flw fa0, 112(sp)
-; RV32-NEXT:    call llrintf@plt
+; RV32-NEXT:    call llrintf
 ; RV32-NEXT:    sw a1, 228(sp)
 ; RV32-NEXT:    sw a0, 224(sp)
 ; RV32-NEXT:    flw fa0, 108(sp)
-; RV32-NEXT:    call llrintf@plt
+; RV32-NEXT:    call llrintf
 ; RV32-NEXT:    sw a1, 220(sp)
 ; RV32-NEXT:    sw a0, 216(sp)
 ; RV32-NEXT:    flw fa0, 104(sp)
-; RV32-NEXT:    call llrintf@plt
+; RV32-NEXT:    call llrintf
 ; RV32-NEXT:    sw a1, 212(sp)
 ; RV32-NEXT:    sw a0, 208(sp)
 ; RV32-NEXT:    flw fa0, 100(sp)
-; RV32-NEXT:    call llrintf@plt
+; RV32-NEXT:    call llrintf
 ; RV32-NEXT:    sw a1, 204(sp)
 ; RV32-NEXT:    sw a0, 200(sp)
 ; RV32-NEXT:    flw fa0, 96(sp)
-; RV32-NEXT:    call llrintf@plt
+; RV32-NEXT:    call llrintf
 ; RV32-NEXT:    sw a1, 196(sp)
 ; RV32-NEXT:    sw a0, 192(sp)
 ; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; RV32-NEXT:    addi a0, sp, 384
 ; RV32-NEXT:    vl4r.v v8, (a0) # Unknown-size Folded Reload
 ; RV32-NEXT:    vfmv.f.s fa0, v8
-; RV32-NEXT:    call llrintf@plt
+; RV32-NEXT:    call llrintf
 ; RV32-NEXT:    sw a1, 132(sp)
 ; RV32-NEXT:    sw a0, 128(sp)
 ; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
@@ -514,7 +514,7 @@ define <16 x i64> @llrint_v16i64_v16f32(<16 x float> %x) {
 ; RV32-NEXT:    vl4r.v v8, (a0) # Unknown-size Folded Reload
 ; RV32-NEXT:    vslidedown.vi v8, v8, 3
 ; RV32-NEXT:    vfmv.f.s fa0, v8
-; RV32-NEXT:    call llrintf@plt
+; RV32-NEXT:    call llrintf
 ; RV32-NEXT:    sw a1, 156(sp)
 ; RV32-NEXT:    sw a0, 152(sp)
 ; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
@@ -522,7 +522,7 @@ define <16 x i64> @llrint_v16i64_v16f32(<16 x float> %x) {
 ; RV32-NEXT:    vl4r.v v8, (a0) # Unknown-size Folded Reload
 ; RV32-NEXT:    vslidedown.vi v8, v8, 2
 ; RV32-NEXT:    vfmv.f.s fa0, v8
-; RV32-NEXT:    call llrintf@plt
+; RV32-NEXT:    call llrintf
 ; RV32-NEXT:    sw a1, 148(sp)
 ; RV32-NEXT:    sw a0, 144(sp)
 ; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
@@ -530,7 +530,7 @@ define <16 x i64> @llrint_v16i64_v16f32(<16 x float> %x) {
 ; RV32-NEXT:    vl4r.v v8, (a0) # Unknown-size Folded Reload
 ; RV32-NEXT:    vslidedown.vi v8, v8, 1
 ; RV32-NEXT:    vfmv.f.s fa0, v8
-; RV32-NEXT:    call llrintf@plt
+; RV32-NEXT:    call llrintf
 ; RV32-NEXT:    sw a1, 140(sp)
 ; RV32-NEXT:    sw a0, 136(sp)
 ; RV32-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
@@ -538,7 +538,7 @@ define <16 x i64> @llrint_v16i64_v16f32(<16 x float> %x) {
 ; RV32-NEXT:    vl4r.v v8, (a0) # Unknown-size Folded Reload
 ; RV32-NEXT:    vslidedown.vi v8, v8, 7
 ; RV32-NEXT:    vfmv.f.s fa0, v8
-; RV32-NEXT:    call llrintf@plt
+; RV32-NEXT:    call llrintf
 ; RV32-NEXT:    sw a1, 188(sp)
 ; RV32-NEXT:    sw a0, 184(sp)
 ; RV32-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
@@ -546,7 +546,7 @@ define <16 x i64> @llrint_v16i64_v16f32(<16 x float> %x) {
 ; RV32-NEXT:    vl4r.v v8, (a0) # Unknown-size Folded Reload
 ; RV32-NEXT:    vslidedown.vi v8, v8, 6
 ; RV32-NEXT:    vfmv.f.s fa0, v8
-; RV32-NEXT:    call llrintf@plt
+; RV32-NEXT:    call llrintf
 ; RV32-NEXT:    sw a1, 180(sp)
 ; RV32-NEXT:    sw a0, 176(sp)
 ; RV32-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
@@ -554,7 +554,7 @@ define <16 x i64> @llrint_v16i64_v16f32(<16 x float> %x) {
 ; RV32-NEXT:    vl4r.v v8, (a0) # Unknown-size Folded Reload
 ; RV32-NEXT:    vslidedown.vi v8, v8, 5
 ; RV32-NEXT:    vfmv.f.s fa0, v8
-; RV32-NEXT:    call llrintf@plt
+; RV32-NEXT:    call llrintf
 ; RV32-NEXT:    sw a1, 172(sp)
 ; RV32-NEXT:    sw a0, 168(sp)
 ; RV32-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
@@ -562,7 +562,7 @@ define <16 x i64> @llrint_v16i64_v16f32(<16 x float> %x) {
 ; RV32-NEXT:    vl4r.v v8, (a0) # Unknown-size Folded Reload
 ; RV32-NEXT:    vslidedown.vi v8, v8, 4
 ; RV32-NEXT:    vfmv.f.s fa0, v8
-; RV32-NEXT:    call llrintf@plt
+; RV32-NEXT:    call llrintf
 ; RV32-NEXT:    sw a1, 164(sp)
 ; RV32-NEXT:    sw a0, 160(sp)
 ; RV32-NEXT:    li a0, 32
@@ -668,7 +668,7 @@ define <1 x i64> @llrint_v1i64_v1f64(<1 x double> %x) {
 ; RV32-NEXT:    .cfi_offset ra, -4
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vfmv.f.s fa0, v8
-; RV32-NEXT:    call llrint@plt
+; RV32-NEXT:    call llrint
 ; RV32-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
 ; RV32-NEXT:    vmv.v.x v8, a0
 ; RV32-NEXT:    vslide1down.vx v8, v8, a1
@@ -703,7 +703,7 @@ define <2 x i64> @llrint_v2i64_v2f64(<2 x double> %x) {
 ; RV32-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vfmv.f.s fa0, v8
-; RV32-NEXT:    call llrint@plt
+; RV32-NEXT:    call llrint
 ; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
 ; RV32-NEXT:    vmv.v.x v8, a0
 ; RV32-NEXT:    vslide1down.vx v8, v8, a1
@@ -716,7 +716,7 @@ define <2 x i64> @llrint_v2i64_v2f64(<2 x double> %x) {
 ; RV32-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
 ; RV32-NEXT:    vslidedown.vi v8, v8, 1
 ; RV32-NEXT:    vfmv.f.s fa0, v8
-; RV32-NEXT:    call llrint@plt
+; RV32-NEXT:    call llrint
 ; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
 ; RV32-NEXT:    csrr a2, vlenb
 ; RV32-NEXT:    add a2, sp, a2
@@ -766,7 +766,7 @@ define <4 x i64> @llrint_v4i64_v4f64(<4 x double> %x) {
 ; RV32-NEXT:    vs2r.v v8, (a0) # Unknown-size Folded Spill
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vfmv.f.s fa0, v8
-; RV32-NEXT:    call llrint@plt
+; RV32-NEXT:    call llrint
 ; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
 ; RV32-NEXT:    vmv.v.x v8, a0
 ; RV32-NEXT:    vslide1down.vx v8, v8, a1
@@ -780,7 +780,7 @@ define <4 x i64> @llrint_v4i64_v4f64(<4 x double> %x) {
 ; RV32-NEXT:    vl2r.v v8, (a0) # Unknown-size Folded Reload
 ; RV32-NEXT:    vslidedown.vi v8, v8, 1
 ; RV32-NEXT:    vfmv.f.s fa0, v8
-; RV32-NEXT:    call llrint@plt
+; RV32-NEXT:    call llrint
 ; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
 ; RV32-NEXT:    addi a2, sp, 16
 ; RV32-NEXT:    vl2r.v v8, (a2) # Unknown-size Folded Reload
@@ -796,7 +796,7 @@ define <4 x i64> @llrint_v4i64_v4f64(<4 x double> %x) {
 ; RV32-NEXT:    vl2r.v v8, (a0) # Unknown-size Folded Reload
 ; RV32-NEXT:    vslidedown.vi v8, v8, 2
 ; RV32-NEXT:    vfmv.f.s fa0, v8
-; RV32-NEXT:    call llrint@plt
+; RV32-NEXT:    call llrint
 ; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
 ; RV32-NEXT:    addi a2, sp, 16
 ; RV32-NEXT:    vl2r.v v8, (a2) # Unknown-size Folded Reload
@@ -812,7 +812,7 @@ define <4 x i64> @llrint_v4i64_v4f64(<4 x double> %x) {
 ; RV32-NEXT:    vl2r.v v8, (a0) # Unknown-size Folded Reload
 ; RV32-NEXT:    vslidedown.vi v8, v8, 3
 ; RV32-NEXT:    vfmv.f.s fa0, v8
-; RV32-NEXT:    call llrint@plt
+; RV32-NEXT:    call llrint
 ; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
 ; RV32-NEXT:    addi a2, sp, 16
 ; RV32-NEXT:    vl2r.v v8, (a2) # Unknown-size Folded Reload
@@ -875,26 +875,26 @@ define <8 x i64> @llrint_v8i64_v8f64(<8 x double> %x) {
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
 ; RV32-NEXT:    vse64.v v8, (a0)
 ; RV32-NEXT:    fld fa0, 120(sp)
-; RV32-NEXT:    call llrint@plt
+; RV32-NEXT:    call llrint
 ; RV32-NEXT:    sw a1, 188(sp)
 ; RV32-NEXT:    sw a0, 184(sp)
 ; RV32-NEXT:    fld fa0, 112(sp)
-; RV32-NEXT:    call llrint@plt
+; RV32-NEXT:    call llrint
 ; RV32-NEXT:    sw a1, 180(sp)
 ; RV32-NEXT:    sw a0, 176(sp)
 ; RV32-NEXT:    fld fa0, 104(sp)
-; RV32-NEXT:    call llrint@plt
+; RV32-NEXT:    call llrint
 ; RV32-NEXT:    sw a1, 172(sp)
 ; RV32-NEXT:    sw a0, 168(sp)
 ; RV32-NEXT:    fld fa0, 96(sp)
-; RV32-NEXT:    call llrint@plt
+; RV32-NEXT:    call llrint
 ; RV32-NEXT:    sw a1, 164(sp)
 ; RV32-NEXT:    sw a0, 160(sp)
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    addi a0, sp, 256
 ; RV32-NEXT:    vl4r.v v8, (a0) # Unknown-size Folded Reload
 ; RV32-NEXT:    vfmv.f.s fa0, v8
-; RV32-NEXT:    call llrint@plt
+; RV32-NEXT:    call llrint
 ; RV32-NEXT:    sw a1, 132(sp)
 ; RV32-NEXT:    sw a0, 128(sp)
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
@@ -902,7 +902,7 @@ define <8 x i64> @llrint_v8i64_v8f64(<8 x double> %x) {
 ; RV32-NEXT:    vl4r.v v8, (a0) # Unknown-size Folded Reload
 ; RV32-NEXT:    vslidedown.vi v8, v8, 1
 ; RV32-NEXT:    vfmv.f.s fa0, v8
-; RV32-NEXT:    call llrint@plt
+; RV32-NEXT:    call llrint
 ; RV32-NEXT:    sw a1, 140(sp)
 ; RV32-NEXT:    sw a0, 136(sp)
 ; RV32-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
@@ -910,7 +910,7 @@ define <8 x i64> @llrint_v8i64_v8f64(<8 x double> %x) {
 ; RV32-NEXT:    vl4r.v v8, (a0) # Unknown-size Folded Reload
 ; RV32-NEXT:    vslidedown.vi v8, v8, 3
 ; RV32-NEXT:    vfmv.f.s fa0, v8
-; RV32-NEXT:    call llrint@plt
+; RV32-NEXT:    call llrint
 ; RV32-NEXT:    sw a1, 156(sp)
 ; RV32-NEXT:    sw a0, 152(sp)
 ; RV32-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
@@ -918,7 +918,7 @@ define <8 x i64> @llrint_v8i64_v8f64(<8 x double> %x) {
 ; RV32-NEXT:    vl4r.v v8, (a0) # Unknown-size Folded Reload
 ; RV32-NEXT:    vslidedown.vi v8, v8, 2
 ; RV32-NEXT:    vfmv.f.s fa0, v8
-; RV32-NEXT:    call llrint@plt
+; RV32-NEXT:    call llrint
 ; RV32-NEXT:    sw a1, 148(sp)
 ; RV32-NEXT:    sw a0, 144(sp)
 ; RV32-NEXT:    addi a0, sp, 128
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll
index 4e576f12e10768..34339a6f613f69 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll
@@ -1429,7 +1429,7 @@ define i8 @vpreduce_mul_v1i8(i8 %s, <1 x i8> %v, <1 x i1> %m, i32 zeroext %evl)
 ; RV32-NEXT:    vmerge.vvm v8, v9, v8, v0
 ; RV32-NEXT:    vmv.x.s a0, v8
 ; RV32-NEXT:    mv a1, a2
-; RV32-NEXT:    call __mulsi3@plt
+; RV32-NEXT:    call __mulsi3
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
@@ -1450,7 +1450,7 @@ define i8 @vpreduce_mul_v1i8(i8 %s, <1 x i8> %v, <1 x i1> %m, i32 zeroext %evl)
 ; RV64-NEXT:    vmerge.vvm v8, v9, v8, v0
 ; RV64-NEXT:    vmv.x.s a0, v8
 ; RV64-NEXT:    mv a1, a2
-; RV64-NEXT:    call __muldi3@plt
+; RV64-NEXT:    call __muldi3
 ; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    addi sp, sp, 16
 ; RV64-NEXT:    ret
@@ -1479,7 +1479,7 @@ define signext i8 @vpreduce_mul_v2i8(i8 signext %s, <2 x i8> %v, <2 x i1> %m, i3
 ; RV32-NEXT:    vmul.vv v8, v8, v9
 ; RV32-NEXT:    vmv.x.s a0, v8
 ; RV32-NEXT:    mv a1, a2
-; RV32-NEXT:    call __mulsi3@plt
+; RV32-NEXT:    call __mulsi3
 ; RV32-NEXT:    slli a0, a0, 24
 ; RV32-NEXT:    srai a0, a0, 24
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -1504,7 +1504,7 @@ define signext i8 @vpreduce_mul_v2i8(i8 signext %s, <2 x i8> %v, <2 x i1> %m, i3
 ; RV64-NEXT:    vmul.vv v8, v8, v9
 ; RV64-NEXT:    vmv.x.s a0, v8
 ; RV64-NEXT:    mv a1, a2
-; RV64-NEXT:    call __muldi3@plt
+; RV64-NEXT:    call __muldi3
 ; RV64-NEXT:    slli a0, a0, 56
 ; RV64-NEXT:    srai a0, a0, 56
 ; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -1537,7 +1537,7 @@ define signext i8 @vpreduce_mul_v4i8(i8 signext %s, <4 x i8> %v, <4 x i1> %m, i3
 ; RV32-NEXT:    vmul.vv v8, v8, v9
 ; RV32-NEXT:    vmv.x.s a0, v8
 ; RV32-NEXT:    mv a1, a2
-; RV32-NEXT:    call __mulsi3@plt
+; RV32-NEXT:    call __mulsi3
 ; RV32-NEXT:    slli a0, a0, 24
 ; RV32-NEXT:    srai a0, a0, 24
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -1564,7 +1564,7 @@ define signext i8 @vpreduce_mul_v4i8(i8 signext %s, <4 x i8> %v, <4 x i1> %m, i3
 ; RV64-NEXT:    vmul.vv v8, v8, v9
 ; RV64-NEXT:    vmv.x.s a0, v8
 ; RV64-NEXT:    mv a1, a2
-; RV64-NEXT:    call __muldi3@plt
+; RV64-NEXT:    call __muldi3
 ; RV64-NEXT:    slli a0, a0, 56
 ; RV64-NEXT:    srai a0, a0, 56
 ; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -1599,7 +1599,7 @@ define signext i8 @vpreduce_mul_v8i8(i8 signext %s, <8 x i8> %v, <8 x i1> %m, i3
 ; RV32-NEXT:    vmul.vv v8, v8, v9
 ; RV32-NEXT:    vmv.x.s a0, v8
 ; RV32-NEXT:    mv a1, a2
-; RV32-NEXT:    call __mulsi3@plt
+; RV32-NEXT:    call __mulsi3
 ; RV32-NEXT:    slli a0, a0, 24
 ; RV32-NEXT:    srai a0, a0, 24
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -1628,7 +1628,7 @@ define signext i8 @vpreduce_mul_v8i8(i8 signext %s, <8 x i8> %v, <8 x i1> %m, i3
 ; RV64-NEXT:    vmul.vv v8, v8, v9
 ; RV64-NEXT:    vmv.x.s a0, v8
 ; RV64-NEXT:    mv a1, a2
-; RV64-NEXT:    call __muldi3@plt
+; RV64-NEXT:    call __muldi3
 ; RV64-NEXT:    slli a0, a0, 56
 ; RV64-NEXT:    srai a0, a0, 56
 ; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -1665,7 +1665,7 @@ define signext i8 @vpreduce_mul_v16i8(i8 signext %s, <16 x i8> %v, <16 x i1> %m,
 ; RV32-NEXT:    vmul.vv v8, v8, v9
 ; RV32-NEXT:    vmv.x.s a0, v8
 ; RV32-NEXT:    mv a1, a2
-; RV32-NEXT:    call __mulsi3@plt
+; RV32-NEXT:    call __mulsi3
 ; RV32-NEXT:    slli a0, a0, 24
 ; RV32-NEXT:    srai a0, a0, 24
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -1696,7 +1696,7 @@ define signext i8 @vpreduce_mul_v16i8(i8 signext %s, <16 x i8> %v, <16 x i1> %m,
 ; RV64-NEXT:    vmul.vv v8, v8, v9
 ; RV64-NEXT:    vmv.x.s a0, v8
 ; RV64-NEXT:    mv a1, a2
-; RV64-NEXT:    call __muldi3@plt
+; RV64-NEXT:    call __muldi3
 ; RV64-NEXT:    slli a0, a0, 56
 ; RV64-NEXT:    srai a0, a0, 56
 ; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -1736,7 +1736,7 @@ define signext i8 @vpreduce_mul_v32i8(i8 signext %s, <32 x i8> %v, <32 x i1> %m,
 ; RV32-NEXT:    vmul.vv v8, v8, v10
 ; RV32-NEXT:    vmv.x.s a0, v8
 ; RV32-NEXT:    mv a1, a2
-; RV32-NEXT:    call __mulsi3@plt
+; RV32-NEXT:    call __mulsi3
 ; RV32-NEXT:    slli a0, a0, 24
 ; RV32-NEXT:    srai a0, a0, 24
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -1770,7 +1770,7 @@ define signext i8 @vpreduce_mul_v32i8(i8 signext %s, <32 x i8> %v, <32 x i1> %m,
 ; RV64-NEXT:    vmul.vv v8, v8, v10
 ; RV64-NEXT:    vmv.x.s a0, v8
 ; RV64-NEXT:    mv a1, a2
-; RV64-NEXT:    call __muldi3@plt
+; RV64-NEXT:    call __muldi3
 ; RV64-NEXT:    slli a0, a0, 56
 ; RV64-NEXT:    srai a0, a0, 56
 ; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -1820,7 +1820,7 @@ define signext i8 @vpreduce_mul_v64i8(i8 signext %s, <64 x i8> %v, <64 x i1> %m,
 ; RV32-NEXT:    vmul.vv v8, v8, v12
 ; RV32-NEXT:    vmv.x.s a0, v8
 ; RV32-NEXT:    mv a1, a2
-; RV32-NEXT:    call __mulsi3@plt
+; RV32-NEXT:    call __mulsi3
 ; RV32-NEXT:    slli a0, a0, 24
 ; RV32-NEXT:    srai a0, a0, 24
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -1864,7 +1864,7 @@ define signext i8 @vpreduce_mul_v64i8(i8 signext %s, <64 x i8> %v, <64 x i1> %m,
 ; RV64-NEXT:    vmul.vv v8, v8, v12
 ; RV64-NEXT:    vmv.x.s a0, v8
 ; RV64-NEXT:    mv a1, a2
-; RV64-NEXT:    call __muldi3@plt
+; RV64-NEXT:    call __muldi3
 ; RV64-NEXT:    slli a0, a0, 56
 ; RV64-NEXT:    srai a0, a0, 56
 ; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/rvv/fpclamptosat_vec.ll b/llvm/test/CodeGen/RISCV/rvv/fpclamptosat_vec.ll
index f1a82b9e427e25..783738f918d061 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fpclamptosat_vec.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fpclamptosat_vec.ll
@@ -352,17 +352,17 @@ define <4 x i32> @stest_f16i32(<4 x half> %x) {
 ; CHECK-NOV-NEXT:    lhu a1, 16(a1)
 ; CHECK-NOV-NEXT:    mv s0, a0
 ; CHECK-NOV-NEXT:    fmv.w.x fa0, a1
-; CHECK-NOV-NEXT:    call __extendhfsf2@plt
+; CHECK-NOV-NEXT:    call __extendhfsf2
 ; CHECK-NOV-NEXT:    fmv.s fs2, fa0
 ; CHECK-NOV-NEXT:    fmv.w.x fa0, s3
-; CHECK-NOV-NEXT:    call __extendhfsf2@plt
+; CHECK-NOV-NEXT:    call __extendhfsf2
 ; CHECK-NOV-NEXT:    fmv.s fs1, fa0
 ; CHECK-NOV-NEXT:    fmv.w.x fa0, s2
-; CHECK-NOV-NEXT:    call __extendhfsf2@plt
+; CHECK-NOV-NEXT:    call __extendhfsf2
 ; CHECK-NOV-NEXT:    fmv.s fs0, fa0
 ; CHECK-NOV-NEXT:    fmv.w.x fa0, s1
 ; CHECK-NOV-NEXT:    fcvt.l.s s1, fs2, rtz
-; CHECK-NOV-NEXT:    call __extendhfsf2@plt
+; CHECK-NOV-NEXT:    call __extendhfsf2
 ; CHECK-NOV-NEXT:    fcvt.l.s a0, fa0, rtz
 ; CHECK-NOV-NEXT:    lui a1, 524288
 ; CHECK-NOV-NEXT:    addiw a4, a1, -1
@@ -446,14 +446,14 @@ define <4 x i32> @stest_f16i32(<4 x half> %x) {
 ; CHECK-V-NEXT:    lhu s2, 0(a0)
 ; CHECK-V-NEXT:    lhu a0, 8(a0)
 ; CHECK-V-NEXT:    fmv.w.x fa0, a0
-; CHECK-V-NEXT:    call __extendhfsf2@plt
+; CHECK-V-NEXT:    call __extendhfsf2
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
 ; CHECK-V-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
 ; CHECK-V-NEXT:    addi a0, sp, 16
 ; CHECK-V-NEXT:    vs2r.v v8, (a0) # Unknown-size Folded Spill
 ; CHECK-V-NEXT:    fmv.w.x fa0, s2
-; CHECK-V-NEXT:    call __extendhfsf2@plt
+; CHECK-V-NEXT:    call __extendhfsf2
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
 ; CHECK-V-NEXT:    vsetivli zero, 2, e64, m2, tu, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
@@ -466,7 +466,7 @@ define <4 x i32> @stest_f16i32(<4 x half> %x) {
 ; CHECK-V-NEXT:    addi a0, a0, 16
 ; CHECK-V-NEXT:    vs2r.v v8, (a0) # Unknown-size Folded Spill
 ; CHECK-V-NEXT:    fmv.w.x fa0, s1
-; CHECK-V-NEXT:    call __extendhfsf2@plt
+; CHECK-V-NEXT:    call __extendhfsf2
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
 ; CHECK-V-NEXT:    vsetivli zero, 3, e64, m2, tu, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
@@ -482,7 +482,7 @@ define <4 x i32> @stest_f16i32(<4 x half> %x) {
 ; CHECK-V-NEXT:    addi a0, a0, 16
 ; CHECK-V-NEXT:    vs2r.v v10, (a0) # Unknown-size Folded Spill
 ; CHECK-V-NEXT:    fmv.w.x fa0, s0
-; CHECK-V-NEXT:    call __extendhfsf2@plt
+; CHECK-V-NEXT:    call __extendhfsf2
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
 ; CHECK-V-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
@@ -540,17 +540,17 @@ define <4 x i32> @utesth_f16i32(<4 x half> %x) {
 ; CHECK-NOV-NEXT:    lhu a1, 8(a1)
 ; CHECK-NOV-NEXT:    mv s0, a0
 ; CHECK-NOV-NEXT:    fmv.w.x fa0, a1
-; CHECK-NOV-NEXT:    call __extendhfsf2@plt
+; CHECK-NOV-NEXT:    call __extendhfsf2
 ; CHECK-NOV-NEXT:    fmv.s fs2, fa0
 ; CHECK-NOV-NEXT:    fmv.w.x fa0, s3
-; CHECK-NOV-NEXT:    call __extendhfsf2@plt
+; CHECK-NOV-NEXT:    call __extendhfsf2
 ; CHECK-NOV-NEXT:    fmv.s fs1, fa0
 ; CHECK-NOV-NEXT:    fmv.w.x fa0, s2
-; CHECK-NOV-NEXT:    call __extendhfsf2@plt
+; CHECK-NOV-NEXT:    call __extendhfsf2
 ; CHECK-NOV-NEXT:    fmv.s fs0, fa0
 ; CHECK-NOV-NEXT:    fmv.w.x fa0, s1
 ; CHECK-NOV-NEXT:    fcvt.lu.s s1, fs2, rtz
-; CHECK-NOV-NEXT:    call __extendhfsf2@plt
+; CHECK-NOV-NEXT:    call __extendhfsf2
 ; CHECK-NOV-NEXT:    fcvt.lu.s a0, fa0, rtz
 ; CHECK-NOV-NEXT:    li a1, -1
 ; CHECK-NOV-NEXT:    srli a1, a1, 32
@@ -614,14 +614,14 @@ define <4 x i32> @utesth_f16i32(<4 x half> %x) {
 ; CHECK-V-NEXT:    lhu s2, 0(a0)
 ; CHECK-V-NEXT:    lhu a0, 8(a0)
 ; CHECK-V-NEXT:    fmv.w.x fa0, a0
-; CHECK-V-NEXT:    call __extendhfsf2@plt
+; CHECK-V-NEXT:    call __extendhfsf2
 ; CHECK-V-NEXT:    fcvt.lu.s a0, fa0, rtz
 ; CHECK-V-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
 ; CHECK-V-NEXT:    addi a0, sp, 16
 ; CHECK-V-NEXT:    vs2r.v v8, (a0) # Unknown-size Folded Spill
 ; CHECK-V-NEXT:    fmv.w.x fa0, s2
-; CHECK-V-NEXT:    call __extendhfsf2@plt
+; CHECK-V-NEXT:    call __extendhfsf2
 ; CHECK-V-NEXT:    fcvt.lu.s a0, fa0, rtz
 ; CHECK-V-NEXT:    vsetivli zero, 2, e64, m2, tu, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
@@ -634,7 +634,7 @@ define <4 x i32> @utesth_f16i32(<4 x half> %x) {
 ; CHECK-V-NEXT:    addi a0, a0, 16
 ; CHECK-V-NEXT:    vs2r.v v8, (a0) # Unknown-size Folded Spill
 ; CHECK-V-NEXT:    fmv.w.x fa0, s1
-; CHECK-V-NEXT:    call __extendhfsf2@plt
+; CHECK-V-NEXT:    call __extendhfsf2
 ; CHECK-V-NEXT:    fcvt.lu.s a0, fa0, rtz
 ; CHECK-V-NEXT:    vsetivli zero, 3, e64, m2, tu, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
@@ -650,7 +650,7 @@ define <4 x i32> @utesth_f16i32(<4 x half> %x) {
 ; CHECK-V-NEXT:    addi a0, a0, 16
 ; CHECK-V-NEXT:    vs2r.v v10, (a0) # Unknown-size Folded Spill
 ; CHECK-V-NEXT:    fmv.w.x fa0, s0
-; CHECK-V-NEXT:    call __extendhfsf2@plt
+; CHECK-V-NEXT:    call __extendhfsf2
 ; CHECK-V-NEXT:    fcvt.lu.s a0, fa0, rtz
 ; CHECK-V-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
@@ -706,17 +706,17 @@ define <4 x i32> @ustest_f16i32(<4 x half> %x) {
 ; CHECK-NOV-NEXT:    lhu a1, 8(a1)
 ; CHECK-NOV-NEXT:    mv s0, a0
 ; CHECK-NOV-NEXT:    fmv.w.x fa0, a1
-; CHECK-NOV-NEXT:    call __extendhfsf2@plt
+; CHECK-NOV-NEXT:    call __extendhfsf2
 ; CHECK-NOV-NEXT:    fmv.s fs2, fa0
 ; CHECK-NOV-NEXT:    fmv.w.x fa0, s3
-; CHECK-NOV-NEXT:    call __extendhfsf2@plt
+; CHECK-NOV-NEXT:    call __extendhfsf2
 ; CHECK-NOV-NEXT:    fmv.s fs1, fa0
 ; CHECK-NOV-NEXT:    fmv.w.x fa0, s2
-; CHECK-NOV-NEXT:    call __extendhfsf2@plt
+; CHECK-NOV-NEXT:    call __extendhfsf2
 ; CHECK-NOV-NEXT:    fmv.s fs0, fa0
 ; CHECK-NOV-NEXT:    fmv.w.x fa0, s1
 ; CHECK-NOV-NEXT:    fcvt.l.s s1, fs2, rtz
-; CHECK-NOV-NEXT:    call __extendhfsf2@plt
+; CHECK-NOV-NEXT:    call __extendhfsf2
 ; CHECK-NOV-NEXT:    fcvt.l.s a0, fa0, rtz
 ; CHECK-NOV-NEXT:    li a2, -1
 ; CHECK-NOV-NEXT:    srli a2, a2, 32
@@ -792,14 +792,14 @@ define <4 x i32> @ustest_f16i32(<4 x half> %x) {
 ; CHECK-V-NEXT:    lhu s2, 0(a0)
 ; CHECK-V-NEXT:    lhu a0, 8(a0)
 ; CHECK-V-NEXT:    fmv.w.x fa0, a0
-; CHECK-V-NEXT:    call __extendhfsf2@plt
+; CHECK-V-NEXT:    call __extendhfsf2
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
 ; CHECK-V-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
 ; CHECK-V-NEXT:    addi a0, sp, 16
 ; CHECK-V-NEXT:    vs2r.v v8, (a0) # Unknown-size Folded Spill
 ; CHECK-V-NEXT:    fmv.w.x fa0, s2
-; CHECK-V-NEXT:    call __extendhfsf2@plt
+; CHECK-V-NEXT:    call __extendhfsf2
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
 ; CHECK-V-NEXT:    vsetivli zero, 2, e64, m2, tu, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
@@ -812,7 +812,7 @@ define <4 x i32> @ustest_f16i32(<4 x half> %x) {
 ; CHECK-V-NEXT:    addi a0, a0, 16
 ; CHECK-V-NEXT:    vs2r.v v8, (a0) # Unknown-size Folded Spill
 ; CHECK-V-NEXT:    fmv.w.x fa0, s1
-; CHECK-V-NEXT:    call __extendhfsf2@plt
+; CHECK-V-NEXT:    call __extendhfsf2
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
 ; CHECK-V-NEXT:    vsetivli zero, 3, e64, m2, tu, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
@@ -828,7 +828,7 @@ define <4 x i32> @ustest_f16i32(<4 x half> %x) {
 ; CHECK-V-NEXT:    addi a0, a0, 16
 ; CHECK-V-NEXT:    vs2r.v v10, (a0) # Unknown-size Folded Spill
 ; CHECK-V-NEXT:    fmv.w.x fa0, s0
-; CHECK-V-NEXT:    call __extendhfsf2@plt
+; CHECK-V-NEXT:    call __extendhfsf2
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
 ; CHECK-V-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
@@ -1236,29 +1236,29 @@ define <8 x i16> @stest_f16i16(<8 x half> %x) {
 ; CHECK-NOV-NEXT:    lhu a1, 48(a1)
 ; CHECK-NOV-NEXT:    mv s0, a0
 ; CHECK-NOV-NEXT:    fmv.w.x fa0, a1
-; CHECK-NOV-NEXT:    call __extendhfsf2@plt
+; CHECK-NOV-NEXT:    call __extendhfsf2
 ; CHECK-NOV-NEXT:    fmv.s fs6, fa0
 ; CHECK-NOV-NEXT:    fmv.w.x fa0, s7
-; CHECK-NOV-NEXT:    call __extendhfsf2@plt
+; CHECK-NOV-NEXT:    call __extendhfsf2
 ; CHECK-NOV-NEXT:    fmv.s fs5, fa0
 ; CHECK-NOV-NEXT:    fmv.w.x fa0, s6
-; CHECK-NOV-NEXT:    call __extendhfsf2@plt
+; CHECK-NOV-NEXT:    call __extendhfsf2
 ; CHECK-NOV-NEXT:    fmv.s fs4, fa0
 ; CHECK-NOV-NEXT:    fmv.w.x fa0, s5
-; CHECK-NOV-NEXT:    call __extendhfsf2@plt
+; CHECK-NOV-NEXT:    call __extendhfsf2
 ; CHECK-NOV-NEXT:    fmv.s fs3, fa0
 ; CHECK-NOV-NEXT:    fmv.w.x fa0, s4
-; CHECK-NOV-NEXT:    call __extendhfsf2@plt
+; CHECK-NOV-NEXT:    call __extendhfsf2
 ; CHECK-NOV-NEXT:    fmv.s fs2, fa0
 ; CHECK-NOV-NEXT:    fmv.w.x fa0, s3
-; CHECK-NOV-NEXT:    call __extendhfsf2@plt
+; CHECK-NOV-NEXT:    call __extendhfsf2
 ; CHECK-NOV-NEXT:    fmv.s fs1, fa0
 ; CHECK-NOV-NEXT:    fmv.w.x fa0, s2
-; CHECK-NOV-NEXT:    call __extendhfsf2@plt
+; CHECK-NOV-NEXT:    call __extendhfsf2
 ; CHECK-NOV-NEXT:    fmv.s fs0, fa0
 ; CHECK-NOV-NEXT:    fmv.w.x fa0, s1
 ; CHECK-NOV-NEXT:    fcvt.l.s s1, fs6, rtz
-; CHECK-NOV-NEXT:    call __extendhfsf2@plt
+; CHECK-NOV-NEXT:    call __extendhfsf2
 ; CHECK-NOV-NEXT:    fcvt.l.s a0, fa0, rtz
 ; CHECK-NOV-NEXT:    lui a7, 8
 ; CHECK-NOV-NEXT:    addiw a7, a7, -1
@@ -1416,14 +1416,14 @@ define <8 x i16> @stest_f16i16(<8 x half> %x) {
 ; CHECK-V-NEXT:    lhu s6, 0(a0)
 ; CHECK-V-NEXT:    lhu a0, 8(a0)
 ; CHECK-V-NEXT:    fmv.w.x fa0, a0
-; CHECK-V-NEXT:    call __extendhfsf2@plt
+; CHECK-V-NEXT:    call __extendhfsf2
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
 ; CHECK-V-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
 ; CHECK-V-NEXT:    addi a0, sp, 16
 ; CHECK-V-NEXT:    vs2r.v v8, (a0) # Unknown-size Folded Spill
 ; CHECK-V-NEXT:    fmv.w.x fa0, s6
-; CHECK-V-NEXT:    call __extendhfsf2@plt
+; CHECK-V-NEXT:    call __extendhfsf2
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
 ; CHECK-V-NEXT:    vsetivli zero, 2, e32, m2, tu, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
@@ -1432,7 +1432,7 @@ define <8 x i16> @stest_f16i16(<8 x half> %x) {
 ; CHECK-V-NEXT:    vslideup.vi v8, v10, 1
 ; CHECK-V-NEXT:    vs2r.v v8, (a0) # Unknown-size Folded Spill
 ; CHECK-V-NEXT:    fmv.w.x fa0, s5
-; CHECK-V-NEXT:    call __extendhfsf2@plt
+; CHECK-V-NEXT:    call __extendhfsf2
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
 ; CHECK-V-NEXT:    vsetivli zero, 3, e32, m2, tu, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
@@ -1441,7 +1441,7 @@ define <8 x i16> @stest_f16i16(<8 x half> %x) {
 ; CHECK-V-NEXT:    vslideup.vi v10, v8, 2
 ; CHECK-V-NEXT:    vs2r.v v10, (a0) # Unknown-size Folded Spill
 ; CHECK-V-NEXT:    fmv.w.x fa0, s4
-; CHECK-V-NEXT:    call __extendhfsf2@plt
+; CHECK-V-NEXT:    call __extendhfsf2
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
 ; CHECK-V-NEXT:    vsetivli zero, 4, e32, m2, tu, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
@@ -1450,7 +1450,7 @@ define <8 x i16> @stest_f16i16(<8 x half> %x) {
 ; CHECK-V-NEXT:    vslideup.vi v10, v8, 3
 ; CHECK-V-NEXT:    vs2r.v v10, (a0) # Unknown-size Folded Spill
 ; CHECK-V-NEXT:    fmv.w.x fa0, s3
-; CHECK-V-NEXT:    call __extendhfsf2@plt
+; CHECK-V-NEXT:    call __extendhfsf2
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
 ; CHECK-V-NEXT:    vsetivli zero, 5, e32, m2, tu, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
@@ -1459,7 +1459,7 @@ define <8 x i16> @stest_f16i16(<8 x half> %x) {
 ; CHECK-V-NEXT:    vslideup.vi v10, v8, 4
 ; CHECK-V-NEXT:    vs2r.v v10, (a0) # Unknown-size Folded Spill
 ; CHECK-V-NEXT:    fmv.w.x fa0, s2
-; CHECK-V-NEXT:    call __extendhfsf2@plt
+; CHECK-V-NEXT:    call __extendhfsf2
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
 ; CHECK-V-NEXT:    vsetivli zero, 6, e32, m2, tu, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
@@ -1468,7 +1468,7 @@ define <8 x i16> @stest_f16i16(<8 x half> %x) {
 ; CHECK-V-NEXT:    vslideup.vi v10, v8, 5
 ; CHECK-V-NEXT:    vs2r.v v10, (a0) # Unknown-size Folded Spill
 ; CHECK-V-NEXT:    fmv.w.x fa0, s1
-; CHECK-V-NEXT:    call __extendhfsf2@plt
+; CHECK-V-NEXT:    call __extendhfsf2
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
 ; CHECK-V-NEXT:    vsetivli zero, 7, e32, m2, tu, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
@@ -1477,7 +1477,7 @@ define <8 x i16> @stest_f16i16(<8 x half> %x) {
 ; CHECK-V-NEXT:    vslideup.vi v10, v8, 6
 ; CHECK-V-NEXT:    vs2r.v v10, (a0) # Unknown-size Folded Spill
 ; CHECK-V-NEXT:    fmv.w.x fa0, s0
-; CHECK-V-NEXT:    call __extendhfsf2@plt
+; CHECK-V-NEXT:    call __extendhfsf2
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
 ; CHECK-V-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
@@ -1556,29 +1556,29 @@ define <8 x i16> @utesth_f16i16(<8 x half> %x) {
 ; CHECK-NOV-NEXT:    lhu a1, 8(a1)
 ; CHECK-NOV-NEXT:    mv s0, a0
 ; CHECK-NOV-NEXT:    fmv.w.x fa0, a1
-; CHECK-NOV-NEXT:    call __extendhfsf2@plt
+; CHECK-NOV-NEXT:    call __extendhfsf2
 ; CHECK-NOV-NEXT:    fmv.s fs6, fa0
 ; CHECK-NOV-NEXT:    fmv.w.x fa0, s7
-; CHECK-NOV-NEXT:    call __extendhfsf2@plt
+; CHECK-NOV-NEXT:    call __extendhfsf2
 ; CHECK-NOV-NEXT:    fmv.s fs5, fa0
 ; CHECK-NOV-NEXT:    fmv.w.x fa0, s6
-; CHECK-NOV-NEXT:    call __extendhfsf2@plt
+; CHECK-NOV-NEXT:    call __extendhfsf2
 ; CHECK-NOV-NEXT:    fmv.s fs4, fa0
 ; CHECK-NOV-NEXT:    fmv.w.x fa0, s5
-; CHECK-NOV-NEXT:    call __extendhfsf2@plt
+; CHECK-NOV-NEXT:    call __extendhfsf2
 ; CHECK-NOV-NEXT:    fmv.s fs3, fa0
 ; CHECK-NOV-NEXT:    fmv.w.x fa0, s4
-; CHECK-NOV-NEXT:    call __extendhfsf2@plt
+; CHECK-NOV-NEXT:    call __extendhfsf2
 ; CHECK-NOV-NEXT:    fmv.s fs2, fa0
 ; CHECK-NOV-NEXT:    fmv.w.x fa0, s3
-; CHECK-NOV-NEXT:    call __extendhfsf2@plt
+; CHECK-NOV-NEXT:    call __extendhfsf2
 ; CHECK-NOV-NEXT:    fmv.s fs1, fa0
 ; CHECK-NOV-NEXT:    fmv.w.x fa0, s2
-; CHECK-NOV-NEXT:    call __extendhfsf2@plt
+; CHECK-NOV-NEXT:    call __extendhfsf2
 ; CHECK-NOV-NEXT:    fmv.s fs0, fa0
 ; CHECK-NOV-NEXT:    fmv.w.x fa0, s1
 ; CHECK-NOV-NEXT:    fcvt.lu.s s1, fs6, rtz
-; CHECK-NOV-NEXT:    call __extendhfsf2@plt
+; CHECK-NOV-NEXT:    call __extendhfsf2
 ; CHECK-NOV-NEXT:    fcvt.lu.s a0, fa0, rtz
 ; CHECK-NOV-NEXT:    lui a1, 16
 ; CHECK-NOV-NEXT:    addiw a1, a1, -1
@@ -1694,14 +1694,14 @@ define <8 x i16> @utesth_f16i16(<8 x half> %x) {
 ; CHECK-V-NEXT:    lhu s6, 0(a0)
 ; CHECK-V-NEXT:    lhu a0, 8(a0)
 ; CHECK-V-NEXT:    fmv.w.x fa0, a0
-; CHECK-V-NEXT:    call __extendhfsf2@plt
+; CHECK-V-NEXT:    call __extendhfsf2
 ; CHECK-V-NEXT:    fcvt.lu.s a0, fa0, rtz
 ; CHECK-V-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
 ; CHECK-V-NEXT:    addi a0, sp, 16
 ; CHECK-V-NEXT:    vs2r.v v8, (a0) # Unknown-size Folded Spill
 ; CHECK-V-NEXT:    fmv.w.x fa0, s6
-; CHECK-V-NEXT:    call __extendhfsf2@plt
+; CHECK-V-NEXT:    call __extendhfsf2
 ; CHECK-V-NEXT:    fcvt.lu.s a0, fa0, rtz
 ; CHECK-V-NEXT:    vsetivli zero, 2, e32, m2, tu, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
@@ -1710,7 +1710,7 @@ define <8 x i16> @utesth_f16i16(<8 x half> %x) {
 ; CHECK-V-NEXT:    vslideup.vi v8, v10, 1
 ; CHECK-V-NEXT:    vs2r.v v8, (a0) # Unknown-size Folded Spill
 ; CHECK-V-NEXT:    fmv.w.x fa0, s5
-; CHECK-V-NEXT:    call __extendhfsf2@plt
+; CHECK-V-NEXT:    call __extendhfsf2
 ; CHECK-V-NEXT:    fcvt.lu.s a0, fa0, rtz
 ; CHECK-V-NEXT:    vsetivli zero, 3, e32, m2, tu, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
@@ -1719,7 +1719,7 @@ define <8 x i16> @utesth_f16i16(<8 x half> %x) {
 ; CHECK-V-NEXT:    vslideup.vi v10, v8, 2
 ; CHECK-V-NEXT:    vs2r.v v10, (a0) # Unknown-size Folded Spill
 ; CHECK-V-NEXT:    fmv.w.x fa0, s4
-; CHECK-V-NEXT:    call __extendhfsf2@plt
+; CHECK-V-NEXT:    call __extendhfsf2
 ; CHECK-V-NEXT:    fcvt.lu.s a0, fa0, rtz
 ; CHECK-V-NEXT:    vsetivli zero, 4, e32, m2, tu, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
@@ -1728,7 +1728,7 @@ define <8 x i16> @utesth_f16i16(<8 x half> %x) {
 ; CHECK-V-NEXT:    vslideup.vi v10, v8, 3
 ; CHECK-V-NEXT:    vs2r.v v10, (a0) # Unknown-size Folded Spill
 ; CHECK-V-NEXT:    fmv.w.x fa0, s3
-; CHECK-V-NEXT:    call __extendhfsf2@plt
+; CHECK-V-NEXT:    call __extendhfsf2
 ; CHECK-V-NEXT:    fcvt.lu.s a0, fa0, rtz
 ; CHECK-V-NEXT:    vsetivli zero, 5, e32, m2, tu, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
@@ -1737,7 +1737,7 @@ define <8 x i16> @utesth_f16i16(<8 x half> %x) {
 ; CHECK-V-NEXT:    vslideup.vi v10, v8, 4
 ; CHECK-V-NEXT:    vs2r.v v10, (a0) # Unknown-size Folded Spill
 ; CHECK-V-NEXT:    fmv.w.x fa0, s2
-; CHECK-V-NEXT:    call __extendhfsf2@plt
+; CHECK-V-NEXT:    call __extendhfsf2
 ; CHECK-V-NEXT:    fcvt.lu.s a0, fa0, rtz
 ; CHECK-V-NEXT:    vsetivli zero, 6, e32, m2, tu, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
@@ -1746,7 +1746,7 @@ define <8 x i16> @utesth_f16i16(<8 x half> %x) {
 ; CHECK-V-NEXT:    vslideup.vi v10, v8, 5
 ; CHECK-V-NEXT:    vs2r.v v10, (a0) # Unknown-size Folded Spill
 ; CHECK-V-NEXT:    fmv.w.x fa0, s1
-; CHECK-V-NEXT:    call __extendhfsf2@plt
+; CHECK-V-NEXT:    call __extendhfsf2
 ; CHECK-V-NEXT:    fcvt.lu.s a0, fa0, rtz
 ; CHECK-V-NEXT:    vsetivli zero, 7, e32, m2, tu, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
@@ -1755,7 +1755,7 @@ define <8 x i16> @utesth_f16i16(<8 x half> %x) {
 ; CHECK-V-NEXT:    vslideup.vi v10, v8, 6
 ; CHECK-V-NEXT:    vs2r.v v10, (a0) # Unknown-size Folded Spill
 ; CHECK-V-NEXT:    fmv.w.x fa0, s0
-; CHECK-V-NEXT:    call __extendhfsf2@plt
+; CHECK-V-NEXT:    call __extendhfsf2
 ; CHECK-V-NEXT:    fcvt.lu.s a0, fa0, rtz
 ; CHECK-V-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
@@ -1832,29 +1832,29 @@ define <8 x i16> @ustest_f16i16(<8 x half> %x) {
 ; CHECK-NOV-NEXT:    lhu a1, 8(a1)
 ; CHECK-NOV-NEXT:    mv s0, a0
 ; CHECK-NOV-NEXT:    fmv.w.x fa0, a1
-; CHECK-NOV-NEXT:    call __extendhfsf2@plt
+; CHECK-NOV-NEXT:    call __extendhfsf2
 ; CHECK-NOV-NEXT:    fmv.s fs6, fa0
 ; CHECK-NOV-NEXT:    fmv.w.x fa0, s7
-; CHECK-NOV-NEXT:    call __extendhfsf2@plt
+; CHECK-NOV-NEXT:    call __extendhfsf2
 ; CHECK-NOV-NEXT:    fmv.s fs5, fa0
 ; CHECK-NOV-NEXT:    fmv.w.x fa0, s6
-; CHECK-NOV-NEXT:    call __extendhfsf2@plt
+; CHECK-NOV-NEXT:    call __extendhfsf2
 ; CHECK-NOV-NEXT:    fmv.s fs4, fa0
 ; CHECK-NOV-NEXT:    fmv.w.x fa0, s5
-; CHECK-NOV-NEXT:    call __extendhfsf2@plt
+; CHECK-NOV-NEXT:    call __extendhfsf2
 ; CHECK-NOV-NEXT:    fmv.s fs3, fa0
 ; CHECK-NOV-NEXT:    fmv.w.x fa0, s4
-; CHECK-NOV-NEXT:    call __extendhfsf2@plt
+; CHECK-NOV-NEXT:    call __extendhfsf2
 ; CHECK-NOV-NEXT:    fmv.s fs2, fa0
 ; CHECK-NOV-NEXT:    fmv.w.x fa0, s3
-; CHECK-NOV-NEXT:    call __extendhfsf2@plt
+; CHECK-NOV-NEXT:    call __extendhfsf2
 ; CHECK-NOV-NEXT:    fmv.s fs1, fa0
 ; CHECK-NOV-NEXT:    fmv.w.x fa0, s2
-; CHECK-NOV-NEXT:    call __extendhfsf2@plt
+; CHECK-NOV-NEXT:    call __extendhfsf2
 ; CHECK-NOV-NEXT:    fmv.s fs0, fa0
 ; CHECK-NOV-NEXT:    fmv.w.x fa0, s1
 ; CHECK-NOV-NEXT:    fcvt.l.s s1, fs6, rtz
-; CHECK-NOV-NEXT:    call __extendhfsf2@plt
+; CHECK-NOV-NEXT:    call __extendhfsf2
 ; CHECK-NOV-NEXT:    fcvt.l.s a0, fa0, rtz
 ; CHECK-NOV-NEXT:    lui a3, 16
 ; CHECK-NOV-NEXT:    addiw a3, a3, -1
@@ -1994,14 +1994,14 @@ define <8 x i16> @ustest_f16i16(<8 x half> %x) {
 ; CHECK-V-NEXT:    lhu s6, 0(a0)
 ; CHECK-V-NEXT:    lhu a0, 8(a0)
 ; CHECK-V-NEXT:    fmv.w.x fa0, a0
-; CHECK-V-NEXT:    call __extendhfsf2@plt
+; CHECK-V-NEXT:    call __extendhfsf2
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
 ; CHECK-V-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
 ; CHECK-V-NEXT:    addi a0, sp, 16
 ; CHECK-V-NEXT:    vs2r.v v8, (a0) # Unknown-size Folded Spill
 ; CHECK-V-NEXT:    fmv.w.x fa0, s6
-; CHECK-V-NEXT:    call __extendhfsf2@plt
+; CHECK-V-NEXT:    call __extendhfsf2
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
 ; CHECK-V-NEXT:    vsetivli zero, 2, e32, m2, tu, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
@@ -2010,7 +2010,7 @@ define <8 x i16> @ustest_f16i16(<8 x half> %x) {
 ; CHECK-V-NEXT:    vslideup.vi v8, v10, 1
 ; CHECK-V-NEXT:    vs2r.v v8, (a0) # Unknown-size Folded Spill
 ; CHECK-V-NEXT:    fmv.w.x fa0, s5
-; CHECK-V-NEXT:    call __extendhfsf2@plt
+; CHECK-V-NEXT:    call __extendhfsf2
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
 ; CHECK-V-NEXT:    vsetivli zero, 3, e32, m2, tu, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
@@ -2019,7 +2019,7 @@ define <8 x i16> @ustest_f16i16(<8 x half> %x) {
 ; CHECK-V-NEXT:    vslideup.vi v10, v8, 2
 ; CHECK-V-NEXT:    vs2r.v v10, (a0) # Unknown-size Folded Spill
 ; CHECK-V-NEXT:    fmv.w.x fa0, s4
-; CHECK-V-NEXT:    call __extendhfsf2@plt
+; CHECK-V-NEXT:    call __extendhfsf2
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
 ; CHECK-V-NEXT:    vsetivli zero, 4, e32, m2, tu, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
@@ -2028,7 +2028,7 @@ define <8 x i16> @ustest_f16i16(<8 x half> %x) {
 ; CHECK-V-NEXT:    vslideup.vi v10, v8, 3
 ; CHECK-V-NEXT:    vs2r.v v10, (a0) # Unknown-size Folded Spill
 ; CHECK-V-NEXT:    fmv.w.x fa0, s3
-; CHECK-V-NEXT:    call __extendhfsf2@plt
+; CHECK-V-NEXT:    call __extendhfsf2
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
 ; CHECK-V-NEXT:    vsetivli zero, 5, e32, m2, tu, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
@@ -2037,7 +2037,7 @@ define <8 x i16> @ustest_f16i16(<8 x half> %x) {
 ; CHECK-V-NEXT:    vslideup.vi v10, v8, 4
 ; CHECK-V-NEXT:    vs2r.v v10, (a0) # Unknown-size Folded Spill
 ; CHECK-V-NEXT:    fmv.w.x fa0, s2
-; CHECK-V-NEXT:    call __extendhfsf2@plt
+; CHECK-V-NEXT:    call __extendhfsf2
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
 ; CHECK-V-NEXT:    vsetivli zero, 6, e32, m2, tu, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
@@ -2046,7 +2046,7 @@ define <8 x i16> @ustest_f16i16(<8 x half> %x) {
 ; CHECK-V-NEXT:    vslideup.vi v10, v8, 5
 ; CHECK-V-NEXT:    vs2r.v v10, (a0) # Unknown-size Folded Spill
 ; CHECK-V-NEXT:    fmv.w.x fa0, s1
-; CHECK-V-NEXT:    call __extendhfsf2@plt
+; CHECK-V-NEXT:    call __extendhfsf2
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
 ; CHECK-V-NEXT:    vsetivli zero, 7, e32, m2, tu, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
@@ -2055,7 +2055,7 @@ define <8 x i16> @ustest_f16i16(<8 x half> %x) {
 ; CHECK-V-NEXT:    vslideup.vi v10, v8, 6
 ; CHECK-V-NEXT:    vs2r.v v10, (a0) # Unknown-size Folded Spill
 ; CHECK-V-NEXT:    fmv.w.x fa0, s0
-; CHECK-V-NEXT:    call __extendhfsf2@plt
+; CHECK-V-NEXT:    call __extendhfsf2
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
 ; CHECK-V-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
@@ -2107,11 +2107,11 @@ define <2 x i64> @stest_f64i64(<2 x double> %x) {
 ; CHECK-NOV-NEXT:    .cfi_offset s1, -24
 ; CHECK-NOV-NEXT:    .cfi_offset fs0, -32
 ; CHECK-NOV-NEXT:    fmv.d fs0, fa1
-; CHECK-NOV-NEXT:    call __fixdfti@plt
+; CHECK-NOV-NEXT:    call __fixdfti
 ; CHECK-NOV-NEXT:    mv s0, a0
 ; CHECK-NOV-NEXT:    mv s1, a1
 ; CHECK-NOV-NEXT:    fmv.d fa0, fs0
-; CHECK-NOV-NEXT:    call __fixdfti@plt
+; CHECK-NOV-NEXT:    call __fixdfti
 ; CHECK-NOV-NEXT:    mv a2, a0
 ; CHECK-NOV-NEXT:    li a0, -1
 ; CHECK-NOV-NEXT:    srli a3, a0, 1
@@ -2192,14 +2192,14 @@ define <2 x i64> @stest_f64i64(<2 x double> %x) {
 ; CHECK-V-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; CHECK-V-NEXT:    vslidedown.vi v9, v8, 1
 ; CHECK-V-NEXT:    vfmv.f.s fa0, v9
-; CHECK-V-NEXT:    call __fixdfti@plt
+; CHECK-V-NEXT:    call __fixdfti
 ; CHECK-V-NEXT:    mv s0, a0
 ; CHECK-V-NEXT:    mv s1, a1
 ; CHECK-V-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; CHECK-V-NEXT:    addi a0, sp, 32
 ; CHECK-V-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
 ; CHECK-V-NEXT:    vfmv.f.s fa0, v8
-; CHECK-V-NEXT:    call __fixdfti@plt
+; CHECK-V-NEXT:    call __fixdfti
 ; CHECK-V-NEXT:    li a2, -1
 ; CHECK-V-NEXT:    srli a3, a2, 1
 ; CHECK-V-NEXT:    beqz a1, .LBB18_3
@@ -2287,11 +2287,11 @@ define <2 x i64> @utest_f64i64(<2 x double> %x) {
 ; CHECK-NOV-NEXT:    .cfi_offset s1, -24
 ; CHECK-NOV-NEXT:    .cfi_offset fs0, -32
 ; CHECK-NOV-NEXT:    fmv.d fs0, fa1
-; CHECK-NOV-NEXT:    call __fixunsdfti@plt
+; CHECK-NOV-NEXT:    call __fixunsdfti
 ; CHECK-NOV-NEXT:    mv s0, a0
 ; CHECK-NOV-NEXT:    mv s1, a1
 ; CHECK-NOV-NEXT:    fmv.d fa0, fs0
-; CHECK-NOV-NEXT:    call __fixunsdfti@plt
+; CHECK-NOV-NEXT:    call __fixunsdfti
 ; CHECK-NOV-NEXT:    snez a1, a1
 ; CHECK-NOV-NEXT:    snez a2, s1
 ; CHECK-NOV-NEXT:    addi a2, a2, -1
@@ -2325,14 +2325,14 @@ define <2 x i64> @utest_f64i64(<2 x double> %x) {
 ; CHECK-V-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; CHECK-V-NEXT:    vslidedown.vi v9, v8, 1
 ; CHECK-V-NEXT:    vfmv.f.s fa0, v9
-; CHECK-V-NEXT:    call __fixunsdfti@plt
+; CHECK-V-NEXT:    call __fixunsdfti
 ; CHECK-V-NEXT:    mv s0, a0
 ; CHECK-V-NEXT:    mv s1, a1
 ; CHECK-V-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; CHECK-V-NEXT:    addi a0, sp, 32
 ; CHECK-V-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
 ; CHECK-V-NEXT:    vfmv.f.s fa0, v8
-; CHECK-V-NEXT:    call __fixunsdfti@plt
+; CHECK-V-NEXT:    call __fixunsdfti
 ; CHECK-V-NEXT:    snez a1, a1
 ; CHECK-V-NEXT:    snez a2, s1
 ; CHECK-V-NEXT:    addi a2, a2, -1
@@ -2373,11 +2373,11 @@ define <2 x i64> @ustest_f64i64(<2 x double> %x) {
 ; CHECK-NOV-NEXT:    .cfi_offset s1, -24
 ; CHECK-NOV-NEXT:    .cfi_offset fs0, -32
 ; CHECK-NOV-NEXT:    fmv.d fs0, fa1
-; CHECK-NOV-NEXT:    call __fixdfti@plt
+; CHECK-NOV-NEXT:    call __fixdfti
 ; CHECK-NOV-NEXT:    mv s0, a0
 ; CHECK-NOV-NEXT:    mv s1, a1
 ; CHECK-NOV-NEXT:    fmv.d fa0, fs0
-; CHECK-NOV-NEXT:    call __fixdfti@plt
+; CHECK-NOV-NEXT:    call __fixdfti
 ; CHECK-NOV-NEXT:    mv a2, s1
 ; CHECK-NOV-NEXT:    blez s1, .LBB20_2
 ; CHECK-NOV-NEXT:  # %bb.1: # %entry
@@ -2437,14 +2437,14 @@ define <2 x i64> @ustest_f64i64(<2 x double> %x) {
 ; CHECK-V-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; CHECK-V-NEXT:    vslidedown.vi v9, v8, 1
 ; CHECK-V-NEXT:    vfmv.f.s fa0, v9
-; CHECK-V-NEXT:    call __fixdfti@plt
+; CHECK-V-NEXT:    call __fixdfti
 ; CHECK-V-NEXT:    mv s0, a0
 ; CHECK-V-NEXT:    mv s1, a1
 ; CHECK-V-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; CHECK-V-NEXT:    addi a0, sp, 32
 ; CHECK-V-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
 ; CHECK-V-NEXT:    vfmv.f.s fa0, v8
-; CHECK-V-NEXT:    call __fixdfti@plt
+; CHECK-V-NEXT:    call __fixdfti
 ; CHECK-V-NEXT:    mv a2, s1
 ; CHECK-V-NEXT:    blez s1, .LBB20_2
 ; CHECK-V-NEXT:  # %bb.1: # %entry
@@ -2514,11 +2514,11 @@ define <2 x i64> @stest_f32i64(<2 x float> %x) {
 ; CHECK-NOV-NEXT:    .cfi_offset s1, -24
 ; CHECK-NOV-NEXT:    .cfi_offset fs0, -32
 ; CHECK-NOV-NEXT:    fmv.s fs0, fa1
-; CHECK-NOV-NEXT:    call __fixsfti@plt
+; CHECK-NOV-NEXT:    call __fixsfti
 ; CHECK-NOV-NEXT:    mv s0, a0
 ; CHECK-NOV-NEXT:    mv s1, a1
 ; CHECK-NOV-NEXT:    fmv.s fa0, fs0
-; CHECK-NOV-NEXT:    call __fixsfti@plt
+; CHECK-NOV-NEXT:    call __fixsfti
 ; CHECK-NOV-NEXT:    mv a2, a0
 ; CHECK-NOV-NEXT:    li a0, -1
 ; CHECK-NOV-NEXT:    srli a3, a0, 1
@@ -2599,14 +2599,14 @@ define <2 x i64> @stest_f32i64(<2 x float> %x) {
 ; CHECK-V-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
 ; CHECK-V-NEXT:    vslidedown.vi v9, v8, 1
 ; CHECK-V-NEXT:    vfmv.f.s fa0, v9
-; CHECK-V-NEXT:    call __fixsfti@plt
+; CHECK-V-NEXT:    call __fixsfti
 ; CHECK-V-NEXT:    mv s0, a0
 ; CHECK-V-NEXT:    mv s1, a1
 ; CHECK-V-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
 ; CHECK-V-NEXT:    addi a0, sp, 32
 ; CHECK-V-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
 ; CHECK-V-NEXT:    vfmv.f.s fa0, v8
-; CHECK-V-NEXT:    call __fixsfti@plt
+; CHECK-V-NEXT:    call __fixsfti
 ; CHECK-V-NEXT:    li a2, -1
 ; CHECK-V-NEXT:    srli a3, a2, 1
 ; CHECK-V-NEXT:    beqz a1, .LBB21_3
@@ -2694,11 +2694,11 @@ define <2 x i64> @utest_f32i64(<2 x float> %x) {
 ; CHECK-NOV-NEXT:    .cfi_offset s1, -24
 ; CHECK-NOV-NEXT:    .cfi_offset fs0, -32
 ; CHECK-NOV-NEXT:    fmv.s fs0, fa1
-; CHECK-NOV-NEXT:    call __fixunssfti@plt
+; CHECK-NOV-NEXT:    call __fixunssfti
 ; CHECK-NOV-NEXT:    mv s0, a0
 ; CHECK-NOV-NEXT:    mv s1, a1
 ; CHECK-NOV-NEXT:    fmv.s fa0, fs0
-; CHECK-NOV-NEXT:    call __fixunssfti@plt
+; CHECK-NOV-NEXT:    call __fixunssfti
 ; CHECK-NOV-NEXT:    snez a1, a1
 ; CHECK-NOV-NEXT:    snez a2, s1
 ; CHECK-NOV-NEXT:    addi a2, a2, -1
@@ -2732,14 +2732,14 @@ define <2 x i64> @utest_f32i64(<2 x float> %x) {
 ; CHECK-V-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
 ; CHECK-V-NEXT:    vslidedown.vi v9, v8, 1
 ; CHECK-V-NEXT:    vfmv.f.s fa0, v9
-; CHECK-V-NEXT:    call __fixunssfti@plt
+; CHECK-V-NEXT:    call __fixunssfti
 ; CHECK-V-NEXT:    mv s0, a0
 ; CHECK-V-NEXT:    mv s1, a1
 ; CHECK-V-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
 ; CHECK-V-NEXT:    addi a0, sp, 32
 ; CHECK-V-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
 ; CHECK-V-NEXT:    vfmv.f.s fa0, v8
-; CHECK-V-NEXT:    call __fixunssfti@plt
+; CHECK-V-NEXT:    call __fixunssfti
 ; CHECK-V-NEXT:    snez a1, a1
 ; CHECK-V-NEXT:    snez a2, s1
 ; CHECK-V-NEXT:    addi a2, a2, -1
@@ -2780,11 +2780,11 @@ define <2 x i64> @ustest_f32i64(<2 x float> %x) {
 ; CHECK-NOV-NEXT:    .cfi_offset s1, -24
 ; CHECK-NOV-NEXT:    .cfi_offset fs0, -32
 ; CHECK-NOV-NEXT:    fmv.s fs0, fa1
-; CHECK-NOV-NEXT:    call __fixsfti@plt
+; CHECK-NOV-NEXT:    call __fixsfti
 ; CHECK-NOV-NEXT:    mv s0, a0
 ; CHECK-NOV-NEXT:    mv s1, a1
 ; CHECK-NOV-NEXT:    fmv.s fa0, fs0
-; CHECK-NOV-NEXT:    call __fixsfti@plt
+; CHECK-NOV-NEXT:    call __fixsfti
 ; CHECK-NOV-NEXT:    mv a2, s1
 ; CHECK-NOV-NEXT:    blez s1, .LBB23_2
 ; CHECK-NOV-NEXT:  # %bb.1: # %entry
@@ -2844,14 +2844,14 @@ define <2 x i64> @ustest_f32i64(<2 x float> %x) {
 ; CHECK-V-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
 ; CHECK-V-NEXT:    vslidedown.vi v9, v8, 1
 ; CHECK-V-NEXT:    vfmv.f.s fa0, v9
-; CHECK-V-NEXT:    call __fixsfti@plt
+; CHECK-V-NEXT:    call __fixsfti
 ; CHECK-V-NEXT:    mv s0, a0
 ; CHECK-V-NEXT:    mv s1, a1
 ; CHECK-V-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
 ; CHECK-V-NEXT:    addi a0, sp, 32
 ; CHECK-V-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
 ; CHECK-V-NEXT:    vfmv.f.s fa0, v8
-; CHECK-V-NEXT:    call __fixsfti@plt
+; CHECK-V-NEXT:    call __fixsfti
 ; CHECK-V-NEXT:    mv a2, s1
 ; CHECK-V-NEXT:    blez s1, .LBB23_2
 ; CHECK-V-NEXT:  # %bb.1: # %entry
@@ -2922,13 +2922,13 @@ define <2 x i64> @stest_f16i64(<2 x half> %x) {
 ; CHECK-NOV-NEXT:    .cfi_offset s2, -32
 ; CHECK-NOV-NEXT:    mv s2, a1
 ; CHECK-NOV-NEXT:    fmv.w.x fa0, a0
-; CHECK-NOV-NEXT:    call __extendhfsf2@plt
-; CHECK-NOV-NEXT:    call __fixsfti@plt
+; CHECK-NOV-NEXT:    call __extendhfsf2
+; CHECK-NOV-NEXT:    call __fixsfti
 ; CHECK-NOV-NEXT:    mv s0, a0
 ; CHECK-NOV-NEXT:    mv s1, a1
 ; CHECK-NOV-NEXT:    fmv.w.x fa0, s2
-; CHECK-NOV-NEXT:    call __extendhfsf2@plt
-; CHECK-NOV-NEXT:    call __fixsfti@plt
+; CHECK-NOV-NEXT:    call __extendhfsf2
+; CHECK-NOV-NEXT:    call __fixsfti
 ; CHECK-NOV-NEXT:    mv a2, a0
 ; CHECK-NOV-NEXT:    li a0, -1
 ; CHECK-NOV-NEXT:    srli a3, a0, 1
@@ -3004,13 +3004,13 @@ define <2 x i64> @stest_f16i64(<2 x half> %x) {
 ; CHECK-V-NEXT:    .cfi_offset s2, -32
 ; CHECK-V-NEXT:    mv s2, a1
 ; CHECK-V-NEXT:    fmv.w.x fa0, a0
-; CHECK-V-NEXT:    call __extendhfsf2@plt
-; CHECK-V-NEXT:    call __fixsfti@plt
+; CHECK-V-NEXT:    call __extendhfsf2
+; CHECK-V-NEXT:    call __fixsfti
 ; CHECK-V-NEXT:    mv s0, a0
 ; CHECK-V-NEXT:    mv s1, a1
 ; CHECK-V-NEXT:    fmv.w.x fa0, s2
-; CHECK-V-NEXT:    call __extendhfsf2@plt
-; CHECK-V-NEXT:    call __fixsfti@plt
+; CHECK-V-NEXT:    call __extendhfsf2
+; CHECK-V-NEXT:    call __fixsfti
 ; CHECK-V-NEXT:    li a2, -1
 ; CHECK-V-NEXT:    srli a3, a2, 1
 ; CHECK-V-NEXT:    beqz a1, .LBB24_3
@@ -3097,13 +3097,13 @@ define <2 x i64> @utesth_f16i64(<2 x half> %x) {
 ; CHECK-NOV-NEXT:    .cfi_offset s2, -32
 ; CHECK-NOV-NEXT:    mv s0, a1
 ; CHECK-NOV-NEXT:    fmv.w.x fa0, a0
-; CHECK-NOV-NEXT:    call __extendhfsf2@plt
-; CHECK-NOV-NEXT:    call __fixunssfti@plt
+; CHECK-NOV-NEXT:    call __extendhfsf2
+; CHECK-NOV-NEXT:    call __fixunssfti
 ; CHECK-NOV-NEXT:    mv s1, a0
 ; CHECK-NOV-NEXT:    mv s2, a1
 ; CHECK-NOV-NEXT:    fmv.w.x fa0, s0
-; CHECK-NOV-NEXT:    call __extendhfsf2@plt
-; CHECK-NOV-NEXT:    call __fixunssfti@plt
+; CHECK-NOV-NEXT:    call __extendhfsf2
+; CHECK-NOV-NEXT:    call __fixunssfti
 ; CHECK-NOV-NEXT:    snez a1, a1
 ; CHECK-NOV-NEXT:    snez a2, s2
 ; CHECK-NOV-NEXT:    addi a2, a2, -1
@@ -3132,13 +3132,13 @@ define <2 x i64> @utesth_f16i64(<2 x half> %x) {
 ; CHECK-V-NEXT:    .cfi_offset s2, -32
 ; CHECK-V-NEXT:    mv s0, a1
 ; CHECK-V-NEXT:    fmv.w.x fa0, a0
-; CHECK-V-NEXT:    call __extendhfsf2@plt
-; CHECK-V-NEXT:    call __fixunssfti@plt
+; CHECK-V-NEXT:    call __extendhfsf2
+; CHECK-V-NEXT:    call __fixunssfti
 ; CHECK-V-NEXT:    mv s1, a0
 ; CHECK-V-NEXT:    mv s2, a1
 ; CHECK-V-NEXT:    fmv.w.x fa0, s0
-; CHECK-V-NEXT:    call __extendhfsf2@plt
-; CHECK-V-NEXT:    call __fixunssfti@plt
+; CHECK-V-NEXT:    call __extendhfsf2
+; CHECK-V-NEXT:    call __fixunssfti
 ; CHECK-V-NEXT:    snez a1, a1
 ; CHECK-V-NEXT:    snez a2, s2
 ; CHECK-V-NEXT:    addi a2, a2, -1
@@ -3178,13 +3178,13 @@ define <2 x i64> @ustest_f16i64(<2 x half> %x) {
 ; CHECK-NOV-NEXT:    .cfi_offset s2, -32
 ; CHECK-NOV-NEXT:    mv s2, a1
 ; CHECK-NOV-NEXT:    fmv.w.x fa0, a0
-; CHECK-NOV-NEXT:    call __extendhfsf2@plt
-; CHECK-NOV-NEXT:    call __fixsfti@plt
+; CHECK-NOV-NEXT:    call __extendhfsf2
+; CHECK-NOV-NEXT:    call __fixsfti
 ; CHECK-NOV-NEXT:    mv s0, a0
 ; CHECK-NOV-NEXT:    mv s1, a1
 ; CHECK-NOV-NEXT:    fmv.w.x fa0, s2
-; CHECK-NOV-NEXT:    call __extendhfsf2@plt
-; CHECK-NOV-NEXT:    call __fixsfti@plt
+; CHECK-NOV-NEXT:    call __extendhfsf2
+; CHECK-NOV-NEXT:    call __fixsfti
 ; CHECK-NOV-NEXT:    mv a2, s1
 ; CHECK-NOV-NEXT:    blez s1, .LBB26_2
 ; CHECK-NOV-NEXT:  # %bb.1: # %entry
@@ -3239,13 +3239,13 @@ define <2 x i64> @ustest_f16i64(<2 x half> %x) {
 ; CHECK-V-NEXT:    .cfi_offset s2, -32
 ; CHECK-V-NEXT:    mv s2, a1
 ; CHECK-V-NEXT:    fmv.w.x fa0, a0
-; CHECK-V-NEXT:    call __extendhfsf2@plt
-; CHECK-V-NEXT:    call __fixsfti@plt
+; CHECK-V-NEXT:    call __extendhfsf2
+; CHECK-V-NEXT:    call __fixsfti
 ; CHECK-V-NEXT:    mv s0, a0
 ; CHECK-V-NEXT:    mv s1, a1
 ; CHECK-V-NEXT:    fmv.w.x fa0, s2
-; CHECK-V-NEXT:    call __extendhfsf2@plt
-; CHECK-V-NEXT:    call __fixsfti@plt
+; CHECK-V-NEXT:    call __extendhfsf2
+; CHECK-V-NEXT:    call __fixsfti
 ; CHECK-V-NEXT:    mv a2, s1
 ; CHECK-V-NEXT:    blez s1, .LBB26_2
 ; CHECK-V-NEXT:  # %bb.1: # %entry
@@ -3639,17 +3639,17 @@ define <4 x i32> @stest_f16i32_mm(<4 x half> %x) {
 ; CHECK-NOV-NEXT:    lhu a1, 16(a1)
 ; CHECK-NOV-NEXT:    mv s0, a0
 ; CHECK-NOV-NEXT:    fmv.w.x fa0, a1
-; CHECK-NOV-NEXT:    call __extendhfsf2@plt
+; CHECK-NOV-NEXT:    call __extendhfsf2
 ; CHECK-NOV-NEXT:    fmv.s fs2, fa0
 ; CHECK-NOV-NEXT:    fmv.w.x fa0, s3
-; CHECK-NOV-NEXT:    call __extendhfsf2@plt
+; CHECK-NOV-NEXT:    call __extendhfsf2
 ; CHECK-NOV-NEXT:    fmv.s fs1, fa0
 ; CHECK-NOV-NEXT:    fmv.w.x fa0, s2
-; CHECK-NOV-NEXT:    call __extendhfsf2@plt
+; CHECK-NOV-NEXT:    call __extendhfsf2
 ; CHECK-NOV-NEXT:    fmv.s fs0, fa0
 ; CHECK-NOV-NEXT:    fmv.w.x fa0, s1
 ; CHECK-NOV-NEXT:    fcvt.l.s s1, fs2, rtz
-; CHECK-NOV-NEXT:    call __extendhfsf2@plt
+; CHECK-NOV-NEXT:    call __extendhfsf2
 ; CHECK-NOV-NEXT:    fcvt.l.s a0, fa0, rtz
 ; CHECK-NOV-NEXT:    lui a1, 524288
 ; CHECK-NOV-NEXT:    addiw a4, a1, -1
@@ -3733,14 +3733,14 @@ define <4 x i32> @stest_f16i32_mm(<4 x half> %x) {
 ; CHECK-V-NEXT:    lhu s2, 0(a0)
 ; CHECK-V-NEXT:    lhu a0, 8(a0)
 ; CHECK-V-NEXT:    fmv.w.x fa0, a0
-; CHECK-V-NEXT:    call __extendhfsf2@plt
+; CHECK-V-NEXT:    call __extendhfsf2
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
 ; CHECK-V-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
 ; CHECK-V-NEXT:    addi a0, sp, 16
 ; CHECK-V-NEXT:    vs2r.v v8, (a0) # Unknown-size Folded Spill
 ; CHECK-V-NEXT:    fmv.w.x fa0, s2
-; CHECK-V-NEXT:    call __extendhfsf2@plt
+; CHECK-V-NEXT:    call __extendhfsf2
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
 ; CHECK-V-NEXT:    vsetivli zero, 2, e64, m2, tu, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
@@ -3753,7 +3753,7 @@ define <4 x i32> @stest_f16i32_mm(<4 x half> %x) {
 ; CHECK-V-NEXT:    addi a0, a0, 16
 ; CHECK-V-NEXT:    vs2r.v v8, (a0) # Unknown-size Folded Spill
 ; CHECK-V-NEXT:    fmv.w.x fa0, s1
-; CHECK-V-NEXT:    call __extendhfsf2@plt
+; CHECK-V-NEXT:    call __extendhfsf2
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
 ; CHECK-V-NEXT:    vsetivli zero, 3, e64, m2, tu, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
@@ -3769,7 +3769,7 @@ define <4 x i32> @stest_f16i32_mm(<4 x half> %x) {
 ; CHECK-V-NEXT:    addi a0, a0, 16
 ; CHECK-V-NEXT:    vs2r.v v10, (a0) # Unknown-size Folded Spill
 ; CHECK-V-NEXT:    fmv.w.x fa0, s0
-; CHECK-V-NEXT:    call __extendhfsf2@plt
+; CHECK-V-NEXT:    call __extendhfsf2
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
 ; CHECK-V-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
@@ -3825,17 +3825,17 @@ define <4 x i32> @utesth_f16i32_mm(<4 x half> %x) {
 ; CHECK-NOV-NEXT:    lhu a1, 8(a1)
 ; CHECK-NOV-NEXT:    mv s0, a0
 ; CHECK-NOV-NEXT:    fmv.w.x fa0, a1
-; CHECK-NOV-NEXT:    call __extendhfsf2@plt
+; CHECK-NOV-NEXT:    call __extendhfsf2
 ; CHECK-NOV-NEXT:    fmv.s fs2, fa0
 ; CHECK-NOV-NEXT:    fmv.w.x fa0, s3
-; CHECK-NOV-NEXT:    call __extendhfsf2@plt
+; CHECK-NOV-NEXT:    call __extendhfsf2
 ; CHECK-NOV-NEXT:    fmv.s fs1, fa0
 ; CHECK-NOV-NEXT:    fmv.w.x fa0, s2
-; CHECK-NOV-NEXT:    call __extendhfsf2@plt
+; CHECK-NOV-NEXT:    call __extendhfsf2
 ; CHECK-NOV-NEXT:    fmv.s fs0, fa0
 ; CHECK-NOV-NEXT:    fmv.w.x fa0, s1
 ; CHECK-NOV-NEXT:    fcvt.lu.s s1, fs2, rtz
-; CHECK-NOV-NEXT:    call __extendhfsf2@plt
+; CHECK-NOV-NEXT:    call __extendhfsf2
 ; CHECK-NOV-NEXT:    fcvt.lu.s a0, fa0, rtz
 ; CHECK-NOV-NEXT:    li a1, -1
 ; CHECK-NOV-NEXT:    srli a1, a1, 32
@@ -3899,14 +3899,14 @@ define <4 x i32> @utesth_f16i32_mm(<4 x half> %x) {
 ; CHECK-V-NEXT:    lhu s2, 0(a0)
 ; CHECK-V-NEXT:    lhu a0, 8(a0)
 ; CHECK-V-NEXT:    fmv.w.x fa0, a0
-; CHECK-V-NEXT:    call __extendhfsf2@plt
+; CHECK-V-NEXT:    call __extendhfsf2
 ; CHECK-V-NEXT:    fcvt.lu.s a0, fa0, rtz
 ; CHECK-V-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
 ; CHECK-V-NEXT:    addi a0, sp, 16
 ; CHECK-V-NEXT:    vs2r.v v8, (a0) # Unknown-size Folded Spill
 ; CHECK-V-NEXT:    fmv.w.x fa0, s2
-; CHECK-V-NEXT:    call __extendhfsf2@plt
+; CHECK-V-NEXT:    call __extendhfsf2
 ; CHECK-V-NEXT:    fcvt.lu.s a0, fa0, rtz
 ; CHECK-V-NEXT:    vsetivli zero, 2, e64, m2, tu, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
@@ -3919,7 +3919,7 @@ define <4 x i32> @utesth_f16i32_mm(<4 x half> %x) {
 ; CHECK-V-NEXT:    addi a0, a0, 16
 ; CHECK-V-NEXT:    vs2r.v v8, (a0) # Unknown-size Folded Spill
 ; CHECK-V-NEXT:    fmv.w.x fa0, s1
-; CHECK-V-NEXT:    call __extendhfsf2@plt
+; CHECK-V-NEXT:    call __extendhfsf2
 ; CHECK-V-NEXT:    fcvt.lu.s a0, fa0, rtz
 ; CHECK-V-NEXT:    vsetivli zero, 3, e64, m2, tu, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
@@ -3935,7 +3935,7 @@ define <4 x i32> @utesth_f16i32_mm(<4 x half> %x) {
 ; CHECK-V-NEXT:    addi a0, a0, 16
 ; CHECK-V-NEXT:    vs2r.v v10, (a0) # Unknown-size Folded Spill
 ; CHECK-V-NEXT:    fmv.w.x fa0, s0
-; CHECK-V-NEXT:    call __extendhfsf2@plt
+; CHECK-V-NEXT:    call __extendhfsf2
 ; CHECK-V-NEXT:    fcvt.lu.s a0, fa0, rtz
 ; CHECK-V-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
@@ -3990,17 +3990,17 @@ define <4 x i32> @ustest_f16i32_mm(<4 x half> %x) {
 ; CHECK-NOV-NEXT:    lhu a1, 16(a1)
 ; CHECK-NOV-NEXT:    mv s0, a0
 ; CHECK-NOV-NEXT:    fmv.w.x fa0, a1
-; CHECK-NOV-NEXT:    call __extendhfsf2@plt
+; CHECK-NOV-NEXT:    call __extendhfsf2
 ; CHECK-NOV-NEXT:    fmv.s fs2, fa0
 ; CHECK-NOV-NEXT:    fmv.w.x fa0, s3
-; CHECK-NOV-NEXT:    call __extendhfsf2@plt
+; CHECK-NOV-NEXT:    call __extendhfsf2
 ; CHECK-NOV-NEXT:    fmv.s fs1, fa0
 ; CHECK-NOV-NEXT:    fmv.w.x fa0, s2
-; CHECK-NOV-NEXT:    call __extendhfsf2@plt
+; CHECK-NOV-NEXT:    call __extendhfsf2
 ; CHECK-NOV-NEXT:    fmv.s fs0, fa0
 ; CHECK-NOV-NEXT:    fmv.w.x fa0, s1
 ; CHECK-NOV-NEXT:    fcvt.l.s s1, fs2, rtz
-; CHECK-NOV-NEXT:    call __extendhfsf2@plt
+; CHECK-NOV-NEXT:    call __extendhfsf2
 ; CHECK-NOV-NEXT:    fcvt.l.s a0, fa0, rtz
 ; CHECK-NOV-NEXT:    li a2, -1
 ; CHECK-NOV-NEXT:    srli a2, a2, 32
@@ -4076,14 +4076,14 @@ define <4 x i32> @ustest_f16i32_mm(<4 x half> %x) {
 ; CHECK-V-NEXT:    lhu s2, 0(a0)
 ; CHECK-V-NEXT:    lhu a0, 8(a0)
 ; CHECK-V-NEXT:    fmv.w.x fa0, a0
-; CHECK-V-NEXT:    call __extendhfsf2@plt
+; CHECK-V-NEXT:    call __extendhfsf2
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
 ; CHECK-V-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
 ; CHECK-V-NEXT:    addi a0, sp, 16
 ; CHECK-V-NEXT:    vs2r.v v8, (a0) # Unknown-size Folded Spill
 ; CHECK-V-NEXT:    fmv.w.x fa0, s2
-; CHECK-V-NEXT:    call __extendhfsf2@plt
+; CHECK-V-NEXT:    call __extendhfsf2
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
 ; CHECK-V-NEXT:    vsetivli zero, 2, e64, m2, tu, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
@@ -4096,7 +4096,7 @@ define <4 x i32> @ustest_f16i32_mm(<4 x half> %x) {
 ; CHECK-V-NEXT:    addi a0, a0, 16
 ; CHECK-V-NEXT:    vs2r.v v8, (a0) # Unknown-size Folded Spill
 ; CHECK-V-NEXT:    fmv.w.x fa0, s1
-; CHECK-V-NEXT:    call __extendhfsf2@plt
+; CHECK-V-NEXT:    call __extendhfsf2
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
 ; CHECK-V-NEXT:    vsetivli zero, 3, e64, m2, tu, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
@@ -4112,7 +4112,7 @@ define <4 x i32> @ustest_f16i32_mm(<4 x half> %x) {
 ; CHECK-V-NEXT:    addi a0, a0, 16
 ; CHECK-V-NEXT:    vs2r.v v10, (a0) # Unknown-size Folded Spill
 ; CHECK-V-NEXT:    fmv.w.x fa0, s0
-; CHECK-V-NEXT:    call __extendhfsf2@plt
+; CHECK-V-NEXT:    call __extendhfsf2
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
 ; CHECK-V-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
@@ -4508,29 +4508,29 @@ define <8 x i16> @stest_f16i16_mm(<8 x half> %x) {
 ; CHECK-NOV-NEXT:    lhu a1, 48(a1)
 ; CHECK-NOV-NEXT:    mv s0, a0
 ; CHECK-NOV-NEXT:    fmv.w.x fa0, a1
-; CHECK-NOV-NEXT:    call __extendhfsf2@plt
+; CHECK-NOV-NEXT:    call __extendhfsf2
 ; CHECK-NOV-NEXT:    fmv.s fs6, fa0
 ; CHECK-NOV-NEXT:    fmv.w.x fa0, s7
-; CHECK-NOV-NEXT:    call __extendhfsf2@plt
+; CHECK-NOV-NEXT:    call __extendhfsf2
 ; CHECK-NOV-NEXT:    fmv.s fs5, fa0
 ; CHECK-NOV-NEXT:    fmv.w.x fa0, s6
-; CHECK-NOV-NEXT:    call __extendhfsf2@plt
+; CHECK-NOV-NEXT:    call __extendhfsf2
 ; CHECK-NOV-NEXT:    fmv.s fs4, fa0
 ; CHECK-NOV-NEXT:    fmv.w.x fa0, s5
-; CHECK-NOV-NEXT:    call __extendhfsf2@plt
+; CHECK-NOV-NEXT:    call __extendhfsf2
 ; CHECK-NOV-NEXT:    fmv.s fs3, fa0
 ; CHECK-NOV-NEXT:    fmv.w.x fa0, s4
-; CHECK-NOV-NEXT:    call __extendhfsf2@plt
+; CHECK-NOV-NEXT:    call __extendhfsf2
 ; CHECK-NOV-NEXT:    fmv.s fs2, fa0
 ; CHECK-NOV-NEXT:    fmv.w.x fa0, s3
-; CHECK-NOV-NEXT:    call __extendhfsf2@plt
+; CHECK-NOV-NEXT:    call __extendhfsf2
 ; CHECK-NOV-NEXT:    fmv.s fs1, fa0
 ; CHECK-NOV-NEXT:    fmv.w.x fa0, s2
-; CHECK-NOV-NEXT:    call __extendhfsf2@plt
+; CHECK-NOV-NEXT:    call __extendhfsf2
 ; CHECK-NOV-NEXT:    fmv.s fs0, fa0
 ; CHECK-NOV-NEXT:    fmv.w.x fa0, s1
 ; CHECK-NOV-NEXT:    fcvt.l.s s1, fs6, rtz
-; CHECK-NOV-NEXT:    call __extendhfsf2@plt
+; CHECK-NOV-NEXT:    call __extendhfsf2
 ; CHECK-NOV-NEXT:    fcvt.l.s a0, fa0, rtz
 ; CHECK-NOV-NEXT:    lui a7, 8
 ; CHECK-NOV-NEXT:    addiw a7, a7, -1
@@ -4688,14 +4688,14 @@ define <8 x i16> @stest_f16i16_mm(<8 x half> %x) {
 ; CHECK-V-NEXT:    lhu s6, 0(a0)
 ; CHECK-V-NEXT:    lhu a0, 8(a0)
 ; CHECK-V-NEXT:    fmv.w.x fa0, a0
-; CHECK-V-NEXT:    call __extendhfsf2@plt
+; CHECK-V-NEXT:    call __extendhfsf2
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
 ; CHECK-V-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
 ; CHECK-V-NEXT:    addi a0, sp, 16
 ; CHECK-V-NEXT:    vs2r.v v8, (a0) # Unknown-size Folded Spill
 ; CHECK-V-NEXT:    fmv.w.x fa0, s6
-; CHECK-V-NEXT:    call __extendhfsf2@plt
+; CHECK-V-NEXT:    call __extendhfsf2
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
 ; CHECK-V-NEXT:    vsetivli zero, 2, e32, m2, tu, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
@@ -4704,7 +4704,7 @@ define <8 x i16> @stest_f16i16_mm(<8 x half> %x) {
 ; CHECK-V-NEXT:    vslideup.vi v8, v10, 1
 ; CHECK-V-NEXT:    vs2r.v v8, (a0) # Unknown-size Folded Spill
 ; CHECK-V-NEXT:    fmv.w.x fa0, s5
-; CHECK-V-NEXT:    call __extendhfsf2@plt
+; CHECK-V-NEXT:    call __extendhfsf2
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
 ; CHECK-V-NEXT:    vsetivli zero, 3, e32, m2, tu, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
@@ -4713,7 +4713,7 @@ define <8 x i16> @stest_f16i16_mm(<8 x half> %x) {
 ; CHECK-V-NEXT:    vslideup.vi v10, v8, 2
 ; CHECK-V-NEXT:    vs2r.v v10, (a0) # Unknown-size Folded Spill
 ; CHECK-V-NEXT:    fmv.w.x fa0, s4
-; CHECK-V-NEXT:    call __extendhfsf2@plt
+; CHECK-V-NEXT:    call __extendhfsf2
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
 ; CHECK-V-NEXT:    vsetivli zero, 4, e32, m2, tu, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
@@ -4722,7 +4722,7 @@ define <8 x i16> @stest_f16i16_mm(<8 x half> %x) {
 ; CHECK-V-NEXT:    vslideup.vi v10, v8, 3
 ; CHECK-V-NEXT:    vs2r.v v10, (a0) # Unknown-size Folded Spill
 ; CHECK-V-NEXT:    fmv.w.x fa0, s3
-; CHECK-V-NEXT:    call __extendhfsf2@plt
+; CHECK-V-NEXT:    call __extendhfsf2
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
 ; CHECK-V-NEXT:    vsetivli zero, 5, e32, m2, tu, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
@@ -4731,7 +4731,7 @@ define <8 x i16> @stest_f16i16_mm(<8 x half> %x) {
 ; CHECK-V-NEXT:    vslideup.vi v10, v8, 4
 ; CHECK-V-NEXT:    vs2r.v v10, (a0) # Unknown-size Folded Spill
 ; CHECK-V-NEXT:    fmv.w.x fa0, s2
-; CHECK-V-NEXT:    call __extendhfsf2@plt
+; CHECK-V-NEXT:    call __extendhfsf2
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
 ; CHECK-V-NEXT:    vsetivli zero, 6, e32, m2, tu, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
@@ -4740,7 +4740,7 @@ define <8 x i16> @stest_f16i16_mm(<8 x half> %x) {
 ; CHECK-V-NEXT:    vslideup.vi v10, v8, 5
 ; CHECK-V-NEXT:    vs2r.v v10, (a0) # Unknown-size Folded Spill
 ; CHECK-V-NEXT:    fmv.w.x fa0, s1
-; CHECK-V-NEXT:    call __extendhfsf2@plt
+; CHECK-V-NEXT:    call __extendhfsf2
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
 ; CHECK-V-NEXT:    vsetivli zero, 7, e32, m2, tu, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
@@ -4749,7 +4749,7 @@ define <8 x i16> @stest_f16i16_mm(<8 x half> %x) {
 ; CHECK-V-NEXT:    vslideup.vi v10, v8, 6
 ; CHECK-V-NEXT:    vs2r.v v10, (a0) # Unknown-size Folded Spill
 ; CHECK-V-NEXT:    fmv.w.x fa0, s0
-; CHECK-V-NEXT:    call __extendhfsf2@plt
+; CHECK-V-NEXT:    call __extendhfsf2
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
 ; CHECK-V-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
@@ -4826,31 +4826,31 @@ define <8 x i16> @utesth_f16i16_mm(<8 x half> %x) {
 ; CHECK-NOV-NEXT:    lhu a1, 8(a1)
 ; CHECK-NOV-NEXT:    mv s0, a0
 ; CHECK-NOV-NEXT:    fmv.w.x fa0, a1
-; CHECK-NOV-NEXT:    call __extendhfsf2@plt
+; CHECK-NOV-NEXT:    call __extendhfsf2
 ; CHECK-NOV-NEXT:    fmv.s fs5, fa0
 ; CHECK-NOV-NEXT:    fmv.w.x fa0, s7
-; CHECK-NOV-NEXT:    call __extendhfsf2@plt
+; CHECK-NOV-NEXT:    call __extendhfsf2
 ; CHECK-NOV-NEXT:    fmv.s fs6, fa0
 ; CHECK-NOV-NEXT:    fmv.w.x fa0, s6
-; CHECK-NOV-NEXT:    call __extendhfsf2@plt
+; CHECK-NOV-NEXT:    call __extendhfsf2
 ; CHECK-NOV-NEXT:    fmv.s fs4, fa0
 ; CHECK-NOV-NEXT:    fmv.w.x fa0, s5
-; CHECK-NOV-NEXT:    call __extendhfsf2@plt
+; CHECK-NOV-NEXT:    call __extendhfsf2
 ; CHECK-NOV-NEXT:    fmv.s fs3, fa0
 ; CHECK-NOV-NEXT:    fmv.w.x fa0, s4
-; CHECK-NOV-NEXT:    call __extendhfsf2@plt
+; CHECK-NOV-NEXT:    call __extendhfsf2
 ; CHECK-NOV-NEXT:    fmv.s fs2, fa0
 ; CHECK-NOV-NEXT:    fmv.w.x fa0, s3
-; CHECK-NOV-NEXT:    call __extendhfsf2@plt
+; CHECK-NOV-NEXT:    call __extendhfsf2
 ; CHECK-NOV-NEXT:    fmv.s fs1, fa0
 ; CHECK-NOV-NEXT:    fmv.w.x fa0, s2
-; CHECK-NOV-NEXT:    call __extendhfsf2@plt
+; CHECK-NOV-NEXT:    call __extendhfsf2
 ; CHECK-NOV-NEXT:    fmv.s fs0, fa0
 ; CHECK-NOV-NEXT:    fcvt.lu.s s2, fs6, rtz
 ; CHECK-NOV-NEXT:    fcvt.lu.s a0, fs5, rtz
 ; CHECK-NOV-NEXT:    fmv.w.x fa0, s1
 ; CHECK-NOV-NEXT:    sext.w s1, a0
-; CHECK-NOV-NEXT:    call __extendhfsf2@plt
+; CHECK-NOV-NEXT:    call __extendhfsf2
 ; CHECK-NOV-NEXT:    fcvt.lu.s a0, fa0, rtz
 ; CHECK-NOV-NEXT:    sext.w a0, a0
 ; CHECK-NOV-NEXT:    lui a1, 16
@@ -4962,14 +4962,14 @@ define <8 x i16> @utesth_f16i16_mm(<8 x half> %x) {
 ; CHECK-V-NEXT:    lhu s6, 0(a0)
 ; CHECK-V-NEXT:    lhu a0, 8(a0)
 ; CHECK-V-NEXT:    fmv.w.x fa0, a0
-; CHECK-V-NEXT:    call __extendhfsf2@plt
+; CHECK-V-NEXT:    call __extendhfsf2
 ; CHECK-V-NEXT:    fcvt.lu.s a0, fa0, rtz
 ; CHECK-V-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
 ; CHECK-V-NEXT:    addi a0, sp, 16
 ; CHECK-V-NEXT:    vs2r.v v8, (a0) # Unknown-size Folded Spill
 ; CHECK-V-NEXT:    fmv.w.x fa0, s6
-; CHECK-V-NEXT:    call __extendhfsf2@plt
+; CHECK-V-NEXT:    call __extendhfsf2
 ; CHECK-V-NEXT:    fcvt.lu.s a0, fa0, rtz
 ; CHECK-V-NEXT:    vsetivli zero, 2, e32, m2, tu, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
@@ -4978,7 +4978,7 @@ define <8 x i16> @utesth_f16i16_mm(<8 x half> %x) {
 ; CHECK-V-NEXT:    vslideup.vi v8, v10, 1
 ; CHECK-V-NEXT:    vs2r.v v8, (a0) # Unknown-size Folded Spill
 ; CHECK-V-NEXT:    fmv.w.x fa0, s5
-; CHECK-V-NEXT:    call __extendhfsf2@plt
+; CHECK-V-NEXT:    call __extendhfsf2
 ; CHECK-V-NEXT:    fcvt.lu.s a0, fa0, rtz
 ; CHECK-V-NEXT:    vsetivli zero, 3, e32, m2, tu, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
@@ -4987,7 +4987,7 @@ define <8 x i16> @utesth_f16i16_mm(<8 x half> %x) {
 ; CHECK-V-NEXT:    vslideup.vi v10, v8, 2
 ; CHECK-V-NEXT:    vs2r.v v10, (a0) # Unknown-size Folded Spill
 ; CHECK-V-NEXT:    fmv.w.x fa0, s4
-; CHECK-V-NEXT:    call __extendhfsf2@plt
+; CHECK-V-NEXT:    call __extendhfsf2
 ; CHECK-V-NEXT:    fcvt.lu.s a0, fa0, rtz
 ; CHECK-V-NEXT:    vsetivli zero, 4, e32, m2, tu, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
@@ -4996,7 +4996,7 @@ define <8 x i16> @utesth_f16i16_mm(<8 x half> %x) {
 ; CHECK-V-NEXT:    vslideup.vi v10, v8, 3
 ; CHECK-V-NEXT:    vs2r.v v10, (a0) # Unknown-size Folded Spill
 ; CHECK-V-NEXT:    fmv.w.x fa0, s3
-; CHECK-V-NEXT:    call __extendhfsf2@plt
+; CHECK-V-NEXT:    call __extendhfsf2
 ; CHECK-V-NEXT:    fcvt.lu.s a0, fa0, rtz
 ; CHECK-V-NEXT:    vsetivli zero, 5, e32, m2, tu, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
@@ -5005,7 +5005,7 @@ define <8 x i16> @utesth_f16i16_mm(<8 x half> %x) {
 ; CHECK-V-NEXT:    vslideup.vi v10, v8, 4
 ; CHECK-V-NEXT:    vs2r.v v10, (a0) # Unknown-size Folded Spill
 ; CHECK-V-NEXT:    fmv.w.x fa0, s2
-; CHECK-V-NEXT:    call __extendhfsf2@plt
+; CHECK-V-NEXT:    call __extendhfsf2
 ; CHECK-V-NEXT:    fcvt.lu.s a0, fa0, rtz
 ; CHECK-V-NEXT:    vsetivli zero, 6, e32, m2, tu, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
@@ -5014,7 +5014,7 @@ define <8 x i16> @utesth_f16i16_mm(<8 x half> %x) {
 ; CHECK-V-NEXT:    vslideup.vi v10, v8, 5
 ; CHECK-V-NEXT:    vs2r.v v10, (a0) # Unknown-size Folded Spill
 ; CHECK-V-NEXT:    fmv.w.x fa0, s1
-; CHECK-V-NEXT:    call __extendhfsf2@plt
+; CHECK-V-NEXT:    call __extendhfsf2
 ; CHECK-V-NEXT:    fcvt.lu.s a0, fa0, rtz
 ; CHECK-V-NEXT:    vsetivli zero, 7, e32, m2, tu, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
@@ -5023,7 +5023,7 @@ define <8 x i16> @utesth_f16i16_mm(<8 x half> %x) {
 ; CHECK-V-NEXT:    vslideup.vi v10, v8, 6
 ; CHECK-V-NEXT:    vs2r.v v10, (a0) # Unknown-size Folded Spill
 ; CHECK-V-NEXT:    fmv.w.x fa0, s0
-; CHECK-V-NEXT:    call __extendhfsf2@plt
+; CHECK-V-NEXT:    call __extendhfsf2
 ; CHECK-V-NEXT:    fcvt.lu.s a0, fa0, rtz
 ; CHECK-V-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
@@ -5099,29 +5099,29 @@ define <8 x i16> @ustest_f16i16_mm(<8 x half> %x) {
 ; CHECK-NOV-NEXT:    lhu a1, 48(a1)
 ; CHECK-NOV-NEXT:    mv s0, a0
 ; CHECK-NOV-NEXT:    fmv.w.x fa0, a1
-; CHECK-NOV-NEXT:    call __extendhfsf2@plt
+; CHECK-NOV-NEXT:    call __extendhfsf2
 ; CHECK-NOV-NEXT:    fmv.s fs6, fa0
 ; CHECK-NOV-NEXT:    fmv.w.x fa0, s7
-; CHECK-NOV-NEXT:    call __extendhfsf2@plt
+; CHECK-NOV-NEXT:    call __extendhfsf2
 ; CHECK-NOV-NEXT:    fmv.s fs5, fa0
 ; CHECK-NOV-NEXT:    fmv.w.x fa0, s6
-; CHECK-NOV-NEXT:    call __extendhfsf2@plt
+; CHECK-NOV-NEXT:    call __extendhfsf2
 ; CHECK-NOV-NEXT:    fmv.s fs4, fa0
 ; CHECK-NOV-NEXT:    fmv.w.x fa0, s5
-; CHECK-NOV-NEXT:    call __extendhfsf2@plt
+; CHECK-NOV-NEXT:    call __extendhfsf2
 ; CHECK-NOV-NEXT:    fmv.s fs3, fa0
 ; CHECK-NOV-NEXT:    fmv.w.x fa0, s4
-; CHECK-NOV-NEXT:    call __extendhfsf2@plt
+; CHECK-NOV-NEXT:    call __extendhfsf2
 ; CHECK-NOV-NEXT:    fmv.s fs2, fa0
 ; CHECK-NOV-NEXT:    fmv.w.x fa0, s3
-; CHECK-NOV-NEXT:    call __extendhfsf2@plt
+; CHECK-NOV-NEXT:    call __extendhfsf2
 ; CHECK-NOV-NEXT:    fmv.s fs1, fa0
 ; CHECK-NOV-NEXT:    fmv.w.x fa0, s2
-; CHECK-NOV-NEXT:    call __extendhfsf2@plt
+; CHECK-NOV-NEXT:    call __extendhfsf2
 ; CHECK-NOV-NEXT:    fmv.s fs0, fa0
 ; CHECK-NOV-NEXT:    fmv.w.x fa0, s1
 ; CHECK-NOV-NEXT:    fcvt.l.s s1, fs6, rtz
-; CHECK-NOV-NEXT:    call __extendhfsf2@plt
+; CHECK-NOV-NEXT:    call __extendhfsf2
 ; CHECK-NOV-NEXT:    fcvt.l.s a0, fa0, rtz
 ; CHECK-NOV-NEXT:    lui a3, 16
 ; CHECK-NOV-NEXT:    addiw a3, a3, -1
@@ -5261,14 +5261,14 @@ define <8 x i16> @ustest_f16i16_mm(<8 x half> %x) {
 ; CHECK-V-NEXT:    lhu s6, 0(a0)
 ; CHECK-V-NEXT:    lhu a0, 8(a0)
 ; CHECK-V-NEXT:    fmv.w.x fa0, a0
-; CHECK-V-NEXT:    call __extendhfsf2@plt
+; CHECK-V-NEXT:    call __extendhfsf2
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
 ; CHECK-V-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
 ; CHECK-V-NEXT:    addi a0, sp, 16
 ; CHECK-V-NEXT:    vs2r.v v8, (a0) # Unknown-size Folded Spill
 ; CHECK-V-NEXT:    fmv.w.x fa0, s6
-; CHECK-V-NEXT:    call __extendhfsf2@plt
+; CHECK-V-NEXT:    call __extendhfsf2
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
 ; CHECK-V-NEXT:    vsetivli zero, 2, e32, m2, tu, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
@@ -5277,7 +5277,7 @@ define <8 x i16> @ustest_f16i16_mm(<8 x half> %x) {
 ; CHECK-V-NEXT:    vslideup.vi v8, v10, 1
 ; CHECK-V-NEXT:    vs2r.v v8, (a0) # Unknown-size Folded Spill
 ; CHECK-V-NEXT:    fmv.w.x fa0, s5
-; CHECK-V-NEXT:    call __extendhfsf2@plt
+; CHECK-V-NEXT:    call __extendhfsf2
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
 ; CHECK-V-NEXT:    vsetivli zero, 3, e32, m2, tu, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
@@ -5286,7 +5286,7 @@ define <8 x i16> @ustest_f16i16_mm(<8 x half> %x) {
 ; CHECK-V-NEXT:    vslideup.vi v10, v8, 2
 ; CHECK-V-NEXT:    vs2r.v v10, (a0) # Unknown-size Folded Spill
 ; CHECK-V-NEXT:    fmv.w.x fa0, s4
-; CHECK-V-NEXT:    call __extendhfsf2@plt
+; CHECK-V-NEXT:    call __extendhfsf2
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
 ; CHECK-V-NEXT:    vsetivli zero, 4, e32, m2, tu, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
@@ -5295,7 +5295,7 @@ define <8 x i16> @ustest_f16i16_mm(<8 x half> %x) {
 ; CHECK-V-NEXT:    vslideup.vi v10, v8, 3
 ; CHECK-V-NEXT:    vs2r.v v10, (a0) # Unknown-size Folded Spill
 ; CHECK-V-NEXT:    fmv.w.x fa0, s3
-; CHECK-V-NEXT:    call __extendhfsf2@plt
+; CHECK-V-NEXT:    call __extendhfsf2
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
 ; CHECK-V-NEXT:    vsetivli zero, 5, e32, m2, tu, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
@@ -5304,7 +5304,7 @@ define <8 x i16> @ustest_f16i16_mm(<8 x half> %x) {
 ; CHECK-V-NEXT:    vslideup.vi v10, v8, 4
 ; CHECK-V-NEXT:    vs2r.v v10, (a0) # Unknown-size Folded Spill
 ; CHECK-V-NEXT:    fmv.w.x fa0, s2
-; CHECK-V-NEXT:    call __extendhfsf2@plt
+; CHECK-V-NEXT:    call __extendhfsf2
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
 ; CHECK-V-NEXT:    vsetivli zero, 6, e32, m2, tu, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
@@ -5313,7 +5313,7 @@ define <8 x i16> @ustest_f16i16_mm(<8 x half> %x) {
 ; CHECK-V-NEXT:    vslideup.vi v10, v8, 5
 ; CHECK-V-NEXT:    vs2r.v v10, (a0) # Unknown-size Folded Spill
 ; CHECK-V-NEXT:    fmv.w.x fa0, s1
-; CHECK-V-NEXT:    call __extendhfsf2@plt
+; CHECK-V-NEXT:    call __extendhfsf2
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
 ; CHECK-V-NEXT:    vsetivli zero, 7, e32, m2, tu, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
@@ -5322,7 +5322,7 @@ define <8 x i16> @ustest_f16i16_mm(<8 x half> %x) {
 ; CHECK-V-NEXT:    vslideup.vi v10, v8, 6
 ; CHECK-V-NEXT:    vs2r.v v10, (a0) # Unknown-size Folded Spill
 ; CHECK-V-NEXT:    fmv.w.x fa0, s0
-; CHECK-V-NEXT:    call __extendhfsf2@plt
+; CHECK-V-NEXT:    call __extendhfsf2
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
 ; CHECK-V-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
@@ -5372,11 +5372,11 @@ define <2 x i64> @stest_f64i64_mm(<2 x double> %x) {
 ; CHECK-NOV-NEXT:    .cfi_offset s1, -24
 ; CHECK-NOV-NEXT:    .cfi_offset fs0, -32
 ; CHECK-NOV-NEXT:    fmv.d fs0, fa1
-; CHECK-NOV-NEXT:    call __fixdfti@plt
+; CHECK-NOV-NEXT:    call __fixdfti
 ; CHECK-NOV-NEXT:    mv s0, a0
 ; CHECK-NOV-NEXT:    mv s1, a1
 ; CHECK-NOV-NEXT:    fmv.d fa0, fs0
-; CHECK-NOV-NEXT:    call __fixdfti@plt
+; CHECK-NOV-NEXT:    call __fixdfti
 ; CHECK-NOV-NEXT:    mv a2, a0
 ; CHECK-NOV-NEXT:    li a0, -1
 ; CHECK-NOV-NEXT:    srli a3, a0, 1
@@ -5460,14 +5460,14 @@ define <2 x i64> @stest_f64i64_mm(<2 x double> %x) {
 ; CHECK-V-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; CHECK-V-NEXT:    vslidedown.vi v9, v8, 1
 ; CHECK-V-NEXT:    vfmv.f.s fa0, v9
-; CHECK-V-NEXT:    call __fixdfti@plt
+; CHECK-V-NEXT:    call __fixdfti
 ; CHECK-V-NEXT:    mv s0, a0
 ; CHECK-V-NEXT:    mv s1, a1
 ; CHECK-V-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; CHECK-V-NEXT:    addi a0, sp, 32
 ; CHECK-V-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
 ; CHECK-V-NEXT:    vfmv.f.s fa0, v8
-; CHECK-V-NEXT:    call __fixdfti@plt
+; CHECK-V-NEXT:    call __fixdfti
 ; CHECK-V-NEXT:    li a2, -1
 ; CHECK-V-NEXT:    srli a3, a2, 1
 ; CHECK-V-NEXT:    beqz a1, .LBB45_2
@@ -5557,11 +5557,11 @@ define <2 x i64> @utest_f64i64_mm(<2 x double> %x) {
 ; CHECK-NOV-NEXT:    .cfi_offset fs0, -32
 ; CHECK-NOV-NEXT:    fmv.d fs0, fa0
 ; CHECK-NOV-NEXT:    fmv.d fa0, fa1
-; CHECK-NOV-NEXT:    call __fixunsdfti@plt
+; CHECK-NOV-NEXT:    call __fixunsdfti
 ; CHECK-NOV-NEXT:    mv s0, a0
 ; CHECK-NOV-NEXT:    mv s1, a1
 ; CHECK-NOV-NEXT:    fmv.d fa0, fs0
-; CHECK-NOV-NEXT:    call __fixunsdfti@plt
+; CHECK-NOV-NEXT:    call __fixunsdfti
 ; CHECK-NOV-NEXT:    snez a1, a1
 ; CHECK-NOV-NEXT:    addi a1, a1, -1
 ; CHECK-NOV-NEXT:    and a0, a1, a0
@@ -5593,7 +5593,7 @@ define <2 x i64> @utest_f64i64_mm(<2 x double> %x) {
 ; CHECK-V-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
 ; CHECK-V-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; CHECK-V-NEXT:    vfmv.f.s fa0, v8
-; CHECK-V-NEXT:    call __fixunsdfti@plt
+; CHECK-V-NEXT:    call __fixunsdfti
 ; CHECK-V-NEXT:    mv s0, a0
 ; CHECK-V-NEXT:    mv s1, a1
 ; CHECK-V-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
@@ -5601,7 +5601,7 @@ define <2 x i64> @utest_f64i64_mm(<2 x double> %x) {
 ; CHECK-V-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
 ; CHECK-V-NEXT:    vslidedown.vi v8, v8, 1
 ; CHECK-V-NEXT:    vfmv.f.s fa0, v8
-; CHECK-V-NEXT:    call __fixunsdfti@plt
+; CHECK-V-NEXT:    call __fixunsdfti
 ; CHECK-V-NEXT:    snez a1, a1
 ; CHECK-V-NEXT:    addi a1, a1, -1
 ; CHECK-V-NEXT:    and a0, a1, a0
@@ -5641,11 +5641,11 @@ define <2 x i64> @ustest_f64i64_mm(<2 x double> %x) {
 ; CHECK-NOV-NEXT:    .cfi_offset s1, -24
 ; CHECK-NOV-NEXT:    .cfi_offset fs0, -32
 ; CHECK-NOV-NEXT:    fmv.d fs0, fa1
-; CHECK-NOV-NEXT:    call __fixdfti@plt
+; CHECK-NOV-NEXT:    call __fixdfti
 ; CHECK-NOV-NEXT:    mv s0, a0
 ; CHECK-NOV-NEXT:    mv s1, a1
 ; CHECK-NOV-NEXT:    fmv.d fa0, fs0
-; CHECK-NOV-NEXT:    call __fixdfti@plt
+; CHECK-NOV-NEXT:    call __fixdfti
 ; CHECK-NOV-NEXT:    mv a2, a1
 ; CHECK-NOV-NEXT:    blez a1, .LBB47_2
 ; CHECK-NOV-NEXT:  # %bb.1: # %entry
@@ -5694,14 +5694,14 @@ define <2 x i64> @ustest_f64i64_mm(<2 x double> %x) {
 ; CHECK-V-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; CHECK-V-NEXT:    vslidedown.vi v9, v8, 1
 ; CHECK-V-NEXT:    vfmv.f.s fa0, v9
-; CHECK-V-NEXT:    call __fixdfti@plt
+; CHECK-V-NEXT:    call __fixdfti
 ; CHECK-V-NEXT:    mv s0, a0
 ; CHECK-V-NEXT:    mv s1, a1
 ; CHECK-V-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; CHECK-V-NEXT:    addi a0, sp, 32
 ; CHECK-V-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
 ; CHECK-V-NEXT:    vfmv.f.s fa0, v8
-; CHECK-V-NEXT:    call __fixdfti@plt
+; CHECK-V-NEXT:    call __fixdfti
 ; CHECK-V-NEXT:    mv a2, a1
 ; CHECK-V-NEXT:    blez a1, .LBB47_2
 ; CHECK-V-NEXT:  # %bb.1: # %entry
@@ -5758,11 +5758,11 @@ define <2 x i64> @stest_f32i64_mm(<2 x float> %x) {
 ; CHECK-NOV-NEXT:    .cfi_offset s1, -24
 ; CHECK-NOV-NEXT:    .cfi_offset fs0, -32
 ; CHECK-NOV-NEXT:    fmv.s fs0, fa1
-; CHECK-NOV-NEXT:    call __fixsfti@plt
+; CHECK-NOV-NEXT:    call __fixsfti
 ; CHECK-NOV-NEXT:    mv s0, a0
 ; CHECK-NOV-NEXT:    mv s1, a1
 ; CHECK-NOV-NEXT:    fmv.s fa0, fs0
-; CHECK-NOV-NEXT:    call __fixsfti@plt
+; CHECK-NOV-NEXT:    call __fixsfti
 ; CHECK-NOV-NEXT:    mv a2, a0
 ; CHECK-NOV-NEXT:    li a0, -1
 ; CHECK-NOV-NEXT:    srli a3, a0, 1
@@ -5846,14 +5846,14 @@ define <2 x i64> @stest_f32i64_mm(<2 x float> %x) {
 ; CHECK-V-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
 ; CHECK-V-NEXT:    vslidedown.vi v9, v8, 1
 ; CHECK-V-NEXT:    vfmv.f.s fa0, v9
-; CHECK-V-NEXT:    call __fixsfti@plt
+; CHECK-V-NEXT:    call __fixsfti
 ; CHECK-V-NEXT:    mv s0, a0
 ; CHECK-V-NEXT:    mv s1, a1
 ; CHECK-V-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
 ; CHECK-V-NEXT:    addi a0, sp, 32
 ; CHECK-V-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
 ; CHECK-V-NEXT:    vfmv.f.s fa0, v8
-; CHECK-V-NEXT:    call __fixsfti@plt
+; CHECK-V-NEXT:    call __fixsfti
 ; CHECK-V-NEXT:    li a2, -1
 ; CHECK-V-NEXT:    srli a3, a2, 1
 ; CHECK-V-NEXT:    beqz a1, .LBB48_2
@@ -5943,11 +5943,11 @@ define <2 x i64> @utest_f32i64_mm(<2 x float> %x) {
 ; CHECK-NOV-NEXT:    .cfi_offset fs0, -32
 ; CHECK-NOV-NEXT:    fmv.s fs0, fa0
 ; CHECK-NOV-NEXT:    fmv.s fa0, fa1
-; CHECK-NOV-NEXT:    call __fixunssfti@plt
+; CHECK-NOV-NEXT:    call __fixunssfti
 ; CHECK-NOV-NEXT:    mv s0, a0
 ; CHECK-NOV-NEXT:    mv s1, a1
 ; CHECK-NOV-NEXT:    fmv.s fa0, fs0
-; CHECK-NOV-NEXT:    call __fixunssfti@plt
+; CHECK-NOV-NEXT:    call __fixunssfti
 ; CHECK-NOV-NEXT:    snez a1, a1
 ; CHECK-NOV-NEXT:    addi a1, a1, -1
 ; CHECK-NOV-NEXT:    and a0, a1, a0
@@ -5979,7 +5979,7 @@ define <2 x i64> @utest_f32i64_mm(<2 x float> %x) {
 ; CHECK-V-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
 ; CHECK-V-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
 ; CHECK-V-NEXT:    vfmv.f.s fa0, v8
-; CHECK-V-NEXT:    call __fixunssfti@plt
+; CHECK-V-NEXT:    call __fixunssfti
 ; CHECK-V-NEXT:    mv s0, a0
 ; CHECK-V-NEXT:    mv s1, a1
 ; CHECK-V-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
@@ -5987,7 +5987,7 @@ define <2 x i64> @utest_f32i64_mm(<2 x float> %x) {
 ; CHECK-V-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
 ; CHECK-V-NEXT:    vslidedown.vi v8, v8, 1
 ; CHECK-V-NEXT:    vfmv.f.s fa0, v8
-; CHECK-V-NEXT:    call __fixunssfti@plt
+; CHECK-V-NEXT:    call __fixunssfti
 ; CHECK-V-NEXT:    snez a1, a1
 ; CHECK-V-NEXT:    addi a1, a1, -1
 ; CHECK-V-NEXT:    and a0, a1, a0
@@ -6027,11 +6027,11 @@ define <2 x i64> @ustest_f32i64_mm(<2 x float> %x) {
 ; CHECK-NOV-NEXT:    .cfi_offset s1, -24
 ; CHECK-NOV-NEXT:    .cfi_offset fs0, -32
 ; CHECK-NOV-NEXT:    fmv.s fs0, fa1
-; CHECK-NOV-NEXT:    call __fixsfti@plt
+; CHECK-NOV-NEXT:    call __fixsfti
 ; CHECK-NOV-NEXT:    mv s0, a0
 ; CHECK-NOV-NEXT:    mv s1, a1
 ; CHECK-NOV-NEXT:    fmv.s fa0, fs0
-; CHECK-NOV-NEXT:    call __fixsfti@plt
+; CHECK-NOV-NEXT:    call __fixsfti
 ; CHECK-NOV-NEXT:    mv a2, a1
 ; CHECK-NOV-NEXT:    blez a1, .LBB50_2
 ; CHECK-NOV-NEXT:  # %bb.1: # %entry
@@ -6080,14 +6080,14 @@ define <2 x i64> @ustest_f32i64_mm(<2 x float> %x) {
 ; CHECK-V-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
 ; CHECK-V-NEXT:    vslidedown.vi v9, v8, 1
 ; CHECK-V-NEXT:    vfmv.f.s fa0, v9
-; CHECK-V-NEXT:    call __fixsfti@plt
+; CHECK-V-NEXT:    call __fixsfti
 ; CHECK-V-NEXT:    mv s0, a0
 ; CHECK-V-NEXT:    mv s1, a1
 ; CHECK-V-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
 ; CHECK-V-NEXT:    addi a0, sp, 32
 ; CHECK-V-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
 ; CHECK-V-NEXT:    vfmv.f.s fa0, v8
-; CHECK-V-NEXT:    call __fixsfti@plt
+; CHECK-V-NEXT:    call __fixsfti
 ; CHECK-V-NEXT:    mv a2, a1
 ; CHECK-V-NEXT:    blez a1, .LBB50_2
 ; CHECK-V-NEXT:  # %bb.1: # %entry
@@ -6145,13 +6145,13 @@ define <2 x i64> @stest_f16i64_mm(<2 x half> %x) {
 ; CHECK-NOV-NEXT:    .cfi_offset s2, -32
 ; CHECK-NOV-NEXT:    mv s2, a1
 ; CHECK-NOV-NEXT:    fmv.w.x fa0, a0
-; CHECK-NOV-NEXT:    call __extendhfsf2@plt
-; CHECK-NOV-NEXT:    call __fixsfti@plt
+; CHECK-NOV-NEXT:    call __extendhfsf2
+; CHECK-NOV-NEXT:    call __fixsfti
 ; CHECK-NOV-NEXT:    mv s0, a0
 ; CHECK-NOV-NEXT:    mv s1, a1
 ; CHECK-NOV-NEXT:    fmv.w.x fa0, s2
-; CHECK-NOV-NEXT:    call __extendhfsf2@plt
-; CHECK-NOV-NEXT:    call __fixsfti@plt
+; CHECK-NOV-NEXT:    call __extendhfsf2
+; CHECK-NOV-NEXT:    call __fixsfti
 ; CHECK-NOV-NEXT:    mv a2, a0
 ; CHECK-NOV-NEXT:    li a0, -1
 ; CHECK-NOV-NEXT:    srli a3, a0, 1
@@ -6230,13 +6230,13 @@ define <2 x i64> @stest_f16i64_mm(<2 x half> %x) {
 ; CHECK-V-NEXT:    .cfi_offset s2, -32
 ; CHECK-V-NEXT:    mv s2, a1
 ; CHECK-V-NEXT:    fmv.w.x fa0, a0
-; CHECK-V-NEXT:    call __extendhfsf2@plt
-; CHECK-V-NEXT:    call __fixsfti@plt
+; CHECK-V-NEXT:    call __extendhfsf2
+; CHECK-V-NEXT:    call __fixsfti
 ; CHECK-V-NEXT:    mv s0, a0
 ; CHECK-V-NEXT:    mv s1, a1
 ; CHECK-V-NEXT:    fmv.w.x fa0, s2
-; CHECK-V-NEXT:    call __extendhfsf2@plt
-; CHECK-V-NEXT:    call __fixsfti@plt
+; CHECK-V-NEXT:    call __extendhfsf2
+; CHECK-V-NEXT:    call __fixsfti
 ; CHECK-V-NEXT:    li a2, -1
 ; CHECK-V-NEXT:    srli a3, a2, 1
 ; CHECK-V-NEXT:    beqz a1, .LBB51_2
@@ -6324,13 +6324,13 @@ define <2 x i64> @utesth_f16i64_mm(<2 x half> %x) {
 ; CHECK-NOV-NEXT:    .cfi_offset s2, -32
 ; CHECK-NOV-NEXT:    mv s0, a0
 ; CHECK-NOV-NEXT:    fmv.w.x fa0, a1
-; CHECK-NOV-NEXT:    call __extendhfsf2@plt
-; CHECK-NOV-NEXT:    call __fixunssfti@plt
+; CHECK-NOV-NEXT:    call __extendhfsf2
+; CHECK-NOV-NEXT:    call __fixunssfti
 ; CHECK-NOV-NEXT:    mv s1, a0
 ; CHECK-NOV-NEXT:    mv s2, a1
 ; CHECK-NOV-NEXT:    fmv.w.x fa0, s0
-; CHECK-NOV-NEXT:    call __extendhfsf2@plt
-; CHECK-NOV-NEXT:    call __fixunssfti@plt
+; CHECK-NOV-NEXT:    call __extendhfsf2
+; CHECK-NOV-NEXT:    call __fixunssfti
 ; CHECK-NOV-NEXT:    snez a1, a1
 ; CHECK-NOV-NEXT:    addi a1, a1, -1
 ; CHECK-NOV-NEXT:    and a0, a1, a0
@@ -6358,13 +6358,13 @@ define <2 x i64> @utesth_f16i64_mm(<2 x half> %x) {
 ; CHECK-V-NEXT:    .cfi_offset s2, -32
 ; CHECK-V-NEXT:    mv s0, a0
 ; CHECK-V-NEXT:    fmv.w.x fa0, a1
-; CHECK-V-NEXT:    call __extendhfsf2@plt
-; CHECK-V-NEXT:    call __fixunssfti@plt
+; CHECK-V-NEXT:    call __extendhfsf2
+; CHECK-V-NEXT:    call __fixunssfti
 ; CHECK-V-NEXT:    mv s1, a0
 ; CHECK-V-NEXT:    mv s2, a1
 ; CHECK-V-NEXT:    fmv.w.x fa0, s0
-; CHECK-V-NEXT:    call __extendhfsf2@plt
-; CHECK-V-NEXT:    call __fixunssfti@plt
+; CHECK-V-NEXT:    call __extendhfsf2
+; CHECK-V-NEXT:    call __fixunssfti
 ; CHECK-V-NEXT:    snez a1, a1
 ; CHECK-V-NEXT:    addi a1, a1, -1
 ; CHECK-V-NEXT:    and a0, a1, a0
@@ -6403,13 +6403,13 @@ define <2 x i64> @ustest_f16i64_mm(<2 x half> %x) {
 ; CHECK-NOV-NEXT:    .cfi_offset s2, -32
 ; CHECK-NOV-NEXT:    mv s2, a1
 ; CHECK-NOV-NEXT:    fmv.w.x fa0, a0
-; CHECK-NOV-NEXT:    call __extendhfsf2@plt
-; CHECK-NOV-NEXT:    call __fixsfti@plt
+; CHECK-NOV-NEXT:    call __extendhfsf2
+; CHECK-NOV-NEXT:    call __fixsfti
 ; CHECK-NOV-NEXT:    mv s0, a0
 ; CHECK-NOV-NEXT:    mv s1, a1
 ; CHECK-NOV-NEXT:    fmv.w.x fa0, s2
-; CHECK-NOV-NEXT:    call __extendhfsf2@plt
-; CHECK-NOV-NEXT:    call __fixsfti@plt
+; CHECK-NOV-NEXT:    call __extendhfsf2
+; CHECK-NOV-NEXT:    call __fixsfti
 ; CHECK-NOV-NEXT:    mv a2, a1
 ; CHECK-NOV-NEXT:    blez a1, .LBB53_2
 ; CHECK-NOV-NEXT:  # %bb.1: # %entry
@@ -6453,13 +6453,13 @@ define <2 x i64> @ustest_f16i64_mm(<2 x half> %x) {
 ; CHECK-V-NEXT:    .cfi_offset s2, -32
 ; CHECK-V-NEXT:    mv s2, a1
 ; CHECK-V-NEXT:    fmv.w.x fa0, a0
-; CHECK-V-NEXT:    call __extendhfsf2@plt
-; CHECK-V-NEXT:    call __fixsfti@plt
+; CHECK-V-NEXT:    call __extendhfsf2
+; CHECK-V-NEXT:    call __fixsfti
 ; CHECK-V-NEXT:    mv s0, a0
 ; CHECK-V-NEXT:    mv s1, a1
 ; CHECK-V-NEXT:    fmv.w.x fa0, s2
-; CHECK-V-NEXT:    call __extendhfsf2@plt
-; CHECK-V-NEXT:    call __fixsfti@plt
+; CHECK-V-NEXT:    call __extendhfsf2
+; CHECK-V-NEXT:    call __fixsfti
 ; CHECK-V-NEXT:    mv a2, a1
 ; CHECK-V-NEXT:    blez a1, .LBB53_2
 ; CHECK-V-NEXT:  # %bb.1: # %entry
diff --git a/llvm/test/CodeGen/RISCV/rvv/large-rvv-stack-size.mir b/llvm/test/CodeGen/RISCV/rvv/large-rvv-stack-size.mir
index de44fbc04e3a63..bf78329c261fac 100644
--- a/llvm/test/CodeGen/RISCV/rvv/large-rvv-stack-size.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/large-rvv-stack-size.mir
@@ -32,7 +32,7 @@
   ; CHECK-NEXT:    addi a0, a0, 241
   ; CHECK-NEXT:    vs1r.v v25, (a0) # Unknown-size Folded Spill
   ; CHECK-NEXT:    ld a0, 8(sp)
-  ; CHECK-NEXT:    call spillslot@plt
+  ; CHECK-NEXT:    call spillslot
   ; CHECK-NEXT:    addi sp, s0, -2048
   ; CHECK-NEXT:    addi sp, sp, -256
   ; CHECK-NEXT:    addi sp, sp, 272
diff --git a/llvm/test/CodeGen/RISCV/rvv/localvar.ll b/llvm/test/CodeGen/RISCV/rvv/localvar.ll
index 8c9a749d5ea16f..1ee88f897b6eb3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/localvar.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/localvar.ll
@@ -215,7 +215,7 @@ define void @local_var_m2_with_varsize_object(i64 %n) {
 ; RV64IV-NEXT:    slli s1, s1, 1
 ; RV64IV-NEXT:    sub s1, s0, s1
 ; RV64IV-NEXT:    addi s1, s1, -32
-; RV64IV-NEXT:    call notdead@plt
+; RV64IV-NEXT:    call notdead
 ; RV64IV-NEXT:    vl2r.v v8, (s1)
 ; RV64IV-NEXT:    csrr a0, vlenb
 ; RV64IV-NEXT:    slli a0, a0, 2
@@ -270,7 +270,7 @@ define void @local_var_m2_with_bp(i64 %n) {
 ; RV64IV-NEXT:    slli s2, s2, 1
 ; RV64IV-NEXT:    add s2, s1, s2
 ; RV64IV-NEXT:    addi s2, s2, 224
-; RV64IV-NEXT:    call notdead2@plt
+; RV64IV-NEXT:    call notdead2
 ; RV64IV-NEXT:    lw zero, 124(s1)
 ; RV64IV-NEXT:    vl2r.v v8, (s2)
 ; RV64IV-NEXT:    addi a0, s1, 224
diff --git a/llvm/test/CodeGen/RISCV/rvv/memory-args.ll b/llvm/test/CodeGen/RISCV/rvv/memory-args.ll
index 5cd9c374ed411e..bdfec92d2305e8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/memory-args.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/memory-args.ll
@@ -58,7 +58,7 @@ define <vscale x 64 x i8> @caller() {
 ; RV64IV-NEXT:    addi a1, sp, 64
 ; RV64IV-NEXT:    addi a0, sp, 64
 ; RV64IV-NEXT:    vs8r.v v24, (a1)
-; RV64IV-NEXT:    call callee@plt
+; RV64IV-NEXT:    call callee
 ; RV64IV-NEXT:    addi sp, s0, -80
 ; RV64IV-NEXT:    ld ra, 72(sp) # 8-byte Folded Reload
 ; RV64IV-NEXT:    ld s0, 64(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/rvv/no-reserved-frame.ll b/llvm/test/CodeGen/RISCV/rvv/no-reserved-frame.ll
index 705ec2df126b01..47b88ba71d5560 100644
--- a/llvm/test/CodeGen/RISCV/rvv/no-reserved-frame.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/no-reserved-frame.ll
@@ -38,7 +38,7 @@ define signext i32 @foo(i32 signext %aa) #0 {
 ; CHECK-NEXT:    addi a1, s1, 48
 ; CHECK-NEXT:    sd t1, 0(sp)
 ; CHECK-NEXT:    mv a0, t0
-; CHECK-NEXT:    call gfunc@plt
+; CHECK-NEXT:    call gfunc
 ; CHECK-NEXT:    addi sp, sp, 32
 ; CHECK-NEXT:    li a0, 0
 ; CHECK-NEXT:    addi sp, s0, -96
diff --git a/llvm/test/CodeGen/RISCV/rvv/pr63596.ll b/llvm/test/CodeGen/RISCV/rvv/pr63596.ll
index 65dca0daed8c77..c27488b18a017a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/pr63596.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/pr63596.ll
@@ -14,16 +14,16 @@ define <4 x float> @foo(ptr %0) nounwind {
 ; CHECK-NEXT:    lhu s2, 0(a0)
 ; CHECK-NEXT:    lhu a0, 2(a0)
 ; CHECK-NEXT:    fmv.w.x fa0, a0
-; CHECK-NEXT:    call __extendhfsf2@plt
+; CHECK-NEXT:    call __extendhfsf2
 ; CHECK-NEXT:    fsw fa0, 8(sp)
 ; CHECK-NEXT:    fmv.w.x fa0, s2
-; CHECK-NEXT:    call __extendhfsf2@plt
+; CHECK-NEXT:    call __extendhfsf2
 ; CHECK-NEXT:    fsw fa0, 0(sp)
 ; CHECK-NEXT:    fmv.w.x fa0, s1
-; CHECK-NEXT:    call __extendhfsf2@plt
+; CHECK-NEXT:    call __extendhfsf2
 ; CHECK-NEXT:    fsw fa0, 12(sp)
 ; CHECK-NEXT:    fmv.w.x fa0, s0
-; CHECK-NEXT:    call __extendhfsf2@plt
+; CHECK-NEXT:    call __extendhfsf2
 ; CHECK-NEXT:    fsw fa0, 4(sp)
 ; CHECK-NEXT:    addi a0, sp, 8
 ; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/reg-alloc-reserve-bp.ll b/llvm/test/CodeGen/RISCV/rvv/reg-alloc-reserve-bp.ll
index 978d1c8fb7b0e0..600ac594380b39 100644
--- a/llvm/test/CodeGen/RISCV/rvv/reg-alloc-reserve-bp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/reg-alloc-reserve-bp.ll
@@ -38,7 +38,7 @@ define void @foo(ptr nocapture noundef %p1) {
 ; CHECK-NEXT:    li a6, 7
 ; CHECK-NEXT:    li a7, 8
 ; CHECK-NEXT:    sd t0, 0(sp)
-; CHECK-NEXT:    call bar@plt
+; CHECK-NEXT:    call bar
 ; CHECK-NEXT:    addi sp, sp, 16
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
 ; CHECK-NEXT:    vle32.v v8, (s2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector-csr.ll b/llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector-csr.ll
index c44f5ebcde4829..129fbcfb88327b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector-csr.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector-csr.ll
@@ -28,7 +28,7 @@ define <vscale x 1 x double> @foo(<vscale x 1 x double> %a, <vscale x 1 x double
 ; SPILL-O0-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
 ; SPILL-O0-NEXT:    lui a0, %hi(.L.str)
 ; SPILL-O0-NEXT:    addi a0, a0, %lo(.L.str)
-; SPILL-O0-NEXT:    call puts@plt
+; SPILL-O0-NEXT:    call puts
 ; SPILL-O0-NEXT:    addi a1, sp, 16
 ; SPILL-O0-NEXT:    vl1r.v v10, (a1) # Unknown-size Folded Reload
 ; SPILL-O0-NEXT:    csrr a1, vlenb
@@ -66,7 +66,7 @@ define <vscale x 1 x double> @foo(<vscale x 1 x double> %a, <vscale x 1 x double
 ; SPILL-O2-NEXT:    vs1r.v v9, (a0) # Unknown-size Folded Spill
 ; SPILL-O2-NEXT:    lui a0, %hi(.L.str)
 ; SPILL-O2-NEXT:    addi a0, a0, %lo(.L.str)
-; SPILL-O2-NEXT:    call puts@plt
+; SPILL-O2-NEXT:    call puts
 ; SPILL-O2-NEXT:    vsetvli zero, s0, e64, m1, ta, ma
 ; SPILL-O2-NEXT:    csrr a0, vlenb
 ; SPILL-O2-NEXT:    add a0, sp, a0
diff --git a/llvm/test/CodeGen/RISCV/rvv/rv64-spill-vector-csr.ll b/llvm/test/CodeGen/RISCV/rvv/rv64-spill-vector-csr.ll
index 8beffdd04f98db..34eb58ee4d1c4b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rv64-spill-vector-csr.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rv64-spill-vector-csr.ll
@@ -31,7 +31,7 @@ define <vscale x 1 x double> @foo(<vscale x 1 x double> %a, <vscale x 1 x double
 ; SPILL-O0-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
 ; SPILL-O0-NEXT:    lui a0, %hi(.L.str)
 ; SPILL-O0-NEXT:    addi a0, a0, %lo(.L.str)
-; SPILL-O0-NEXT:    call puts@plt
+; SPILL-O0-NEXT:    call puts
 ; SPILL-O0-NEXT:    addi a1, sp, 32
 ; SPILL-O0-NEXT:    vl1r.v v10, (a1) # Unknown-size Folded Reload
 ; SPILL-O0-NEXT:    csrr a1, vlenb
@@ -69,7 +69,7 @@ define <vscale x 1 x double> @foo(<vscale x 1 x double> %a, <vscale x 1 x double
 ; SPILL-O2-NEXT:    vs1r.v v9, (a0) # Unknown-size Folded Spill
 ; SPILL-O2-NEXT:    lui a0, %hi(.L.str)
 ; SPILL-O2-NEXT:    addi a0, a0, %lo(.L.str)
-; SPILL-O2-NEXT:    call puts@plt
+; SPILL-O2-NEXT:    call puts
 ; SPILL-O2-NEXT:    vsetvli zero, s0, e64, m1, ta, ma
 ; SPILL-O2-NEXT:    csrr a0, vlenb
 ; SPILL-O2-NEXT:    add a0, sp, a0
@@ -101,7 +101,7 @@ define <vscale x 1 x double> @foo(<vscale x 1 x double> %a, <vscale x 1 x double
 ; SPILL-O2-VLEN128-NEXT:    vs1r.v v9, (a0) # Unknown-size Folded Spill
 ; SPILL-O2-VLEN128-NEXT:    lui a0, %hi(.L.str)
 ; SPILL-O2-VLEN128-NEXT:    addi a0, a0, %lo(.L.str)
-; SPILL-O2-VLEN128-NEXT:    call puts@plt
+; SPILL-O2-VLEN128-NEXT:    call puts
 ; SPILL-O2-VLEN128-NEXT:    vsetvli zero, s0, e64, m1, ta, ma
 ; SPILL-O2-VLEN128-NEXT:    addi a0, sp, 32
 ; SPILL-O2-VLEN128-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/rvv/rvv-args-by-mem.ll b/llvm/test/CodeGen/RISCV/rvv/rvv-args-by-mem.ll
index 27a5059e5a9eb8..c164b62a679be0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rvv-args-by-mem.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rvv-args-by-mem.ll
@@ -48,7 +48,7 @@ define <vscale x 16 x i32> @foo(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5,
 ; CHECK-NEXT:    sd t0, 8(sp)
 ; CHECK-NEXT:    sd t1, 0(sp)
 ; CHECK-NEXT:    vmv8r.v v16, v8
-; CHECK-NEXT:    call bar@plt
+; CHECK-NEXT:    call bar
 ; CHECK-NEXT:    addi sp, sp, 16
 ; CHECK-NEXT:    addi sp, s0, -96
 ; CHECK-NEXT:    ld ra, 88(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/rvv/rvv-stack-align.mir b/llvm/test/CodeGen/RISCV/rvv/rvv-stack-align.mir
index 686401da41a4eb..b8a922a9fb1a00 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rvv-stack-align.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/rvv-stack-align.mir
@@ -24,7 +24,7 @@
   ; RV32-NEXT:    addi a0, sp, 32
   ; RV32-NEXT:    addi a1, sp, 16
   ; RV32-NEXT:    addi a2, sp, 8
-  ; RV32-NEXT:    call extern@plt
+  ; RV32-NEXT:    call extern
   ; RV32-NEXT:    csrr a0, vlenb
   ; RV32-NEXT:    slli a0, a0, 1
   ; RV32-NEXT:    add sp, sp, a0
@@ -42,7 +42,7 @@
   ; RV64-NEXT:    addi a0, sp, 32
   ; RV64-NEXT:    addi a1, sp, 16
   ; RV64-NEXT:    addi a2, sp, 8
-  ; RV64-NEXT:    call extern@plt
+  ; RV64-NEXT:    call extern
   ; RV64-NEXT:    csrr a0, vlenb
   ; RV64-NEXT:    slli a0, a0, 1
   ; RV64-NEXT:    add sp, sp, a0
@@ -67,7 +67,7 @@
   ; RV32-NEXT:    addi a0, sp, 32
   ; RV32-NEXT:    addi a1, sp, 16
   ; RV32-NEXT:    addi a2, sp, 8
-  ; RV32-NEXT:    call extern@plt
+  ; RV32-NEXT:    call extern
   ; RV32-NEXT:    csrr a0, vlenb
   ; RV32-NEXT:    slli a0, a0, 1
   ; RV32-NEXT:    add sp, sp, a0
@@ -85,7 +85,7 @@
   ; RV64-NEXT:    addi a0, sp, 32
   ; RV64-NEXT:    addi a1, sp, 16
   ; RV64-NEXT:    addi a2, sp, 8
-  ; RV64-NEXT:    call extern@plt
+  ; RV64-NEXT:    call extern
   ; RV64-NEXT:    csrr a0, vlenb
   ; RV64-NEXT:    slli a0, a0, 1
   ; RV64-NEXT:    add sp, sp, a0
@@ -113,7 +113,7 @@
   ; RV32-NEXT:    addi a0, sp, 32
   ; RV32-NEXT:    addi a1, sp, 16
   ; RV32-NEXT:    addi a2, sp, 8
-  ; RV32-NEXT:    call extern@plt
+  ; RV32-NEXT:    call extern
   ; RV32-NEXT:    addi sp, s0, -48
   ; RV32-NEXT:    lw ra, 44(sp) # 4-byte Folded Reload
   ; RV32-NEXT:    lw s0, 40(sp) # 4-byte Folded Reload
@@ -133,7 +133,7 @@
   ; RV64-NEXT:    addi a0, sp, 64
   ; RV64-NEXT:    addi a1, sp, 40
   ; RV64-NEXT:    addi a2, sp, 32
-  ; RV64-NEXT:    call extern@plt
+  ; RV64-NEXT:    call extern
   ; RV64-NEXT:    addi sp, s0, -80
   ; RV64-NEXT:    ld ra, 72(sp) # 8-byte Folded Reload
   ; RV64-NEXT:    ld s0, 64(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/rvv/scalar-stack-align.ll b/llvm/test/CodeGen/RISCV/rvv/scalar-stack-align.ll
index 76773bb3831305..7aaafe9874fb93 100644
--- a/llvm/test/CodeGen/RISCV/rvv/scalar-stack-align.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/scalar-stack-align.ll
@@ -19,7 +19,7 @@ define ptr @scalar_stack_align16() nounwind {
 ; RV32-NEXT:    slli a0, a0, 1
 ; RV32-NEXT:    sub sp, sp, a0
 ; RV32-NEXT:    addi a0, sp, 32
-; RV32-NEXT:    call extern@plt
+; RV32-NEXT:    call extern
 ; RV32-NEXT:    addi a0, sp, 16
 ; RV32-NEXT:    csrr a1, vlenb
 ; RV32-NEXT:    slli a1, a1, 1
@@ -36,7 +36,7 @@ define ptr @scalar_stack_align16() nounwind {
 ; RV64-NEXT:    slli a0, a0, 1
 ; RV64-NEXT:    sub sp, sp, a0
 ; RV64-NEXT:    addi a0, sp, 32
-; RV64-NEXT:    call extern@plt
+; RV64-NEXT:    call extern
 ; RV64-NEXT:    addi a0, sp, 16
 ; RV64-NEXT:    csrr a1, vlenb
 ; RV64-NEXT:    slli a1, a1, 1
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
index 73f651225da64d..07fcec13146cbb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
@@ -343,7 +343,7 @@ define <vscale x 1 x double> @test8(i64 %avl, i8 zeroext %cond, <vscale x 1 x do
 ; CHECK-NEXT:    vs1r.v v9, (a0) # Unknown-size Folded Spill
 ; CHECK-NEXT:    addi a0, sp, 16
 ; CHECK-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
-; CHECK-NEXT:    call foo@plt
+; CHECK-NEXT:    call foo
 ; CHECK-NEXT:    vsetvli zero, s0, e64, m1, ta, ma
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    add a0, a0, sp
@@ -400,7 +400,7 @@ define <vscale x 1 x double> @test9(i64 %avl, i8 zeroext %cond, <vscale x 1 x do
 ; CHECK-NEXT:    add a0, a0, sp
 ; CHECK-NEXT:    addi a0, a0, 16
 ; CHECK-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
-; CHECK-NEXT:    call foo@plt
+; CHECK-NEXT:    call foo
 ; CHECK-NEXT:    addi a0, sp, 16
 ; CHECK-NEXT:    vl1r.v v9, (a0) # Unknown-size Folded Reload
 ; CHECK-NEXT:    csrr a0, vlenb
diff --git a/llvm/test/CodeGen/RISCV/rvv/vxrm-insert.ll b/llvm/test/CodeGen/RISCV/rvv/vxrm-insert.ll
index e323bc69ee9008..215b1ddd5de353 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vxrm-insert.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vxrm-insert.ll
@@ -83,7 +83,7 @@ define <vscale x 1 x i8> @test3(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vsc
 ; RV32-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; RV32-NEXT:    csrwi vxrm, 0
 ; RV32-NEXT:    vaadd.vv v8, v8, v9
-; RV32-NEXT:    call foo@plt
+; RV32-NEXT:    call foo
 ; RV32-NEXT:    vsetvli zero, s0, e8, mf8, ta, ma
 ; RV32-NEXT:    csrwi vxrm, 0
 ; RV32-NEXT:    addi a0, sp, 16
@@ -111,7 +111,7 @@ define <vscale x 1 x i8> @test3(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vsc
 ; RV64-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; RV64-NEXT:    csrwi vxrm, 0
 ; RV64-NEXT:    vaadd.vv v8, v8, v9
-; RV64-NEXT:    call foo@plt
+; RV64-NEXT:    call foo
 ; RV64-NEXT:    vsetvli zero, s0, e8, mf8, ta, ma
 ; RV64-NEXT:    csrwi vxrm, 0
 ; RV64-NEXT:    addi a0, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/select-and.ll b/llvm/test/CodeGen/RISCV/select-and.ll
index 5ba489043f0b44..d305993f0e966b 100644
--- a/llvm/test/CodeGen/RISCV/select-and.ll
+++ b/llvm/test/CodeGen/RISCV/select-and.ll
@@ -44,10 +44,10 @@ define signext i32 @if_of_and(i1 zeroext %a, i1 zeroext %b) nounwind {
 ; RV32I-NEXT:  # %bb.1:
 ; RV32I-NEXT:    beqz a1, .LBB1_3
 ; RV32I-NEXT:  # %bb.2: # %if.then
-; RV32I-NEXT:    call both@plt
+; RV32I-NEXT:    call both
 ; RV32I-NEXT:    j .LBB1_4
 ; RV32I-NEXT:  .LBB1_3: # %if.else
-; RV32I-NEXT:    call neither@plt
+; RV32I-NEXT:    call neither
 ; RV32I-NEXT:  .LBB1_4: # %if.end
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -61,10 +61,10 @@ define signext i32 @if_of_and(i1 zeroext %a, i1 zeroext %b) nounwind {
 ; RV64I-NEXT:  # %bb.1:
 ; RV64I-NEXT:    beqz a1, .LBB1_3
 ; RV64I-NEXT:  # %bb.2: # %if.then
-; RV64I-NEXT:    call both@plt
+; RV64I-NEXT:    call both
 ; RV64I-NEXT:    j .LBB1_4
 ; RV64I-NEXT:  .LBB1_3: # %if.else
-; RV64I-NEXT:    call neither@plt
+; RV64I-NEXT:    call neither
 ; RV64I-NEXT:  .LBB1_4: # %if.end
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/select-cc.ll b/llvm/test/CodeGen/RISCV/select-cc.ll
index 01ad7b338ce7f6..31e25702da8ba2 100644
--- a/llvm/test/CodeGen/RISCV/select-cc.ll
+++ b/llvm/test/CodeGen/RISCV/select-cc.ll
@@ -230,7 +230,7 @@ define signext i16 @numsignbits(i16 signext %0, i16 signext %1, i16 signext %2,
 ; RV32I-NEXT:    beqz a1, .LBB1_4
 ; RV32I-NEXT:  # %bb.3:
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call bar@plt
+; RV32I-NEXT:    call bar
 ; RV32I-NEXT:  .LBB1_4:
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -251,7 +251,7 @@ define signext i16 @numsignbits(i16 signext %0, i16 signext %1, i16 signext %2,
 ; RV64I-NEXT:    beqz a1, .LBB1_4
 ; RV64I-NEXT:  # %bb.3:
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call bar@plt
+; RV64I-NEXT:    call bar
 ; RV64I-NEXT:  .LBB1_4:
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/select-or.ll b/llvm/test/CodeGen/RISCV/select-or.ll
index d378bb4dd563de..20a5ec15290cdb 100644
--- a/llvm/test/CodeGen/RISCV/select-or.ll
+++ b/llvm/test/CodeGen/RISCV/select-or.ll
@@ -44,10 +44,10 @@ define signext i32 @if_of_or(i1 zeroext %a, i1 zeroext %b) nounwind {
 ; RV32I-NEXT:  # %bb.1:
 ; RV32I-NEXT:    bnez a1, .LBB1_3
 ; RV32I-NEXT:  # %bb.2: # %if.else
-; RV32I-NEXT:    call neither@plt
+; RV32I-NEXT:    call neither
 ; RV32I-NEXT:    j .LBB1_4
 ; RV32I-NEXT:  .LBB1_3: # %if.then
-; RV32I-NEXT:    call either@plt
+; RV32I-NEXT:    call either
 ; RV32I-NEXT:  .LBB1_4: # %if.end
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -61,10 +61,10 @@ define signext i32 @if_of_or(i1 zeroext %a, i1 zeroext %b) nounwind {
 ; RV64I-NEXT:  # %bb.1:
 ; RV64I-NEXT:    bnez a1, .LBB1_3
 ; RV64I-NEXT:  # %bb.2: # %if.else
-; RV64I-NEXT:    call neither@plt
+; RV64I-NEXT:    call neither
 ; RV64I-NEXT:    j .LBB1_4
 ; RV64I-NEXT:  .LBB1_3: # %if.then
-; RV64I-NEXT:    call either@plt
+; RV64I-NEXT:    call either
 ; RV64I-NEXT:  .LBB1_4: # %if.end
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/setcc-logic.ll b/llvm/test/CodeGen/RISCV/setcc-logic.ll
index 14e76d1ff17288..2b87026e98e6a8 100644
--- a/llvm/test/CodeGen/RISCV/setcc-logic.ll
+++ b/llvm/test/CodeGen/RISCV/setcc-logic.ll
@@ -304,7 +304,7 @@ define void @and_sge_eq(i32 signext %0, i32 signext %1, i32 signext %2, i32 sign
 ; RV32I-NEXT:  # %bb.2:
 ; RV32I-NEXT:    ret
 ; RV32I-NEXT:  .LBB13_3:
-; RV32I-NEXT:    tail bar@plt
+; RV32I-NEXT:    tail bar
 ;
 ; RV64I-LABEL: and_sge_eq:
 ; RV64I:       # %bb.0:
@@ -314,7 +314,7 @@ define void @and_sge_eq(i32 signext %0, i32 signext %1, i32 signext %2, i32 sign
 ; RV64I-NEXT:  # %bb.2:
 ; RV64I-NEXT:    ret
 ; RV64I-NEXT:  .LBB13_3:
-; RV64I-NEXT:    tail bar@plt
+; RV64I-NEXT:    tail bar
   %5 = icmp sge i32 %0, %1
   %6 = icmp eq i32 %2, %3
   %7 = and i1 %5, %6
@@ -337,7 +337,7 @@ define void @and_sle_eq(i32 signext %0, i32 signext %1, i32 signext %2, i32 sign
 ; RV32I-NEXT:  # %bb.2:
 ; RV32I-NEXT:    ret
 ; RV32I-NEXT:  .LBB14_3:
-; RV32I-NEXT:    tail bar@plt
+; RV32I-NEXT:    tail bar
 ;
 ; RV64I-LABEL: and_sle_eq:
 ; RV64I:       # %bb.0:
@@ -347,7 +347,7 @@ define void @and_sle_eq(i32 signext %0, i32 signext %1, i32 signext %2, i32 sign
 ; RV64I-NEXT:  # %bb.2:
 ; RV64I-NEXT:    ret
 ; RV64I-NEXT:  .LBB14_3:
-; RV64I-NEXT:    tail bar@plt
+; RV64I-NEXT:    tail bar
   %5 = icmp sle i32 %0, %1
   %6 = icmp eq i32 %2, %3
   %7 = and i1 %5, %6
@@ -370,7 +370,7 @@ define void @and_uge_eq(i32 signext %0, i32 signext %1, i32 signext %2, i32 sign
 ; RV32I-NEXT:  # %bb.2:
 ; RV32I-NEXT:    ret
 ; RV32I-NEXT:  .LBB15_3:
-; RV32I-NEXT:    tail bar@plt
+; RV32I-NEXT:    tail bar
 ;
 ; RV64I-LABEL: and_uge_eq:
 ; RV64I:       # %bb.0:
@@ -380,7 +380,7 @@ define void @and_uge_eq(i32 signext %0, i32 signext %1, i32 signext %2, i32 sign
 ; RV64I-NEXT:  # %bb.2:
 ; RV64I-NEXT:    ret
 ; RV64I-NEXT:  .LBB15_3:
-; RV64I-NEXT:    tail bar@plt
+; RV64I-NEXT:    tail bar
   %5 = icmp uge i32 %0, %1
   %6 = icmp eq i32 %2, %3
   %7 = and i1 %5, %6
@@ -403,7 +403,7 @@ define void @and_ule_eq(i32 signext %0, i32 signext %1, i32 signext %2, i32 sign
 ; RV32I-NEXT:  # %bb.2:
 ; RV32I-NEXT:    ret
 ; RV32I-NEXT:  .LBB16_3:
-; RV32I-NEXT:    tail bar@plt
+; RV32I-NEXT:    tail bar
 ;
 ; RV64I-LABEL: and_ule_eq:
 ; RV64I:       # %bb.0:
@@ -413,7 +413,7 @@ define void @and_ule_eq(i32 signext %0, i32 signext %1, i32 signext %2, i32 sign
 ; RV64I-NEXT:  # %bb.2:
 ; RV64I-NEXT:    ret
 ; RV64I-NEXT:  .LBB16_3:
-; RV64I-NEXT:    tail bar@plt
+; RV64I-NEXT:    tail bar
   %5 = icmp ule i32 %0, %1
   %6 = icmp eq i32 %2, %3
   %7 = and i1 %5, %6
@@ -436,7 +436,7 @@ define void @and_sge_ne(i32 signext %0, i32 signext %1, i32 signext %2, i32 sign
 ; RV32I-NEXT:  # %bb.2:
 ; RV32I-NEXT:    ret
 ; RV32I-NEXT:  .LBB17_3:
-; RV32I-NEXT:    tail bar@plt
+; RV32I-NEXT:    tail bar
 ;
 ; RV64I-LABEL: and_sge_ne:
 ; RV64I:       # %bb.0:
@@ -446,7 +446,7 @@ define void @and_sge_ne(i32 signext %0, i32 signext %1, i32 signext %2, i32 sign
 ; RV64I-NEXT:  # %bb.2:
 ; RV64I-NEXT:    ret
 ; RV64I-NEXT:  .LBB17_3:
-; RV64I-NEXT:    tail bar@plt
+; RV64I-NEXT:    tail bar
   %5 = icmp sge i32 %0, %1
   %6 = icmp ne i32 %2, %3
   %7 = and i1 %5, %6
@@ -469,7 +469,7 @@ define void @and_sle_ne(i32 signext %0, i32 signext %1, i32 signext %2, i32 sign
 ; RV32I-NEXT:  # %bb.2:
 ; RV32I-NEXT:    ret
 ; RV32I-NEXT:  .LBB18_3:
-; RV32I-NEXT:    tail bar@plt
+; RV32I-NEXT:    tail bar
 ;
 ; RV64I-LABEL: and_sle_ne:
 ; RV64I:       # %bb.0:
@@ -479,7 +479,7 @@ define void @and_sle_ne(i32 signext %0, i32 signext %1, i32 signext %2, i32 sign
 ; RV64I-NEXT:  # %bb.2:
 ; RV64I-NEXT:    ret
 ; RV64I-NEXT:  .LBB18_3:
-; RV64I-NEXT:    tail bar@plt
+; RV64I-NEXT:    tail bar
   %5 = icmp sle i32 %0, %1
   %6 = icmp ne i32 %2, %3
   %7 = and i1 %5, %6
@@ -502,7 +502,7 @@ define void @and_uge_ne(i32 signext %0, i32 signext %1, i32 signext %2, i32 sign
 ; RV32I-NEXT:  # %bb.2:
 ; RV32I-NEXT:    ret
 ; RV32I-NEXT:  .LBB19_3:
-; RV32I-NEXT:    tail bar@plt
+; RV32I-NEXT:    tail bar
 ;
 ; RV64I-LABEL: and_uge_ne:
 ; RV64I:       # %bb.0:
@@ -512,7 +512,7 @@ define void @and_uge_ne(i32 signext %0, i32 signext %1, i32 signext %2, i32 sign
 ; RV64I-NEXT:  # %bb.2:
 ; RV64I-NEXT:    ret
 ; RV64I-NEXT:  .LBB19_3:
-; RV64I-NEXT:    tail bar@plt
+; RV64I-NEXT:    tail bar
   %5 = icmp uge i32 %0, %1
   %6 = icmp ne i32 %2, %3
   %7 = and i1 %5, %6
@@ -535,7 +535,7 @@ define void @and_ule_ne(i32 signext %0, i32 signext %1, i32 signext %2, i32 sign
 ; RV32I-NEXT:  # %bb.2:
 ; RV32I-NEXT:    ret
 ; RV32I-NEXT:  .LBB20_3:
-; RV32I-NEXT:    tail bar@plt
+; RV32I-NEXT:    tail bar
 ;
 ; RV64I-LABEL: and_ule_ne:
 ; RV64I:       # %bb.0:
@@ -545,7 +545,7 @@ define void @and_ule_ne(i32 signext %0, i32 signext %1, i32 signext %2, i32 sign
 ; RV64I-NEXT:  # %bb.2:
 ; RV64I-NEXT:    ret
 ; RV64I-NEXT:  .LBB20_3:
-; RV64I-NEXT:    tail bar@plt
+; RV64I-NEXT:    tail bar
   %5 = icmp ule i32 %0, %1
   %6 = icmp ne i32 %2, %3
   %7 = and i1 %5, %6
@@ -566,7 +566,7 @@ define void @or_sge_eq(i32 signext %0, i32 signext %1, i32 signext %2, i32 signe
 ; RV32I-NEXT:  # %bb.1:
 ; RV32I-NEXT:    beq a2, a3, .LBB21_3
 ; RV32I-NEXT:  # %bb.2:
-; RV32I-NEXT:    tail bar@plt
+; RV32I-NEXT:    tail bar
 ; RV32I-NEXT:  .LBB21_3:
 ; RV32I-NEXT:    ret
 ;
@@ -576,7 +576,7 @@ define void @or_sge_eq(i32 signext %0, i32 signext %1, i32 signext %2, i32 signe
 ; RV64I-NEXT:  # %bb.1:
 ; RV64I-NEXT:    beq a2, a3, .LBB21_3
 ; RV64I-NEXT:  # %bb.2:
-; RV64I-NEXT:    tail bar@plt
+; RV64I-NEXT:    tail bar
 ; RV64I-NEXT:  .LBB21_3:
 ; RV64I-NEXT:    ret
   %5 = icmp sge i32 %0, %1
@@ -599,7 +599,7 @@ define void @or_sle_eq(i32 signext %0, i32 signext %1, i32 signext %2, i32 signe
 ; RV32I-NEXT:  # %bb.1:
 ; RV32I-NEXT:    beq a2, a3, .LBB22_3
 ; RV32I-NEXT:  # %bb.2:
-; RV32I-NEXT:    tail bar@plt
+; RV32I-NEXT:    tail bar
 ; RV32I-NEXT:  .LBB22_3:
 ; RV32I-NEXT:    ret
 ;
@@ -609,7 +609,7 @@ define void @or_sle_eq(i32 signext %0, i32 signext %1, i32 signext %2, i32 signe
 ; RV64I-NEXT:  # %bb.1:
 ; RV64I-NEXT:    beq a2, a3, .LBB22_3
 ; RV64I-NEXT:  # %bb.2:
-; RV64I-NEXT:    tail bar@plt
+; RV64I-NEXT:    tail bar
 ; RV64I-NEXT:  .LBB22_3:
 ; RV64I-NEXT:    ret
   %5 = icmp sle i32 %0, %1
@@ -632,7 +632,7 @@ define void @or_uge_eq(i32 signext %0, i32 signext %1, i32 signext %2, i32 signe
 ; RV32I-NEXT:  # %bb.1:
 ; RV32I-NEXT:    beq a2, a3, .LBB23_3
 ; RV32I-NEXT:  # %bb.2:
-; RV32I-NEXT:    tail bar@plt
+; RV32I-NEXT:    tail bar
 ; RV32I-NEXT:  .LBB23_3:
 ; RV32I-NEXT:    ret
 ;
@@ -642,7 +642,7 @@ define void @or_uge_eq(i32 signext %0, i32 signext %1, i32 signext %2, i32 signe
 ; RV64I-NEXT:  # %bb.1:
 ; RV64I-NEXT:    beq a2, a3, .LBB23_3
 ; RV64I-NEXT:  # %bb.2:
-; RV64I-NEXT:    tail bar@plt
+; RV64I-NEXT:    tail bar
 ; RV64I-NEXT:  .LBB23_3:
 ; RV64I-NEXT:    ret
   %5 = icmp uge i32 %0, %1
@@ -665,7 +665,7 @@ define void @or_ule_eq(i32 signext %0, i32 signext %1, i32 signext %2, i32 signe
 ; RV32I-NEXT:  # %bb.1:
 ; RV32I-NEXT:    beq a2, a3, .LBB24_3
 ; RV32I-NEXT:  # %bb.2:
-; RV32I-NEXT:    tail bar@plt
+; RV32I-NEXT:    tail bar
 ; RV32I-NEXT:  .LBB24_3:
 ; RV32I-NEXT:    ret
 ;
@@ -675,7 +675,7 @@ define void @or_ule_eq(i32 signext %0, i32 signext %1, i32 signext %2, i32 signe
 ; RV64I-NEXT:  # %bb.1:
 ; RV64I-NEXT:    beq a2, a3, .LBB24_3
 ; RV64I-NEXT:  # %bb.2:
-; RV64I-NEXT:    tail bar@plt
+; RV64I-NEXT:    tail bar
 ; RV64I-NEXT:  .LBB24_3:
 ; RV64I-NEXT:    ret
   %5 = icmp ule i32 %0, %1
@@ -698,7 +698,7 @@ define void @or_sge_ne(i32 signext %0, i32 signext %1, i32 signext %2, i32 signe
 ; RV32I-NEXT:  # %bb.1:
 ; RV32I-NEXT:    bne a2, a3, .LBB25_3
 ; RV32I-NEXT:  # %bb.2:
-; RV32I-NEXT:    tail bar@plt
+; RV32I-NEXT:    tail bar
 ; RV32I-NEXT:  .LBB25_3:
 ; RV32I-NEXT:    ret
 ;
@@ -708,7 +708,7 @@ define void @or_sge_ne(i32 signext %0, i32 signext %1, i32 signext %2, i32 signe
 ; RV64I-NEXT:  # %bb.1:
 ; RV64I-NEXT:    bne a2, a3, .LBB25_3
 ; RV64I-NEXT:  # %bb.2:
-; RV64I-NEXT:    tail bar@plt
+; RV64I-NEXT:    tail bar
 ; RV64I-NEXT:  .LBB25_3:
 ; RV64I-NEXT:    ret
   %5 = icmp sge i32 %0, %1
@@ -731,7 +731,7 @@ define void @or_sle_ne(i32 signext %0, i32 signext %1, i32 signext %2, i32 signe
 ; RV32I-NEXT:  # %bb.1:
 ; RV32I-NEXT:    bne a2, a3, .LBB26_3
 ; RV32I-NEXT:  # %bb.2:
-; RV32I-NEXT:    tail bar@plt
+; RV32I-NEXT:    tail bar
 ; RV32I-NEXT:  .LBB26_3:
 ; RV32I-NEXT:    ret
 ;
@@ -741,7 +741,7 @@ define void @or_sle_ne(i32 signext %0, i32 signext %1, i32 signext %2, i32 signe
 ; RV64I-NEXT:  # %bb.1:
 ; RV64I-NEXT:    bne a2, a3, .LBB26_3
 ; RV64I-NEXT:  # %bb.2:
-; RV64I-NEXT:    tail bar@plt
+; RV64I-NEXT:    tail bar
 ; RV64I-NEXT:  .LBB26_3:
 ; RV64I-NEXT:    ret
   %5 = icmp sle i32 %0, %1
@@ -764,7 +764,7 @@ define void @or_uge_ne(i32 signext %0, i32 signext %1, i32 signext %2, i32 signe
 ; RV32I-NEXT:  # %bb.1:
 ; RV32I-NEXT:    bne a2, a3, .LBB27_3
 ; RV32I-NEXT:  # %bb.2:
-; RV32I-NEXT:    tail bar@plt
+; RV32I-NEXT:    tail bar
 ; RV32I-NEXT:  .LBB27_3:
 ; RV32I-NEXT:    ret
 ;
@@ -774,7 +774,7 @@ define void @or_uge_ne(i32 signext %0, i32 signext %1, i32 signext %2, i32 signe
 ; RV64I-NEXT:  # %bb.1:
 ; RV64I-NEXT:    bne a2, a3, .LBB27_3
 ; RV64I-NEXT:  # %bb.2:
-; RV64I-NEXT:    tail bar@plt
+; RV64I-NEXT:    tail bar
 ; RV64I-NEXT:  .LBB27_3:
 ; RV64I-NEXT:    ret
   %5 = icmp uge i32 %0, %1
@@ -797,7 +797,7 @@ define void @or_ule_ne(i32 signext %0, i32 signext %1, i32 signext %2, i32 signe
 ; RV32I-NEXT:  # %bb.1:
 ; RV32I-NEXT:    bne a2, a3, .LBB28_3
 ; RV32I-NEXT:  # %bb.2:
-; RV32I-NEXT:    tail bar@plt
+; RV32I-NEXT:    tail bar
 ; RV32I-NEXT:  .LBB28_3:
 ; RV32I-NEXT:    ret
 ;
@@ -807,7 +807,7 @@ define void @or_ule_ne(i32 signext %0, i32 signext %1, i32 signext %2, i32 signe
 ; RV64I-NEXT:  # %bb.1:
 ; RV64I-NEXT:    bne a2, a3, .LBB28_3
 ; RV64I-NEXT:  # %bb.2:
-; RV64I-NEXT:    tail bar@plt
+; RV64I-NEXT:    tail bar
 ; RV64I-NEXT:  .LBB28_3:
 ; RV64I-NEXT:    ret
   %5 = icmp ule i32 %0, %1
@@ -832,7 +832,7 @@ define void @and_eq_sge(i32 signext %0, i32 signext %1, i32 signext %2, i32 sign
 ; RV32I-NEXT:  # %bb.2:
 ; RV32I-NEXT:    ret
 ; RV32I-NEXT:  .LBB29_3:
-; RV32I-NEXT:    tail bar@plt
+; RV32I-NEXT:    tail bar
 ;
 ; RV64I-LABEL: and_eq_sge:
 ; RV64I:       # %bb.0:
@@ -842,7 +842,7 @@ define void @and_eq_sge(i32 signext %0, i32 signext %1, i32 signext %2, i32 sign
 ; RV64I-NEXT:  # %bb.2:
 ; RV64I-NEXT:    ret
 ; RV64I-NEXT:  .LBB29_3:
-; RV64I-NEXT:    tail bar@plt
+; RV64I-NEXT:    tail bar
   %5 = icmp eq  i32 %0, %1
   %6 = icmp sge i32 %2, %3
   %7 = and i1 %5, %6
@@ -865,7 +865,7 @@ define void @and_eq_sle(i32 signext %0, i32 signext %1, i32 signext %2, i32 sign
 ; RV32I-NEXT:  # %bb.2:
 ; RV32I-NEXT:    ret
 ; RV32I-NEXT:  .LBB30_3:
-; RV32I-NEXT:    tail bar@plt
+; RV32I-NEXT:    tail bar
 ;
 ; RV64I-LABEL: and_eq_sle:
 ; RV64I:       # %bb.0:
@@ -875,7 +875,7 @@ define void @and_eq_sle(i32 signext %0, i32 signext %1, i32 signext %2, i32 sign
 ; RV64I-NEXT:  # %bb.2:
 ; RV64I-NEXT:    ret
 ; RV64I-NEXT:  .LBB30_3:
-; RV64I-NEXT:    tail bar@plt
+; RV64I-NEXT:    tail bar
   %5 = icmp eq  i32 %0, %1
   %6 = icmp sle i32 %2, %3
   %7 = and i1 %5, %6
@@ -898,7 +898,7 @@ define void @and_eq_uge(i32 signext %0, i32 signext %1, i32 signext %2, i32 sign
 ; RV32I-NEXT:  # %bb.2:
 ; RV32I-NEXT:    ret
 ; RV32I-NEXT:  .LBB31_3:
-; RV32I-NEXT:    tail bar@plt
+; RV32I-NEXT:    tail bar
 ;
 ; RV64I-LABEL: and_eq_uge:
 ; RV64I:       # %bb.0:
@@ -908,7 +908,7 @@ define void @and_eq_uge(i32 signext %0, i32 signext %1, i32 signext %2, i32 sign
 ; RV64I-NEXT:  # %bb.2:
 ; RV64I-NEXT:    ret
 ; RV64I-NEXT:  .LBB31_3:
-; RV64I-NEXT:    tail bar@plt
+; RV64I-NEXT:    tail bar
   %5 = icmp eq  i32 %0, %1
   %6 = icmp uge i32 %2, %3
   %7 = and i1 %5, %6
@@ -931,7 +931,7 @@ define void @and_eq_ule(i32 signext %0, i32 signext %1, i32 signext %2, i32 sign
 ; RV32I-NEXT:  # %bb.2:
 ; RV32I-NEXT:    ret
 ; RV32I-NEXT:  .LBB32_3:
-; RV32I-NEXT:    tail bar@plt
+; RV32I-NEXT:    tail bar
 ;
 ; RV64I-LABEL: and_eq_ule:
 ; RV64I:       # %bb.0:
@@ -941,7 +941,7 @@ define void @and_eq_ule(i32 signext %0, i32 signext %1, i32 signext %2, i32 sign
 ; RV64I-NEXT:  # %bb.2:
 ; RV64I-NEXT:    ret
 ; RV64I-NEXT:  .LBB32_3:
-; RV64I-NEXT:    tail bar@plt
+; RV64I-NEXT:    tail bar
   %5 = icmp eq  i32 %0, %1
   %6 = icmp ule i32 %2, %3
   %7 = and i1 %5, %6
@@ -964,7 +964,7 @@ define void @and_ne_sge(i32 signext %0, i32 signext %1, i32 signext %2, i32 sign
 ; RV32I-NEXT:  # %bb.2:
 ; RV32I-NEXT:    ret
 ; RV32I-NEXT:  .LBB33_3:
-; RV32I-NEXT:    tail bar@plt
+; RV32I-NEXT:    tail bar
 ;
 ; RV64I-LABEL: and_ne_sge:
 ; RV64I:       # %bb.0:
@@ -974,7 +974,7 @@ define void @and_ne_sge(i32 signext %0, i32 signext %1, i32 signext %2, i32 sign
 ; RV64I-NEXT:  # %bb.2:
 ; RV64I-NEXT:    ret
 ; RV64I-NEXT:  .LBB33_3:
-; RV64I-NEXT:    tail bar@plt
+; RV64I-NEXT:    tail bar
   %5 = icmp ne  i32 %0, %1
   %6 = icmp sge i32 %2, %3
   %7 = and i1 %5, %6
@@ -997,7 +997,7 @@ define void @and_ne_sle(i32 signext %0, i32 signext %1, i32 signext %2, i32 sign
 ; RV32I-NEXT:  # %bb.2:
 ; RV32I-NEXT:    ret
 ; RV32I-NEXT:  .LBB34_3:
-; RV32I-NEXT:    tail bar@plt
+; RV32I-NEXT:    tail bar
 ;
 ; RV64I-LABEL: and_ne_sle:
 ; RV64I:       # %bb.0:
@@ -1007,7 +1007,7 @@ define void @and_ne_sle(i32 signext %0, i32 signext %1, i32 signext %2, i32 sign
 ; RV64I-NEXT:  # %bb.2:
 ; RV64I-NEXT:    ret
 ; RV64I-NEXT:  .LBB34_3:
-; RV64I-NEXT:    tail bar@plt
+; RV64I-NEXT:    tail bar
   %5 = icmp ne  i32 %0, %1
   %6 = icmp sle i32 %2, %3
   %7 = and i1 %5, %6
@@ -1030,7 +1030,7 @@ define void @and_ne_uge(i32 signext %0, i32 signext %1, i32 signext %2, i32 sign
 ; RV32I-NEXT:  # %bb.2:
 ; RV32I-NEXT:    ret
 ; RV32I-NEXT:  .LBB35_3:
-; RV32I-NEXT:    tail bar@plt
+; RV32I-NEXT:    tail bar
 ;
 ; RV64I-LABEL: and_ne_uge:
 ; RV64I:       # %bb.0:
@@ -1040,7 +1040,7 @@ define void @and_ne_uge(i32 signext %0, i32 signext %1, i32 signext %2, i32 sign
 ; RV64I-NEXT:  # %bb.2:
 ; RV64I-NEXT:    ret
 ; RV64I-NEXT:  .LBB35_3:
-; RV64I-NEXT:    tail bar@plt
+; RV64I-NEXT:    tail bar
   %5 = icmp ne  i32 %0, %1
   %6 = icmp uge i32 %2, %3
   %7 = and i1 %5, %6
@@ -1063,7 +1063,7 @@ define void @and_ne_ule(i32 signext %0, i32 signext %1, i32 signext %2, i32 sign
 ; RV32I-NEXT:  # %bb.2:
 ; RV32I-NEXT:    ret
 ; RV32I-NEXT:  .LBB36_3:
-; RV32I-NEXT:    tail bar@plt
+; RV32I-NEXT:    tail bar
 ;
 ; RV64I-LABEL: and_ne_ule:
 ; RV64I:       # %bb.0:
@@ -1073,7 +1073,7 @@ define void @and_ne_ule(i32 signext %0, i32 signext %1, i32 signext %2, i32 sign
 ; RV64I-NEXT:  # %bb.2:
 ; RV64I-NEXT:    ret
 ; RV64I-NEXT:  .LBB36_3:
-; RV64I-NEXT:    tail bar@plt
+; RV64I-NEXT:    tail bar
   %5 = icmp ne  i32 %0, %1
   %6 = icmp ule i32 %2, %3
   %7 = and i1 %5, %6
@@ -1096,7 +1096,7 @@ define void @and_sge_gt0(i32 signext %0, i32 signext %1, i32 signext %2) {
 ; RV32I-NEXT:  # %bb.2:
 ; RV32I-NEXT:    ret
 ; RV32I-NEXT:  .LBB37_3:
-; RV32I-NEXT:    tail bar@plt
+; RV32I-NEXT:    tail bar
 ;
 ; RV64I-LABEL: and_sge_gt0:
 ; RV64I:       # %bb.0:
@@ -1106,7 +1106,7 @@ define void @and_sge_gt0(i32 signext %0, i32 signext %1, i32 signext %2) {
 ; RV64I-NEXT:  # %bb.2:
 ; RV64I-NEXT:    ret
 ; RV64I-NEXT:  .LBB37_3:
-; RV64I-NEXT:    tail bar@plt
+; RV64I-NEXT:    tail bar
   %4 = icmp sge i32 %0, %1
   %5 = icmp sgt i32 %2, 0
   %6 = and i1 %4, %5
@@ -1129,7 +1129,7 @@ define void @and_sle_lt1(i32 signext %0, i32 signext %1, i32 signext %2) {
 ; RV32I-NEXT:  # %bb.2:
 ; RV32I-NEXT:    ret
 ; RV32I-NEXT:  .LBB38_3:
-; RV32I-NEXT:    tail bar@plt
+; RV32I-NEXT:    tail bar
 ;
 ; RV64I-LABEL: and_sle_lt1:
 ; RV64I:       # %bb.0:
@@ -1139,7 +1139,7 @@ define void @and_sle_lt1(i32 signext %0, i32 signext %1, i32 signext %2) {
 ; RV64I-NEXT:  # %bb.2:
 ; RV64I-NEXT:    ret
 ; RV64I-NEXT:  .LBB38_3:
-; RV64I-NEXT:    tail bar@plt
+; RV64I-NEXT:    tail bar
   %4 = icmp sle i32 %0, %1
   %5 = icmp slt i32 %2, 1
   %6 = and i1 %4, %5
@@ -1160,7 +1160,7 @@ define void @or_uge_gt0(i32 signext %0, i32 signext %1, i32 signext %2) {
 ; RV32I-NEXT:  # %bb.1:
 ; RV32I-NEXT:    bgtz a2, .LBB39_3
 ; RV32I-NEXT:  # %bb.2:
-; RV32I-NEXT:    tail bar@plt
+; RV32I-NEXT:    tail bar
 ; RV32I-NEXT:  .LBB39_3:
 ; RV32I-NEXT:    ret
 ;
@@ -1170,7 +1170,7 @@ define void @or_uge_gt0(i32 signext %0, i32 signext %1, i32 signext %2) {
 ; RV64I-NEXT:  # %bb.1:
 ; RV64I-NEXT:    bgtz a2, .LBB39_3
 ; RV64I-NEXT:  # %bb.2:
-; RV64I-NEXT:    tail bar@plt
+; RV64I-NEXT:    tail bar
 ; RV64I-NEXT:  .LBB39_3:
 ; RV64I-NEXT:    ret
   %4 = icmp uge i32 %0, %1
@@ -1193,7 +1193,7 @@ define void @or_ule_lt1(i32 signext %0, i32 signext %1, i32 signext %2) {
 ; RV32I-NEXT:  # %bb.1:
 ; RV32I-NEXT:    blez a2, .LBB40_3
 ; RV32I-NEXT:  # %bb.2:
-; RV32I-NEXT:    tail bar@plt
+; RV32I-NEXT:    tail bar
 ; RV32I-NEXT:  .LBB40_3:
 ; RV32I-NEXT:    ret
 ;
@@ -1203,7 +1203,7 @@ define void @or_ule_lt1(i32 signext %0, i32 signext %1, i32 signext %2) {
 ; RV64I-NEXT:  # %bb.1:
 ; RV64I-NEXT:    blez a2, .LBB40_3
 ; RV64I-NEXT:  # %bb.2:
-; RV64I-NEXT:    tail bar@plt
+; RV64I-NEXT:    tail bar
 ; RV64I-NEXT:  .LBB40_3:
 ; RV64I-NEXT:    ret
   %4 = icmp ule i32 %0, %1
diff --git a/llvm/test/CodeGen/RISCV/sextw-removal.ll b/llvm/test/CodeGen/RISCV/sextw-removal.ll
index e730092674e06a..9c56c03f3609c6 100644
--- a/llvm/test/CodeGen/RISCV/sextw-removal.ll
+++ b/llvm/test/CodeGen/RISCV/sextw-removal.ll
@@ -18,7 +18,7 @@ define void @test1(i32 signext %arg, i32 signext %arg1) nounwind {
 ; CHECK-NEXT:  .LBB0_1: # %bb2
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    mv a0, s1
-; CHECK-NEXT:    call bar@plt
+; CHECK-NEXT:    call bar
 ; CHECK-NEXT:    sllw s1, s1, s0
 ; CHECK-NEXT:    bnez a0, .LBB0_1
 ; CHECK-NEXT:  # %bb.2: # %bb7
@@ -39,7 +39,7 @@ define void @test1(i32 signext %arg, i32 signext %arg1) nounwind {
 ; NOREMOVAL-NEXT:  .LBB0_1: # %bb2
 ; NOREMOVAL-NEXT:    # =>This Inner Loop Header: Depth=1
 ; NOREMOVAL-NEXT:    sext.w a0, s1
-; NOREMOVAL-NEXT:    call bar@plt
+; NOREMOVAL-NEXT:    call bar
 ; NOREMOVAL-NEXT:    sllw s1, s1, s0
 ; NOREMOVAL-NEXT:    bnez a0, .LBB0_1
 ; NOREMOVAL-NEXT:  # %bb.2: # %bb7
@@ -186,7 +186,7 @@ define void @test5(i32 signext %arg, i32 signext %arg1) nounwind {
 ; RV64I-NEXT:    addi s3, a1, 257
 ; RV64I-NEXT:  .LBB4_1: # %bb2
 ; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV64I-NEXT:    call bar@plt
+; RV64I-NEXT:    call bar
 ; RV64I-NEXT:    mv a1, a0
 ; RV64I-NEXT:    srli a0, a0, 1
 ; RV64I-NEXT:    and a0, a0, s0
@@ -217,7 +217,7 @@ define void @test5(i32 signext %arg, i32 signext %arg1) nounwind {
 ; RV64ZBB-NEXT:    sraw a0, a0, a1
 ; RV64ZBB-NEXT:  .LBB4_1: # %bb2
 ; RV64ZBB-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV64ZBB-NEXT:    call bar@plt
+; RV64ZBB-NEXT:    call bar
 ; RV64ZBB-NEXT:    mv a1, a0
 ; RV64ZBB-NEXT:    cpopw a0, a0
 ; RV64ZBB-NEXT:    bnez a1, .LBB4_1
@@ -234,7 +234,7 @@ define void @test5(i32 signext %arg, i32 signext %arg1) nounwind {
 ; NOREMOVAL-NEXT:  .LBB4_1: # %bb2
 ; NOREMOVAL-NEXT:    # =>This Inner Loop Header: Depth=1
 ; NOREMOVAL-NEXT:    sext.w a0, a1
-; NOREMOVAL-NEXT:    call bar@plt
+; NOREMOVAL-NEXT:    call bar
 ; NOREMOVAL-NEXT:    cpopw a1, a0
 ; NOREMOVAL-NEXT:    bnez a0, .LBB4_1
 ; NOREMOVAL-NEXT:  # %bb.2: # %bb7
@@ -268,7 +268,7 @@ define void @test6(i32 signext %arg, i32 signext %arg1) nounwind {
 ; CHECK-NEXT:    fmv.w.x fs0, zero
 ; CHECK-NEXT:  .LBB5_1: # %bb2
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    call baz@plt
+; CHECK-NEXT:    call baz
 ; CHECK-NEXT:    feq.s a1, fa0, fs0
 ; CHECK-NEXT:    fcvt.w.s a0, fa0, rtz
 ; CHECK-NEXT:    beqz a1, .LBB5_1
@@ -288,7 +288,7 @@ define void @test6(i32 signext %arg, i32 signext %arg1) nounwind {
 ; NOREMOVAL-NEXT:  .LBB5_1: # %bb2
 ; NOREMOVAL-NEXT:    # =>This Inner Loop Header: Depth=1
 ; NOREMOVAL-NEXT:    sext.w a0, a0
-; NOREMOVAL-NEXT:    call baz@plt
+; NOREMOVAL-NEXT:    call baz
 ; NOREMOVAL-NEXT:    feq.s a1, fa0, fs0
 ; NOREMOVAL-NEXT:    fcvt.w.s a0, fa0, rtz
 ; NOREMOVAL-NEXT:    beqz a1, .LBB5_1
@@ -341,7 +341,7 @@ define void @test7(i32 signext %arg, i32 signext %arg1) nounwind {
 ; RV64I-NEXT:    add s3, s3, a1
 ; RV64I-NEXT:  .LBB6_1: # %bb2
 ; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV64I-NEXT:    call foo@plt
+; RV64I-NEXT:    call foo
 ; RV64I-NEXT:    srli a1, a0, 1
 ; RV64I-NEXT:    and a1, a1, s0
 ; RV64I-NEXT:    sub a0, a0, a1
@@ -371,7 +371,7 @@ define void @test7(i32 signext %arg, i32 signext %arg1) nounwind {
 ; RV64ZBB-NEXT:    sraw a0, a0, a1
 ; RV64ZBB-NEXT:  .LBB6_1: # %bb2
 ; RV64ZBB-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV64ZBB-NEXT:    call foo@plt
+; RV64ZBB-NEXT:    call foo
 ; RV64ZBB-NEXT:    cpop a0, a0
 ; RV64ZBB-NEXT:    bnez a0, .LBB6_1
 ; RV64ZBB-NEXT:  # %bb.2: # %bb7
@@ -387,7 +387,7 @@ define void @test7(i32 signext %arg, i32 signext %arg1) nounwind {
 ; NOREMOVAL-NEXT:  .LBB6_1: # %bb2
 ; NOREMOVAL-NEXT:    # =>This Inner Loop Header: Depth=1
 ; NOREMOVAL-NEXT:    sext.w a0, a0
-; NOREMOVAL-NEXT:    call foo@plt
+; NOREMOVAL-NEXT:    call foo
 ; NOREMOVAL-NEXT:    cpop a0, a0
 ; NOREMOVAL-NEXT:    bnez a0, .LBB6_1
 ; NOREMOVAL-NEXT:  # %bb.2: # %bb7
@@ -420,7 +420,7 @@ define void @test8(i32 signext %arg, i32 signext %arg1) nounwind {
 ; CHECK-NEXT:    sraw a0, a0, a1
 ; CHECK-NEXT:  .LBB7_1: # %bb2
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    call foo@plt
+; CHECK-NEXT:    call foo
 ; CHECK-NEXT:    ori a0, a0, -256
 ; CHECK-NEXT:    bnez a0, .LBB7_1
 ; CHECK-NEXT:  # %bb.2: # %bb7
@@ -436,7 +436,7 @@ define void @test8(i32 signext %arg, i32 signext %arg1) nounwind {
 ; NOREMOVAL-NEXT:  .LBB7_1: # %bb2
 ; NOREMOVAL-NEXT:    # =>This Inner Loop Header: Depth=1
 ; NOREMOVAL-NEXT:    sext.w a0, a0
-; NOREMOVAL-NEXT:    call foo@plt
+; NOREMOVAL-NEXT:    call foo
 ; NOREMOVAL-NEXT:    ori a0, a0, -256
 ; NOREMOVAL-NEXT:    bnez a0, .LBB7_1
 ; NOREMOVAL-NEXT:  # %bb.2: # %bb7
@@ -471,7 +471,7 @@ define void @test9(i32 signext %arg, i32 signext %arg1) nounwind {
 ; CHECK-NEXT:    li s0, 254
 ; CHECK-NEXT:  .LBB8_1: # %bb2
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    call bar@plt
+; CHECK-NEXT:    call bar
 ; CHECK-NEXT:    mv a1, a0
 ; CHECK-NEXT:    slti a0, a0, 255
 ; CHECK-NEXT:    blt s0, a1, .LBB8_1
@@ -491,7 +491,7 @@ define void @test9(i32 signext %arg, i32 signext %arg1) nounwind {
 ; NOREMOVAL-NEXT:  .LBB8_1: # %bb2
 ; NOREMOVAL-NEXT:    # =>This Inner Loop Header: Depth=1
 ; NOREMOVAL-NEXT:    sext.w a0, a1
-; NOREMOVAL-NEXT:    call bar@plt
+; NOREMOVAL-NEXT:    call bar
 ; NOREMOVAL-NEXT:    slti a1, a0, 255
 ; NOREMOVAL-NEXT:    blt s0, a0, .LBB8_1
 ; NOREMOVAL-NEXT:  # %bb.2: # %bb7
@@ -525,7 +525,7 @@ define void @test10(i32 signext %arg, i32 signext %arg1) nounwind {
 ; CHECK-NEXT:    fmv.w.x fs0, zero
 ; CHECK-NEXT:  .LBB9_1: # %bb2
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    call baz@plt
+; CHECK-NEXT:    call baz
 ; CHECK-NEXT:    feq.s a1, fa0, fs0
 ; CHECK-NEXT:    fmv.x.w a0, fa0
 ; CHECK-NEXT:    beqz a1, .LBB9_1
@@ -545,7 +545,7 @@ define void @test10(i32 signext %arg, i32 signext %arg1) nounwind {
 ; NOREMOVAL-NEXT:  .LBB9_1: # %bb2
 ; NOREMOVAL-NEXT:    # =>This Inner Loop Header: Depth=1
 ; NOREMOVAL-NEXT:    sext.w a0, a0
-; NOREMOVAL-NEXT:    call baz@plt
+; NOREMOVAL-NEXT:    call baz
 ; NOREMOVAL-NEXT:    feq.s a1, fa0, fs0
 ; NOREMOVAL-NEXT:    fmv.x.w a0, fa0
 ; NOREMOVAL-NEXT:    beqz a1, .LBB9_1
@@ -1152,12 +1152,12 @@ define void @test16(i32 signext %arg, i32 signext %arg1) nounwind {
 ; CHECK-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
 ; CHECK-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
 ; CHECK-NEXT:    mv s0, a1
-; CHECK-NEXT:    call bar@plt
+; CHECK-NEXT:    call bar
 ; CHECK-NEXT:    mv s1, a0
 ; CHECK-NEXT:  .LBB19_1: # %bb2
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    mv a0, s1
-; CHECK-NEXT:    call bar@plt
+; CHECK-NEXT:    call bar
 ; CHECK-NEXT:    sllw s1, s1, s0
 ; CHECK-NEXT:    bnez a0, .LBB19_1
 ; CHECK-NEXT:  # %bb.2: # %bb7
@@ -1174,12 +1174,12 @@ define void @test16(i32 signext %arg, i32 signext %arg1) nounwind {
 ; NOREMOVAL-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
 ; NOREMOVAL-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
 ; NOREMOVAL-NEXT:    mv s0, a1
-; NOREMOVAL-NEXT:    call bar@plt
+; NOREMOVAL-NEXT:    call bar
 ; NOREMOVAL-NEXT:    mv s1, a0
 ; NOREMOVAL-NEXT:  .LBB19_1: # %bb2
 ; NOREMOVAL-NEXT:    # =>This Inner Loop Header: Depth=1
 ; NOREMOVAL-NEXT:    sext.w a0, s1
-; NOREMOVAL-NEXT:    call bar@plt
+; NOREMOVAL-NEXT:    call bar
 ; NOREMOVAL-NEXT:    sllw s1, s1, s0
 ; NOREMOVAL-NEXT:    bnez a0, .LBB19_1
 ; NOREMOVAL-NEXT:  # %bb.2: # %bb7
@@ -1211,12 +1211,12 @@ define void @test17(i32 signext %arg, i32 signext %arg1) nounwind {
 ; CHECK-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
 ; CHECK-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
 ; CHECK-NEXT:    mv s0, a1
-; CHECK-NEXT:    call bat@plt
+; CHECK-NEXT:    call bat
 ; CHECK-NEXT:    mv s1, a0
 ; CHECK-NEXT:  .LBB20_1: # %bb2
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    mv a0, s1
-; CHECK-NEXT:    call bar@plt
+; CHECK-NEXT:    call bar
 ; CHECK-NEXT:    sllw s1, s1, s0
 ; CHECK-NEXT:    bnez a0, .LBB20_1
 ; CHECK-NEXT:  # %bb.2: # %bb7
@@ -1233,12 +1233,12 @@ define void @test17(i32 signext %arg, i32 signext %arg1) nounwind {
 ; NOREMOVAL-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
 ; NOREMOVAL-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
 ; NOREMOVAL-NEXT:    mv s0, a1
-; NOREMOVAL-NEXT:    call bat@plt
+; NOREMOVAL-NEXT:    call bat
 ; NOREMOVAL-NEXT:    mv s1, a0
 ; NOREMOVAL-NEXT:  .LBB20_1: # %bb2
 ; NOREMOVAL-NEXT:    # =>This Inner Loop Header: Depth=1
 ; NOREMOVAL-NEXT:    sext.w a0, s1
-; NOREMOVAL-NEXT:    call bar@plt
+; NOREMOVAL-NEXT:    call bar
 ; NOREMOVAL-NEXT:    sllw s1, s1, s0
 ; NOREMOVAL-NEXT:    bnez a0, .LBB20_1
 ; NOREMOVAL-NEXT:  # %bb.2: # %bb7
@@ -1276,7 +1276,7 @@ define void @test18(i32 signext %arg, i32 signext %arg1) nounwind {
 ; CHECK-NEXT:  .LBB21_1: # %bb2
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    mv a0, s1
-; CHECK-NEXT:    call bar@plt
+; CHECK-NEXT:    call bar
 ; CHECK-NEXT:    sllw s1, s1, s0
 ; CHECK-NEXT:    bnez a0, .LBB21_1
 ; CHECK-NEXT:  # %bb.2: # %bb7
@@ -1297,7 +1297,7 @@ define void @test18(i32 signext %arg, i32 signext %arg1) nounwind {
 ; NOREMOVAL-NEXT:  .LBB21_1: # %bb2
 ; NOREMOVAL-NEXT:    # =>This Inner Loop Header: Depth=1
 ; NOREMOVAL-NEXT:    sext.w a0, s1
-; NOREMOVAL-NEXT:    call bar@plt
+; NOREMOVAL-NEXT:    call bar
 ; NOREMOVAL-NEXT:    sllw s1, s1, s0
 ; NOREMOVAL-NEXT:    bnez a0, .LBB21_1
 ; NOREMOVAL-NEXT:  # %bb.2: # %bb7
@@ -1385,10 +1385,10 @@ define signext i32 @test19(i64 %arg, i1 zeroext %c1, i1 zeroext %c2, ptr %p) nou
 ; CHECK-NEXT:    beqz a2, .LBB23_2
 ; CHECK-NEXT:  # %bb.1: # %bb2
 ; CHECK-NEXT:    li a0, 0
-; CHECK-NEXT:    call bar@plt
+; CHECK-NEXT:    call bar
 ; CHECK-NEXT:    mv s0, a0
 ; CHECK-NEXT:  .LBB23_2: # %bb7
-; CHECK-NEXT:    call side_effect@plt
+; CHECK-NEXT:    call side_effect
 ; CHECK-NEXT:    sext.w a0, s0
 ; CHECK-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; CHECK-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
@@ -1409,10 +1409,10 @@ define signext i32 @test19(i64 %arg, i1 zeroext %c1, i1 zeroext %c2, ptr %p) nou
 ; NOREMOVAL-NEXT:    beqz a2, .LBB23_2
 ; NOREMOVAL-NEXT:  # %bb.1: # %bb2
 ; NOREMOVAL-NEXT:    li a0, 0
-; NOREMOVAL-NEXT:    call bar@plt
+; NOREMOVAL-NEXT:    call bar
 ; NOREMOVAL-NEXT:    mv s0, a0
 ; NOREMOVAL-NEXT:  .LBB23_2: # %bb7
-; NOREMOVAL-NEXT:    call side_effect@plt
+; NOREMOVAL-NEXT:    call side_effect
 ; NOREMOVAL-NEXT:    sext.w a0, s0
 ; NOREMOVAL-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; NOREMOVAL-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
@@ -1450,7 +1450,7 @@ define void @test20(<vscale x 1 x i32> %arg, i32 signext %arg1) nounwind {
 ; CHECK-NEXT:  .LBB24_1: # %bb2
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    mv a0, s1
-; CHECK-NEXT:    call bar@plt
+; CHECK-NEXT:    call bar
 ; CHECK-NEXT:    sllw s1, s1, s0
 ; CHECK-NEXT:    bnez a0, .LBB24_1
 ; CHECK-NEXT:  # %bb.2: # %bb7
@@ -1472,7 +1472,7 @@ define void @test20(<vscale x 1 x i32> %arg, i32 signext %arg1) nounwind {
 ; NOREMOVAL-NEXT:  .LBB24_1: # %bb2
 ; NOREMOVAL-NEXT:    # =>This Inner Loop Header: Depth=1
 ; NOREMOVAL-NEXT:    sext.w a0, s1
-; NOREMOVAL-NEXT:    call bar@plt
+; NOREMOVAL-NEXT:    call bar
 ; NOREMOVAL-NEXT:    sllw s1, s1, s0
 ; NOREMOVAL-NEXT:    bnez a0, .LBB24_1
 ; NOREMOVAL-NEXT:  # %bb.2: # %bb7
diff --git a/llvm/test/CodeGen/RISCV/shadowcallstack.ll b/llvm/test/CodeGen/RISCV/shadowcallstack.ll
index fee067ee3ad141..b41b87aaf4d0d8 100644
--- a/llvm/test/CodeGen/RISCV/shadowcallstack.ll
+++ b/llvm/test/CodeGen/RISCV/shadowcallstack.ll
@@ -20,11 +20,11 @@ declare void @foo()
 define void @f2() shadowcallstack {
 ; RV32-LABEL: f2:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    tail foo@plt
+; RV32-NEXT:    tail foo
 ;
 ; RV64-LABEL: f2:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    tail foo@plt
+; RV64-NEXT:    tail foo
   tail call void @foo()
   ret void
 }
@@ -41,7 +41,7 @@ define i32 @f3() shadowcallstack {
 ; RV32-NEXT:    .cfi_def_cfa_offset 16
 ; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    .cfi_offset ra, -4
-; RV32-NEXT:    call bar@plt
+; RV32-NEXT:    call bar
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    lw ra, -4(gp)
@@ -58,7 +58,7 @@ define i32 @f3() shadowcallstack {
 ; RV64-NEXT:    .cfi_def_cfa_offset 16
 ; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-NEXT:    .cfi_offset ra, -8
-; RV64-NEXT:    call bar@plt
+; RV64-NEXT:    call bar
 ; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    addi sp, sp, 16
 ; RV64-NEXT:    ld ra, -8(gp)
@@ -86,13 +86,13 @@ define i32 @f4() shadowcallstack {
 ; RV32-NEXT:    .cfi_offset s0, -8
 ; RV32-NEXT:    .cfi_offset s1, -12
 ; RV32-NEXT:    .cfi_offset s2, -16
-; RV32-NEXT:    call bar@plt
+; RV32-NEXT:    call bar
 ; RV32-NEXT:    mv s0, a0
-; RV32-NEXT:    call bar@plt
+; RV32-NEXT:    call bar
 ; RV32-NEXT:    mv s1, a0
-; RV32-NEXT:    call bar@plt
+; RV32-NEXT:    call bar
 ; RV32-NEXT:    mv s2, a0
-; RV32-NEXT:    call bar@plt
+; RV32-NEXT:    call bar
 ; RV32-NEXT:    add s0, s0, s1
 ; RV32-NEXT:    add a0, s2, a0
 ; RV32-NEXT:    add a0, s0, a0
@@ -121,13 +121,13 @@ define i32 @f4() shadowcallstack {
 ; RV64-NEXT:    .cfi_offset s0, -16
 ; RV64-NEXT:    .cfi_offset s1, -24
 ; RV64-NEXT:    .cfi_offset s2, -32
-; RV64-NEXT:    call bar@plt
+; RV64-NEXT:    call bar
 ; RV64-NEXT:    mv s0, a0
-; RV64-NEXT:    call bar@plt
+; RV64-NEXT:    call bar
 ; RV64-NEXT:    mv s1, a0
-; RV64-NEXT:    call bar@plt
+; RV64-NEXT:    call bar
 ; RV64-NEXT:    mv s2, a0
-; RV64-NEXT:    call bar@plt
+; RV64-NEXT:    call bar
 ; RV64-NEXT:    add s0, s0, s1
 ; RV64-NEXT:    add a0, s2, a0
 ; RV64-NEXT:    addw a0, s0, a0
@@ -157,7 +157,7 @@ define i32 @f5() shadowcallstack nounwind {
 ; RV32-NEXT:    sw ra, -4(gp)
 ; RV32-NEXT:    addi sp, sp, -16
 ; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32-NEXT:    call bar@plt
+; RV32-NEXT:    call bar
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    lw ra, -4(gp)
@@ -170,7 +170,7 @@ define i32 @f5() shadowcallstack nounwind {
 ; RV64-NEXT:    sd ra, -8(gp)
 ; RV64-NEXT:    addi sp, sp, -16
 ; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64-NEXT:    call bar@plt
+; RV64-NEXT:    call bar
 ; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    addi sp, sp, 16
 ; RV64-NEXT:    ld ra, -8(gp)
diff --git a/llvm/test/CodeGen/RISCV/shifts.ll b/llvm/test/CodeGen/RISCV/shifts.ll
index 97121c275a2944..f61cbfd3ed7257 100644
--- a/llvm/test/CodeGen/RISCV/shifts.ll
+++ b/llvm/test/CodeGen/RISCV/shifts.ll
@@ -43,7 +43,7 @@ define i64 @lshr64_minsize(i64 %a, i64 %b) minsize nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __lshrdi3@plt
+; RV32I-NEXT:    call __lshrdi3
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -89,7 +89,7 @@ define i64 @ashr64_minsize(i64 %a, i64 %b) minsize nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __ashrdi3@plt
+; RV32I-NEXT:    call __ashrdi3
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -135,7 +135,7 @@ define i64 @shl64_minsize(i64 %a, i64 %b) minsize nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __ashldi3@plt
+; RV32I-NEXT:    call __ashldi3
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
diff --git a/llvm/test/CodeGen/RISCV/short-forward-branch-opt.ll b/llvm/test/CodeGen/RISCV/short-forward-branch-opt.ll
index 725b8fd6eeea6b..d007c245d21b62 100644
--- a/llvm/test/CodeGen/RISCV/short-forward-branch-opt.ll
+++ b/llvm/test/CodeGen/RISCV/short-forward-branch-opt.ll
@@ -431,7 +431,7 @@ define void @sextw_removal_ccor(i1 %c, i32 signext %arg, i32 signext %arg1, i32
 ; NOSFB-NEXT:  .LBB15_1: # %bb2
 ; NOSFB-NEXT:    # =>This Inner Loop Header: Depth=1
 ; NOSFB-NEXT:    mv a0, s1
-; NOSFB-NEXT:    call bar@plt
+; NOSFB-NEXT:    call bar
 ; NOSFB-NEXT:    sllw s1, s1, s0
 ; NOSFB-NEXT:    bnez a0, .LBB15_1
 ; NOSFB-NEXT:  # %bb.2: # %bb7
@@ -457,7 +457,7 @@ define void @sextw_removal_ccor(i1 %c, i32 signext %arg, i32 signext %arg1, i32
 ; RV64SFB-NEXT:  .LBB15_1: # %bb2
 ; RV64SFB-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64SFB-NEXT:    mv a0, s0
-; RV64SFB-NEXT:    call bar@plt
+; RV64SFB-NEXT:    call bar
 ; RV64SFB-NEXT:    sllw s0, s0, s1
 ; RV64SFB-NEXT:    bnez a0, .LBB15_1
 ; RV64SFB-NEXT:  # %bb.2: # %bb7
@@ -483,7 +483,7 @@ define void @sextw_removal_ccor(i1 %c, i32 signext %arg, i32 signext %arg1, i32
 ; ZICOND-NEXT:  .LBB15_1: # %bb2
 ; ZICOND-NEXT:    # =>This Inner Loop Header: Depth=1
 ; ZICOND-NEXT:    mv a0, s0
-; ZICOND-NEXT:    call bar@plt
+; ZICOND-NEXT:    call bar
 ; ZICOND-NEXT:    sllw s0, s0, s1
 ; ZICOND-NEXT:    bnez a0, .LBB15_1
 ; ZICOND-NEXT:  # %bb.2: # %bb7
@@ -509,7 +509,7 @@ define void @sextw_removal_ccor(i1 %c, i32 signext %arg, i32 signext %arg1, i32
 ; RV32SFB-NEXT:  .LBB15_1: # %bb2
 ; RV32SFB-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32SFB-NEXT:    mv a0, s0
-; RV32SFB-NEXT:    call bar@plt
+; RV32SFB-NEXT:    call bar
 ; RV32SFB-NEXT:    sll s0, s0, s1
 ; RV32SFB-NEXT:    bnez a0, .LBB15_1
 ; RV32SFB-NEXT:  # %bb.2: # %bb7
@@ -550,7 +550,7 @@ define void @sextw_removal_ccaddw(i1 %c, i32 signext %arg, i32 signext %arg1, i3
 ; NOSFB-NEXT:  .LBB16_1: # %bb2
 ; NOSFB-NEXT:    # =>This Inner Loop Header: Depth=1
 ; NOSFB-NEXT:    mv a0, s1
-; NOSFB-NEXT:    call bar@plt
+; NOSFB-NEXT:    call bar
 ; NOSFB-NEXT:    sllw s1, s1, s0
 ; NOSFB-NEXT:    bnez a0, .LBB16_1
 ; NOSFB-NEXT:  # %bb.2: # %bb7
@@ -576,7 +576,7 @@ define void @sextw_removal_ccaddw(i1 %c, i32 signext %arg, i32 signext %arg1, i3
 ; RV64SFB-NEXT:  .LBB16_1: # %bb2
 ; RV64SFB-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64SFB-NEXT:    mv a0, s0
-; RV64SFB-NEXT:    call bar@plt
+; RV64SFB-NEXT:    call bar
 ; RV64SFB-NEXT:    sllw s0, s0, s1
 ; RV64SFB-NEXT:    bnez a0, .LBB16_1
 ; RV64SFB-NEXT:  # %bb.2: # %bb7
@@ -602,7 +602,7 @@ define void @sextw_removal_ccaddw(i1 %c, i32 signext %arg, i32 signext %arg1, i3
 ; ZICOND-NEXT:  .LBB16_1: # %bb2
 ; ZICOND-NEXT:    # =>This Inner Loop Header: Depth=1
 ; ZICOND-NEXT:    mv a0, s0
-; ZICOND-NEXT:    call bar@plt
+; ZICOND-NEXT:    call bar
 ; ZICOND-NEXT:    sllw s0, s0, s1
 ; ZICOND-NEXT:    bnez a0, .LBB16_1
 ; ZICOND-NEXT:  # %bb.2: # %bb7
@@ -628,7 +628,7 @@ define void @sextw_removal_ccaddw(i1 %c, i32 signext %arg, i32 signext %arg1, i3
 ; RV32SFB-NEXT:  .LBB16_1: # %bb2
 ; RV32SFB-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32SFB-NEXT:    mv a0, s0
-; RV32SFB-NEXT:    call bar@plt
+; RV32SFB-NEXT:    call bar
 ; RV32SFB-NEXT:    sll s0, s0, s1
 ; RV32SFB-NEXT:    bnez a0, .LBB16_1
 ; RV32SFB-NEXT:  # %bb.2: # %bb7
diff --git a/llvm/test/CodeGen/RISCV/shrinkwrap-jump-table.ll b/llvm/test/CodeGen/RISCV/shrinkwrap-jump-table.ll
index 1c57b0f7e60331..5e557de37423ee 100644
--- a/llvm/test/CodeGen/RISCV/shrinkwrap-jump-table.ll
+++ b/llvm/test/CodeGen/RISCV/shrinkwrap-jump-table.ll
@@ -23,21 +23,21 @@ define dso_local signext i32 @test_shrinkwrap_jump_table(ptr noundef %m) local_u
 ; CHECK-NEXT:    lw a1, 0(a1)
 ; CHECK-NEXT:    jr a1
 ; CHECK-NEXT:  .LBB0_2: # %sw.bb
-; CHECK-NEXT:    tail func1@plt
+; CHECK-NEXT:    tail func1
 ; CHECK-NEXT:  .LBB0_3: # %sw.bb7
-; CHECK-NEXT:    tail func5@plt
+; CHECK-NEXT:    tail func5
 ; CHECK-NEXT:  .LBB0_4: # %sw.bb3
-; CHECK-NEXT:    tail func3@plt
+; CHECK-NEXT:    tail func3
 ; CHECK-NEXT:  .LBB0_5: # %sw.bb5
-; CHECK-NEXT:    tail func4@plt
+; CHECK-NEXT:    tail func4
 ; CHECK-NEXT:  .LBB0_6: # %sw.bb1
-; CHECK-NEXT:    tail func2@plt
+; CHECK-NEXT:    tail func2
 ; CHECK-NEXT:  .LBB0_7: # %sw.default
 ; CHECK-NEXT:    addi sp, sp, -16
 ; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; CHECK-NEXT:    .cfi_offset ra, -8
-; CHECK-NEXT:    call default_func@plt
+; CHECK-NEXT:    call default_func
 ; CHECK-NEXT:    li a0, 0
 ; CHECK-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; CHECK-NEXT:    addi sp, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/shrinkwrap.ll b/llvm/test/CodeGen/RISCV/shrinkwrap.ll
index 16f062a78323ce..40577701e1e240 100644
--- a/llvm/test/CodeGen/RISCV/shrinkwrap.ll
+++ b/llvm/test/CodeGen/RISCV/shrinkwrap.ll
@@ -22,7 +22,7 @@ define void @eliminate_restore(i32 %n) nounwind {
 ; RV32I-SW-NO-NEXT:    addi sp, sp, 16
 ; RV32I-SW-NO-NEXT:    ret
 ; RV32I-SW-NO-NEXT:  .LBB0_2: # %if.then
-; RV32I-SW-NO-NEXT:    call abort@plt
+; RV32I-SW-NO-NEXT:    call abort
 ;
 ; RV32I-SW-LABEL: eliminate_restore:
 ; RV32I-SW:       # %bb.0:
@@ -33,7 +33,7 @@ define void @eliminate_restore(i32 %n) nounwind {
 ; RV32I-SW-NEXT:  .LBB0_2: # %if.then
 ; RV32I-SW-NEXT:    addi sp, sp, -16
 ; RV32I-SW-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-SW-NEXT:    call abort@plt
+; RV32I-SW-NEXT:    call abort
 ;
 ; RV32I-SW-SR-LABEL: eliminate_restore:
 ; RV32I-SW-SR:       # %bb.0:
@@ -43,7 +43,7 @@ define void @eliminate_restore(i32 %n) nounwind {
 ; RV32I-SW-SR-NEXT:    ret
 ; RV32I-SW-SR-NEXT:  .LBB0_2: # %if.then
 ; RV32I-SW-SR-NEXT:    call t0, __riscv_save_0
-; RV32I-SW-SR-NEXT:    call abort@plt
+; RV32I-SW-SR-NEXT:    call abort
 ;
 ; RV64I-SW-LABEL: eliminate_restore:
 ; RV64I-SW:       # %bb.0:
@@ -55,7 +55,7 @@ define void @eliminate_restore(i32 %n) nounwind {
 ; RV64I-SW-NEXT:  .LBB0_2: # %if.then
 ; RV64I-SW-NEXT:    addi sp, sp, -16
 ; RV64I-SW-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-SW-NEXT:    call abort@plt
+; RV64I-SW-NEXT:    call abort
   %cmp = icmp ule i32 %n, 32
   br i1 %cmp, label %if.then, label %if.end
 
@@ -83,7 +83,7 @@ define void @conditional_alloca(i32 %n) nounwind {
 ; RV32I-SW-NO-NEXT:    andi a0, a0, -16
 ; RV32I-SW-NO-NEXT:    sub a0, sp, a0
 ; RV32I-SW-NO-NEXT:    mv sp, a0
-; RV32I-SW-NO-NEXT:    call notdead@plt
+; RV32I-SW-NO-NEXT:    call notdead
 ; RV32I-SW-NO-NEXT:  .LBB1_2: # %if.end
 ; RV32I-SW-NO-NEXT:    addi sp, s0, -16
 ; RV32I-SW-NO-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -104,7 +104,7 @@ define void @conditional_alloca(i32 %n) nounwind {
 ; RV32I-SW-NEXT:    andi a0, a0, -16
 ; RV32I-SW-NEXT:    sub a0, sp, a0
 ; RV32I-SW-NEXT:    mv sp, a0
-; RV32I-SW-NEXT:    call notdead@plt
+; RV32I-SW-NEXT:    call notdead
 ; RV32I-SW-NEXT:    addi sp, s0, -16
 ; RV32I-SW-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-SW-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
@@ -123,7 +123,7 @@ define void @conditional_alloca(i32 %n) nounwind {
 ; RV32I-SW-SR-NEXT:    andi a0, a0, -16
 ; RV32I-SW-SR-NEXT:    sub a0, sp, a0
 ; RV32I-SW-SR-NEXT:    mv sp, a0
-; RV32I-SW-SR-NEXT:    call notdead@plt
+; RV32I-SW-SR-NEXT:    call notdead
 ; RV32I-SW-SR-NEXT:    addi sp, s0, -16
 ; RV32I-SW-SR-NEXT:    tail __riscv_restore_1
 ; RV32I-SW-SR-NEXT:  .LBB1_2: # %if.end
@@ -145,7 +145,7 @@ define void @conditional_alloca(i32 %n) nounwind {
 ; RV64I-SW-NEXT:    andi a0, a0, -16
 ; RV64I-SW-NEXT:    sub a0, sp, a0
 ; RV64I-SW-NEXT:    mv sp, a0
-; RV64I-SW-NEXT:    call notdead@plt
+; RV64I-SW-NEXT:    call notdead
 ; RV64I-SW-NEXT:    addi sp, s0, -16
 ; RV64I-SW-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-SW-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/split-sp-adjust.ll b/llvm/test/CodeGen/RISCV/split-sp-adjust.ll
index 63a2c04de6c105..7889e005399f5c 100644
--- a/llvm/test/CodeGen/RISCV/split-sp-adjust.ll
+++ b/llvm/test/CodeGen/RISCV/split-sp-adjust.ll
@@ -10,7 +10,7 @@ define i32 @SplitSP() nounwind {
 ; RV32I-NEXT:    sw ra, 2028(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    addi a0, sp, 16
-; RV32I-NEXT:    call foo@plt
+; RV32I-NEXT:    call foo
 ; RV32I-NEXT:    li a0, 0
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
@@ -29,7 +29,7 @@ define i32 @NoSplitSP() nounwind {
 ; RV32I-NEXT:    addi sp, sp, -2032
 ; RV32I-NEXT:    sw ra, 2028(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a0, sp, 4
-; RV32I-NEXT:    call foo@plt
+; RV32I-NEXT:    call foo
 ; RV32I-NEXT:    li a0, 0
 ; RV32I-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 2032
diff --git a/llvm/test/CodeGen/RISCV/split-udiv-by-constant.ll b/llvm/test/CodeGen/RISCV/split-udiv-by-constant.ll
index b15f17ea9fb5a8..5fa802b7f27cad 100644
--- a/llvm/test/CodeGen/RISCV/split-udiv-by-constant.ll
+++ b/llvm/test/CodeGen/RISCV/split-udiv-by-constant.ll
@@ -121,7 +121,7 @@ define iXLen2 @test_udiv_7(iXLen2 %x) nounwind {
 ; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    li a2, 7
 ; RV32-NEXT:    li a3, 0
-; RV32-NEXT:    call __udivdi3@plt
+; RV32-NEXT:    call __udivdi3
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
@@ -132,7 +132,7 @@ define iXLen2 @test_udiv_7(iXLen2 %x) nounwind {
 ; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-NEXT:    li a2, 7
 ; RV64-NEXT:    li a3, 0
-; RV64-NEXT:    call __udivti3@plt
+; RV64-NEXT:    call __udivti3
 ; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    addi sp, sp, 16
 ; RV64-NEXT:    ret
@@ -147,7 +147,7 @@ define iXLen2 @test_udiv_9(iXLen2 %x) nounwind {
 ; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    li a2, 9
 ; RV32-NEXT:    li a3, 0
-; RV32-NEXT:    call __udivdi3@plt
+; RV32-NEXT:    call __udivdi3
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
@@ -158,7 +158,7 @@ define iXLen2 @test_udiv_9(iXLen2 %x) nounwind {
 ; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-NEXT:    li a2, 9
 ; RV64-NEXT:    li a3, 0
-; RV64-NEXT:    call __udivti3@plt
+; RV64-NEXT:    call __udivti3
 ; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    addi sp, sp, 16
 ; RV64-NEXT:    ret
diff --git a/llvm/test/CodeGen/RISCV/split-urem-by-constant.ll b/llvm/test/CodeGen/RISCV/split-urem-by-constant.ll
index cdfb1ef0ab4d6b..8444520fcc7718 100644
--- a/llvm/test/CodeGen/RISCV/split-urem-by-constant.ll
+++ b/llvm/test/CodeGen/RISCV/split-urem-by-constant.ll
@@ -83,7 +83,7 @@ define iXLen2 @test_urem_7(iXLen2 %x) nounwind {
 ; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    li a2, 7
 ; RV32-NEXT:    li a3, 0
-; RV32-NEXT:    call __umoddi3@plt
+; RV32-NEXT:    call __umoddi3
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
@@ -94,7 +94,7 @@ define iXLen2 @test_urem_7(iXLen2 %x) nounwind {
 ; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-NEXT:    li a2, 7
 ; RV64-NEXT:    li a3, 0
-; RV64-NEXT:    call __umodti3@plt
+; RV64-NEXT:    call __umodti3
 ; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    addi sp, sp, 16
 ; RV64-NEXT:    ret
@@ -109,7 +109,7 @@ define iXLen2 @test_urem_9(iXLen2 %x) nounwind {
 ; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    li a2, 9
 ; RV32-NEXT:    li a3, 0
-; RV32-NEXT:    call __umoddi3@plt
+; RV32-NEXT:    call __umoddi3
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
@@ -120,7 +120,7 @@ define iXLen2 @test_urem_9(iXLen2 %x) nounwind {
 ; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-NEXT:    li a2, 9
 ; RV64-NEXT:    li a3, 0
-; RV64-NEXT:    call __umodti3@plt
+; RV64-NEXT:    call __umodti3
 ; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    addi sp, sp, 16
 ; RV64-NEXT:    ret
diff --git a/llvm/test/CodeGen/RISCV/srem-lkk.ll b/llvm/test/CodeGen/RISCV/srem-lkk.ll
index 1dcb0438239238..7c291bbceedc6d 100644
--- a/llvm/test/CodeGen/RISCV/srem-lkk.ll
+++ b/llvm/test/CodeGen/RISCV/srem-lkk.ll
@@ -12,7 +12,7 @@ define i32 @fold_srem_positive_odd(i32 %x) nounwind {
 ; RV32I-LABEL: fold_srem_positive_odd:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    li a1, 95
-; RV32I-NEXT:    tail __modsi3@plt
+; RV32I-NEXT:    tail __modsi3
 ;
 ; RV32IM-LABEL: fold_srem_positive_odd:
 ; RV32IM:       # %bb.0:
@@ -34,7 +34,7 @@ define i32 @fold_srem_positive_odd(i32 %x) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sext.w a0, a0
 ; RV64I-NEXT:    li a1, 95
-; RV64I-NEXT:    call __moddi3@plt
+; RV64I-NEXT:    call __moddi3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -63,7 +63,7 @@ define i32 @fold_srem_positive_even(i32 %x) nounwind {
 ; RV32I-LABEL: fold_srem_positive_even:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    li a1, 1060
-; RV32I-NEXT:    tail __modsi3@plt
+; RV32I-NEXT:    tail __modsi3
 ;
 ; RV32IM-LABEL: fold_srem_positive_even:
 ; RV32IM:       # %bb.0:
@@ -84,7 +84,7 @@ define i32 @fold_srem_positive_even(i32 %x) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sext.w a0, a0
 ; RV64I-NEXT:    li a1, 1060
-; RV64I-NEXT:    call __moddi3@plt
+; RV64I-NEXT:    call __moddi3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -111,7 +111,7 @@ define i32 @fold_srem_negative_odd(i32 %x) nounwind {
 ; RV32I-LABEL: fold_srem_negative_odd:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    li a1, -723
-; RV32I-NEXT:    tail __modsi3@plt
+; RV32I-NEXT:    tail __modsi3
 ;
 ; RV32IM-LABEL: fold_srem_negative_odd:
 ; RV32IM:       # %bb.0:
@@ -132,7 +132,7 @@ define i32 @fold_srem_negative_odd(i32 %x) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sext.w a0, a0
 ; RV64I-NEXT:    li a1, -723
-; RV64I-NEXT:    call __moddi3@plt
+; RV64I-NEXT:    call __moddi3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -160,7 +160,7 @@ define i32 @fold_srem_negative_even(i32 %x) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    lui a1, 1048570
 ; RV32I-NEXT:    addi a1, a1, 1595
-; RV32I-NEXT:    tail __modsi3@plt
+; RV32I-NEXT:    tail __modsi3
 ;
 ; RV32IM-LABEL: fold_srem_negative_even:
 ; RV32IM:       # %bb.0:
@@ -183,7 +183,7 @@ define i32 @fold_srem_negative_even(i32 %x) nounwind {
 ; RV64I-NEXT:    sext.w a0, a0
 ; RV64I-NEXT:    lui a1, 1048570
 ; RV64I-NEXT:    addiw a1, a1, 1595
-; RV64I-NEXT:    call __moddi3@plt
+; RV64I-NEXT:    call __moddi3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -217,11 +217,11 @@ define i32 @combine_srem_sdiv(i32 %x) nounwind {
 ; RV32I-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    li a1, 95
-; RV32I-NEXT:    call __modsi3@plt
+; RV32I-NEXT:    call __modsi3
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    li a1, 95
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __divsi3@plt
+; RV32I-NEXT:    call __divsi3
 ; RV32I-NEXT:    add a0, s1, a0
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
@@ -253,11 +253,11 @@ define i32 @combine_srem_sdiv(i32 %x) nounwind {
 ; RV64I-NEXT:    sext.w s0, a0
 ; RV64I-NEXT:    li a1, 95
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __moddi3@plt
+; RV64I-NEXT:    call __moddi3
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    li a1, 95
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __divdi3@plt
+; RV64I-NEXT:    call __divdi3
 ; RV64I-NEXT:    addw a0, s1, a0
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
@@ -391,7 +391,7 @@ define i64 @dont_fold_srem_i64(i64 %x) nounwind {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 98
 ; RV32I-NEXT:    li a3, 0
-; RV32I-NEXT:    call __moddi3@plt
+; RV32I-NEXT:    call __moddi3
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -402,7 +402,7 @@ define i64 @dont_fold_srem_i64(i64 %x) nounwind {
 ; RV32IM-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IM-NEXT:    li a2, 98
 ; RV32IM-NEXT:    li a3, 0
-; RV32IM-NEXT:    call __moddi3@plt
+; RV32IM-NEXT:    call __moddi3
 ; RV32IM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IM-NEXT:    addi sp, sp, 16
 ; RV32IM-NEXT:    ret
@@ -410,7 +410,7 @@ define i64 @dont_fold_srem_i64(i64 %x) nounwind {
 ; RV64I-LABEL: dont_fold_srem_i64:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    li a1, 98
-; RV64I-NEXT:    tail __moddi3@plt
+; RV64I-NEXT:    tail __moddi3
 ;
 ; RV64IM-LABEL: dont_fold_srem_i64:
 ; RV64IM:       # %bb.0:
diff --git a/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll b/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
index 122388c1b73ec3..30ac8de517f680 100644
--- a/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
+++ b/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
@@ -13,7 +13,7 @@ define i1 @test_srem_odd(i29 %X) nounwind {
 ; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    lui a1, 128424
 ; RV32-NEXT:    addi a1, a1, 331
-; RV32-NEXT:    call __mulsi3@plt
+; RV32-NEXT:    call __mulsi3
 ; RV32-NEXT:    lui a1, 662
 ; RV32-NEXT:    addi a1, a1, -83
 ; RV32-NEXT:    add a0, a0, a1
@@ -32,7 +32,7 @@ define i1 @test_srem_odd(i29 %X) nounwind {
 ; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-NEXT:    lui a1, 128424
 ; RV64-NEXT:    addiw a1, a1, 331
-; RV64-NEXT:    call __muldi3@plt
+; RV64-NEXT:    call __muldi3
 ; RV64-NEXT:    lui a1, 662
 ; RV64-NEXT:    addi a1, a1, -83
 ; RV64-NEXT:    add a0, a0, a1
@@ -117,7 +117,7 @@ define i1 @test_srem_even(i4 %X) nounwind {
 ; RV32-NEXT:    slli a0, a0, 28
 ; RV32-NEXT:    srai a0, a0, 28
 ; RV32-NEXT:    li a1, 6
-; RV32-NEXT:    call __modsi3@plt
+; RV32-NEXT:    call __modsi3
 ; RV32-NEXT:    addi a0, a0, -1
 ; RV32-NEXT:    seqz a0, a0
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -131,7 +131,7 @@ define i1 @test_srem_even(i4 %X) nounwind {
 ; RV64-NEXT:    slli a0, a0, 60
 ; RV64-NEXT:    srai a0, a0, 60
 ; RV64-NEXT:    li a1, 6
-; RV64-NEXT:    call __moddi3@plt
+; RV64-NEXT:    call __moddi3
 ; RV64-NEXT:    addi a0, a0, -1
 ; RV64-NEXT:    seqz a0, a0
 ; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -330,21 +330,21 @@ define void @test_srem_vec(ptr %X) nounwind {
 ; RV32-NEXT:    srai a1, a1, 31
 ; RV32-NEXT:    li a2, 6
 ; RV32-NEXT:    li a3, 0
-; RV32-NEXT:    call __moddi3@plt
+; RV32-NEXT:    call __moddi3
 ; RV32-NEXT:    mv s5, a0
 ; RV32-NEXT:    mv s6, a1
 ; RV32-NEXT:    li a2, 7
 ; RV32-NEXT:    mv a0, s2
 ; RV32-NEXT:    mv a1, s4
 ; RV32-NEXT:    li a3, 0
-; RV32-NEXT:    call __moddi3@plt
+; RV32-NEXT:    call __moddi3
 ; RV32-NEXT:    mv s2, a0
 ; RV32-NEXT:    mv s4, a1
 ; RV32-NEXT:    li a2, -5
 ; RV32-NEXT:    li a3, -1
 ; RV32-NEXT:    mv a0, s1
 ; RV32-NEXT:    mv a1, s3
-; RV32-NEXT:    call __moddi3@plt
+; RV32-NEXT:    call __moddi3
 ; RV32-NEXT:    or a2, s5, s6
 ; RV32-NEXT:    snez a2, a2
 ; RV32-NEXT:    xori a0, a0, 2
@@ -403,18 +403,18 @@ define void @test_srem_vec(ptr %X) nounwind {
 ; RV64-NEXT:    slli a2, a2, 31
 ; RV64-NEXT:    srai s2, a2, 31
 ; RV64-NEXT:    li a1, 7
-; RV64-NEXT:    call __moddi3@plt
+; RV64-NEXT:    call __moddi3
 ; RV64-NEXT:    mv s3, a0
 ; RV64-NEXT:    li a1, -5
 ; RV64-NEXT:    mv a0, s1
-; RV64-NEXT:    call __moddi3@plt
+; RV64-NEXT:    call __moddi3
 ; RV64-NEXT:    mv s1, a0
 ; RV64-NEXT:    lui a0, 699051
 ; RV64-NEXT:    addiw a1, a0, -1365
 ; RV64-NEXT:    slli a0, a1, 32
 ; RV64-NEXT:    add a1, a1, a0
 ; RV64-NEXT:    mv a0, s2
-; RV64-NEXT:    call __muldi3@plt
+; RV64-NEXT:    call __muldi3
 ; RV64-NEXT:    lui a1, %hi(.LCPI3_0)
 ; RV64-NEXT:    ld a1, %lo(.LCPI3_0)(a1)
 ; RV64-NEXT:    add a0, a0, a1
@@ -482,21 +482,21 @@ define void @test_srem_vec(ptr %X) nounwind {
 ; RV32M-NEXT:    srai a1, a1, 31
 ; RV32M-NEXT:    li a2, 6
 ; RV32M-NEXT:    li a3, 0
-; RV32M-NEXT:    call __moddi3@plt
+; RV32M-NEXT:    call __moddi3
 ; RV32M-NEXT:    mv s5, a0
 ; RV32M-NEXT:    mv s6, a1
 ; RV32M-NEXT:    li a2, 7
 ; RV32M-NEXT:    mv a0, s2
 ; RV32M-NEXT:    mv a1, s4
 ; RV32M-NEXT:    li a3, 0
-; RV32M-NEXT:    call __moddi3@plt
+; RV32M-NEXT:    call __moddi3
 ; RV32M-NEXT:    mv s2, a0
 ; RV32M-NEXT:    mv s4, a1
 ; RV32M-NEXT:    li a2, -5
 ; RV32M-NEXT:    li a3, -1
 ; RV32M-NEXT:    mv a0, s1
 ; RV32M-NEXT:    mv a1, s3
-; RV32M-NEXT:    call __moddi3@plt
+; RV32M-NEXT:    call __moddi3
 ; RV32M-NEXT:    or a2, s5, s6
 ; RV32M-NEXT:    snez a2, a2
 ; RV32M-NEXT:    xori a0, a0, 2
@@ -632,7 +632,7 @@ define void @test_srem_vec(ptr %X) nounwind {
 ; RV32MV-NEXT:    srai a1, a1, 31
 ; RV32MV-NEXT:    li a2, 6
 ; RV32MV-NEXT:    li a3, 0
-; RV32MV-NEXT:    call __moddi3@plt
+; RV32MV-NEXT:    call __moddi3
 ; RV32MV-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
 ; RV32MV-NEXT:    vmv.v.x v8, a0
 ; RV32MV-NEXT:    vslide1down.vx v8, v8, a1
@@ -642,7 +642,7 @@ define void @test_srem_vec(ptr %X) nounwind {
 ; RV32MV-NEXT:    mv a0, s2
 ; RV32MV-NEXT:    mv a1, s4
 ; RV32MV-NEXT:    li a3, 0
-; RV32MV-NEXT:    call __moddi3@plt
+; RV32MV-NEXT:    call __moddi3
 ; RV32MV-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
 ; RV32MV-NEXT:    addi a2, sp, 16
 ; RV32MV-NEXT:    vl2r.v v8, (a2) # Unknown-size Folded Reload
@@ -654,7 +654,7 @@ define void @test_srem_vec(ptr %X) nounwind {
 ; RV32MV-NEXT:    li a3, -1
 ; RV32MV-NEXT:    mv a0, s1
 ; RV32MV-NEXT:    mv a1, s3
-; RV32MV-NEXT:    call __moddi3@plt
+; RV32MV-NEXT:    call __moddi3
 ; RV32MV-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
 ; RV32MV-NEXT:    addi a2, sp, 16
 ; RV32MV-NEXT:    vl2r.v v8, (a2) # Unknown-size Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/srem-vector-lkk.ll b/llvm/test/CodeGen/RISCV/srem-vector-lkk.ll
index 3335ca3a34b6c6..ec6e978c2c68e2 100644
--- a/llvm/test/CodeGen/RISCV/srem-vector-lkk.ll
+++ b/llvm/test/CodeGen/RISCV/srem-vector-lkk.ll
@@ -25,19 +25,19 @@ define <4 x i16> @fold_srem_vec_1(<4 x i16> %x) nounwind {
 ; RV32I-NEXT:    mv s3, a0
 ; RV32I-NEXT:    li a1, 95
 ; RV32I-NEXT:    mv a0, a2
-; RV32I-NEXT:    call __modsi3@plt
+; RV32I-NEXT:    call __modsi3
 ; RV32I-NEXT:    mv s4, a0
 ; RV32I-NEXT:    li a1, -124
 ; RV32I-NEXT:    mv a0, s2
-; RV32I-NEXT:    call __modsi3@plt
+; RV32I-NEXT:    call __modsi3
 ; RV32I-NEXT:    mv s2, a0
 ; RV32I-NEXT:    li a1, 98
 ; RV32I-NEXT:    mv a0, s1
-; RV32I-NEXT:    call __modsi3@plt
+; RV32I-NEXT:    call __modsi3
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    li a1, -1003
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __modsi3@plt
+; RV32I-NEXT:    call __modsi3
 ; RV32I-NEXT:    sh a0, 6(s3)
 ; RV32I-NEXT:    sh s1, 4(s3)
 ; RV32I-NEXT:    sh s2, 2(s3)
@@ -117,19 +117,19 @@ define <4 x i16> @fold_srem_vec_1(<4 x i16> %x) nounwind {
 ; RV64I-NEXT:    mv s3, a0
 ; RV64I-NEXT:    li a1, 95
 ; RV64I-NEXT:    mv a0, a2
-; RV64I-NEXT:    call __moddi3@plt
+; RV64I-NEXT:    call __moddi3
 ; RV64I-NEXT:    mv s4, a0
 ; RV64I-NEXT:    li a1, -124
 ; RV64I-NEXT:    mv a0, s2
-; RV64I-NEXT:    call __moddi3@plt
+; RV64I-NEXT:    call __moddi3
 ; RV64I-NEXT:    mv s2, a0
 ; RV64I-NEXT:    li a1, 98
 ; RV64I-NEXT:    mv a0, s1
-; RV64I-NEXT:    call __moddi3@plt
+; RV64I-NEXT:    call __moddi3
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    li a1, -1003
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __moddi3@plt
+; RV64I-NEXT:    call __moddi3
 ; RV64I-NEXT:    sh a0, 6(s3)
 ; RV64I-NEXT:    sh s1, 4(s3)
 ; RV64I-NEXT:    sh s2, 2(s3)
@@ -213,19 +213,19 @@ define <4 x i16> @fold_srem_vec_2(<4 x i16> %x) nounwind {
 ; RV32I-NEXT:    mv s3, a0
 ; RV32I-NEXT:    li a1, 95
 ; RV32I-NEXT:    mv a0, a2
-; RV32I-NEXT:    call __modsi3@plt
+; RV32I-NEXT:    call __modsi3
 ; RV32I-NEXT:    mv s4, a0
 ; RV32I-NEXT:    li a1, 95
 ; RV32I-NEXT:    mv a0, s2
-; RV32I-NEXT:    call __modsi3@plt
+; RV32I-NEXT:    call __modsi3
 ; RV32I-NEXT:    mv s2, a0
 ; RV32I-NEXT:    li a1, 95
 ; RV32I-NEXT:    mv a0, s1
-; RV32I-NEXT:    call __modsi3@plt
+; RV32I-NEXT:    call __modsi3
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    li a1, 95
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __modsi3@plt
+; RV32I-NEXT:    call __modsi3
 ; RV32I-NEXT:    sh a0, 6(s3)
 ; RV32I-NEXT:    sh s1, 4(s3)
 ; RV32I-NEXT:    sh s2, 2(s3)
@@ -298,19 +298,19 @@ define <4 x i16> @fold_srem_vec_2(<4 x i16> %x) nounwind {
 ; RV64I-NEXT:    mv s3, a0
 ; RV64I-NEXT:    li a1, 95
 ; RV64I-NEXT:    mv a0, a2
-; RV64I-NEXT:    call __moddi3@plt
+; RV64I-NEXT:    call __moddi3
 ; RV64I-NEXT:    mv s4, a0
 ; RV64I-NEXT:    li a1, 95
 ; RV64I-NEXT:    mv a0, s2
-; RV64I-NEXT:    call __moddi3@plt
+; RV64I-NEXT:    call __moddi3
 ; RV64I-NEXT:    mv s2, a0
 ; RV64I-NEXT:    li a1, 95
 ; RV64I-NEXT:    mv a0, s1
-; RV64I-NEXT:    call __moddi3@plt
+; RV64I-NEXT:    call __moddi3
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    li a1, 95
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __moddi3@plt
+; RV64I-NEXT:    call __moddi3
 ; RV64I-NEXT:    sh a0, 6(s3)
 ; RV64I-NEXT:    sh s1, 4(s3)
 ; RV64I-NEXT:    sh s2, 2(s3)
@@ -393,35 +393,35 @@ define <4 x i16> @combine_srem_sdiv(<4 x i16> %x) nounwind {
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    li a1, 95
 ; RV32I-NEXT:    mv a0, s4
-; RV32I-NEXT:    call __modsi3@plt
+; RV32I-NEXT:    call __modsi3
 ; RV32I-NEXT:    mv s5, a0
 ; RV32I-NEXT:    li a1, 95
 ; RV32I-NEXT:    mv a0, s3
-; RV32I-NEXT:    call __modsi3@plt
+; RV32I-NEXT:    call __modsi3
 ; RV32I-NEXT:    mv s6, a0
 ; RV32I-NEXT:    li a1, 95
 ; RV32I-NEXT:    mv a0, s2
-; RV32I-NEXT:    call __modsi3@plt
+; RV32I-NEXT:    call __modsi3
 ; RV32I-NEXT:    mv s7, a0
 ; RV32I-NEXT:    li a1, 95
 ; RV32I-NEXT:    mv a0, s1
-; RV32I-NEXT:    call __modsi3@plt
+; RV32I-NEXT:    call __modsi3
 ; RV32I-NEXT:    mv s8, a0
 ; RV32I-NEXT:    li a1, 95
 ; RV32I-NEXT:    mv a0, s4
-; RV32I-NEXT:    call __divsi3@plt
+; RV32I-NEXT:    call __divsi3
 ; RV32I-NEXT:    mv s4, a0
 ; RV32I-NEXT:    li a1, 95
 ; RV32I-NEXT:    mv a0, s3
-; RV32I-NEXT:    call __divsi3@plt
+; RV32I-NEXT:    call __divsi3
 ; RV32I-NEXT:    mv s3, a0
 ; RV32I-NEXT:    li a1, 95
 ; RV32I-NEXT:    mv a0, s2
-; RV32I-NEXT:    call __divsi3@plt
+; RV32I-NEXT:    call __divsi3
 ; RV32I-NEXT:    mv s2, a0
 ; RV32I-NEXT:    li a1, 95
 ; RV32I-NEXT:    mv a0, s1
-; RV32I-NEXT:    call __divsi3@plt
+; RV32I-NEXT:    call __divsi3
 ; RV32I-NEXT:    add a0, s8, a0
 ; RV32I-NEXT:    add s2, s7, s2
 ; RV32I-NEXT:    add s3, s6, s3
@@ -510,35 +510,35 @@ define <4 x i16> @combine_srem_sdiv(<4 x i16> %x) nounwind {
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    li a1, 95
 ; RV64I-NEXT:    mv a0, s4
-; RV64I-NEXT:    call __moddi3@plt
+; RV64I-NEXT:    call __moddi3
 ; RV64I-NEXT:    mv s5, a0
 ; RV64I-NEXT:    li a1, 95
 ; RV64I-NEXT:    mv a0, s3
-; RV64I-NEXT:    call __moddi3@plt
+; RV64I-NEXT:    call __moddi3
 ; RV64I-NEXT:    mv s6, a0
 ; RV64I-NEXT:    li a1, 95
 ; RV64I-NEXT:    mv a0, s2
-; RV64I-NEXT:    call __moddi3@plt
+; RV64I-NEXT:    call __moddi3
 ; RV64I-NEXT:    mv s7, a0
 ; RV64I-NEXT:    li a1, 95
 ; RV64I-NEXT:    mv a0, s1
-; RV64I-NEXT:    call __moddi3@plt
+; RV64I-NEXT:    call __moddi3
 ; RV64I-NEXT:    mv s8, a0
 ; RV64I-NEXT:    li a1, 95
 ; RV64I-NEXT:    mv a0, s4
-; RV64I-NEXT:    call __divdi3@plt
+; RV64I-NEXT:    call __divdi3
 ; RV64I-NEXT:    mv s4, a0
 ; RV64I-NEXT:    li a1, 95
 ; RV64I-NEXT:    mv a0, s3
-; RV64I-NEXT:    call __divdi3@plt
+; RV64I-NEXT:    call __divdi3
 ; RV64I-NEXT:    mv s3, a0
 ; RV64I-NEXT:    li a1, 95
 ; RV64I-NEXT:    mv a0, s2
-; RV64I-NEXT:    call __divdi3@plt
+; RV64I-NEXT:    call __divdi3
 ; RV64I-NEXT:    mv s2, a0
 ; RV64I-NEXT:    li a1, 95
 ; RV64I-NEXT:    mv a0, s1
-; RV64I-NEXT:    call __divdi3@plt
+; RV64I-NEXT:    call __divdi3
 ; RV64I-NEXT:    add a0, s8, a0
 ; RV64I-NEXT:    add s2, s7, s2
 ; RV64I-NEXT:    add s3, s6, s3
@@ -640,7 +640,7 @@ define <4 x i16> @dont_fold_srem_power_of_two(<4 x i16> %x) nounwind {
 ; RV32I-NEXT:    andi a1, a1, -8
 ; RV32I-NEXT:    sub s3, a3, a1
 ; RV32I-NEXT:    li a1, 95
-; RV32I-NEXT:    call __modsi3@plt
+; RV32I-NEXT:    call __modsi3
 ; RV32I-NEXT:    sh a0, 6(s0)
 ; RV32I-NEXT:    sh s3, 4(s0)
 ; RV32I-NEXT:    sh s2, 2(s0)
@@ -713,7 +713,7 @@ define <4 x i16> @dont_fold_srem_power_of_two(<4 x i16> %x) nounwind {
 ; RV64I-NEXT:    andi a1, a1, -8
 ; RV64I-NEXT:    subw s3, a3, a1
 ; RV64I-NEXT:    li a1, 95
-; RV64I-NEXT:    call __moddi3@plt
+; RV64I-NEXT:    call __moddi3
 ; RV64I-NEXT:    sh a0, 6(s0)
 ; RV64I-NEXT:    sh s3, 4(s0)
 ; RV64I-NEXT:    sh s2, 2(s0)
@@ -779,16 +779,16 @@ define <4 x i16> @dont_fold_srem_one(<4 x i16> %x) nounwind {
 ; RV32I-NEXT:    mv s2, a0
 ; RV32I-NEXT:    li a1, 654
 ; RV32I-NEXT:    mv a0, a2
-; RV32I-NEXT:    call __modsi3@plt
+; RV32I-NEXT:    call __modsi3
 ; RV32I-NEXT:    mv s3, a0
 ; RV32I-NEXT:    li a1, 23
 ; RV32I-NEXT:    mv a0, s1
-; RV32I-NEXT:    call __modsi3@plt
+; RV32I-NEXT:    call __modsi3
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    lui a0, 1
 ; RV32I-NEXT:    addi a1, a0, 1327
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __modsi3@plt
+; RV32I-NEXT:    call __modsi3
 ; RV32I-NEXT:    sh a0, 6(s2)
 ; RV32I-NEXT:    sh s1, 4(s2)
 ; RV32I-NEXT:    sh s3, 2(s2)
@@ -856,16 +856,16 @@ define <4 x i16> @dont_fold_srem_one(<4 x i16> %x) nounwind {
 ; RV64I-NEXT:    mv s2, a0
 ; RV64I-NEXT:    li a1, 654
 ; RV64I-NEXT:    mv a0, a2
-; RV64I-NEXT:    call __moddi3@plt
+; RV64I-NEXT:    call __moddi3
 ; RV64I-NEXT:    mv s3, a0
 ; RV64I-NEXT:    li a1, 23
 ; RV64I-NEXT:    mv a0, s1
-; RV64I-NEXT:    call __moddi3@plt
+; RV64I-NEXT:    call __moddi3
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    lui a0, 1
 ; RV64I-NEXT:    addiw a1, a0, 1327
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __moddi3@plt
+; RV64I-NEXT:    call __moddi3
 ; RV64I-NEXT:    sh a0, 6(s2)
 ; RV64I-NEXT:    sh s1, 4(s2)
 ; RV64I-NEXT:    sh s3, 2(s2)
@@ -941,12 +941,12 @@ define <4 x i16> @dont_fold_urem_i16_smax(<4 x i16> %x) nounwind {
 ; RV32I-NEXT:    and a1, a1, a3
 ; RV32I-NEXT:    sub s3, a2, a1
 ; RV32I-NEXT:    li a1, 23
-; RV32I-NEXT:    call __modsi3@plt
+; RV32I-NEXT:    call __modsi3
 ; RV32I-NEXT:    mv s2, a0
 ; RV32I-NEXT:    lui a0, 1
 ; RV32I-NEXT:    addi a1, a0, 1327
 ; RV32I-NEXT:    mv a0, s1
-; RV32I-NEXT:    call __modsi3@plt
+; RV32I-NEXT:    call __modsi3
 ; RV32I-NEXT:    sh a0, 6(s0)
 ; RV32I-NEXT:    sh s2, 4(s0)
 ; RV32I-NEXT:    sh zero, 0(s0)
@@ -1013,12 +1013,12 @@ define <4 x i16> @dont_fold_urem_i16_smax(<4 x i16> %x) nounwind {
 ; RV64I-NEXT:    and a1, a1, a3
 ; RV64I-NEXT:    subw s3, a2, a1
 ; RV64I-NEXT:    li a1, 23
-; RV64I-NEXT:    call __moddi3@plt
+; RV64I-NEXT:    call __moddi3
 ; RV64I-NEXT:    mv s2, a0
 ; RV64I-NEXT:    lui a0, 1
 ; RV64I-NEXT:    addiw a1, a0, 1327
 ; RV64I-NEXT:    mv a0, s1
-; RV64I-NEXT:    call __moddi3@plt
+; RV64I-NEXT:    call __moddi3
 ; RV64I-NEXT:    sh a0, 6(s0)
 ; RV64I-NEXT:    sh s2, 4(s0)
 ; RV64I-NEXT:    sh zero, 0(s0)
@@ -1097,21 +1097,21 @@ define <4 x i64> @dont_fold_srem_i64(<4 x i64> %x) nounwind {
 ; RV32I-NEXT:    li a2, 1
 ; RV32I-NEXT:    mv a0, a3
 ; RV32I-NEXT:    li a3, 0
-; RV32I-NEXT:    call __moddi3@plt
+; RV32I-NEXT:    call __moddi3
 ; RV32I-NEXT:    mv s7, a0
 ; RV32I-NEXT:    mv s8, a1
 ; RV32I-NEXT:    li a2, 654
 ; RV32I-NEXT:    mv a0, s4
 ; RV32I-NEXT:    mv a1, s5
 ; RV32I-NEXT:    li a3, 0
-; RV32I-NEXT:    call __moddi3@plt
+; RV32I-NEXT:    call __moddi3
 ; RV32I-NEXT:    mv s4, a0
 ; RV32I-NEXT:    mv s5, a1
 ; RV32I-NEXT:    li a2, 23
 ; RV32I-NEXT:    mv a0, s2
 ; RV32I-NEXT:    mv a1, s3
 ; RV32I-NEXT:    li a3, 0
-; RV32I-NEXT:    call __moddi3@plt
+; RV32I-NEXT:    call __moddi3
 ; RV32I-NEXT:    mv s2, a0
 ; RV32I-NEXT:    mv s3, a1
 ; RV32I-NEXT:    lui a0, 1
@@ -1119,7 +1119,7 @@ define <4 x i64> @dont_fold_srem_i64(<4 x i64> %x) nounwind {
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    mv a1, s1
 ; RV32I-NEXT:    li a3, 0
-; RV32I-NEXT:    call __moddi3@plt
+; RV32I-NEXT:    call __moddi3
 ; RV32I-NEXT:    sw a1, 28(s6)
 ; RV32I-NEXT:    sw a0, 24(s6)
 ; RV32I-NEXT:    sw s3, 20(s6)
@@ -1166,21 +1166,21 @@ define <4 x i64> @dont_fold_srem_i64(<4 x i64> %x) nounwind {
 ; RV32IM-NEXT:    li a2, 1
 ; RV32IM-NEXT:    mv a0, a3
 ; RV32IM-NEXT:    li a3, 0
-; RV32IM-NEXT:    call __moddi3@plt
+; RV32IM-NEXT:    call __moddi3
 ; RV32IM-NEXT:    mv s7, a0
 ; RV32IM-NEXT:    mv s8, a1
 ; RV32IM-NEXT:    li a2, 654
 ; RV32IM-NEXT:    mv a0, s4
 ; RV32IM-NEXT:    mv a1, s5
 ; RV32IM-NEXT:    li a3, 0
-; RV32IM-NEXT:    call __moddi3@plt
+; RV32IM-NEXT:    call __moddi3
 ; RV32IM-NEXT:    mv s4, a0
 ; RV32IM-NEXT:    mv s5, a1
 ; RV32IM-NEXT:    li a2, 23
 ; RV32IM-NEXT:    mv a0, s2
 ; RV32IM-NEXT:    mv a1, s3
 ; RV32IM-NEXT:    li a3, 0
-; RV32IM-NEXT:    call __moddi3@plt
+; RV32IM-NEXT:    call __moddi3
 ; RV32IM-NEXT:    mv s2, a0
 ; RV32IM-NEXT:    mv s3, a1
 ; RV32IM-NEXT:    lui a0, 1
@@ -1188,7 +1188,7 @@ define <4 x i64> @dont_fold_srem_i64(<4 x i64> %x) nounwind {
 ; RV32IM-NEXT:    mv a0, s0
 ; RV32IM-NEXT:    mv a1, s1
 ; RV32IM-NEXT:    li a3, 0
-; RV32IM-NEXT:    call __moddi3@plt
+; RV32IM-NEXT:    call __moddi3
 ; RV32IM-NEXT:    sw a1, 28(s6)
 ; RV32IM-NEXT:    sw a0, 24(s6)
 ; RV32IM-NEXT:    sw s3, 20(s6)
@@ -1224,16 +1224,16 @@ define <4 x i64> @dont_fold_srem_i64(<4 x i64> %x) nounwind {
 ; RV64I-NEXT:    mv s2, a0
 ; RV64I-NEXT:    li a1, 654
 ; RV64I-NEXT:    mv a0, a2
-; RV64I-NEXT:    call __moddi3@plt
+; RV64I-NEXT:    call __moddi3
 ; RV64I-NEXT:    mv s3, a0
 ; RV64I-NEXT:    li a1, 23
 ; RV64I-NEXT:    mv a0, s1
-; RV64I-NEXT:    call __moddi3@plt
+; RV64I-NEXT:    call __moddi3
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    lui a0, 1
 ; RV64I-NEXT:    addiw a1, a0, 1327
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __moddi3@plt
+; RV64I-NEXT:    call __moddi3
 ; RV64I-NEXT:    sd a0, 24(s2)
 ; RV64I-NEXT:    sd s1, 16(s2)
 ; RV64I-NEXT:    sd s3, 8(s2)
diff --git a/llvm/test/CodeGen/RISCV/stack-protector-target.ll b/llvm/test/CodeGen/RISCV/stack-protector-target.ll
index 410b89df1f35c7..13abde7878f3a5 100644
--- a/llvm/test/CodeGen/RISCV/stack-protector-target.ll
+++ b/llvm/test/CodeGen/RISCV/stack-protector-target.ll
@@ -12,7 +12,7 @@ define void @func() sspreq nounwind {
 ; FUCHSIA-RISCV64-NEXT:    ld a0, -16(tp)
 ; FUCHSIA-RISCV64-NEXT:    sd a0, 16(sp)
 ; FUCHSIA-RISCV64-NEXT:    addi a0, sp, 12
-; FUCHSIA-RISCV64-NEXT:    call capture@plt
+; FUCHSIA-RISCV64-NEXT:    call capture
 ; FUCHSIA-RISCV64-NEXT:    ld a0, -16(tp)
 ; FUCHSIA-RISCV64-NEXT:    ld a1, 16(sp)
 ; FUCHSIA-RISCV64-NEXT:    bne a0, a1, .LBB0_2
@@ -21,7 +21,7 @@ define void @func() sspreq nounwind {
 ; FUCHSIA-RISCV64-NEXT:    addi sp, sp, 32
 ; FUCHSIA-RISCV64-NEXT:    ret
 ; FUCHSIA-RISCV64-NEXT:  .LBB0_2: # %CallStackCheckFailBlk
-; FUCHSIA-RISCV64-NEXT:    call __stack_chk_fail@plt
+; FUCHSIA-RISCV64-NEXT:    call __stack_chk_fail
   %1 = alloca i32, align 4
   call void @capture(ptr %1)
   ret void
diff --git a/llvm/test/CodeGen/RISCV/stack-realignment-with-variable-sized-objects.ll b/llvm/test/CodeGen/RISCV/stack-realignment-with-variable-sized-objects.ll
index f10dfbd313cc12..c93153e8f9d1ec 100644
--- a/llvm/test/CodeGen/RISCV/stack-realignment-with-variable-sized-objects.ll
+++ b/llvm/test/CodeGen/RISCV/stack-realignment-with-variable-sized-objects.ll
@@ -26,7 +26,7 @@ define void @caller(i32 %n) {
 ; RV32I-NEXT:    sub a0, sp, a0
 ; RV32I-NEXT:    mv sp, a0
 ; RV32I-NEXT:    mv a1, s1
-; RV32I-NEXT:    call callee@plt
+; RV32I-NEXT:    call callee
 ; RV32I-NEXT:    addi sp, s0, -64
 ; RV32I-NEXT:    lw ra, 60(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 56(sp) # 4-byte Folded Reload
@@ -55,7 +55,7 @@ define void @caller(i32 %n) {
 ; RV64I-NEXT:    sub a0, sp, a0
 ; RV64I-NEXT:    mv sp, a0
 ; RV64I-NEXT:    mv a1, s1
-; RV64I-NEXT:    call callee@plt
+; RV64I-NEXT:    call callee
 ; RV64I-NEXT:    addi sp, s0, -64
 ; RV64I-NEXT:    ld ra, 56(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 48(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/stack-realignment.ll b/llvm/test/CodeGen/RISCV/stack-realignment.ll
index d87db54fd5bf43..afa8efedbff3e9 100644
--- a/llvm/test/CodeGen/RISCV/stack-realignment.ll
+++ b/llvm/test/CodeGen/RISCV/stack-realignment.ll
@@ -19,7 +19,7 @@ define void @caller32() {
 ; RV32I-NEXT:    .cfi_def_cfa s0, 0
 ; RV32I-NEXT:    andi sp, sp, -32
 ; RV32I-NEXT:    mv a0, sp
-; RV32I-NEXT:    call callee@plt
+; RV32I-NEXT:    call callee
 ; RV32I-NEXT:    addi sp, s0, -32
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
@@ -38,7 +38,7 @@ define void @caller32() {
 ; RV64I-NEXT:    .cfi_def_cfa s0, 0
 ; RV64I-NEXT:    andi sp, sp, -32
 ; RV64I-NEXT:    mv a0, sp
-; RV64I-NEXT:    call callee@plt
+; RV64I-NEXT:    call callee
 ; RV64I-NEXT:    addi sp, s0, -32
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
@@ -57,7 +57,7 @@ define void @caller_no_realign32() "no-realign-stack" {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    .cfi_offset ra, -4
 ; RV32I-NEXT:    mv a0, sp
-; RV32I-NEXT:    call callee@plt
+; RV32I-NEXT:    call callee
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -69,7 +69,7 @@ define void @caller_no_realign32() "no-realign-stack" {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    .cfi_offset ra, -8
 ; RV64I-NEXT:    mv a0, sp
-; RV64I-NEXT:    call callee@plt
+; RV64I-NEXT:    call callee
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -91,7 +91,7 @@ define void @caller64() {
 ; RV32I-NEXT:    .cfi_def_cfa s0, 0
 ; RV32I-NEXT:    andi sp, sp, -64
 ; RV32I-NEXT:    mv a0, sp
-; RV32I-NEXT:    call callee@plt
+; RV32I-NEXT:    call callee
 ; RV32I-NEXT:    addi sp, s0, -64
 ; RV32I-NEXT:    lw ra, 60(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 56(sp) # 4-byte Folded Reload
@@ -110,7 +110,7 @@ define void @caller64() {
 ; RV64I-NEXT:    .cfi_def_cfa s0, 0
 ; RV64I-NEXT:    andi sp, sp, -64
 ; RV64I-NEXT:    mv a0, sp
-; RV64I-NEXT:    call callee@plt
+; RV64I-NEXT:    call callee
 ; RV64I-NEXT:    addi sp, s0, -64
 ; RV64I-NEXT:    ld ra, 56(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 48(sp) # 8-byte Folded Reload
@@ -129,7 +129,7 @@ define void @caller_no_realign64() "no-realign-stack" {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    .cfi_offset ra, -4
 ; RV32I-NEXT:    mv a0, sp
-; RV32I-NEXT:    call callee@plt
+; RV32I-NEXT:    call callee
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -141,7 +141,7 @@ define void @caller_no_realign64() "no-realign-stack" {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    .cfi_offset ra, -8
 ; RV64I-NEXT:    mv a0, sp
-; RV64I-NEXT:    call callee@plt
+; RV64I-NEXT:    call callee
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -163,7 +163,7 @@ define void @caller128() {
 ; RV32I-NEXT:    .cfi_def_cfa s0, 0
 ; RV32I-NEXT:    andi sp, sp, -128
 ; RV32I-NEXT:    mv a0, sp
-; RV32I-NEXT:    call callee@plt
+; RV32I-NEXT:    call callee
 ; RV32I-NEXT:    addi sp, s0, -128
 ; RV32I-NEXT:    lw ra, 124(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 120(sp) # 4-byte Folded Reload
@@ -182,7 +182,7 @@ define void @caller128() {
 ; RV64I-NEXT:    .cfi_def_cfa s0, 0
 ; RV64I-NEXT:    andi sp, sp, -128
 ; RV64I-NEXT:    mv a0, sp
-; RV64I-NEXT:    call callee@plt
+; RV64I-NEXT:    call callee
 ; RV64I-NEXT:    addi sp, s0, -128
 ; RV64I-NEXT:    ld ra, 120(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 112(sp) # 8-byte Folded Reload
@@ -201,7 +201,7 @@ define void @caller_no_realign128() "no-realign-stack" {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    .cfi_offset ra, -4
 ; RV32I-NEXT:    mv a0, sp
-; RV32I-NEXT:    call callee@plt
+; RV32I-NEXT:    call callee
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -213,7 +213,7 @@ define void @caller_no_realign128() "no-realign-stack" {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    .cfi_offset ra, -8
 ; RV64I-NEXT:    mv a0, sp
-; RV64I-NEXT:    call callee@plt
+; RV64I-NEXT:    call callee
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -235,7 +235,7 @@ define void @caller256() {
 ; RV32I-NEXT:    .cfi_def_cfa s0, 0
 ; RV32I-NEXT:    andi sp, sp, -256
 ; RV32I-NEXT:    mv a0, sp
-; RV32I-NEXT:    call callee@plt
+; RV32I-NEXT:    call callee
 ; RV32I-NEXT:    addi sp, s0, -256
 ; RV32I-NEXT:    lw ra, 252(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 248(sp) # 4-byte Folded Reload
@@ -254,7 +254,7 @@ define void @caller256() {
 ; RV64I-NEXT:    .cfi_def_cfa s0, 0
 ; RV64I-NEXT:    andi sp, sp, -256
 ; RV64I-NEXT:    mv a0, sp
-; RV64I-NEXT:    call callee@plt
+; RV64I-NEXT:    call callee
 ; RV64I-NEXT:    addi sp, s0, -256
 ; RV64I-NEXT:    ld ra, 248(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 240(sp) # 8-byte Folded Reload
@@ -273,7 +273,7 @@ define void @caller_no_realign256() "no-realign-stack" {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    .cfi_offset ra, -4
 ; RV32I-NEXT:    mv a0, sp
-; RV32I-NEXT:    call callee@plt
+; RV32I-NEXT:    call callee
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -285,7 +285,7 @@ define void @caller_no_realign256() "no-realign-stack" {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    .cfi_offset ra, -8
 ; RV64I-NEXT:    mv a0, sp
-; RV64I-NEXT:    call callee@plt
+; RV64I-NEXT:    call callee
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -307,7 +307,7 @@ define void @caller512() {
 ; RV32I-NEXT:    .cfi_def_cfa s0, 0
 ; RV32I-NEXT:    andi sp, sp, -512
 ; RV32I-NEXT:    addi a0, sp, 512
-; RV32I-NEXT:    call callee@plt
+; RV32I-NEXT:    call callee
 ; RV32I-NEXT:    addi sp, s0, -1024
 ; RV32I-NEXT:    lw ra, 1020(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 1016(sp) # 4-byte Folded Reload
@@ -326,7 +326,7 @@ define void @caller512() {
 ; RV64I-NEXT:    .cfi_def_cfa s0, 0
 ; RV64I-NEXT:    andi sp, sp, -512
 ; RV64I-NEXT:    addi a0, sp, 512
-; RV64I-NEXT:    call callee@plt
+; RV64I-NEXT:    call callee
 ; RV64I-NEXT:    addi sp, s0, -1024
 ; RV64I-NEXT:    ld ra, 1016(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 1008(sp) # 8-byte Folded Reload
@@ -345,7 +345,7 @@ define void @caller_no_realign512() "no-realign-stack" {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    .cfi_offset ra, -4
 ; RV32I-NEXT:    mv a0, sp
-; RV32I-NEXT:    call callee@plt
+; RV32I-NEXT:    call callee
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -357,7 +357,7 @@ define void @caller_no_realign512() "no-realign-stack" {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    .cfi_offset ra, -8
 ; RV64I-NEXT:    mv a0, sp
-; RV64I-NEXT:    call callee@plt
+; RV64I-NEXT:    call callee
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -380,7 +380,7 @@ define void @caller1024() {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    andi sp, sp, -1024
 ; RV32I-NEXT:    addi a0, sp, 1024
-; RV32I-NEXT:    call callee@plt
+; RV32I-NEXT:    call callee
 ; RV32I-NEXT:    addi sp, s0, -2048
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
@@ -401,7 +401,7 @@ define void @caller1024() {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    andi sp, sp, -1024
 ; RV64I-NEXT:    addi a0, sp, 1024
-; RV64I-NEXT:    call callee@plt
+; RV64I-NEXT:    call callee
 ; RV64I-NEXT:    addi sp, s0, -2048
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ld ra, 2024(sp) # 8-byte Folded Reload
@@ -421,7 +421,7 @@ define void @caller_no_realign1024() "no-realign-stack" {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    .cfi_offset ra, -4
 ; RV32I-NEXT:    mv a0, sp
-; RV32I-NEXT:    call callee@plt
+; RV32I-NEXT:    call callee
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -433,7 +433,7 @@ define void @caller_no_realign1024() "no-realign-stack" {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    .cfi_offset ra, -8
 ; RV64I-NEXT:    mv a0, sp
-; RV64I-NEXT:    call callee@plt
+; RV64I-NEXT:    call callee
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -458,7 +458,7 @@ define void @caller2048() {
 ; RV32I-NEXT:    andi sp, sp, -2048
 ; RV32I-NEXT:    addi a0, sp, 2047
 ; RV32I-NEXT:    addi a0, a0, 1
-; RV32I-NEXT:    call callee@plt
+; RV32I-NEXT:    call callee
 ; RV32I-NEXT:    lui a0, 1
 ; RV32I-NEXT:    sub sp, s0, a0
 ; RV32I-NEXT:    addi sp, sp, 2032
@@ -483,7 +483,7 @@ define void @caller2048() {
 ; RV64I-NEXT:    andi sp, sp, -2048
 ; RV64I-NEXT:    addi a0, sp, 2047
 ; RV64I-NEXT:    addi a0, a0, 1
-; RV64I-NEXT:    call callee@plt
+; RV64I-NEXT:    call callee
 ; RV64I-NEXT:    lui a0, 1
 ; RV64I-NEXT:    sub sp, s0, a0
 ; RV64I-NEXT:    addi sp, sp, 2032
@@ -505,7 +505,7 @@ define void @caller_no_realign2048() "no-realign-stack" {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    .cfi_offset ra, -4
 ; RV32I-NEXT:    mv a0, sp
-; RV32I-NEXT:    call callee@plt
+; RV32I-NEXT:    call callee
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -517,7 +517,7 @@ define void @caller_no_realign2048() "no-realign-stack" {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    .cfi_offset ra, -8
 ; RV64I-NEXT:    mv a0, sp
-; RV64I-NEXT:    call callee@plt
+; RV64I-NEXT:    call callee
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -544,7 +544,7 @@ define void @caller4096() {
 ; RV32I-NEXT:    slli sp, a0, 12
 ; RV32I-NEXT:    lui a0, 1
 ; RV32I-NEXT:    add a0, sp, a0
-; RV32I-NEXT:    call callee@plt
+; RV32I-NEXT:    call callee
 ; RV32I-NEXT:    lui a0, 2
 ; RV32I-NEXT:    sub sp, s0, a0
 ; RV32I-NEXT:    addi a0, a0, -2032
@@ -571,7 +571,7 @@ define void @caller4096() {
 ; RV64I-NEXT:    slli sp, a0, 12
 ; RV64I-NEXT:    lui a0, 1
 ; RV64I-NEXT:    add a0, sp, a0
-; RV64I-NEXT:    call callee@plt
+; RV64I-NEXT:    call callee
 ; RV64I-NEXT:    lui a0, 2
 ; RV64I-NEXT:    sub sp, s0, a0
 ; RV64I-NEXT:    addiw a0, a0, -2032
@@ -593,7 +593,7 @@ define void @caller_no_realign4096() "no-realign-stack" {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    .cfi_offset ra, -4
 ; RV32I-NEXT:    mv a0, sp
-; RV32I-NEXT:    call callee@plt
+; RV32I-NEXT:    call callee
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -605,7 +605,7 @@ define void @caller_no_realign4096() "no-realign-stack" {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    .cfi_offset ra, -8
 ; RV64I-NEXT:    mv a0, sp
-; RV64I-NEXT:    call callee@plt
+; RV64I-NEXT:    call callee
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
diff --git a/llvm/test/CodeGen/RISCV/stack-slot-size.ll b/llvm/test/CodeGen/RISCV/stack-slot-size.ll
index ab6bd20e2f22a7..1388eaac3a67e7 100644
--- a/llvm/test/CodeGen/RISCV/stack-slot-size.ll
+++ b/llvm/test/CodeGen/RISCV/stack-slot-size.ll
@@ -26,7 +26,7 @@ define i32 @caller129() nounwind {
 ; RV32I-NEXT:    sw zero, 4(sp)
 ; RV32I-NEXT:    mv a0, sp
 ; RV32I-NEXT:    sw zero, 0(sp)
-; RV32I-NEXT:    call callee129@plt
+; RV32I-NEXT:    call callee129
 ; RV32I-NEXT:    lw a0, 24(sp)
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 32
@@ -42,7 +42,7 @@ define i32 @caller129() nounwind {
 ; RV64I-NEXT:    sd zero, 8(sp)
 ; RV64I-NEXT:    mv a0, sp
 ; RV64I-NEXT:    sd zero, 0(sp)
-; RV64I-NEXT:    call callee129@plt
+; RV64I-NEXT:    call callee129
 ; RV64I-NEXT:    lw a0, 36(sp)
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 48
@@ -67,7 +67,7 @@ define i32 @caller160() nounwind {
 ; RV32I-NEXT:    sw zero, 4(sp)
 ; RV32I-NEXT:    mv a0, sp
 ; RV32I-NEXT:    sw zero, 0(sp)
-; RV32I-NEXT:    call callee160@plt
+; RV32I-NEXT:    call callee160
 ; RV32I-NEXT:    lw a0, 24(sp)
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 32
@@ -83,7 +83,7 @@ define i32 @caller160() nounwind {
 ; RV64I-NEXT:    sd zero, 8(sp)
 ; RV64I-NEXT:    mv a0, sp
 ; RV64I-NEXT:    sd zero, 0(sp)
-; RV64I-NEXT:    call callee160@plt
+; RV64I-NEXT:    call callee160
 ; RV64I-NEXT:    lw a0, 36(sp)
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 48
@@ -109,7 +109,7 @@ define i32 @caller161() nounwind {
 ; RV32I-NEXT:    sw zero, 4(sp)
 ; RV32I-NEXT:    mv a0, sp
 ; RV32I-NEXT:    sw zero, 0(sp)
-; RV32I-NEXT:    call callee161@plt
+; RV32I-NEXT:    call callee161
 ; RV32I-NEXT:    lw a0, 24(sp)
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 32
@@ -125,7 +125,7 @@ define i32 @caller161() nounwind {
 ; RV64I-NEXT:    sd zero, 8(sp)
 ; RV64I-NEXT:    mv a0, sp
 ; RV64I-NEXT:    sd zero, 0(sp)
-; RV64I-NEXT:    call callee161@plt
+; RV64I-NEXT:    call callee161
 ; RV64I-NEXT:    lw a0, 36(sp)
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 48
diff --git a/llvm/test/CodeGen/RISCV/stack-store-check.ll b/llvm/test/CodeGen/RISCV/stack-store-check.ll
index 651df94bab496e..91cfb2a4cef706 100644
--- a/llvm/test/CodeGen/RISCV/stack-store-check.ll
+++ b/llvm/test/CodeGen/RISCV/stack-store-check.ll
@@ -44,7 +44,7 @@ define void @main() local_unnamed_addr nounwind {
 ; CHECK-NEXT:    addi a1, sp, 600
 ; CHECK-NEXT:    addi a2, sp, 584
 ; CHECK-NEXT:    sw s6, 584(sp)
-; CHECK-NEXT:    call __subtf3@plt
+; CHECK-NEXT:    call __subtf3
 ; CHECK-NEXT:    lw s1, 616(sp)
 ; CHECK-NEXT:    lw s2, 620(sp)
 ; CHECK-NEXT:    lw s3, 624(sp)
@@ -60,7 +60,7 @@ define void @main() local_unnamed_addr nounwind {
 ; CHECK-NEXT:    addi a1, sp, 552
 ; CHECK-NEXT:    addi a2, sp, 536
 ; CHECK-NEXT:    sw s1, 552(sp)
-; CHECK-NEXT:    call __subtf3@plt
+; CHECK-NEXT:    call __subtf3
 ; CHECK-NEXT:    lw a0, 568(sp)
 ; CHECK-NEXT:    sw a0, 40(sp) # 4-byte Folded Spill
 ; CHECK-NEXT:    lw a0, 572(sp)
@@ -80,7 +80,7 @@ define void @main() local_unnamed_addr nounwind {
 ; CHECK-NEXT:    addi a1, sp, 504
 ; CHECK-NEXT:    addi a2, sp, 488
 ; CHECK-NEXT:    sw s6, 504(sp)
-; CHECK-NEXT:    call __addtf3@plt
+; CHECK-NEXT:    call __addtf3
 ; CHECK-NEXT:    lw s9, 520(sp)
 ; CHECK-NEXT:    lw s11, 524(sp)
 ; CHECK-NEXT:    lw s5, 528(sp)
@@ -106,7 +106,7 @@ define void @main() local_unnamed_addr nounwind {
 ; CHECK-NEXT:    addi a1, sp, 312
 ; CHECK-NEXT:    addi a2, sp, 296
 ; CHECK-NEXT:    sw s1, 312(sp)
-; CHECK-NEXT:    call __multf3@plt
+; CHECK-NEXT:    call __multf3
 ; CHECK-NEXT:    lw a0, 328(sp)
 ; CHECK-NEXT:    sw a0, 44(sp) # 4-byte Folded Spill
 ; CHECK-NEXT:    lw a0, 332(sp)
@@ -126,7 +126,7 @@ define void @main() local_unnamed_addr nounwind {
 ; CHECK-NEXT:    addi a1, sp, 456
 ; CHECK-NEXT:    addi a2, sp, 440
 ; CHECK-NEXT:    sw s9, 440(sp)
-; CHECK-NEXT:    call __addtf3@plt
+; CHECK-NEXT:    call __addtf3
 ; CHECK-NEXT:    lw a3, 472(sp)
 ; CHECK-NEXT:    lw a0, 476(sp)
 ; CHECK-NEXT:    lw a1, 480(sp)
@@ -142,7 +142,7 @@ define void @main() local_unnamed_addr nounwind {
 ; CHECK-NEXT:    addi a1, sp, 408
 ; CHECK-NEXT:    addi a2, sp, 392
 ; CHECK-NEXT:    sw a3, 392(sp)
-; CHECK-NEXT:    call __subtf3@plt
+; CHECK-NEXT:    call __subtf3
 ; CHECK-NEXT:    lw a0, 424(sp)
 ; CHECK-NEXT:    lw a1, 436(sp)
 ; CHECK-NEXT:    lw a2, 432(sp)
@@ -171,7 +171,7 @@ define void @main() local_unnamed_addr nounwind {
 ; CHECK-NEXT:    addi a2, sp, 200
 ; CHECK-NEXT:    lw s0, 40(sp) # 4-byte Folded Reload
 ; CHECK-NEXT:    sw s0, 216(sp)
-; CHECK-NEXT:    call __multf3@plt
+; CHECK-NEXT:    call __multf3
 ; CHECK-NEXT:    lw s1, 232(sp)
 ; CHECK-NEXT:    lw a0, 236(sp)
 ; CHECK-NEXT:    sw a0, 0(sp) # 4-byte Folded Spill
@@ -189,7 +189,7 @@ define void @main() local_unnamed_addr nounwind {
 ; CHECK-NEXT:    addi a1, sp, 360
 ; CHECK-NEXT:    addi a2, sp, 344
 ; CHECK-NEXT:    sw s9, 360(sp)
-; CHECK-NEXT:    call __multf3@plt
+; CHECK-NEXT:    call __multf3
 ; CHECK-NEXT:    lw a0, 376(sp)
 ; CHECK-NEXT:    lw a1, 388(sp)
 ; CHECK-NEXT:    lw a2, 384(sp)
@@ -215,7 +215,7 @@ define void @main() local_unnamed_addr nounwind {
 ; CHECK-NEXT:    addi a2, sp, 248
 ; CHECK-NEXT:    lw a3, 44(sp) # 4-byte Folded Reload
 ; CHECK-NEXT:    sw a3, 264(sp)
-; CHECK-NEXT:    call __subtf3@plt
+; CHECK-NEXT:    call __subtf3
 ; CHECK-NEXT:    lw a0, 280(sp)
 ; CHECK-NEXT:    lw a1, 292(sp)
 ; CHECK-NEXT:    lw a2, 288(sp)
@@ -237,7 +237,7 @@ define void @main() local_unnamed_addr nounwind {
 ; CHECK-NEXT:    addi a1, sp, 168
 ; CHECK-NEXT:    addi a2, sp, 152
 ; CHECK-NEXT:    sw s1, 168(sp)
-; CHECK-NEXT:    call __addtf3@plt
+; CHECK-NEXT:    call __addtf3
 ; CHECK-NEXT:    lw a0, 184(sp)
 ; CHECK-NEXT:    lw a1, 196(sp)
 ; CHECK-NEXT:    lw a2, 192(sp)
@@ -259,7 +259,7 @@ define void @main() local_unnamed_addr nounwind {
 ; CHECK-NEXT:    addi a2, sp, 104
 ; CHECK-NEXT:    lw a3, 52(sp) # 4-byte Folded Reload
 ; CHECK-NEXT:    sw a3, 120(sp)
-; CHECK-NEXT:    call __multf3@plt
+; CHECK-NEXT:    call __multf3
 ; CHECK-NEXT:    lw a3, 136(sp)
 ; CHECK-NEXT:    lw a0, 140(sp)
 ; CHECK-NEXT:    lw a1, 144(sp)
@@ -276,7 +276,7 @@ define void @main() local_unnamed_addr nounwind {
 ; CHECK-NEXT:    addi a1, sp, 72
 ; CHECK-NEXT:    addi a2, sp, 56
 ; CHECK-NEXT:    sw a3, 72(sp)
-; CHECK-NEXT:    call __addtf3@plt
+; CHECK-NEXT:    call __addtf3
 ; CHECK-NEXT:    lw a0, 96(sp)
 ; CHECK-NEXT:    lw a1, 100(sp)
 ; CHECK-NEXT:    lw a2, 88(sp)
diff --git a/llvm/test/CodeGen/RISCV/tls-models.ll b/llvm/test/CodeGen/RISCV/tls-models.ll
index d9b37cb0c7c22c..c2ed44073baac1 100644
--- a/llvm/test/CodeGen/RISCV/tls-models.ll
+++ b/llvm/test/CodeGen/RISCV/tls-models.ll
@@ -26,7 +26,7 @@ define ptr @f1() nounwind {
 ; RV32-PIC-NEXT:  .Lpcrel_hi0:
 ; RV32-PIC-NEXT:    auipc a0, %tls_gd_pcrel_hi(unspecified)
 ; RV32-PIC-NEXT:    addi a0, a0, %pcrel_lo(.Lpcrel_hi0)
-; RV32-PIC-NEXT:    call __tls_get_addr@plt
+; RV32-PIC-NEXT:    call __tls_get_addr
 ; RV32-PIC-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-PIC-NEXT:    addi sp, sp, 16
 ; RV32-PIC-NEXT:    ret
@@ -38,7 +38,7 @@ define ptr @f1() nounwind {
 ; RV64-PIC-NEXT:  .Lpcrel_hi0:
 ; RV64-PIC-NEXT:    auipc a0, %tls_gd_pcrel_hi(unspecified)
 ; RV64-PIC-NEXT:    addi a0, a0, %pcrel_lo(.Lpcrel_hi0)
-; RV64-PIC-NEXT:    call __tls_get_addr@plt
+; RV64-PIC-NEXT:    call __tls_get_addr
 ; RV64-PIC-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-PIC-NEXT:    addi sp, sp, 16
 ; RV64-PIC-NEXT:    ret
@@ -73,7 +73,7 @@ define ptr @f2() nounwind {
 ; RV32-PIC-NEXT:  .Lpcrel_hi1:
 ; RV32-PIC-NEXT:    auipc a0, %tls_gd_pcrel_hi(ld)
 ; RV32-PIC-NEXT:    addi a0, a0, %pcrel_lo(.Lpcrel_hi1)
-; RV32-PIC-NEXT:    call __tls_get_addr@plt
+; RV32-PIC-NEXT:    call __tls_get_addr
 ; RV32-PIC-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-PIC-NEXT:    addi sp, sp, 16
 ; RV32-PIC-NEXT:    ret
@@ -85,7 +85,7 @@ define ptr @f2() nounwind {
 ; RV64-PIC-NEXT:  .Lpcrel_hi1:
 ; RV64-PIC-NEXT:    auipc a0, %tls_gd_pcrel_hi(ld)
 ; RV64-PIC-NEXT:    addi a0, a0, %pcrel_lo(.Lpcrel_hi1)
-; RV64-PIC-NEXT:    call __tls_get_addr@plt
+; RV64-PIC-NEXT:    call __tls_get_addr
 ; RV64-PIC-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-PIC-NEXT:    addi sp, sp, 16
 ; RV64-PIC-NEXT:    ret
diff --git a/llvm/test/CodeGen/RISCV/unfold-masked-merge-scalar-variablemask.ll b/llvm/test/CodeGen/RISCV/unfold-masked-merge-scalar-variablemask.ll
index 0772109a5525aa..22c0b798e1468c 100644
--- a/llvm/test/CodeGen/RISCV/unfold-masked-merge-scalar-variablemask.ll
+++ b/llvm/test/CodeGen/RISCV/unfold-masked-merge-scalar-variablemask.ll
@@ -915,7 +915,7 @@ define i32 @in_multiuse_A(i32 %x, i32 %y, i32 %z, i32 %mask) nounwind {
 ; RV32-NEXT:    xor a0, a0, a1
 ; RV32-NEXT:    and s1, a0, a3
 ; RV32-NEXT:    mv a0, s1
-; RV32-NEXT:    call use32@plt
+; RV32-NEXT:    call use32
 ; RV32-NEXT:    xor a0, s1, s0
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
@@ -933,7 +933,7 @@ define i32 @in_multiuse_A(i32 %x, i32 %y, i32 %z, i32 %mask) nounwind {
 ; RV64-NEXT:    xor a0, a0, a1
 ; RV64-NEXT:    and s1, a0, a3
 ; RV64-NEXT:    mv a0, s1
-; RV64-NEXT:    call use32@plt
+; RV64-NEXT:    call use32
 ; RV64-NEXT:    xor a0, s1, s0
 ; RV64-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
@@ -957,7 +957,7 @@ define i32 @in_multiuse_B(i32 %x, i32 %y, i32 %z, i32 %mask) nounwind {
 ; RV32-NEXT:    mv s0, a1
 ; RV32-NEXT:    xor a0, a0, a1
 ; RV32-NEXT:    and s1, a0, a3
-; RV32-NEXT:    call use32@plt
+; RV32-NEXT:    call use32
 ; RV32-NEXT:    xor a0, s1, s0
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
@@ -974,7 +974,7 @@ define i32 @in_multiuse_B(i32 %x, i32 %y, i32 %z, i32 %mask) nounwind {
 ; RV64-NEXT:    mv s0, a1
 ; RV64-NEXT:    xor a0, a0, a1
 ; RV64-NEXT:    and s1, a0, a3
-; RV64-NEXT:    call use32@plt
+; RV64-NEXT:    call use32
 ; RV64-NEXT:    xor a0, s1, s0
 ; RV64-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/urem-lkk.ll b/llvm/test/CodeGen/RISCV/urem-lkk.ll
index 1b2cc1398ec11c..f83a933c0b5c82 100644
--- a/llvm/test/CodeGen/RISCV/urem-lkk.ll
+++ b/llvm/test/CodeGen/RISCV/urem-lkk.ll
@@ -12,7 +12,7 @@ define i32 @fold_urem_positive_odd(i32 %x) nounwind {
 ; RV32I-LABEL: fold_urem_positive_odd:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    li a1, 95
-; RV32I-NEXT:    tail __umodsi3@plt
+; RV32I-NEXT:    tail __umodsi3
 ;
 ; RV32IM-LABEL: fold_urem_positive_odd:
 ; RV32IM:       # %bb.0:
@@ -35,7 +35,7 @@ define i32 @fold_urem_positive_odd(i32 %x) nounwind {
 ; RV64I-NEXT:    slli a0, a0, 32
 ; RV64I-NEXT:    srli a0, a0, 32
 ; RV64I-NEXT:    li a1, 95
-; RV64I-NEXT:    call __umoddi3@plt
+; RV64I-NEXT:    call __umoddi3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -65,7 +65,7 @@ define i32 @fold_urem_positive_even(i32 %x) nounwind {
 ; RV32I-LABEL: fold_urem_positive_even:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    li a1, 1060
-; RV32I-NEXT:    tail __umodsi3@plt
+; RV32I-NEXT:    tail __umodsi3
 ;
 ; RV32IM-LABEL: fold_urem_positive_even:
 ; RV32IM:       # %bb.0:
@@ -85,7 +85,7 @@ define i32 @fold_urem_positive_even(i32 %x) nounwind {
 ; RV64I-NEXT:    slli a0, a0, 32
 ; RV64I-NEXT:    srli a0, a0, 32
 ; RV64I-NEXT:    li a1, 1060
-; RV64I-NEXT:    call __umoddi3@plt
+; RV64I-NEXT:    call __umoddi3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -117,11 +117,11 @@ define i32 @combine_urem_udiv(i32 %x) nounwind {
 ; RV32I-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    li a1, 95
-; RV32I-NEXT:    call __umodsi3@plt
+; RV32I-NEXT:    call __umodsi3
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    li a1, 95
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __udivsi3@plt
+; RV32I-NEXT:    call __udivsi3
 ; RV32I-NEXT:    add a0, s1, a0
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
@@ -154,11 +154,11 @@ define i32 @combine_urem_udiv(i32 %x) nounwind {
 ; RV64I-NEXT:    srli s0, a0, 32
 ; RV64I-NEXT:    li a1, 95
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __umoddi3@plt
+; RV64I-NEXT:    call __umoddi3
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    li a1, 95
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __udivdi3@plt
+; RV64I-NEXT:    call __udivdi3
 ; RV64I-NEXT:    add a0, s1, a0
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
@@ -226,7 +226,7 @@ define i64 @dont_fold_urem_i64(i64 %x) nounwind {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 98
 ; RV32I-NEXT:    li a3, 0
-; RV32I-NEXT:    call __umoddi3@plt
+; RV32I-NEXT:    call __umoddi3
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -237,7 +237,7 @@ define i64 @dont_fold_urem_i64(i64 %x) nounwind {
 ; RV32IM-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IM-NEXT:    li a2, 98
 ; RV32IM-NEXT:    li a3, 0
-; RV32IM-NEXT:    call __umoddi3@plt
+; RV32IM-NEXT:    call __umoddi3
 ; RV32IM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IM-NEXT:    addi sp, sp, 16
 ; RV32IM-NEXT:    ret
@@ -245,7 +245,7 @@ define i64 @dont_fold_urem_i64(i64 %x) nounwind {
 ; RV64I-LABEL: dont_fold_urem_i64:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    li a1, 98
-; RV64I-NEXT:    tail __umoddi3@plt
+; RV64I-NEXT:    tail __umoddi3
 ;
 ; RV64IM-LABEL: dont_fold_urem_i64:
 ; RV64IM:       # %bb.0:
diff --git a/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll b/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll
index f629c0d178913c..4544cbaf852138 100644
--- a/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll
+++ b/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll
@@ -13,7 +13,7 @@ define i1 @test_urem_odd(i13 %X) nounwind {
 ; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    lui a1, 1
 ; RV32-NEXT:    addi a1, a1, -819
-; RV32-NEXT:    call __mulsi3@plt
+; RV32-NEXT:    call __mulsi3
 ; RV32-NEXT:    slli a0, a0, 19
 ; RV32-NEXT:    srli a0, a0, 19
 ; RV32-NEXT:    sltiu a0, a0, 1639
@@ -27,7 +27,7 @@ define i1 @test_urem_odd(i13 %X) nounwind {
 ; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-NEXT:    lui a1, 1
 ; RV64-NEXT:    addiw a1, a1, -819
-; RV64-NEXT:    call __muldi3@plt
+; RV64-NEXT:    call __muldi3
 ; RV64-NEXT:    slli a0, a0, 51
 ; RV64-NEXT:    srli a0, a0, 51
 ; RV64-NEXT:    sltiu a0, a0, 1639
@@ -86,7 +86,7 @@ define i1 @test_urem_even(i27 %X) nounwind {
 ; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    lui a1, 28087
 ; RV32-NEXT:    addi a1, a1, -585
-; RV32-NEXT:    call __mulsi3@plt
+; RV32-NEXT:    call __mulsi3
 ; RV32-NEXT:    slli a1, a0, 26
 ; RV32-NEXT:    slli a0, a0, 5
 ; RV32-NEXT:    srli a0, a0, 6
@@ -106,7 +106,7 @@ define i1 @test_urem_even(i27 %X) nounwind {
 ; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-NEXT:    lui a1, 28087
 ; RV64-NEXT:    addiw a1, a1, -585
-; RV64-NEXT:    call __muldi3@plt
+; RV64-NEXT:    call __muldi3
 ; RV64-NEXT:    slli a1, a0, 26
 ; RV64-NEXT:    slli a0, a0, 37
 ; RV64-NEXT:    srli a0, a0, 38
@@ -259,7 +259,7 @@ define i1 @test_urem_negative_odd(i9 %X) nounwind {
 ; RV32-NEXT:    addi sp, sp, -16
 ; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    li a1, 307
-; RV32-NEXT:    call __mulsi3@plt
+; RV32-NEXT:    call __mulsi3
 ; RV32-NEXT:    andi a0, a0, 511
 ; RV32-NEXT:    sltiu a0, a0, 2
 ; RV32-NEXT:    xori a0, a0, 1
@@ -272,7 +272,7 @@ define i1 @test_urem_negative_odd(i9 %X) nounwind {
 ; RV64-NEXT:    addi sp, sp, -16
 ; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-NEXT:    li a1, 307
-; RV64-NEXT:    call __muldi3@plt
+; RV64-NEXT:    call __muldi3
 ; RV64-NEXT:    andi a0, a0, 511
 ; RV64-NEXT:    sltiu a0, a0, 2
 ; RV64-NEXT:    xori a0, a0, 1
@@ -338,7 +338,7 @@ define void @test_urem_vec(ptr %X) nounwind {
 ; RV32-NEXT:    srli s2, a1, 11
 ; RV32-NEXT:    andi a0, a1, 2047
 ; RV32-NEXT:    li a1, 683
-; RV32-NEXT:    call __mulsi3@plt
+; RV32-NEXT:    call __mulsi3
 ; RV32-NEXT:    slli a1, a0, 10
 ; RV32-NEXT:    slli a0, a0, 21
 ; RV32-NEXT:    srli a0, a0, 22
@@ -347,13 +347,13 @@ define void @test_urem_vec(ptr %X) nounwind {
 ; RV32-NEXT:    sltiu s3, a0, 342
 ; RV32-NEXT:    li a1, 819
 ; RV32-NEXT:    mv a0, s1
-; RV32-NEXT:    call __mulsi3@plt
+; RV32-NEXT:    call __mulsi3
 ; RV32-NEXT:    addi a0, a0, -1638
 ; RV32-NEXT:    andi a0, a0, 2047
 ; RV32-NEXT:    sltiu s1, a0, 2
 ; RV32-NEXT:    li a1, 1463
 ; RV32-NEXT:    mv a0, s2
-; RV32-NEXT:    call __mulsi3@plt
+; RV32-NEXT:    call __mulsi3
 ; RV32-NEXT:    addi a0, a0, -1463
 ; RV32-NEXT:    andi a0, a0, 2047
 ; RV32-NEXT:    sltiu a0, a0, 293
@@ -395,7 +395,7 @@ define void @test_urem_vec(ptr %X) nounwind {
 ; RV64-NEXT:    srli s2, a0, 11
 ; RV64-NEXT:    andi a0, a0, 2047
 ; RV64-NEXT:    li a1, 683
-; RV64-NEXT:    call __muldi3@plt
+; RV64-NEXT:    call __muldi3
 ; RV64-NEXT:    slli a1, a0, 10
 ; RV64-NEXT:    slli a0, a0, 53
 ; RV64-NEXT:    srli a0, a0, 54
@@ -404,13 +404,13 @@ define void @test_urem_vec(ptr %X) nounwind {
 ; RV64-NEXT:    sltiu s3, a0, 342
 ; RV64-NEXT:    li a1, 1463
 ; RV64-NEXT:    mv a0, s2
-; RV64-NEXT:    call __muldi3@plt
+; RV64-NEXT:    call __muldi3
 ; RV64-NEXT:    addi a0, a0, -1463
 ; RV64-NEXT:    andi a0, a0, 2047
 ; RV64-NEXT:    sltiu s2, a0, 293
 ; RV64-NEXT:    li a1, 819
 ; RV64-NEXT:    mv a0, s1
-; RV64-NEXT:    call __muldi3@plt
+; RV64-NEXT:    call __muldi3
 ; RV64-NEXT:    addi a0, a0, -1638
 ; RV64-NEXT:    andi a0, a0, 2047
 ; RV64-NEXT:    sltiu a0, a0, 2
diff --git a/llvm/test/CodeGen/RISCV/urem-vector-lkk.ll b/llvm/test/CodeGen/RISCV/urem-vector-lkk.ll
index 32aca29d16e9b9..eea8e64f2dddb2 100644
--- a/llvm/test/CodeGen/RISCV/urem-vector-lkk.ll
+++ b/llvm/test/CodeGen/RISCV/urem-vector-lkk.ll
@@ -26,19 +26,19 @@ define <4 x i16> @fold_urem_vec_1(<4 x i16> %x) nounwind {
 ; RV32I-NEXT:    mv s3, a0
 ; RV32I-NEXT:    li a1, 95
 ; RV32I-NEXT:    mv a0, a2
-; RV32I-NEXT:    call __umodsi3@plt
+; RV32I-NEXT:    call __umodsi3
 ; RV32I-NEXT:    mv s4, a0
 ; RV32I-NEXT:    li a1, 124
 ; RV32I-NEXT:    mv a0, s2
-; RV32I-NEXT:    call __umodsi3@plt
+; RV32I-NEXT:    call __umodsi3
 ; RV32I-NEXT:    mv s2, a0
 ; RV32I-NEXT:    li a1, 98
 ; RV32I-NEXT:    mv a0, s1
-; RV32I-NEXT:    call __umodsi3@plt
+; RV32I-NEXT:    call __umodsi3
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    li a1, 1003
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __umodsi3@plt
+; RV32I-NEXT:    call __umodsi3
 ; RV32I-NEXT:    sh a0, 6(s3)
 ; RV32I-NEXT:    sh s1, 4(s3)
 ; RV32I-NEXT:    sh s2, 2(s3)
@@ -104,19 +104,19 @@ define <4 x i16> @fold_urem_vec_1(<4 x i16> %x) nounwind {
 ; RV64I-NEXT:    mv s3, a0
 ; RV64I-NEXT:    li a1, 95
 ; RV64I-NEXT:    mv a0, a2
-; RV64I-NEXT:    call __umoddi3@plt
+; RV64I-NEXT:    call __umoddi3
 ; RV64I-NEXT:    mv s4, a0
 ; RV64I-NEXT:    li a1, 124
 ; RV64I-NEXT:    mv a0, s2
-; RV64I-NEXT:    call __umoddi3@plt
+; RV64I-NEXT:    call __umoddi3
 ; RV64I-NEXT:    mv s2, a0
 ; RV64I-NEXT:    li a1, 98
 ; RV64I-NEXT:    mv a0, s1
-; RV64I-NEXT:    call __umoddi3@plt
+; RV64I-NEXT:    call __umoddi3
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    li a1, 1003
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __umoddi3@plt
+; RV64I-NEXT:    call __umoddi3
 ; RV64I-NEXT:    sh a0, 6(s3)
 ; RV64I-NEXT:    sh s1, 4(s3)
 ; RV64I-NEXT:    sh s2, 2(s3)
@@ -186,19 +186,19 @@ define <4 x i16> @fold_urem_vec_2(<4 x i16> %x) nounwind {
 ; RV32I-NEXT:    mv s3, a0
 ; RV32I-NEXT:    li a1, 95
 ; RV32I-NEXT:    mv a0, a2
-; RV32I-NEXT:    call __umodsi3@plt
+; RV32I-NEXT:    call __umodsi3
 ; RV32I-NEXT:    mv s4, a0
 ; RV32I-NEXT:    li a1, 95
 ; RV32I-NEXT:    mv a0, s2
-; RV32I-NEXT:    call __umodsi3@plt
+; RV32I-NEXT:    call __umodsi3
 ; RV32I-NEXT:    mv s2, a0
 ; RV32I-NEXT:    li a1, 95
 ; RV32I-NEXT:    mv a0, s1
-; RV32I-NEXT:    call __umodsi3@plt
+; RV32I-NEXT:    call __umodsi3
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    li a1, 95
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __umodsi3@plt
+; RV32I-NEXT:    call __umodsi3
 ; RV32I-NEXT:    sh a0, 6(s3)
 ; RV32I-NEXT:    sh s1, 4(s3)
 ; RV32I-NEXT:    sh s2, 2(s3)
@@ -255,19 +255,19 @@ define <4 x i16> @fold_urem_vec_2(<4 x i16> %x) nounwind {
 ; RV64I-NEXT:    mv s3, a0
 ; RV64I-NEXT:    li a1, 95
 ; RV64I-NEXT:    mv a0, a2
-; RV64I-NEXT:    call __umoddi3@plt
+; RV64I-NEXT:    call __umoddi3
 ; RV64I-NEXT:    mv s4, a0
 ; RV64I-NEXT:    li a1, 95
 ; RV64I-NEXT:    mv a0, s2
-; RV64I-NEXT:    call __umoddi3@plt
+; RV64I-NEXT:    call __umoddi3
 ; RV64I-NEXT:    mv s2, a0
 ; RV64I-NEXT:    li a1, 95
 ; RV64I-NEXT:    mv a0, s1
-; RV64I-NEXT:    call __umoddi3@plt
+; RV64I-NEXT:    call __umoddi3
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    li a1, 95
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __umoddi3@plt
+; RV64I-NEXT:    call __umoddi3
 ; RV64I-NEXT:    sh a0, 6(s3)
 ; RV64I-NEXT:    sh s1, 4(s3)
 ; RV64I-NEXT:    sh s2, 2(s3)
@@ -334,35 +334,35 @@ define <4 x i16> @combine_urem_udiv(<4 x i16> %x) nounwind {
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    li a1, 95
 ; RV32I-NEXT:    mv a0, s4
-; RV32I-NEXT:    call __umodsi3@plt
+; RV32I-NEXT:    call __umodsi3
 ; RV32I-NEXT:    mv s5, a0
 ; RV32I-NEXT:    li a1, 95
 ; RV32I-NEXT:    mv a0, s3
-; RV32I-NEXT:    call __umodsi3@plt
+; RV32I-NEXT:    call __umodsi3
 ; RV32I-NEXT:    mv s6, a0
 ; RV32I-NEXT:    li a1, 95
 ; RV32I-NEXT:    mv a0, s2
-; RV32I-NEXT:    call __umodsi3@plt
+; RV32I-NEXT:    call __umodsi3
 ; RV32I-NEXT:    mv s7, a0
 ; RV32I-NEXT:    li a1, 95
 ; RV32I-NEXT:    mv a0, s1
-; RV32I-NEXT:    call __umodsi3@plt
+; RV32I-NEXT:    call __umodsi3
 ; RV32I-NEXT:    mv s8, a0
 ; RV32I-NEXT:    li a1, 95
 ; RV32I-NEXT:    mv a0, s4
-; RV32I-NEXT:    call __udivsi3@plt
+; RV32I-NEXT:    call __udivsi3
 ; RV32I-NEXT:    mv s4, a0
 ; RV32I-NEXT:    li a1, 95
 ; RV32I-NEXT:    mv a0, s3
-; RV32I-NEXT:    call __udivsi3@plt
+; RV32I-NEXT:    call __udivsi3
 ; RV32I-NEXT:    mv s3, a0
 ; RV32I-NEXT:    li a1, 95
 ; RV32I-NEXT:    mv a0, s2
-; RV32I-NEXT:    call __udivsi3@plt
+; RV32I-NEXT:    call __udivsi3
 ; RV32I-NEXT:    mv s2, a0
 ; RV32I-NEXT:    li a1, 95
 ; RV32I-NEXT:    mv a0, s1
-; RV32I-NEXT:    call __udivsi3@plt
+; RV32I-NEXT:    call __udivsi3
 ; RV32I-NEXT:    add a0, s8, a0
 ; RV32I-NEXT:    add s2, s7, s2
 ; RV32I-NEXT:    add s3, s6, s3
@@ -435,35 +435,35 @@ define <4 x i16> @combine_urem_udiv(<4 x i16> %x) nounwind {
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    li a1, 95
 ; RV64I-NEXT:    mv a0, s4
-; RV64I-NEXT:    call __umoddi3@plt
+; RV64I-NEXT:    call __umoddi3
 ; RV64I-NEXT:    mv s5, a0
 ; RV64I-NEXT:    li a1, 95
 ; RV64I-NEXT:    mv a0, s3
-; RV64I-NEXT:    call __umoddi3@plt
+; RV64I-NEXT:    call __umoddi3
 ; RV64I-NEXT:    mv s6, a0
 ; RV64I-NEXT:    li a1, 95
 ; RV64I-NEXT:    mv a0, s2
-; RV64I-NEXT:    call __umoddi3@plt
+; RV64I-NEXT:    call __umoddi3
 ; RV64I-NEXT:    mv s7, a0
 ; RV64I-NEXT:    li a1, 95
 ; RV64I-NEXT:    mv a0, s1
-; RV64I-NEXT:    call __umoddi3@plt
+; RV64I-NEXT:    call __umoddi3
 ; RV64I-NEXT:    mv s8, a0
 ; RV64I-NEXT:    li a1, 95
 ; RV64I-NEXT:    mv a0, s4
-; RV64I-NEXT:    call __udivdi3@plt
+; RV64I-NEXT:    call __udivdi3
 ; RV64I-NEXT:    mv s4, a0
 ; RV64I-NEXT:    li a1, 95
 ; RV64I-NEXT:    mv a0, s3
-; RV64I-NEXT:    call __udivdi3@plt
+; RV64I-NEXT:    call __udivdi3
 ; RV64I-NEXT:    mv s3, a0
 ; RV64I-NEXT:    li a1, 95
 ; RV64I-NEXT:    mv a0, s2
-; RV64I-NEXT:    call __udivdi3@plt
+; RV64I-NEXT:    call __udivdi3
 ; RV64I-NEXT:    mv s2, a0
 ; RV64I-NEXT:    li a1, 95
 ; RV64I-NEXT:    mv a0, s1
-; RV64I-NEXT:    call __udivdi3@plt
+; RV64I-NEXT:    call __udivdi3
 ; RV64I-NEXT:    add a0, s8, a0
 ; RV64I-NEXT:    add s2, s7, s2
 ; RV64I-NEXT:    add s3, s6, s3
@@ -538,7 +538,7 @@ define <4 x i16> @dont_fold_urem_power_of_two(<4 x i16> %x) nounwind {
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    li a1, 95
 ; RV32I-NEXT:    mv a0, a2
-; RV32I-NEXT:    call __umodsi3@plt
+; RV32I-NEXT:    call __umodsi3
 ; RV32I-NEXT:    andi a1, s3, 63
 ; RV32I-NEXT:    andi a2, s2, 31
 ; RV32I-NEXT:    andi s1, s1, 7
@@ -590,7 +590,7 @@ define <4 x i16> @dont_fold_urem_power_of_two(<4 x i16> %x) nounwind {
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    li a1, 95
 ; RV64I-NEXT:    mv a0, a2
-; RV64I-NEXT:    call __umoddi3@plt
+; RV64I-NEXT:    call __umoddi3
 ; RV64I-NEXT:    andi a1, s3, 63
 ; RV64I-NEXT:    andi a2, s2, 31
 ; RV64I-NEXT:    andi s1, s1, 7
@@ -646,16 +646,16 @@ define <4 x i16> @dont_fold_urem_one(<4 x i16> %x) nounwind {
 ; RV32I-NEXT:    mv s2, a0
 ; RV32I-NEXT:    li a1, 654
 ; RV32I-NEXT:    mv a0, a2
-; RV32I-NEXT:    call __umodsi3@plt
+; RV32I-NEXT:    call __umodsi3
 ; RV32I-NEXT:    mv s3, a0
 ; RV32I-NEXT:    li a1, 23
 ; RV32I-NEXT:    mv a0, s1
-; RV32I-NEXT:    call __umodsi3@plt
+; RV32I-NEXT:    call __umodsi3
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    lui a0, 1
 ; RV32I-NEXT:    addi a1, a0, 1327
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __umodsi3@plt
+; RV32I-NEXT:    call __umodsi3
 ; RV32I-NEXT:    sh a0, 6(s2)
 ; RV32I-NEXT:    sh s1, 4(s2)
 ; RV32I-NEXT:    sh s3, 2(s2)
@@ -712,16 +712,16 @@ define <4 x i16> @dont_fold_urem_one(<4 x i16> %x) nounwind {
 ; RV64I-NEXT:    mv s2, a0
 ; RV64I-NEXT:    li a1, 654
 ; RV64I-NEXT:    mv a0, a2
-; RV64I-NEXT:    call __umoddi3@plt
+; RV64I-NEXT:    call __umoddi3
 ; RV64I-NEXT:    mv s3, a0
 ; RV64I-NEXT:    li a1, 23
 ; RV64I-NEXT:    mv a0, s1
-; RV64I-NEXT:    call __umoddi3@plt
+; RV64I-NEXT:    call __umoddi3
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    lui a0, 1
 ; RV64I-NEXT:    addiw a1, a0, 1327
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __umoddi3@plt
+; RV64I-NEXT:    call __umoddi3
 ; RV64I-NEXT:    sh a0, 6(s2)
 ; RV64I-NEXT:    sh s1, 4(s2)
 ; RV64I-NEXT:    sh s3, 2(s2)
@@ -803,21 +803,21 @@ define <4 x i64> @dont_fold_urem_i64(<4 x i64> %x) nounwind {
 ; RV32I-NEXT:    li a2, 1
 ; RV32I-NEXT:    mv a0, a3
 ; RV32I-NEXT:    li a3, 0
-; RV32I-NEXT:    call __umoddi3@plt
+; RV32I-NEXT:    call __umoddi3
 ; RV32I-NEXT:    mv s7, a0
 ; RV32I-NEXT:    mv s8, a1
 ; RV32I-NEXT:    li a2, 654
 ; RV32I-NEXT:    mv a0, s4
 ; RV32I-NEXT:    mv a1, s5
 ; RV32I-NEXT:    li a3, 0
-; RV32I-NEXT:    call __umoddi3@plt
+; RV32I-NEXT:    call __umoddi3
 ; RV32I-NEXT:    mv s4, a0
 ; RV32I-NEXT:    mv s5, a1
 ; RV32I-NEXT:    li a2, 23
 ; RV32I-NEXT:    mv a0, s2
 ; RV32I-NEXT:    mv a1, s3
 ; RV32I-NEXT:    li a3, 0
-; RV32I-NEXT:    call __umoddi3@plt
+; RV32I-NEXT:    call __umoddi3
 ; RV32I-NEXT:    mv s2, a0
 ; RV32I-NEXT:    mv s3, a1
 ; RV32I-NEXT:    lui a0, 1
@@ -825,7 +825,7 @@ define <4 x i64> @dont_fold_urem_i64(<4 x i64> %x) nounwind {
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    mv a1, s1
 ; RV32I-NEXT:    li a3, 0
-; RV32I-NEXT:    call __umoddi3@plt
+; RV32I-NEXT:    call __umoddi3
 ; RV32I-NEXT:    sw a1, 28(s6)
 ; RV32I-NEXT:    sw a0, 24(s6)
 ; RV32I-NEXT:    sw s3, 20(s6)
@@ -872,21 +872,21 @@ define <4 x i64> @dont_fold_urem_i64(<4 x i64> %x) nounwind {
 ; RV32IM-NEXT:    li a2, 1
 ; RV32IM-NEXT:    mv a0, a3
 ; RV32IM-NEXT:    li a3, 0
-; RV32IM-NEXT:    call __umoddi3@plt
+; RV32IM-NEXT:    call __umoddi3
 ; RV32IM-NEXT:    mv s7, a0
 ; RV32IM-NEXT:    mv s8, a1
 ; RV32IM-NEXT:    li a2, 654
 ; RV32IM-NEXT:    mv a0, s4
 ; RV32IM-NEXT:    mv a1, s5
 ; RV32IM-NEXT:    li a3, 0
-; RV32IM-NEXT:    call __umoddi3@plt
+; RV32IM-NEXT:    call __umoddi3
 ; RV32IM-NEXT:    mv s4, a0
 ; RV32IM-NEXT:    mv s5, a1
 ; RV32IM-NEXT:    li a2, 23
 ; RV32IM-NEXT:    mv a0, s2
 ; RV32IM-NEXT:    mv a1, s3
 ; RV32IM-NEXT:    li a3, 0
-; RV32IM-NEXT:    call __umoddi3@plt
+; RV32IM-NEXT:    call __umoddi3
 ; RV32IM-NEXT:    mv s2, a0
 ; RV32IM-NEXT:    mv s3, a1
 ; RV32IM-NEXT:    lui a0, 1
@@ -894,7 +894,7 @@ define <4 x i64> @dont_fold_urem_i64(<4 x i64> %x) nounwind {
 ; RV32IM-NEXT:    mv a0, s0
 ; RV32IM-NEXT:    mv a1, s1
 ; RV32IM-NEXT:    li a3, 0
-; RV32IM-NEXT:    call __umoddi3@plt
+; RV32IM-NEXT:    call __umoddi3
 ; RV32IM-NEXT:    sw a1, 28(s6)
 ; RV32IM-NEXT:    sw a0, 24(s6)
 ; RV32IM-NEXT:    sw s3, 20(s6)
@@ -930,16 +930,16 @@ define <4 x i64> @dont_fold_urem_i64(<4 x i64> %x) nounwind {
 ; RV64I-NEXT:    mv s2, a0
 ; RV64I-NEXT:    li a1, 654
 ; RV64I-NEXT:    mv a0, a2
-; RV64I-NEXT:    call __umoddi3@plt
+; RV64I-NEXT:    call __umoddi3
 ; RV64I-NEXT:    mv s3, a0
 ; RV64I-NEXT:    li a1, 23
 ; RV64I-NEXT:    mv a0, s1
-; RV64I-NEXT:    call __umoddi3@plt
+; RV64I-NEXT:    call __umoddi3
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    lui a0, 1
 ; RV64I-NEXT:    addiw a1, a0, 1327
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __umoddi3@plt
+; RV64I-NEXT:    call __umoddi3
 ; RV64I-NEXT:    sd a0, 24(s2)
 ; RV64I-NEXT:    sd s1, 16(s2)
 ; RV64I-NEXT:    sd s3, 8(s2)
diff --git a/llvm/test/CodeGen/RISCV/vararg.ll b/llvm/test/CodeGen/RISCV/vararg.ll
index 67d1bfac4d614b..8adce4bc466dbe 100644
--- a/llvm/test/CodeGen/RISCV/vararg.ll
+++ b/llvm/test/CodeGen/RISCV/vararg.ll
@@ -269,7 +269,7 @@ define i32 @va1_va_arg_alloca(ptr %fmt, ...) nounwind {
 ; ILP32-ILP32F-FPELIM-NEXT:    andi a0, a0, -16
 ; ILP32-ILP32F-FPELIM-NEXT:    sub a0, sp, a0
 ; ILP32-ILP32F-FPELIM-NEXT:    mv sp, a0
-; ILP32-ILP32F-FPELIM-NEXT:    call notdead@plt
+; ILP32-ILP32F-FPELIM-NEXT:    call notdead
 ; ILP32-ILP32F-FPELIM-NEXT:    mv a0, s1
 ; ILP32-ILP32F-FPELIM-NEXT:    addi sp, s0, -16
 ; ILP32-ILP32F-FPELIM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -299,7 +299,7 @@ define i32 @va1_va_arg_alloca(ptr %fmt, ...) nounwind {
 ; ILP32-ILP32F-WITHFP-NEXT:    andi a0, a0, -16
 ; ILP32-ILP32F-WITHFP-NEXT:    sub a0, sp, a0
 ; ILP32-ILP32F-WITHFP-NEXT:    mv sp, a0
-; ILP32-ILP32F-WITHFP-NEXT:    call notdead@plt
+; ILP32-ILP32F-WITHFP-NEXT:    call notdead
 ; ILP32-ILP32F-WITHFP-NEXT:    mv a0, s1
 ; ILP32-ILP32F-WITHFP-NEXT:    addi sp, s0, -16
 ; ILP32-ILP32F-WITHFP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -329,7 +329,7 @@ define i32 @va1_va_arg_alloca(ptr %fmt, ...) nounwind {
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    andi a0, a0, -16
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    sub a0, sp, a0
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    mv sp, a0
-; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    call notdead@plt
+; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    call notdead
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    mv a0, s1
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    addi sp, s0, -16
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -361,7 +361,7 @@ define i32 @va1_va_arg_alloca(ptr %fmt, ...) nounwind {
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    andi a0, a0, -16
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    sub a0, sp, a0
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    mv sp, a0
-; LP64-LP64F-LP64D-FPELIM-NEXT:    call notdead@plt
+; LP64-LP64F-LP64D-FPELIM-NEXT:    call notdead
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    mv a0, s1
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    addi sp, s0, -32
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
@@ -393,7 +393,7 @@ define i32 @va1_va_arg_alloca(ptr %fmt, ...) nounwind {
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    andi a0, a0, -16
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    sub a0, sp, a0
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    mv sp, a0
-; LP64-LP64F-LP64D-WITHFP-NEXT:    call notdead@plt
+; LP64-LP64F-LP64D-WITHFP-NEXT:    call notdead
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    mv a0, s1
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addi sp, s0, -32
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
@@ -419,7 +419,7 @@ define void @va1_caller() nounwind {
 ; ILP32-ILP32F-FPELIM-NEXT:    lui a3, 261888
 ; ILP32-ILP32F-FPELIM-NEXT:    li a4, 2
 ; ILP32-ILP32F-FPELIM-NEXT:    li a2, 0
-; ILP32-ILP32F-FPELIM-NEXT:    call va1@plt
+; ILP32-ILP32F-FPELIM-NEXT:    call va1
 ; ILP32-ILP32F-FPELIM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; ILP32-ILP32F-FPELIM-NEXT:    addi sp, sp, 16
 ; ILP32-ILP32F-FPELIM-NEXT:    ret
@@ -433,7 +433,7 @@ define void @va1_caller() nounwind {
 ; ILP32-ILP32F-WITHFP-NEXT:    lui a3, 261888
 ; ILP32-ILP32F-WITHFP-NEXT:    li a4, 2
 ; ILP32-ILP32F-WITHFP-NEXT:    li a2, 0
-; ILP32-ILP32F-WITHFP-NEXT:    call va1@plt
+; ILP32-ILP32F-WITHFP-NEXT:    call va1
 ; ILP32-ILP32F-WITHFP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; ILP32-ILP32F-WITHFP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; ILP32-ILP32F-WITHFP-NEXT:    addi sp, sp, 16
@@ -446,7 +446,7 @@ define void @va1_caller() nounwind {
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    lui a3, 261888
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    li a4, 2
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    li a2, 0
-; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    call va1@plt
+; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    call va1
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    addi sp, sp, 16
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    ret
@@ -458,7 +458,7 @@ define void @va1_caller() nounwind {
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    li a1, 1023
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    slli a1, a1, 52
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    li a2, 2
-; LP64-LP64F-LP64D-FPELIM-NEXT:    call va1@plt
+; LP64-LP64F-LP64D-FPELIM-NEXT:    call va1
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    addi sp, sp, 16
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    ret
@@ -472,7 +472,7 @@ define void @va1_caller() nounwind {
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    li a1, 1023
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    slli a1, a1, 52
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    li a2, 2
-; LP64-LP64F-LP64D-WITHFP-NEXT:    call va1@plt
+; LP64-LP64F-LP64D-WITHFP-NEXT:    call va1
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addi sp, sp, 16
@@ -725,7 +725,7 @@ define void @va2_caller() nounwind {
 ; ILP32-ILP32F-FPELIM-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; ILP32-ILP32F-FPELIM-NEXT:    lui a3, 261888
 ; ILP32-ILP32F-FPELIM-NEXT:    li a2, 0
-; ILP32-ILP32F-FPELIM-NEXT:    call va2@plt
+; ILP32-ILP32F-FPELIM-NEXT:    call va2
 ; ILP32-ILP32F-FPELIM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; ILP32-ILP32F-FPELIM-NEXT:    addi sp, sp, 16
 ; ILP32-ILP32F-FPELIM-NEXT:    ret
@@ -738,7 +738,7 @@ define void @va2_caller() nounwind {
 ; ILP32-ILP32F-WITHFP-NEXT:    addi s0, sp, 16
 ; ILP32-ILP32F-WITHFP-NEXT:    lui a3, 261888
 ; ILP32-ILP32F-WITHFP-NEXT:    li a2, 0
-; ILP32-ILP32F-WITHFP-NEXT:    call va2@plt
+; ILP32-ILP32F-WITHFP-NEXT:    call va2
 ; ILP32-ILP32F-WITHFP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; ILP32-ILP32F-WITHFP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; ILP32-ILP32F-WITHFP-NEXT:    addi sp, sp, 16
@@ -750,7 +750,7 @@ define void @va2_caller() nounwind {
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    lui a3, 261888
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    li a2, 0
-; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    call va2@plt
+; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    call va2
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    addi sp, sp, 16
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    ret
@@ -761,7 +761,7 @@ define void @va2_caller() nounwind {
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    li a1, 1023
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    slli a1, a1, 52
-; LP64-LP64F-LP64D-FPELIM-NEXT:    call va2@plt
+; LP64-LP64F-LP64D-FPELIM-NEXT:    call va2
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    addi sp, sp, 16
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    ret
@@ -774,7 +774,7 @@ define void @va2_caller() nounwind {
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addi s0, sp, 16
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    li a1, 1023
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    slli a1, a1, 52
-; LP64-LP64F-LP64D-WITHFP-NEXT:    call va2@plt
+; LP64-LP64F-LP64D-WITHFP-NEXT:    call va2
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addi sp, sp, 16
@@ -1040,7 +1040,7 @@ define void @va3_caller() nounwind {
 ; ILP32-ILP32F-FPELIM-NEXT:    lui a5, 262144
 ; ILP32-ILP32F-FPELIM-NEXT:    li a2, 0
 ; ILP32-ILP32F-FPELIM-NEXT:    li a4, 0
-; ILP32-ILP32F-FPELIM-NEXT:    call va3@plt
+; ILP32-ILP32F-FPELIM-NEXT:    call va3
 ; ILP32-ILP32F-FPELIM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; ILP32-ILP32F-FPELIM-NEXT:    addi sp, sp, 16
 ; ILP32-ILP32F-FPELIM-NEXT:    ret
@@ -1056,7 +1056,7 @@ define void @va3_caller() nounwind {
 ; ILP32-ILP32F-WITHFP-NEXT:    lui a5, 262144
 ; ILP32-ILP32F-WITHFP-NEXT:    li a2, 0
 ; ILP32-ILP32F-WITHFP-NEXT:    li a4, 0
-; ILP32-ILP32F-WITHFP-NEXT:    call va3@plt
+; ILP32-ILP32F-WITHFP-NEXT:    call va3
 ; ILP32-ILP32F-WITHFP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; ILP32-ILP32F-WITHFP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; ILP32-ILP32F-WITHFP-NEXT:    addi sp, sp, 16
@@ -1071,7 +1071,7 @@ define void @va3_caller() nounwind {
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    lui a5, 262144
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    li a2, 0
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    li a4, 0
-; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    call va3@plt
+; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    call va3
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    addi sp, sp, 16
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    ret
@@ -1084,7 +1084,7 @@ define void @va3_caller() nounwind {
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    slli a2, a2, 62
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    li a0, 2
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    li a1, 1111
-; LP64-LP64F-LP64D-FPELIM-NEXT:    call va3@plt
+; LP64-LP64F-LP64D-FPELIM-NEXT:    call va3
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    addi sp, sp, 16
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    ret
@@ -1099,7 +1099,7 @@ define void @va3_caller() nounwind {
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    slli a2, a2, 62
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    li a0, 2
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    li a1, 1111
-; LP64-LP64F-LP64D-WITHFP-NEXT:    call va3@plt
+; LP64-LP64F-LP64D-WITHFP-NEXT:    call va3
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addi sp, sp, 16
@@ -1127,7 +1127,7 @@ define i32 @va4_va_copy(i32 %argno, ...) nounwind {
 ; ILP32-ILP32F-FPELIM-NEXT:    addi a0, sp, 24
 ; ILP32-ILP32F-FPELIM-NEXT:    sw a0, 4(sp)
 ; ILP32-ILP32F-FPELIM-NEXT:    sw a0, 0(sp)
-; ILP32-ILP32F-FPELIM-NEXT:    call notdead@plt
+; ILP32-ILP32F-FPELIM-NEXT:    call notdead
 ; ILP32-ILP32F-FPELIM-NEXT:    lw a0, 4(sp)
 ; ILP32-ILP32F-FPELIM-NEXT:    addi a0, a0, 3
 ; ILP32-ILP32F-FPELIM-NEXT:    andi a0, a0, -4
@@ -1170,7 +1170,7 @@ define i32 @va4_va_copy(i32 %argno, ...) nounwind {
 ; ILP32-ILP32F-WITHFP-NEXT:    addi a0, s0, 8
 ; ILP32-ILP32F-WITHFP-NEXT:    sw a0, -16(s0)
 ; ILP32-ILP32F-WITHFP-NEXT:    sw a0, -20(s0)
-; ILP32-ILP32F-WITHFP-NEXT:    call notdead@plt
+; ILP32-ILP32F-WITHFP-NEXT:    call notdead
 ; ILP32-ILP32F-WITHFP-NEXT:    lw a0, -16(s0)
 ; ILP32-ILP32F-WITHFP-NEXT:    addi a0, a0, 3
 ; ILP32-ILP32F-WITHFP-NEXT:    andi a0, a0, -4
@@ -1212,7 +1212,7 @@ define i32 @va4_va_copy(i32 %argno, ...) nounwind {
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    addi a0, sp, 24
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    sw a0, 4(sp)
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    sw a0, 0(sp)
-; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    call notdead@plt
+; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    call notdead
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    lw a0, 4(sp)
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    addi a0, a0, 3
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    andi a0, a0, -4
@@ -1253,7 +1253,7 @@ define i32 @va4_va_copy(i32 %argno, ...) nounwind {
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    addi a0, sp, 48
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a0, 8(sp)
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a0, 0(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT:    call notdead@plt
+; LP64-LP64F-LP64D-FPELIM-NEXT:    call notdead
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    ld a0, 8(sp)
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    addi a0, a0, 3
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    andi a0, a0, -4
@@ -1296,7 +1296,7 @@ define i32 @va4_va_copy(i32 %argno, ...) nounwind {
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addi a0, s0, 16
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a0, -32(s0)
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a0, -40(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT:    call notdead@plt
+; LP64-LP64F-LP64D-WITHFP-NEXT:    call notdead
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    ld a0, -32(s0)
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addi a0, a0, 3
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    andi a0, a0, -4
@@ -1384,7 +1384,7 @@ define void @va5_aligned_stack_caller() nounwind {
 ; ILP32-ILP32F-FPELIM-NEXT:    li a4, 13
 ; ILP32-ILP32F-FPELIM-NEXT:    li a7, 4
 ; ILP32-ILP32F-FPELIM-NEXT:    sw a5, 32(sp)
-; ILP32-ILP32F-FPELIM-NEXT:    call va5_aligned_stack_callee@plt
+; ILP32-ILP32F-FPELIM-NEXT:    call va5_aligned_stack_callee
 ; ILP32-ILP32F-FPELIM-NEXT:    lw ra, 60(sp) # 4-byte Folded Reload
 ; ILP32-ILP32F-FPELIM-NEXT:    addi sp, sp, 64
 ; ILP32-ILP32F-FPELIM-NEXT:    ret
@@ -1429,7 +1429,7 @@ define void @va5_aligned_stack_caller() nounwind {
 ; ILP32-ILP32F-WITHFP-NEXT:    li a4, 13
 ; ILP32-ILP32F-WITHFP-NEXT:    li a7, 4
 ; ILP32-ILP32F-WITHFP-NEXT:    sw a5, -32(s0)
-; ILP32-ILP32F-WITHFP-NEXT:    call va5_aligned_stack_callee@plt
+; ILP32-ILP32F-WITHFP-NEXT:    call va5_aligned_stack_callee
 ; ILP32-ILP32F-WITHFP-NEXT:    lw ra, 60(sp) # 4-byte Folded Reload
 ; ILP32-ILP32F-WITHFP-NEXT:    lw s0, 56(sp) # 4-byte Folded Reload
 ; ILP32-ILP32F-WITHFP-NEXT:    addi sp, sp, 64
@@ -1473,7 +1473,7 @@ define void @va5_aligned_stack_caller() nounwind {
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    li a4, 13
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    li a7, 4
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    sw a5, 32(sp)
-; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    call va5_aligned_stack_callee@plt
+; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    call va5_aligned_stack_callee
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    lw ra, 60(sp) # 4-byte Folded Reload
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    addi sp, sp, 64
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    ret
@@ -1503,7 +1503,7 @@ define void @va5_aligned_stack_caller() nounwind {
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    li a5, 13
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    li a7, 14
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    sd t0, 0(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT:    call va5_aligned_stack_callee@plt
+; LP64-LP64F-LP64D-FPELIM-NEXT:    call va5_aligned_stack_callee
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    addi sp, sp, 48
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    ret
@@ -1535,7 +1535,7 @@ define void @va5_aligned_stack_caller() nounwind {
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    li a5, 13
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    li a7, 14
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    sd t0, 0(sp)
-; LP64-LP64F-LP64D-WITHFP-NEXT:    call va5_aligned_stack_callee@plt
+; LP64-LP64F-LP64D-WITHFP-NEXT:    call va5_aligned_stack_callee
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addi sp, sp, 48
diff --git a/llvm/test/CodeGen/RISCV/vlenb.ll b/llvm/test/CodeGen/RISCV/vlenb.ll
index 6ce7f537262368..1d6c1b5d1acbdc 100644
--- a/llvm/test/CodeGen/RISCV/vlenb.ll
+++ b/llvm/test/CodeGen/RISCV/vlenb.ll
@@ -53,7 +53,7 @@ define i32 @sink_to_use_call() {
 ; CHECK-NEXT:    .cfi_offset ra, -4
 ; CHECK-NEXT:    .cfi_offset s0, -8
 ; CHECK-NEXT:    csrr s0, vlenb
-; CHECK-NEXT:    call unknown@plt
+; CHECK-NEXT:    call unknown
 ; CHECK-NEXT:    mv a0, s0
 ; CHECK-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; CHECK-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
@@ -75,7 +75,7 @@ define void @machine_licm() {
 ; CHECK-NEXT:  .LBB4_1: # %loop
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    call use@plt
+; CHECK-NEXT:    call use
 ; CHECK-NEXT:    j .LBB4_1
 entry:
   br label %loop
diff --git a/llvm/test/CodeGen/RISCV/zbb-cmp-combine.ll b/llvm/test/CodeGen/RISCV/zbb-cmp-combine.ll
index 74bf1a8929cbc7..6d1521c719edd3 100644
--- a/llvm/test/CodeGen/RISCV/zbb-cmp-combine.ll
+++ b/llvm/test/CodeGen/RISCV/zbb-cmp-combine.ll
@@ -218,11 +218,11 @@ define i1 @flo(float %c, float %a, float %b) {
 ; CHECK-RV64I-NEXT:    mv s1, a0
 ; CHECK-RV64I-NEXT:    mv a0, a1
 ; CHECK-RV64I-NEXT:    mv a1, s1
-; CHECK-RV64I-NEXT:    call __gesf2@plt
+; CHECK-RV64I-NEXT:    call __gesf2
 ; CHECK-RV64I-NEXT:    mv s2, a0
 ; CHECK-RV64I-NEXT:    mv a0, s0
 ; CHECK-RV64I-NEXT:    mv a1, s1
-; CHECK-RV64I-NEXT:    call __gesf2@plt
+; CHECK-RV64I-NEXT:    call __gesf2
 ; CHECK-RV64I-NEXT:    or a0, s2, a0
 ; CHECK-RV64I-NEXT:    slti a0, a0, 0
 ; CHECK-RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
@@ -264,11 +264,11 @@ define i1 @dlo(double %c, double %a, double %b) {
 ; CHECK-NEXT:    mv s1, a0
 ; CHECK-NEXT:    mv a0, a1
 ; CHECK-NEXT:    mv a1, s1
-; CHECK-NEXT:    call __gedf2@plt
+; CHECK-NEXT:    call __gedf2
 ; CHECK-NEXT:    mv s2, a0
 ; CHECK-NEXT:    mv a0, s0
 ; CHECK-NEXT:    mv a1, s1
-; CHECK-NEXT:    call __gedf2@plt
+; CHECK-NEXT:    call __gedf2
 ; CHECK-NEXT:    or a0, s2, a0
 ; CHECK-NEXT:    slti a0, a0, 0
 ; CHECK-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/zcmp-with-float.ll b/llvm/test/CodeGen/RISCV/zcmp-with-float.ll
index 72213019b8c872..93f95e9709b6a4 100644
--- a/llvm/test/CodeGen/RISCV/zcmp-with-float.ll
+++ b/llvm/test/CodeGen/RISCV/zcmp-with-float.ll
@@ -15,7 +15,7 @@ define float @foo(float %arg) {
 ; RV32-NEXT:    .cfi_offset ra, -4
 ; RV32-NEXT:    .cfi_offset fs0, -20
 ; RV32-NEXT:    fmv.s fs0, fa0
-; RV32-NEXT:    call callee@plt
+; RV32-NEXT:    call callee
 ; RV32-NEXT:    fmv.s fa0, fs0
 ; RV32-NEXT:    flw fs0, 12(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    cm.popret {ra}, 32
@@ -28,7 +28,7 @@ define float @foo(float %arg) {
 ; RV64-NEXT:    .cfi_offset ra, -8
 ; RV64-NEXT:    .cfi_offset fs0, -20
 ; RV64-NEXT:    fmv.s fs0, fa0
-; RV64-NEXT:    call callee@plt
+; RV64-NEXT:    call callee
 ; RV64-NEXT:    fmv.s fa0, fs0
 ; RV64-NEXT:    flw fs0, 12(sp) # 4-byte Folded Reload
 ; RV64-NEXT:    cm.popret {ra}, 32
@@ -48,12 +48,12 @@ define void @foo2(i32 %x, float %y) {
 ; RV32-NEXT:    .cfi_offset fs0, -20
 ; RV32-NEXT:    fmv.s fs0, fa0
 ; RV32-NEXT:    mv s0, a0
-; RV32-NEXT:    call bar@plt
+; RV32-NEXT:    call bar
 ; RV32-NEXT:    mv a0, s0
 ; RV32-NEXT:    fmv.s fa0, fs0
 ; RV32-NEXT:    flw fs0, 12(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    cm.pop {ra, s0}, 32
-; RV32-NEXT:    tail func@plt
+; RV32-NEXT:    tail func
 ;
 ; RV64-LABEL: foo2:
 ; RV64:       # %bb.0: # %entry
@@ -65,12 +65,12 @@ define void @foo2(i32 %x, float %y) {
 ; RV64-NEXT:    .cfi_offset fs0, -20
 ; RV64-NEXT:    fmv.s fs0, fa0
 ; RV64-NEXT:    mv s0, a0
-; RV64-NEXT:    call bar@plt
+; RV64-NEXT:    call bar
 ; RV64-NEXT:    mv a0, s0
 ; RV64-NEXT:    fmv.s fa0, fs0
 ; RV64-NEXT:    flw fs0, 12(sp) # 4-byte Folded Reload
 ; RV64-NEXT:    cm.pop {ra, s0}, 32
-; RV64-NEXT:    tail func@plt
+; RV64-NEXT:    tail func
 entry:
   tail call void @bar()
   tail call void @func(i32 %x, float %y)
diff --git a/llvm/test/CodeGen/RISCV/zfh-half-intrinsics-strict.ll b/llvm/test/CodeGen/RISCV/zfh-half-intrinsics-strict.ll
index 4d573d672bbdc9..348ca8e5296218 100644
--- a/llvm/test/CodeGen/RISCV/zfh-half-intrinsics-strict.ll
+++ b/llvm/test/CodeGen/RISCV/zfh-half-intrinsics-strict.ll
@@ -68,7 +68,7 @@ define half @floor_f16(half %a) nounwind strictfp {
 ; RV32IZFH-NEXT:    addi sp, sp, -16
 ; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFH-NEXT:    fcvt.s.h fa0, fa0
-; RV32IZFH-NEXT:    call floorf@plt
+; RV32IZFH-NEXT:    call floorf
 ; RV32IZFH-NEXT:    fcvt.h.s fa0, fa0
 ; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFH-NEXT:    addi sp, sp, 16
@@ -79,7 +79,7 @@ define half @floor_f16(half %a) nounwind strictfp {
 ; RV64IZFH-NEXT:    addi sp, sp, -16
 ; RV64IZFH-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZFH-NEXT:    fcvt.s.h fa0, fa0
-; RV64IZFH-NEXT:    call floorf@plt
+; RV64IZFH-NEXT:    call floorf
 ; RV64IZFH-NEXT:    fcvt.h.s fa0, fa0
 ; RV64IZFH-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFH-NEXT:    addi sp, sp, 16
@@ -90,7 +90,7 @@ define half @floor_f16(half %a) nounwind strictfp {
 ; RV32IZHINX-NEXT:    addi sp, sp, -16
 ; RV32IZHINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZHINX-NEXT:    fcvt.s.h a0, a0
-; RV32IZHINX-NEXT:    call floorf@plt
+; RV32IZHINX-NEXT:    call floorf
 ; RV32IZHINX-NEXT:    fcvt.h.s a0, a0
 ; RV32IZHINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZHINX-NEXT:    addi sp, sp, 16
@@ -101,7 +101,7 @@ define half @floor_f16(half %a) nounwind strictfp {
 ; RV64IZHINX-NEXT:    addi sp, sp, -16
 ; RV64IZHINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZHINX-NEXT:    fcvt.s.h a0, a0
-; RV64IZHINX-NEXT:    call floorf@plt
+; RV64IZHINX-NEXT:    call floorf
 ; RV64IZHINX-NEXT:    fcvt.h.s a0, a0
 ; RV64IZHINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZHINX-NEXT:    addi sp, sp, 16
@@ -112,7 +112,7 @@ define half @floor_f16(half %a) nounwind strictfp {
 ; RV32IZDINXZHINX-NEXT:    addi sp, sp, -16
 ; RV32IZDINXZHINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZDINXZHINX-NEXT:    fcvt.s.h a0, a0
-; RV32IZDINXZHINX-NEXT:    call floorf@plt
+; RV32IZDINXZHINX-NEXT:    call floorf
 ; RV32IZDINXZHINX-NEXT:    fcvt.h.s a0, a0
 ; RV32IZDINXZHINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZDINXZHINX-NEXT:    addi sp, sp, 16
@@ -123,7 +123,7 @@ define half @floor_f16(half %a) nounwind strictfp {
 ; RV64IZDINXZHINX-NEXT:    addi sp, sp, -16
 ; RV64IZDINXZHINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZDINXZHINX-NEXT:    fcvt.s.h a0, a0
-; RV64IZDINXZHINX-NEXT:    call floorf@plt
+; RV64IZDINXZHINX-NEXT:    call floorf
 ; RV64IZDINXZHINX-NEXT:    fcvt.h.s a0, a0
 ; RV64IZDINXZHINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZDINXZHINX-NEXT:    addi sp, sp, 16
@@ -140,7 +140,7 @@ define half @ceil_f16(half %a) nounwind strictfp {
 ; RV32IZFH-NEXT:    addi sp, sp, -16
 ; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFH-NEXT:    fcvt.s.h fa0, fa0
-; RV32IZFH-NEXT:    call ceilf@plt
+; RV32IZFH-NEXT:    call ceilf
 ; RV32IZFH-NEXT:    fcvt.h.s fa0, fa0
 ; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFH-NEXT:    addi sp, sp, 16
@@ -151,7 +151,7 @@ define half @ceil_f16(half %a) nounwind strictfp {
 ; RV64IZFH-NEXT:    addi sp, sp, -16
 ; RV64IZFH-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZFH-NEXT:    fcvt.s.h fa0, fa0
-; RV64IZFH-NEXT:    call ceilf@plt
+; RV64IZFH-NEXT:    call ceilf
 ; RV64IZFH-NEXT:    fcvt.h.s fa0, fa0
 ; RV64IZFH-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFH-NEXT:    addi sp, sp, 16
@@ -162,7 +162,7 @@ define half @ceil_f16(half %a) nounwind strictfp {
 ; RV32IZHINX-NEXT:    addi sp, sp, -16
 ; RV32IZHINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZHINX-NEXT:    fcvt.s.h a0, a0
-; RV32IZHINX-NEXT:    call ceilf@plt
+; RV32IZHINX-NEXT:    call ceilf
 ; RV32IZHINX-NEXT:    fcvt.h.s a0, a0
 ; RV32IZHINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZHINX-NEXT:    addi sp, sp, 16
@@ -173,7 +173,7 @@ define half @ceil_f16(half %a) nounwind strictfp {
 ; RV64IZHINX-NEXT:    addi sp, sp, -16
 ; RV64IZHINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZHINX-NEXT:    fcvt.s.h a0, a0
-; RV64IZHINX-NEXT:    call ceilf@plt
+; RV64IZHINX-NEXT:    call ceilf
 ; RV64IZHINX-NEXT:    fcvt.h.s a0, a0
 ; RV64IZHINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZHINX-NEXT:    addi sp, sp, 16
@@ -184,7 +184,7 @@ define half @ceil_f16(half %a) nounwind strictfp {
 ; RV32IZDINXZHINX-NEXT:    addi sp, sp, -16
 ; RV32IZDINXZHINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZDINXZHINX-NEXT:    fcvt.s.h a0, a0
-; RV32IZDINXZHINX-NEXT:    call ceilf@plt
+; RV32IZDINXZHINX-NEXT:    call ceilf
 ; RV32IZDINXZHINX-NEXT:    fcvt.h.s a0, a0
 ; RV32IZDINXZHINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZDINXZHINX-NEXT:    addi sp, sp, 16
@@ -195,7 +195,7 @@ define half @ceil_f16(half %a) nounwind strictfp {
 ; RV64IZDINXZHINX-NEXT:    addi sp, sp, -16
 ; RV64IZDINXZHINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZDINXZHINX-NEXT:    fcvt.s.h a0, a0
-; RV64IZDINXZHINX-NEXT:    call ceilf@plt
+; RV64IZDINXZHINX-NEXT:    call ceilf
 ; RV64IZDINXZHINX-NEXT:    fcvt.h.s a0, a0
 ; RV64IZDINXZHINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZDINXZHINX-NEXT:    addi sp, sp, 16
@@ -212,7 +212,7 @@ define half @trunc_f16(half %a) nounwind strictfp {
 ; RV32IZFH-NEXT:    addi sp, sp, -16
 ; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFH-NEXT:    fcvt.s.h fa0, fa0
-; RV32IZFH-NEXT:    call truncf@plt
+; RV32IZFH-NEXT:    call truncf
 ; RV32IZFH-NEXT:    fcvt.h.s fa0, fa0
 ; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFH-NEXT:    addi sp, sp, 16
@@ -223,7 +223,7 @@ define half @trunc_f16(half %a) nounwind strictfp {
 ; RV64IZFH-NEXT:    addi sp, sp, -16
 ; RV64IZFH-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZFH-NEXT:    fcvt.s.h fa0, fa0
-; RV64IZFH-NEXT:    call truncf@plt
+; RV64IZFH-NEXT:    call truncf
 ; RV64IZFH-NEXT:    fcvt.h.s fa0, fa0
 ; RV64IZFH-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFH-NEXT:    addi sp, sp, 16
@@ -234,7 +234,7 @@ define half @trunc_f16(half %a) nounwind strictfp {
 ; RV32IZHINX-NEXT:    addi sp, sp, -16
 ; RV32IZHINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZHINX-NEXT:    fcvt.s.h a0, a0
-; RV32IZHINX-NEXT:    call truncf@plt
+; RV32IZHINX-NEXT:    call truncf
 ; RV32IZHINX-NEXT:    fcvt.h.s a0, a0
 ; RV32IZHINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZHINX-NEXT:    addi sp, sp, 16
@@ -245,7 +245,7 @@ define half @trunc_f16(half %a) nounwind strictfp {
 ; RV64IZHINX-NEXT:    addi sp, sp, -16
 ; RV64IZHINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZHINX-NEXT:    fcvt.s.h a0, a0
-; RV64IZHINX-NEXT:    call truncf@plt
+; RV64IZHINX-NEXT:    call truncf
 ; RV64IZHINX-NEXT:    fcvt.h.s a0, a0
 ; RV64IZHINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZHINX-NEXT:    addi sp, sp, 16
@@ -256,7 +256,7 @@ define half @trunc_f16(half %a) nounwind strictfp {
 ; RV32IZDINXZHINX-NEXT:    addi sp, sp, -16
 ; RV32IZDINXZHINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZDINXZHINX-NEXT:    fcvt.s.h a0, a0
-; RV32IZDINXZHINX-NEXT:    call truncf@plt
+; RV32IZDINXZHINX-NEXT:    call truncf
 ; RV32IZDINXZHINX-NEXT:    fcvt.h.s a0, a0
 ; RV32IZDINXZHINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZDINXZHINX-NEXT:    addi sp, sp, 16
@@ -267,7 +267,7 @@ define half @trunc_f16(half %a) nounwind strictfp {
 ; RV64IZDINXZHINX-NEXT:    addi sp, sp, -16
 ; RV64IZDINXZHINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZDINXZHINX-NEXT:    fcvt.s.h a0, a0
-; RV64IZDINXZHINX-NEXT:    call truncf@plt
+; RV64IZDINXZHINX-NEXT:    call truncf
 ; RV64IZDINXZHINX-NEXT:    fcvt.h.s a0, a0
 ; RV64IZDINXZHINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZDINXZHINX-NEXT:    addi sp, sp, 16
@@ -284,7 +284,7 @@ define half @rint_f16(half %a) nounwind strictfp {
 ; RV32IZFH-NEXT:    addi sp, sp, -16
 ; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFH-NEXT:    fcvt.s.h fa0, fa0
-; RV32IZFH-NEXT:    call rintf@plt
+; RV32IZFH-NEXT:    call rintf
 ; RV32IZFH-NEXT:    fcvt.h.s fa0, fa0
 ; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFH-NEXT:    addi sp, sp, 16
@@ -295,7 +295,7 @@ define half @rint_f16(half %a) nounwind strictfp {
 ; RV64IZFH-NEXT:    addi sp, sp, -16
 ; RV64IZFH-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZFH-NEXT:    fcvt.s.h fa0, fa0
-; RV64IZFH-NEXT:    call rintf@plt
+; RV64IZFH-NEXT:    call rintf
 ; RV64IZFH-NEXT:    fcvt.h.s fa0, fa0
 ; RV64IZFH-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFH-NEXT:    addi sp, sp, 16
@@ -306,7 +306,7 @@ define half @rint_f16(half %a) nounwind strictfp {
 ; RV32IZHINX-NEXT:    addi sp, sp, -16
 ; RV32IZHINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZHINX-NEXT:    fcvt.s.h a0, a0
-; RV32IZHINX-NEXT:    call rintf@plt
+; RV32IZHINX-NEXT:    call rintf
 ; RV32IZHINX-NEXT:    fcvt.h.s a0, a0
 ; RV32IZHINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZHINX-NEXT:    addi sp, sp, 16
@@ -317,7 +317,7 @@ define half @rint_f16(half %a) nounwind strictfp {
 ; RV64IZHINX-NEXT:    addi sp, sp, -16
 ; RV64IZHINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZHINX-NEXT:    fcvt.s.h a0, a0
-; RV64IZHINX-NEXT:    call rintf@plt
+; RV64IZHINX-NEXT:    call rintf
 ; RV64IZHINX-NEXT:    fcvt.h.s a0, a0
 ; RV64IZHINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZHINX-NEXT:    addi sp, sp, 16
@@ -328,7 +328,7 @@ define half @rint_f16(half %a) nounwind strictfp {
 ; RV32IZDINXZHINX-NEXT:    addi sp, sp, -16
 ; RV32IZDINXZHINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZDINXZHINX-NEXT:    fcvt.s.h a0, a0
-; RV32IZDINXZHINX-NEXT:    call rintf@plt
+; RV32IZDINXZHINX-NEXT:    call rintf
 ; RV32IZDINXZHINX-NEXT:    fcvt.h.s a0, a0
 ; RV32IZDINXZHINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZDINXZHINX-NEXT:    addi sp, sp, 16
@@ -339,7 +339,7 @@ define half @rint_f16(half %a) nounwind strictfp {
 ; RV64IZDINXZHINX-NEXT:    addi sp, sp, -16
 ; RV64IZDINXZHINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZDINXZHINX-NEXT:    fcvt.s.h a0, a0
-; RV64IZDINXZHINX-NEXT:    call rintf@plt
+; RV64IZDINXZHINX-NEXT:    call rintf
 ; RV64IZDINXZHINX-NEXT:    fcvt.h.s a0, a0
 ; RV64IZDINXZHINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZDINXZHINX-NEXT:    addi sp, sp, 16
@@ -356,7 +356,7 @@ define half @nearbyint_f16(half %a) nounwind strictfp {
 ; RV32IZFH-NEXT:    addi sp, sp, -16
 ; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFH-NEXT:    fcvt.s.h fa0, fa0
-; RV32IZFH-NEXT:    call nearbyintf@plt
+; RV32IZFH-NEXT:    call nearbyintf
 ; RV32IZFH-NEXT:    fcvt.h.s fa0, fa0
 ; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFH-NEXT:    addi sp, sp, 16
@@ -367,7 +367,7 @@ define half @nearbyint_f16(half %a) nounwind strictfp {
 ; RV64IZFH-NEXT:    addi sp, sp, -16
 ; RV64IZFH-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZFH-NEXT:    fcvt.s.h fa0, fa0
-; RV64IZFH-NEXT:    call nearbyintf@plt
+; RV64IZFH-NEXT:    call nearbyintf
 ; RV64IZFH-NEXT:    fcvt.h.s fa0, fa0
 ; RV64IZFH-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFH-NEXT:    addi sp, sp, 16
@@ -378,7 +378,7 @@ define half @nearbyint_f16(half %a) nounwind strictfp {
 ; RV32IZHINX-NEXT:    addi sp, sp, -16
 ; RV32IZHINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZHINX-NEXT:    fcvt.s.h a0, a0
-; RV32IZHINX-NEXT:    call nearbyintf@plt
+; RV32IZHINX-NEXT:    call nearbyintf
 ; RV32IZHINX-NEXT:    fcvt.h.s a0, a0
 ; RV32IZHINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZHINX-NEXT:    addi sp, sp, 16
@@ -389,7 +389,7 @@ define half @nearbyint_f16(half %a) nounwind strictfp {
 ; RV64IZHINX-NEXT:    addi sp, sp, -16
 ; RV64IZHINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZHINX-NEXT:    fcvt.s.h a0, a0
-; RV64IZHINX-NEXT:    call nearbyintf@plt
+; RV64IZHINX-NEXT:    call nearbyintf
 ; RV64IZHINX-NEXT:    fcvt.h.s a0, a0
 ; RV64IZHINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZHINX-NEXT:    addi sp, sp, 16
@@ -400,7 +400,7 @@ define half @nearbyint_f16(half %a) nounwind strictfp {
 ; RV32IZDINXZHINX-NEXT:    addi sp, sp, -16
 ; RV32IZDINXZHINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZDINXZHINX-NEXT:    fcvt.s.h a0, a0
-; RV32IZDINXZHINX-NEXT:    call nearbyintf@plt
+; RV32IZDINXZHINX-NEXT:    call nearbyintf
 ; RV32IZDINXZHINX-NEXT:    fcvt.h.s a0, a0
 ; RV32IZDINXZHINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZDINXZHINX-NEXT:    addi sp, sp, 16
@@ -411,7 +411,7 @@ define half @nearbyint_f16(half %a) nounwind strictfp {
 ; RV64IZDINXZHINX-NEXT:    addi sp, sp, -16
 ; RV64IZDINXZHINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZDINXZHINX-NEXT:    fcvt.s.h a0, a0
-; RV64IZDINXZHINX-NEXT:    call nearbyintf@plt
+; RV64IZDINXZHINX-NEXT:    call nearbyintf
 ; RV64IZDINXZHINX-NEXT:    fcvt.h.s a0, a0
 ; RV64IZDINXZHINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZDINXZHINX-NEXT:    addi sp, sp, 16
@@ -428,7 +428,7 @@ define half @round_f16(half %a) nounwind strictfp {
 ; RV32IZFH-NEXT:    addi sp, sp, -16
 ; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFH-NEXT:    fcvt.s.h fa0, fa0
-; RV32IZFH-NEXT:    call roundf@plt
+; RV32IZFH-NEXT:    call roundf
 ; RV32IZFH-NEXT:    fcvt.h.s fa0, fa0
 ; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFH-NEXT:    addi sp, sp, 16
@@ -439,7 +439,7 @@ define half @round_f16(half %a) nounwind strictfp {
 ; RV64IZFH-NEXT:    addi sp, sp, -16
 ; RV64IZFH-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZFH-NEXT:    fcvt.s.h fa0, fa0
-; RV64IZFH-NEXT:    call roundf@plt
+; RV64IZFH-NEXT:    call roundf
 ; RV64IZFH-NEXT:    fcvt.h.s fa0, fa0
 ; RV64IZFH-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFH-NEXT:    addi sp, sp, 16
@@ -450,7 +450,7 @@ define half @round_f16(half %a) nounwind strictfp {
 ; RV32IZHINX-NEXT:    addi sp, sp, -16
 ; RV32IZHINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZHINX-NEXT:    fcvt.s.h a0, a0
-; RV32IZHINX-NEXT:    call roundf@plt
+; RV32IZHINX-NEXT:    call roundf
 ; RV32IZHINX-NEXT:    fcvt.h.s a0, a0
 ; RV32IZHINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZHINX-NEXT:    addi sp, sp, 16
@@ -461,7 +461,7 @@ define half @round_f16(half %a) nounwind strictfp {
 ; RV64IZHINX-NEXT:    addi sp, sp, -16
 ; RV64IZHINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZHINX-NEXT:    fcvt.s.h a0, a0
-; RV64IZHINX-NEXT:    call roundf@plt
+; RV64IZHINX-NEXT:    call roundf
 ; RV64IZHINX-NEXT:    fcvt.h.s a0, a0
 ; RV64IZHINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZHINX-NEXT:    addi sp, sp, 16
@@ -472,7 +472,7 @@ define half @round_f16(half %a) nounwind strictfp {
 ; RV32IZDINXZHINX-NEXT:    addi sp, sp, -16
 ; RV32IZDINXZHINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZDINXZHINX-NEXT:    fcvt.s.h a0, a0
-; RV32IZDINXZHINX-NEXT:    call roundf@plt
+; RV32IZDINXZHINX-NEXT:    call roundf
 ; RV32IZDINXZHINX-NEXT:    fcvt.h.s a0, a0
 ; RV32IZDINXZHINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZDINXZHINX-NEXT:    addi sp, sp, 16
@@ -483,7 +483,7 @@ define half @round_f16(half %a) nounwind strictfp {
 ; RV64IZDINXZHINX-NEXT:    addi sp, sp, -16
 ; RV64IZDINXZHINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZDINXZHINX-NEXT:    fcvt.s.h a0, a0
-; RV64IZDINXZHINX-NEXT:    call roundf@plt
+; RV64IZDINXZHINX-NEXT:    call roundf
 ; RV64IZDINXZHINX-NEXT:    fcvt.h.s a0, a0
 ; RV64IZDINXZHINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZDINXZHINX-NEXT:    addi sp, sp, 16
@@ -500,7 +500,7 @@ define half @roundeven_f16(half %a) nounwind strictfp {
 ; RV32IZFH-NEXT:    addi sp, sp, -16
 ; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFH-NEXT:    fcvt.s.h fa0, fa0
-; RV32IZFH-NEXT:    call roundevenf@plt
+; RV32IZFH-NEXT:    call roundevenf
 ; RV32IZFH-NEXT:    fcvt.h.s fa0, fa0
 ; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFH-NEXT:    addi sp, sp, 16
@@ -511,7 +511,7 @@ define half @roundeven_f16(half %a) nounwind strictfp {
 ; RV64IZFH-NEXT:    addi sp, sp, -16
 ; RV64IZFH-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZFH-NEXT:    fcvt.s.h fa0, fa0
-; RV64IZFH-NEXT:    call roundevenf@plt
+; RV64IZFH-NEXT:    call roundevenf
 ; RV64IZFH-NEXT:    fcvt.h.s fa0, fa0
 ; RV64IZFH-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFH-NEXT:    addi sp, sp, 16
@@ -522,7 +522,7 @@ define half @roundeven_f16(half %a) nounwind strictfp {
 ; RV32IZHINX-NEXT:    addi sp, sp, -16
 ; RV32IZHINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZHINX-NEXT:    fcvt.s.h a0, a0
-; RV32IZHINX-NEXT:    call roundevenf@plt
+; RV32IZHINX-NEXT:    call roundevenf
 ; RV32IZHINX-NEXT:    fcvt.h.s a0, a0
 ; RV32IZHINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZHINX-NEXT:    addi sp, sp, 16
@@ -533,7 +533,7 @@ define half @roundeven_f16(half %a) nounwind strictfp {
 ; RV64IZHINX-NEXT:    addi sp, sp, -16
 ; RV64IZHINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZHINX-NEXT:    fcvt.s.h a0, a0
-; RV64IZHINX-NEXT:    call roundevenf@plt
+; RV64IZHINX-NEXT:    call roundevenf
 ; RV64IZHINX-NEXT:    fcvt.h.s a0, a0
 ; RV64IZHINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZHINX-NEXT:    addi sp, sp, 16
@@ -544,7 +544,7 @@ define half @roundeven_f16(half %a) nounwind strictfp {
 ; RV32IZDINXZHINX-NEXT:    addi sp, sp, -16
 ; RV32IZDINXZHINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZDINXZHINX-NEXT:    fcvt.s.h a0, a0
-; RV32IZDINXZHINX-NEXT:    call roundevenf@plt
+; RV32IZDINXZHINX-NEXT:    call roundevenf
 ; RV32IZDINXZHINX-NEXT:    fcvt.h.s a0, a0
 ; RV32IZDINXZHINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZDINXZHINX-NEXT:    addi sp, sp, 16
@@ -555,7 +555,7 @@ define half @roundeven_f16(half %a) nounwind strictfp {
 ; RV64IZDINXZHINX-NEXT:    addi sp, sp, -16
 ; RV64IZDINXZHINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZDINXZHINX-NEXT:    fcvt.s.h a0, a0
-; RV64IZDINXZHINX-NEXT:    call roundevenf@plt
+; RV64IZDINXZHINX-NEXT:    call roundevenf
 ; RV64IZDINXZHINX-NEXT:    fcvt.h.s a0, a0
 ; RV64IZDINXZHINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZDINXZHINX-NEXT:    addi sp, sp, 16
@@ -644,7 +644,7 @@ define i64 @llrint_f16(half %a) nounwind strictfp {
 ; RV32IZFH-NEXT:    addi sp, sp, -16
 ; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFH-NEXT:    fcvt.s.h fa0, fa0
-; RV32IZFH-NEXT:    call llrintf@plt
+; RV32IZFH-NEXT:    call llrintf
 ; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFH-NEXT:    addi sp, sp, 16
 ; RV32IZFH-NEXT:    ret
@@ -659,7 +659,7 @@ define i64 @llrint_f16(half %a) nounwind strictfp {
 ; RV32IZHINX-NEXT:    addi sp, sp, -16
 ; RV32IZHINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZHINX-NEXT:    fcvt.s.h a0, a0
-; RV32IZHINX-NEXT:    call llrintf@plt
+; RV32IZHINX-NEXT:    call llrintf
 ; RV32IZHINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZHINX-NEXT:    addi sp, sp, 16
 ; RV32IZHINX-NEXT:    ret
@@ -674,7 +674,7 @@ define i64 @llrint_f16(half %a) nounwind strictfp {
 ; RV32IZDINXZHINX-NEXT:    addi sp, sp, -16
 ; RV32IZDINXZHINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZDINXZHINX-NEXT:    fcvt.s.h a0, a0
-; RV32IZDINXZHINX-NEXT:    call llrintf@plt
+; RV32IZDINXZHINX-NEXT:    call llrintf
 ; RV32IZDINXZHINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZDINXZHINX-NEXT:    addi sp, sp, 16
 ; RV32IZDINXZHINX-NEXT:    ret
@@ -695,7 +695,7 @@ define i64 @llround_f16(half %a) nounwind strictfp {
 ; RV32IZFH-NEXT:    addi sp, sp, -16
 ; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFH-NEXT:    fcvt.s.h fa0, fa0
-; RV32IZFH-NEXT:    call llroundf@plt
+; RV32IZFH-NEXT:    call llroundf
 ; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFH-NEXT:    addi sp, sp, 16
 ; RV32IZFH-NEXT:    ret
@@ -710,7 +710,7 @@ define i64 @llround_f16(half %a) nounwind strictfp {
 ; RV32IZHINX-NEXT:    addi sp, sp, -16
 ; RV32IZHINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZHINX-NEXT:    fcvt.s.h a0, a0
-; RV32IZHINX-NEXT:    call llroundf@plt
+; RV32IZHINX-NEXT:    call llroundf
 ; RV32IZHINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZHINX-NEXT:    addi sp, sp, 16
 ; RV32IZHINX-NEXT:    ret
@@ -725,7 +725,7 @@ define i64 @llround_f16(half %a) nounwind strictfp {
 ; RV32IZDINXZHINX-NEXT:    addi sp, sp, -16
 ; RV32IZDINXZHINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZDINXZHINX-NEXT:    fcvt.s.h a0, a0
-; RV32IZDINXZHINX-NEXT:    call llroundf@plt
+; RV32IZDINXZHINX-NEXT:    call llroundf
 ; RV32IZDINXZHINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZDINXZHINX-NEXT:    addi sp, sp, 16
 ; RV32IZDINXZHINX-NEXT:    ret
diff --git a/llvm/test/CodeGen/RISCV/zfhmin-half-intrinsics-strict.ll b/llvm/test/CodeGen/RISCV/zfhmin-half-intrinsics-strict.ll
index 0475b941ec0ffe..097d1e0f6ee55b 100644
--- a/llvm/test/CodeGen/RISCV/zfhmin-half-intrinsics-strict.ll
+++ b/llvm/test/CodeGen/RISCV/zfhmin-half-intrinsics-strict.ll
@@ -80,7 +80,7 @@ define half @floor_f16(half %a) nounwind strictfp {
 ; RV32IZFHMIN-NEXT:    addi sp, sp, -16
 ; RV32IZFHMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFHMIN-NEXT:    fcvt.s.h fa0, fa0
-; RV32IZFHMIN-NEXT:    call floorf@plt
+; RV32IZFHMIN-NEXT:    call floorf
 ; RV32IZFHMIN-NEXT:    fcvt.h.s fa0, fa0
 ; RV32IZFHMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFHMIN-NEXT:    addi sp, sp, 16
@@ -91,7 +91,7 @@ define half @floor_f16(half %a) nounwind strictfp {
 ; RV64IZFHMIN-NEXT:    addi sp, sp, -16
 ; RV64IZFHMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZFHMIN-NEXT:    fcvt.s.h fa0, fa0
-; RV64IZFHMIN-NEXT:    call floorf@plt
+; RV64IZFHMIN-NEXT:    call floorf
 ; RV64IZFHMIN-NEXT:    fcvt.h.s fa0, fa0
 ; RV64IZFHMIN-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFHMIN-NEXT:    addi sp, sp, 16
@@ -102,7 +102,7 @@ define half @floor_f16(half %a) nounwind strictfp {
 ; RV32IZHINXMIN-STRICT-NEXT:    addi sp, sp, -16
 ; RV32IZHINXMIN-STRICT-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZHINXMIN-STRICT-NEXT:    fcvt.s.h a0, a0
-; RV32IZHINXMIN-STRICT-NEXT:    call floorf@plt
+; RV32IZHINXMIN-STRICT-NEXT:    call floorf
 ; RV32IZHINXMIN-STRICT-NEXT:    fcvt.h.s a0, a0
 ; RV32IZHINXMIN-STRICT-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZHINXMIN-STRICT-NEXT:    addi sp, sp, 16
@@ -113,7 +113,7 @@ define half @floor_f16(half %a) nounwind strictfp {
 ; RV64IZHINXMIN-STRICT-NEXT:    addi sp, sp, -16
 ; RV64IZHINXMIN-STRICT-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZHINXMIN-STRICT-NEXT:    fcvt.s.h a0, a0
-; RV64IZHINXMIN-STRICT-NEXT:    call floorf@plt
+; RV64IZHINXMIN-STRICT-NEXT:    call floorf
 ; RV64IZHINXMIN-STRICT-NEXT:    fcvt.h.s a0, a0
 ; RV64IZHINXMIN-STRICT-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZHINXMIN-STRICT-NEXT:    addi sp, sp, 16
@@ -124,7 +124,7 @@ define half @floor_f16(half %a) nounwind strictfp {
 ; RV32IZDINXZHINXMIN-NEXT:    addi sp, sp, -16
 ; RV32IZDINXZHINXMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZDINXZHINXMIN-NEXT:    fcvt.s.h a0, a0
-; RV32IZDINXZHINXMIN-NEXT:    call floorf@plt
+; RV32IZDINXZHINXMIN-NEXT:    call floorf
 ; RV32IZDINXZHINXMIN-NEXT:    fcvt.h.s a0, a0
 ; RV32IZDINXZHINXMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZDINXZHINXMIN-NEXT:    addi sp, sp, 16
@@ -135,7 +135,7 @@ define half @floor_f16(half %a) nounwind strictfp {
 ; RV64IZDINXZHINXMIN-NEXT:    addi sp, sp, -16
 ; RV64IZDINXZHINXMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZDINXZHINXMIN-NEXT:    fcvt.s.h a0, a0
-; RV64IZDINXZHINXMIN-NEXT:    call floorf@plt
+; RV64IZDINXZHINXMIN-NEXT:    call floorf
 ; RV64IZDINXZHINXMIN-NEXT:    fcvt.h.s a0, a0
 ; RV64IZDINXZHINXMIN-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZDINXZHINXMIN-NEXT:    addi sp, sp, 16
@@ -152,7 +152,7 @@ define half @ceil_f16(half %a) nounwind strictfp {
 ; RV32IZFHMIN-NEXT:    addi sp, sp, -16
 ; RV32IZFHMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFHMIN-NEXT:    fcvt.s.h fa0, fa0
-; RV32IZFHMIN-NEXT:    call ceilf@plt
+; RV32IZFHMIN-NEXT:    call ceilf
 ; RV32IZFHMIN-NEXT:    fcvt.h.s fa0, fa0
 ; RV32IZFHMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFHMIN-NEXT:    addi sp, sp, 16
@@ -163,7 +163,7 @@ define half @ceil_f16(half %a) nounwind strictfp {
 ; RV64IZFHMIN-NEXT:    addi sp, sp, -16
 ; RV64IZFHMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZFHMIN-NEXT:    fcvt.s.h fa0, fa0
-; RV64IZFHMIN-NEXT:    call ceilf@plt
+; RV64IZFHMIN-NEXT:    call ceilf
 ; RV64IZFHMIN-NEXT:    fcvt.h.s fa0, fa0
 ; RV64IZFHMIN-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFHMIN-NEXT:    addi sp, sp, 16
@@ -174,7 +174,7 @@ define half @ceil_f16(half %a) nounwind strictfp {
 ; RV32IZHINXMIN-STRICT-NEXT:    addi sp, sp, -16
 ; RV32IZHINXMIN-STRICT-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZHINXMIN-STRICT-NEXT:    fcvt.s.h a0, a0
-; RV32IZHINXMIN-STRICT-NEXT:    call ceilf@plt
+; RV32IZHINXMIN-STRICT-NEXT:    call ceilf
 ; RV32IZHINXMIN-STRICT-NEXT:    fcvt.h.s a0, a0
 ; RV32IZHINXMIN-STRICT-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZHINXMIN-STRICT-NEXT:    addi sp, sp, 16
@@ -185,7 +185,7 @@ define half @ceil_f16(half %a) nounwind strictfp {
 ; RV64IZHINXMIN-STRICT-NEXT:    addi sp, sp, -16
 ; RV64IZHINXMIN-STRICT-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZHINXMIN-STRICT-NEXT:    fcvt.s.h a0, a0
-; RV64IZHINXMIN-STRICT-NEXT:    call ceilf@plt
+; RV64IZHINXMIN-STRICT-NEXT:    call ceilf
 ; RV64IZHINXMIN-STRICT-NEXT:    fcvt.h.s a0, a0
 ; RV64IZHINXMIN-STRICT-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZHINXMIN-STRICT-NEXT:    addi sp, sp, 16
@@ -196,7 +196,7 @@ define half @ceil_f16(half %a) nounwind strictfp {
 ; RV32IZDINXZHINXMIN-NEXT:    addi sp, sp, -16
 ; RV32IZDINXZHINXMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZDINXZHINXMIN-NEXT:    fcvt.s.h a0, a0
-; RV32IZDINXZHINXMIN-NEXT:    call ceilf@plt
+; RV32IZDINXZHINXMIN-NEXT:    call ceilf
 ; RV32IZDINXZHINXMIN-NEXT:    fcvt.h.s a0, a0
 ; RV32IZDINXZHINXMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZDINXZHINXMIN-NEXT:    addi sp, sp, 16
@@ -207,7 +207,7 @@ define half @ceil_f16(half %a) nounwind strictfp {
 ; RV64IZDINXZHINXMIN-NEXT:    addi sp, sp, -16
 ; RV64IZDINXZHINXMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZDINXZHINXMIN-NEXT:    fcvt.s.h a0, a0
-; RV64IZDINXZHINXMIN-NEXT:    call ceilf@plt
+; RV64IZDINXZHINXMIN-NEXT:    call ceilf
 ; RV64IZDINXZHINXMIN-NEXT:    fcvt.h.s a0, a0
 ; RV64IZDINXZHINXMIN-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZDINXZHINXMIN-NEXT:    addi sp, sp, 16
@@ -224,7 +224,7 @@ define half @trunc_f16(half %a) nounwind strictfp {
 ; RV32IZFHMIN-NEXT:    addi sp, sp, -16
 ; RV32IZFHMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFHMIN-NEXT:    fcvt.s.h fa0, fa0
-; RV32IZFHMIN-NEXT:    call truncf@plt
+; RV32IZFHMIN-NEXT:    call truncf
 ; RV32IZFHMIN-NEXT:    fcvt.h.s fa0, fa0
 ; RV32IZFHMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFHMIN-NEXT:    addi sp, sp, 16
@@ -235,7 +235,7 @@ define half @trunc_f16(half %a) nounwind strictfp {
 ; RV64IZFHMIN-NEXT:    addi sp, sp, -16
 ; RV64IZFHMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZFHMIN-NEXT:    fcvt.s.h fa0, fa0
-; RV64IZFHMIN-NEXT:    call truncf@plt
+; RV64IZFHMIN-NEXT:    call truncf
 ; RV64IZFHMIN-NEXT:    fcvt.h.s fa0, fa0
 ; RV64IZFHMIN-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFHMIN-NEXT:    addi sp, sp, 16
@@ -246,7 +246,7 @@ define half @trunc_f16(half %a) nounwind strictfp {
 ; RV32IZHINXMIN-STRICT-NEXT:    addi sp, sp, -16
 ; RV32IZHINXMIN-STRICT-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZHINXMIN-STRICT-NEXT:    fcvt.s.h a0, a0
-; RV32IZHINXMIN-STRICT-NEXT:    call truncf@plt
+; RV32IZHINXMIN-STRICT-NEXT:    call truncf
 ; RV32IZHINXMIN-STRICT-NEXT:    fcvt.h.s a0, a0
 ; RV32IZHINXMIN-STRICT-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZHINXMIN-STRICT-NEXT:    addi sp, sp, 16
@@ -257,7 +257,7 @@ define half @trunc_f16(half %a) nounwind strictfp {
 ; RV64IZHINXMIN-STRICT-NEXT:    addi sp, sp, -16
 ; RV64IZHINXMIN-STRICT-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZHINXMIN-STRICT-NEXT:    fcvt.s.h a0, a0
-; RV64IZHINXMIN-STRICT-NEXT:    call truncf@plt
+; RV64IZHINXMIN-STRICT-NEXT:    call truncf
 ; RV64IZHINXMIN-STRICT-NEXT:    fcvt.h.s a0, a0
 ; RV64IZHINXMIN-STRICT-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZHINXMIN-STRICT-NEXT:    addi sp, sp, 16
@@ -268,7 +268,7 @@ define half @trunc_f16(half %a) nounwind strictfp {
 ; RV32IZDINXZHINXMIN-NEXT:    addi sp, sp, -16
 ; RV32IZDINXZHINXMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZDINXZHINXMIN-NEXT:    fcvt.s.h a0, a0
-; RV32IZDINXZHINXMIN-NEXT:    call truncf@plt
+; RV32IZDINXZHINXMIN-NEXT:    call truncf
 ; RV32IZDINXZHINXMIN-NEXT:    fcvt.h.s a0, a0
 ; RV32IZDINXZHINXMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZDINXZHINXMIN-NEXT:    addi sp, sp, 16
@@ -279,7 +279,7 @@ define half @trunc_f16(half %a) nounwind strictfp {
 ; RV64IZDINXZHINXMIN-NEXT:    addi sp, sp, -16
 ; RV64IZDINXZHINXMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZDINXZHINXMIN-NEXT:    fcvt.s.h a0, a0
-; RV64IZDINXZHINXMIN-NEXT:    call truncf@plt
+; RV64IZDINXZHINXMIN-NEXT:    call truncf
 ; RV64IZDINXZHINXMIN-NEXT:    fcvt.h.s a0, a0
 ; RV64IZDINXZHINXMIN-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZDINXZHINXMIN-NEXT:    addi sp, sp, 16
@@ -296,7 +296,7 @@ define half @rint_f16(half %a) nounwind strictfp {
 ; RV32IZFHMIN-NEXT:    addi sp, sp, -16
 ; RV32IZFHMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFHMIN-NEXT:    fcvt.s.h fa0, fa0
-; RV32IZFHMIN-NEXT:    call rintf@plt
+; RV32IZFHMIN-NEXT:    call rintf
 ; RV32IZFHMIN-NEXT:    fcvt.h.s fa0, fa0
 ; RV32IZFHMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFHMIN-NEXT:    addi sp, sp, 16
@@ -307,7 +307,7 @@ define half @rint_f16(half %a) nounwind strictfp {
 ; RV64IZFHMIN-NEXT:    addi sp, sp, -16
 ; RV64IZFHMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZFHMIN-NEXT:    fcvt.s.h fa0, fa0
-; RV64IZFHMIN-NEXT:    call rintf@plt
+; RV64IZFHMIN-NEXT:    call rintf
 ; RV64IZFHMIN-NEXT:    fcvt.h.s fa0, fa0
 ; RV64IZFHMIN-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFHMIN-NEXT:    addi sp, sp, 16
@@ -318,7 +318,7 @@ define half @rint_f16(half %a) nounwind strictfp {
 ; RV32IZHINXMIN-STRICT-NEXT:    addi sp, sp, -16
 ; RV32IZHINXMIN-STRICT-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZHINXMIN-STRICT-NEXT:    fcvt.s.h a0, a0
-; RV32IZHINXMIN-STRICT-NEXT:    call rintf@plt
+; RV32IZHINXMIN-STRICT-NEXT:    call rintf
 ; RV32IZHINXMIN-STRICT-NEXT:    fcvt.h.s a0, a0
 ; RV32IZHINXMIN-STRICT-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZHINXMIN-STRICT-NEXT:    addi sp, sp, 16
@@ -329,7 +329,7 @@ define half @rint_f16(half %a) nounwind strictfp {
 ; RV64IZHINXMIN-STRICT-NEXT:    addi sp, sp, -16
 ; RV64IZHINXMIN-STRICT-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZHINXMIN-STRICT-NEXT:    fcvt.s.h a0, a0
-; RV64IZHINXMIN-STRICT-NEXT:    call rintf@plt
+; RV64IZHINXMIN-STRICT-NEXT:    call rintf
 ; RV64IZHINXMIN-STRICT-NEXT:    fcvt.h.s a0, a0
 ; RV64IZHINXMIN-STRICT-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZHINXMIN-STRICT-NEXT:    addi sp, sp, 16
@@ -340,7 +340,7 @@ define half @rint_f16(half %a) nounwind strictfp {
 ; RV32IZDINXZHINXMIN-NEXT:    addi sp, sp, -16
 ; RV32IZDINXZHINXMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZDINXZHINXMIN-NEXT:    fcvt.s.h a0, a0
-; RV32IZDINXZHINXMIN-NEXT:    call rintf@plt
+; RV32IZDINXZHINXMIN-NEXT:    call rintf
 ; RV32IZDINXZHINXMIN-NEXT:    fcvt.h.s a0, a0
 ; RV32IZDINXZHINXMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZDINXZHINXMIN-NEXT:    addi sp, sp, 16
@@ -351,7 +351,7 @@ define half @rint_f16(half %a) nounwind strictfp {
 ; RV64IZDINXZHINXMIN-NEXT:    addi sp, sp, -16
 ; RV64IZDINXZHINXMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZDINXZHINXMIN-NEXT:    fcvt.s.h a0, a0
-; RV64IZDINXZHINXMIN-NEXT:    call rintf@plt
+; RV64IZDINXZHINXMIN-NEXT:    call rintf
 ; RV64IZDINXZHINXMIN-NEXT:    fcvt.h.s a0, a0
 ; RV64IZDINXZHINXMIN-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZDINXZHINXMIN-NEXT:    addi sp, sp, 16
@@ -368,7 +368,7 @@ define half @nearbyint_f16(half %a) nounwind strictfp {
 ; RV32IZFHMIN-NEXT:    addi sp, sp, -16
 ; RV32IZFHMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFHMIN-NEXT:    fcvt.s.h fa0, fa0
-; RV32IZFHMIN-NEXT:    call nearbyintf@plt
+; RV32IZFHMIN-NEXT:    call nearbyintf
 ; RV32IZFHMIN-NEXT:    fcvt.h.s fa0, fa0
 ; RV32IZFHMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFHMIN-NEXT:    addi sp, sp, 16
@@ -379,7 +379,7 @@ define half @nearbyint_f16(half %a) nounwind strictfp {
 ; RV64IZFHMIN-NEXT:    addi sp, sp, -16
 ; RV64IZFHMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZFHMIN-NEXT:    fcvt.s.h fa0, fa0
-; RV64IZFHMIN-NEXT:    call nearbyintf@plt
+; RV64IZFHMIN-NEXT:    call nearbyintf
 ; RV64IZFHMIN-NEXT:    fcvt.h.s fa0, fa0
 ; RV64IZFHMIN-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFHMIN-NEXT:    addi sp, sp, 16
@@ -390,7 +390,7 @@ define half @nearbyint_f16(half %a) nounwind strictfp {
 ; RV32IZHINXMIN-STRICT-NEXT:    addi sp, sp, -16
 ; RV32IZHINXMIN-STRICT-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZHINXMIN-STRICT-NEXT:    fcvt.s.h a0, a0
-; RV32IZHINXMIN-STRICT-NEXT:    call nearbyintf@plt
+; RV32IZHINXMIN-STRICT-NEXT:    call nearbyintf
 ; RV32IZHINXMIN-STRICT-NEXT:    fcvt.h.s a0, a0
 ; RV32IZHINXMIN-STRICT-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZHINXMIN-STRICT-NEXT:    addi sp, sp, 16
@@ -401,7 +401,7 @@ define half @nearbyint_f16(half %a) nounwind strictfp {
 ; RV64IZHINXMIN-STRICT-NEXT:    addi sp, sp, -16
 ; RV64IZHINXMIN-STRICT-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZHINXMIN-STRICT-NEXT:    fcvt.s.h a0, a0
-; RV64IZHINXMIN-STRICT-NEXT:    call nearbyintf@plt
+; RV64IZHINXMIN-STRICT-NEXT:    call nearbyintf
 ; RV64IZHINXMIN-STRICT-NEXT:    fcvt.h.s a0, a0
 ; RV64IZHINXMIN-STRICT-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZHINXMIN-STRICT-NEXT:    addi sp, sp, 16
@@ -412,7 +412,7 @@ define half @nearbyint_f16(half %a) nounwind strictfp {
 ; RV32IZDINXZHINXMIN-NEXT:    addi sp, sp, -16
 ; RV32IZDINXZHINXMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZDINXZHINXMIN-NEXT:    fcvt.s.h a0, a0
-; RV32IZDINXZHINXMIN-NEXT:    call nearbyintf@plt
+; RV32IZDINXZHINXMIN-NEXT:    call nearbyintf
 ; RV32IZDINXZHINXMIN-NEXT:    fcvt.h.s a0, a0
 ; RV32IZDINXZHINXMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZDINXZHINXMIN-NEXT:    addi sp, sp, 16
@@ -423,7 +423,7 @@ define half @nearbyint_f16(half %a) nounwind strictfp {
 ; RV64IZDINXZHINXMIN-NEXT:    addi sp, sp, -16
 ; RV64IZDINXZHINXMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZDINXZHINXMIN-NEXT:    fcvt.s.h a0, a0
-; RV64IZDINXZHINXMIN-NEXT:    call nearbyintf@plt
+; RV64IZDINXZHINXMIN-NEXT:    call nearbyintf
 ; RV64IZDINXZHINXMIN-NEXT:    fcvt.h.s a0, a0
 ; RV64IZDINXZHINXMIN-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZDINXZHINXMIN-NEXT:    addi sp, sp, 16
@@ -440,7 +440,7 @@ define half @round_f16(half %a) nounwind strictfp {
 ; RV32IZFHMIN-NEXT:    addi sp, sp, -16
 ; RV32IZFHMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFHMIN-NEXT:    fcvt.s.h fa0, fa0
-; RV32IZFHMIN-NEXT:    call roundf@plt
+; RV32IZFHMIN-NEXT:    call roundf
 ; RV32IZFHMIN-NEXT:    fcvt.h.s fa0, fa0
 ; RV32IZFHMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFHMIN-NEXT:    addi sp, sp, 16
@@ -451,7 +451,7 @@ define half @round_f16(half %a) nounwind strictfp {
 ; RV64IZFHMIN-NEXT:    addi sp, sp, -16
 ; RV64IZFHMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZFHMIN-NEXT:    fcvt.s.h fa0, fa0
-; RV64IZFHMIN-NEXT:    call roundf@plt
+; RV64IZFHMIN-NEXT:    call roundf
 ; RV64IZFHMIN-NEXT:    fcvt.h.s fa0, fa0
 ; RV64IZFHMIN-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFHMIN-NEXT:    addi sp, sp, 16
@@ -462,7 +462,7 @@ define half @round_f16(half %a) nounwind strictfp {
 ; RV32IZHINXMIN-STRICT-NEXT:    addi sp, sp, -16
 ; RV32IZHINXMIN-STRICT-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZHINXMIN-STRICT-NEXT:    fcvt.s.h a0, a0
-; RV32IZHINXMIN-STRICT-NEXT:    call roundf@plt
+; RV32IZHINXMIN-STRICT-NEXT:    call roundf
 ; RV32IZHINXMIN-STRICT-NEXT:    fcvt.h.s a0, a0
 ; RV32IZHINXMIN-STRICT-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZHINXMIN-STRICT-NEXT:    addi sp, sp, 16
@@ -473,7 +473,7 @@ define half @round_f16(half %a) nounwind strictfp {
 ; RV64IZHINXMIN-STRICT-NEXT:    addi sp, sp, -16
 ; RV64IZHINXMIN-STRICT-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZHINXMIN-STRICT-NEXT:    fcvt.s.h a0, a0
-; RV64IZHINXMIN-STRICT-NEXT:    call roundf@plt
+; RV64IZHINXMIN-STRICT-NEXT:    call roundf
 ; RV64IZHINXMIN-STRICT-NEXT:    fcvt.h.s a0, a0
 ; RV64IZHINXMIN-STRICT-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZHINXMIN-STRICT-NEXT:    addi sp, sp, 16
@@ -484,7 +484,7 @@ define half @round_f16(half %a) nounwind strictfp {
 ; RV32IZDINXZHINXMIN-NEXT:    addi sp, sp, -16
 ; RV32IZDINXZHINXMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZDINXZHINXMIN-NEXT:    fcvt.s.h a0, a0
-; RV32IZDINXZHINXMIN-NEXT:    call roundf@plt
+; RV32IZDINXZHINXMIN-NEXT:    call roundf
 ; RV32IZDINXZHINXMIN-NEXT:    fcvt.h.s a0, a0
 ; RV32IZDINXZHINXMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZDINXZHINXMIN-NEXT:    addi sp, sp, 16
@@ -495,7 +495,7 @@ define half @round_f16(half %a) nounwind strictfp {
 ; RV64IZDINXZHINXMIN-NEXT:    addi sp, sp, -16
 ; RV64IZDINXZHINXMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZDINXZHINXMIN-NEXT:    fcvt.s.h a0, a0
-; RV64IZDINXZHINXMIN-NEXT:    call roundf@plt
+; RV64IZDINXZHINXMIN-NEXT:    call roundf
 ; RV64IZDINXZHINXMIN-NEXT:    fcvt.h.s a0, a0
 ; RV64IZDINXZHINXMIN-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZDINXZHINXMIN-NEXT:    addi sp, sp, 16
@@ -512,7 +512,7 @@ define half @roundeven_f16(half %a) nounwind strictfp {
 ; RV32IZFHMIN-NEXT:    addi sp, sp, -16
 ; RV32IZFHMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFHMIN-NEXT:    fcvt.s.h fa0, fa0
-; RV32IZFHMIN-NEXT:    call roundevenf@plt
+; RV32IZFHMIN-NEXT:    call roundevenf
 ; RV32IZFHMIN-NEXT:    fcvt.h.s fa0, fa0
 ; RV32IZFHMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFHMIN-NEXT:    addi sp, sp, 16
@@ -523,7 +523,7 @@ define half @roundeven_f16(half %a) nounwind strictfp {
 ; RV64IZFHMIN-NEXT:    addi sp, sp, -16
 ; RV64IZFHMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZFHMIN-NEXT:    fcvt.s.h fa0, fa0
-; RV64IZFHMIN-NEXT:    call roundevenf@plt
+; RV64IZFHMIN-NEXT:    call roundevenf
 ; RV64IZFHMIN-NEXT:    fcvt.h.s fa0, fa0
 ; RV64IZFHMIN-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFHMIN-NEXT:    addi sp, sp, 16
@@ -534,7 +534,7 @@ define half @roundeven_f16(half %a) nounwind strictfp {
 ; RV32IZHINXMIN-STRICT-NEXT:    addi sp, sp, -16
 ; RV32IZHINXMIN-STRICT-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZHINXMIN-STRICT-NEXT:    fcvt.s.h a0, a0
-; RV32IZHINXMIN-STRICT-NEXT:    call roundevenf@plt
+; RV32IZHINXMIN-STRICT-NEXT:    call roundevenf
 ; RV32IZHINXMIN-STRICT-NEXT:    fcvt.h.s a0, a0
 ; RV32IZHINXMIN-STRICT-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZHINXMIN-STRICT-NEXT:    addi sp, sp, 16
@@ -545,7 +545,7 @@ define half @roundeven_f16(half %a) nounwind strictfp {
 ; RV64IZHINXMIN-STRICT-NEXT:    addi sp, sp, -16
 ; RV64IZHINXMIN-STRICT-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZHINXMIN-STRICT-NEXT:    fcvt.s.h a0, a0
-; RV64IZHINXMIN-STRICT-NEXT:    call roundevenf@plt
+; RV64IZHINXMIN-STRICT-NEXT:    call roundevenf
 ; RV64IZHINXMIN-STRICT-NEXT:    fcvt.h.s a0, a0
 ; RV64IZHINXMIN-STRICT-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZHINXMIN-STRICT-NEXT:    addi sp, sp, 16
@@ -556,7 +556,7 @@ define half @roundeven_f16(half %a) nounwind strictfp {
 ; RV32IZDINXZHINXMIN-NEXT:    addi sp, sp, -16
 ; RV32IZDINXZHINXMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZDINXZHINXMIN-NEXT:    fcvt.s.h a0, a0
-; RV32IZDINXZHINXMIN-NEXT:    call roundevenf@plt
+; RV32IZDINXZHINXMIN-NEXT:    call roundevenf
 ; RV32IZDINXZHINXMIN-NEXT:    fcvt.h.s a0, a0
 ; RV32IZDINXZHINXMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZDINXZHINXMIN-NEXT:    addi sp, sp, 16
@@ -567,7 +567,7 @@ define half @roundeven_f16(half %a) nounwind strictfp {
 ; RV64IZDINXZHINXMIN-NEXT:    addi sp, sp, -16
 ; RV64IZDINXZHINXMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZDINXZHINXMIN-NEXT:    fcvt.s.h a0, a0
-; RV64IZDINXZHINXMIN-NEXT:    call roundevenf@plt
+; RV64IZDINXZHINXMIN-NEXT:    call roundevenf
 ; RV64IZDINXZHINXMIN-NEXT:    fcvt.h.s a0, a0
 ; RV64IZDINXZHINXMIN-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZDINXZHINXMIN-NEXT:    addi sp, sp, 16
@@ -668,7 +668,7 @@ define i64 @llrint_f16(half %a) nounwind strictfp {
 ; RV32IZFHMIN-NEXT:    addi sp, sp, -16
 ; RV32IZFHMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFHMIN-NEXT:    fcvt.s.h fa0, fa0
-; RV32IZFHMIN-NEXT:    call llrintf@plt
+; RV32IZFHMIN-NEXT:    call llrintf
 ; RV32IZFHMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFHMIN-NEXT:    addi sp, sp, 16
 ; RV32IZFHMIN-NEXT:    ret
@@ -684,7 +684,7 @@ define i64 @llrint_f16(half %a) nounwind strictfp {
 ; RV32IZHINXMIN-STRICT-NEXT:    addi sp, sp, -16
 ; RV32IZHINXMIN-STRICT-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZHINXMIN-STRICT-NEXT:    fcvt.s.h a0, a0
-; RV32IZHINXMIN-STRICT-NEXT:    call llrintf@plt
+; RV32IZHINXMIN-STRICT-NEXT:    call llrintf
 ; RV32IZHINXMIN-STRICT-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZHINXMIN-STRICT-NEXT:    addi sp, sp, 16
 ; RV32IZHINXMIN-STRICT-NEXT:    ret
@@ -700,7 +700,7 @@ define i64 @llrint_f16(half %a) nounwind strictfp {
 ; RV32IZDINXZHINXMIN-NEXT:    addi sp, sp, -16
 ; RV32IZDINXZHINXMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZDINXZHINXMIN-NEXT:    fcvt.s.h a0, a0
-; RV32IZDINXZHINXMIN-NEXT:    call llrintf@plt
+; RV32IZDINXZHINXMIN-NEXT:    call llrintf
 ; RV32IZDINXZHINXMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZDINXZHINXMIN-NEXT:    addi sp, sp, 16
 ; RV32IZDINXZHINXMIN-NEXT:    ret
@@ -722,7 +722,7 @@ define i64 @llround_f16(half %a) nounwind strictfp {
 ; RV32IZFHMIN-NEXT:    addi sp, sp, -16
 ; RV32IZFHMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFHMIN-NEXT:    fcvt.s.h fa0, fa0
-; RV32IZFHMIN-NEXT:    call llroundf@plt
+; RV32IZFHMIN-NEXT:    call llroundf
 ; RV32IZFHMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFHMIN-NEXT:    addi sp, sp, 16
 ; RV32IZFHMIN-NEXT:    ret
@@ -738,7 +738,7 @@ define i64 @llround_f16(half %a) nounwind strictfp {
 ; RV32IZHINXMIN-STRICT-NEXT:    addi sp, sp, -16
 ; RV32IZHINXMIN-STRICT-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZHINXMIN-STRICT-NEXT:    fcvt.s.h a0, a0
-; RV32IZHINXMIN-STRICT-NEXT:    call llroundf@plt
+; RV32IZHINXMIN-STRICT-NEXT:    call llroundf
 ; RV32IZHINXMIN-STRICT-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZHINXMIN-STRICT-NEXT:    addi sp, sp, 16
 ; RV32IZHINXMIN-STRICT-NEXT:    ret
@@ -754,7 +754,7 @@ define i64 @llround_f16(half %a) nounwind strictfp {
 ; RV32IZDINXZHINXMIN-NEXT:    addi sp, sp, -16
 ; RV32IZDINXZHINXMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZDINXZHINXMIN-NEXT:    fcvt.s.h a0, a0
-; RV32IZDINXZHINXMIN-NEXT:    call llroundf@plt
+; RV32IZDINXZHINXMIN-NEXT:    call llroundf
 ; RV32IZDINXZHINXMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZDINXZHINXMIN-NEXT:    addi sp, sp, 16
 ; RV32IZDINXZHINXMIN-NEXT:    ret
diff --git a/llvm/test/MC/RISCV/function-call.s b/llvm/test/MC/RISCV/function-call.s
index e0650593ec91f9..1521ae7e55e199 100644
--- a/llvm/test/MC/RISCV/function-call.s
+++ b/llvm/test/MC/RISCV/function-call.s
@@ -50,7 +50,7 @@ call foo@plt
 # RELOC: R_RISCV_CALL_PLT foo 0x0
 # INSTR: auipc ra, 0
 # INSTR: jalr  ra
-# FIXUP: fixup A - offset: 0, value: foo@plt, kind: fixup_riscv_call_plt
+# FIXUP: fixup A - offset: 0, value: foo, kind: fixup_riscv_call_plt
 
 # Ensure that an explicit register operand can be parsed.
 
@@ -64,4 +64,4 @@ call a0, foo@plt
 # RELOC: R_RISCV_CALL_PLT foo 0x0
 # INSTR: auipc a0, 0
 # INSTR: jalr  a0
-# FIXUP: fixup A - offset: 0, value: foo@plt, kind: fixup_riscv_call_plt
+# FIXUP: fixup A - offset: 0, value: foo, kind: fixup_riscv_call_plt
diff --git a/llvm/test/MC/RISCV/tail-call.s b/llvm/test/MC/RISCV/tail-call.s
index 3670c7749ab170..c94af672edda2a 100644
--- a/llvm/test/MC/RISCV/tail-call.s
+++ b/llvm/test/MC/RISCV/tail-call.s
@@ -50,4 +50,4 @@ tail foo@plt
 # RELOC: R_RISCV_CALL_PLT foo 0x0
 # INSTR: auipc t1, 0
 # INSTR: jr  t1
-# FIXUP: fixup A - offset: 0, value: foo@plt, kind:
+# FIXUP: fixup A - offset: 0, value: foo, kind: fixup_riscv_call_plt
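For anyone who wants to double-check the behavior these test updates encode,
here is a minimal sketch outside lit (assumes an llvm-mc built with this
patch; "sanity.s" and the symbol "foo" are placeholders, not part of the
patch). It mirrors the RELOC/FIXUP lines above: both spellings still produce
R_RISCV_CALL_PLT, and only the printed fixup value drops the "@plt" suffix.

  # sanity.s
  call foo        # assembles to R_RISCV_CALL_PLT foo
  call foo@plt    # still accepted by the parser; same relocation
  tail foo        # tail calls use R_RISCV_CALL_PLT as well

  # Assemble and inspect the relocations:
  #   llvm-mc -triple=riscv64 -filetype=obj sanity.s -o sanity.o
  #   llvm-readobj -r sanity.o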


