[llvm] ed078c4 - [LoongArch] Add insn aliases `jr` and `ret`

Weining Lu via llvm-commits llvm-commits at lists.llvm.org
Wed Aug 10 19:03:04 PDT 2022


Author: WANG Xuerui
Date: 2022-08-11T10:02:45+08:00
New Revision: ed078c48f0d7b499a4565d4da2dde22a4dbf19d9

URL: https://github.com/llvm/llvm-project/commit/ed078c48f0d7b499a4565d4da2dde22a4dbf19d9
DIFF: https://github.com/llvm/llvm-project/commit/ed078c48f0d7b499a4565d4da2dde22a4dbf19d9.diff

LOG: [LoongArch] Add insn aliases `jr` and `ret`

Differential Revision: https://reviews.llvm.org/D131512
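
For readers new to the LoongArch ISA: both aliases are thin wrappers around
`jirl` (jump indirect and link register), with `$zero` as the link destination
so the link address is discarded. A sketch of the equivalences this patch
introduces, per the InstAlias definitions in LoongArchInstrInfo.td below
(using `$a0` only as an example operand register):

    jr   $a0    # == jirl $zero, $a0, 0   (indirect jump through $a0)
    ret         # == jirl $zero, $ra, 0   (return via the link register)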

Added: 
    

Modified: 
    llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
    llvm/test/CodeGen/LoongArch/analyze-branch.ll
    llvm/test/CodeGen/LoongArch/bitreverse.ll
    llvm/test/CodeGen/LoongArch/bstrins_d.ll
    llvm/test/CodeGen/LoongArch/bstrins_w.ll
    llvm/test/CodeGen/LoongArch/bstrpick_d.ll
    llvm/test/CodeGen/LoongArch/bstrpick_w.ll
    llvm/test/CodeGen/LoongArch/bswap-bitreverse.ll
    llvm/test/CodeGen/LoongArch/bswap.ll
    llvm/test/CodeGen/LoongArch/calling-conv-lp64d.ll
    llvm/test/CodeGen/LoongArch/double-imm.ll
    llvm/test/CodeGen/LoongArch/float-imm.ll
    llvm/test/CodeGen/LoongArch/frame.ll
    llvm/test/CodeGen/LoongArch/imm.ll
    llvm/test/CodeGen/LoongArch/ir-instruction/add.ll
    llvm/test/CodeGen/LoongArch/ir-instruction/and.ll
    llvm/test/CodeGen/LoongArch/ir-instruction/ashr.ll
    llvm/test/CodeGen/LoongArch/ir-instruction/br.ll
    llvm/test/CodeGen/LoongArch/ir-instruction/call.ll
    llvm/test/CodeGen/LoongArch/ir-instruction/double-convert.ll
    llvm/test/CodeGen/LoongArch/ir-instruction/fadd.ll
    llvm/test/CodeGen/LoongArch/ir-instruction/fcmp-dbl.ll
    llvm/test/CodeGen/LoongArch/ir-instruction/fcmp-flt.ll
    llvm/test/CodeGen/LoongArch/ir-instruction/fdiv.ll
    llvm/test/CodeGen/LoongArch/ir-instruction/fence.ll
    llvm/test/CodeGen/LoongArch/ir-instruction/float-convert.ll
    llvm/test/CodeGen/LoongArch/ir-instruction/fmul.ll
    llvm/test/CodeGen/LoongArch/ir-instruction/fneg.ll
    llvm/test/CodeGen/LoongArch/ir-instruction/fsub.ll
    llvm/test/CodeGen/LoongArch/ir-instruction/icmp.ll
    llvm/test/CodeGen/LoongArch/ir-instruction/indirectbr.ll
    llvm/test/CodeGen/LoongArch/ir-instruction/load-store-atomic.ll
    llvm/test/CodeGen/LoongArch/ir-instruction/load-store.ll
    llvm/test/CodeGen/LoongArch/ir-instruction/lshr.ll
    llvm/test/CodeGen/LoongArch/ir-instruction/mul.ll
    llvm/test/CodeGen/LoongArch/ir-instruction/or.ll
    llvm/test/CodeGen/LoongArch/ir-instruction/sdiv-udiv-srem-urem.ll
    llvm/test/CodeGen/LoongArch/ir-instruction/select-bare-dbl.ll
    llvm/test/CodeGen/LoongArch/ir-instruction/select-bare-flt.ll
    llvm/test/CodeGen/LoongArch/ir-instruction/select-bare-int.ll
    llvm/test/CodeGen/LoongArch/ir-instruction/select-fpcc-dbl.ll
    llvm/test/CodeGen/LoongArch/ir-instruction/select-fpcc-flt.ll
    llvm/test/CodeGen/LoongArch/ir-instruction/select-fpcc-int.ll
    llvm/test/CodeGen/LoongArch/ir-instruction/select-icc-dbl.ll
    llvm/test/CodeGen/LoongArch/ir-instruction/select-icc-flt.ll
    llvm/test/CodeGen/LoongArch/ir-instruction/select-icc-int.ll
    llvm/test/CodeGen/LoongArch/ir-instruction/sext-zext-trunc.ll
    llvm/test/CodeGen/LoongArch/ir-instruction/shl.ll
    llvm/test/CodeGen/LoongArch/ir-instruction/sub.ll
    llvm/test/CodeGen/LoongArch/ir-instruction/xor.ll
    llvm/test/CodeGen/LoongArch/not.ll
    llvm/test/CodeGen/LoongArch/rotl-rotr.ll
    llvm/test/CodeGen/LoongArch/shift-masked-shamt.ll
    llvm/test/CodeGen/LoongArch/vararg.ll
    llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/loongarch_function_name.ll.expected
    llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/loongarch_generated_funcs.ll.generated.expected
    llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/loongarch_generated_funcs.ll.nogenerated.expected

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td b/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
index 8ff4f11e35419..33502141345ae 100644
--- a/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
+++ b/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
@@ -966,6 +966,10 @@ def ADJCALLSTACKUP   : Pseudo<(outs), (ins i32imm:$amt1, i32imm:$amt2),
 
 def : InstAlias<"nop", (ANDI R0, R0, 0)>;
 def : InstAlias<"move $dst, $src", (OR GPR:$dst, GPR:$src, R0)>;
+// `ret` is supported since binutils commit 20f2e2686c79a5ac (version 2.40 and
+// later).
+def : InstAlias<"ret", (JIRL R0, R1, 0)>;
+def : InstAlias<"jr $rj", (JIRL R0, GPR:$rj, 0)>;
 
 //===----------------------------------------------------------------------===//
 // Basic Floating-Point Instructions

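Both aliases are accepted by the assembler and preferred when printing, which
is why the test updates below now expect `ret` in place of
`jirl $zero, $ra, 0`. One way to check the round trip locally, using llvm-mc,
LLVM's standalone assembler driver (a hypothetical invocation; directive lines
omitted from the output):

    $ echo 'jirl $zero, $ra, 0' | llvm-mc --triple=loongarch64
            ret
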
diff --git a/llvm/test/CodeGen/LoongArch/analyze-branch.ll b/llvm/test/CodeGen/LoongArch/analyze-branch.ll
index 8a39b505d33c9..7ce61f84df384 100644
--- a/llvm/test/CodeGen/LoongArch/analyze-branch.ll
+++ b/llvm/test/CodeGen/LoongArch/analyze-branch.ll
@@ -23,7 +23,7 @@ define void @test_bcc_fallthrough_taken(i64 %in) nounwind {
 ; CHECK-NEXT:  .LBB0_2: # %true
 ; CHECK-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
 ; CHECK-NEXT:    addi.d $sp, $sp, 16
-; CHECK-NEXT:    jirl $zero, $ra, 0
+; CHECK-NEXT:    ret
 ; CHECK-NEXT:  .LBB0_3: # %false
 ; CHECK-NEXT:    bl test_false
 ; CHECK-NEXT:    b .LBB0_2
@@ -55,7 +55,7 @@ define void @test_bcc_fallthrough_nottaken(i64 %in) nounwind {
 ; CHECK-NEXT:  .LBB1_2: # %true
 ; CHECK-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
 ; CHECK-NEXT:    addi.d $sp, $sp, 16
-; CHECK-NEXT:    jirl $zero, $ra, 0
+; CHECK-NEXT:    ret
 ; CHECK-NEXT:  .LBB1_1: # %true
 ; CHECK-NEXT:    bl test_true
 ; CHECK-NEXT:    b .LBB1_2

diff --git a/llvm/test/CodeGen/LoongArch/bitreverse.ll b/llvm/test/CodeGen/LoongArch/bitreverse.ll
index eda139fcd33ba..8cc731eaa7a54 100644
--- a/llvm/test/CodeGen/LoongArch/bitreverse.ll
+++ b/llvm/test/CodeGen/LoongArch/bitreverse.ll
@@ -18,12 +18,12 @@ define i8 @test_bitreverse_i8(i8 %a) nounwind {
 ; LA32-LABEL: test_bitreverse_i8:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    bitrev.4b $a0, $a0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: test_bitreverse_i8:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    bitrev.4b $a0, $a0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %tmp = call i8 @llvm.bitreverse.i8(i8 %a)
   ret i8 %tmp
 }
@@ -33,13 +33,13 @@ define i16 @test_bitreverse_i16(i16 %a) nounwind {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    bitrev.w $a0, $a0
 ; LA32-NEXT:    srli.w $a0, $a0, 16
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: test_bitreverse_i16:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    bitrev.d $a0, $a0
 ; LA64-NEXT:    srli.d $a0, $a0, 48
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %tmp = call i16 @llvm.bitreverse.i16(i16 %a)
   ret i16 %tmp
 }
@@ -48,12 +48,12 @@ define i32 @test_bitreverse_i32(i32 %a) nounwind {
 ; LA32-LABEL: test_bitreverse_i32:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    bitrev.w $a0, $a0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: test_bitreverse_i32:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    bitrev.w $a0, $a0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %tmp = call i32 @llvm.bitreverse.i32(i32 %a)
   ret i32 %tmp
 }
@@ -64,12 +64,12 @@ define i64 @test_bitreverse_i64(i64 %a) nounwind {
 ; LA32-NEXT:    bitrev.w $a2, $a1
 ; LA32-NEXT:    bitrev.w $a1, $a0
 ; LA32-NEXT:    move $a0, $a2
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: test_bitreverse_i64:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    bitrev.d $a0, $a0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %tmp = call i64 @llvm.bitreverse.i64(i64 %a)
   ret i64 %tmp
 }
@@ -81,13 +81,13 @@ define i7 @test_bitreverse_i7(i7 %a) nounwind {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    bitrev.w $a0, $a0
 ; LA32-NEXT:    srli.w $a0, $a0, 25
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: test_bitreverse_i7:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    bitrev.d $a0, $a0
 ; LA64-NEXT:    srli.d $a0, $a0, 57
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %tmp = call i7 @llvm.bitreverse.i7(i7 %a)
   ret i7 %tmp
 }
@@ -97,13 +97,13 @@ define i24 @test_bitreverse_i24(i24 %a) nounwind {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    bitrev.w $a0, $a0
 ; LA32-NEXT:    srli.w $a0, $a0, 8
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: test_bitreverse_i24:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    bitrev.d $a0, $a0
 ; LA64-NEXT:    srli.d $a0, $a0, 40
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %tmp = call i24 @llvm.bitreverse.i24(i24 %a)
   ret i24 %tmp
 }
@@ -117,13 +117,13 @@ define i48 @test_bitreverse_i48(i48 %a) nounwind {
 ; LA32-NEXT:    slli.w $a0, $a2, 16
 ; LA32-NEXT:    or $a0, $a1, $a0
 ; LA32-NEXT:    srli.w $a1, $a2, 16
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: test_bitreverse_i48:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    bitrev.d $a0, $a0
 ; LA64-NEXT:    srli.d $a0, $a0, 16
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %tmp = call i48 @llvm.bitreverse.i48(i48 %a)
   ret i48 %tmp
 }
@@ -147,7 +147,7 @@ define i77 @test_bitreverse_i77(i77 %a) nounwind {
 ; LA32-NEXT:    srli.w $a1, $a1, 19
 ; LA32-NEXT:    or $a1, $a1, $a2
 ; LA32-NEXT:    st.w $a1, $a0, 0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: test_bitreverse_i77:
 ; LA64:       # %bb.0:
@@ -157,7 +157,7 @@ define i77 @test_bitreverse_i77(i77 %a) nounwind {
 ; LA64-NEXT:    slli.d $a0, $a2, 13
 ; LA64-NEXT:    or $a0, $a1, $a0
 ; LA64-NEXT:    srli.d $a1, $a2, 51
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %tmp = call i77 @llvm.bitreverse.i77(i77 %a)
   ret i77 %tmp
 }
@@ -177,14 +177,14 @@ define i128 @test_bitreverse_i128(i128 %a) nounwind {
 ; LA32-NEXT:    ld.w $a1, $a1, 12
 ; LA32-NEXT:    bitrev.w $a1, $a1
 ; LA32-NEXT:    st.w $a1, $a0, 0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: test_bitreverse_i128:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    bitrev.d $a2, $a1
 ; LA64-NEXT:    bitrev.d $a1, $a0
 ; LA64-NEXT:    move $a0, $a2
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %tmp = call i128 @llvm.bitreverse.i128(i128 %a)
   ret i128 %tmp
 }

diff --git a/llvm/test/CodeGen/LoongArch/bstrins_d.ll b/llvm/test/CodeGen/LoongArch/bstrins_d.ll
index 342e044c7a7be..fe1f6270f966d 100644
--- a/llvm/test/CodeGen/LoongArch/bstrins_d.ll
+++ b/llvm/test/CodeGen/LoongArch/bstrins_d.ll
@@ -13,7 +13,7 @@ define i64 @pat1(i64 %a, i64 %b) nounwind {
 ; CHECK-LABEL: pat1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    bstrins.d $a0, $a1, 39, 16
-; CHECK-NEXT:    jirl $zero, $ra, 0
+; CHECK-NEXT:    ret
   %and1 = and i64 %a, -1099511562241  ; 0xffffff000000ffff
   %shl = shl i64 %b, 16
   %and2 = and i64 %shl, 1099511562240 ; 0x000000ffffff0000
@@ -25,7 +25,7 @@ define i64 @pat1_swap(i64 %a, i64 %b) nounwind {
 ; CHECK-LABEL: pat1_swap:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    bstrins.d $a0, $a1, 39, 16
-; CHECK-NEXT:    jirl $zero, $ra, 0
+; CHECK-NEXT:    ret
   %and1 = and i64 %a, -1099511562241  ; 0xffffff000000ffff
   %shl = shl i64 %b, 16
   %and2 = and i64 %shl, 1099511562240 ; 0x000000ffffff0000
@@ -41,7 +41,7 @@ define i64 @pat2(i64 %a, i64 %b) nounwind {
 ; CHECK-LABEL: pat2:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    bstrins.d $a0, $a1, 39, 16
-; CHECK-NEXT:    jirl $zero, $ra, 0
+; CHECK-NEXT:    ret
   %and1 = and i64 %a, -1099511562241 ; 0xffffff000000ffff
   %and2 = and i64 %b, 16777215       ; 0x0000000000ffffff
   %shl = shl i64 %and2, 16
@@ -53,7 +53,7 @@ define i64 @pat2_swap(i64 %a, i64 %b) nounwind {
 ; CHECK-LABEL: pat2_swap:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    bstrins.d $a0, $a1, 39, 16
-; CHECK-NEXT:    jirl $zero, $ra, 0
+; CHECK-NEXT:    ret
   %and1 = and i64 %a, -1099511562241 ; 0xffffff000000ffff
   %and2 = and i64 %b, 16777215       ; 0x0000000000ffffff
   %shl = shl i64 %and2, 16
@@ -71,7 +71,7 @@ define i64 @pat3(i64 %a, i64 %b) nounwind {
 ; CHECK-NEXT:    andi $a1, $a1, 288
 ; CHECK-NEXT:    srli.d $a1, $a1, 4
 ; CHECK-NEXT:    bstrins.d $a0, $a1, 11, 4
-; CHECK-NEXT:    jirl $zero, $ra, 0
+; CHECK-NEXT:    ret
   %and1 = and i64 %a, -4081 ; 0xfffffffffffff00f
   %and2 = and i64 %b, 288   ; 0x0000000000000120
   %or = or i64 %and1, %and2
@@ -84,7 +84,7 @@ define i64 @pat3_swap(i64 %a, i64 %b) nounwind {
 ; CHECK-NEXT:    andi $a1, $a1, 288
 ; CHECK-NEXT:    srli.d $a1, $a1, 4
 ; CHECK-NEXT:    bstrins.d $a0, $a1, 11, 4
-; CHECK-NEXT:    jirl $zero, $ra, 0
+; CHECK-NEXT:    ret
   %and1 = and i64 %a, -4081 ; 0xfffffffffffff00f
   %and2 = and i64 %b, 288   ; 0x0000000000000120
   %or = or i64 %and2, %and1
@@ -99,7 +99,7 @@ define i64 @pat4(i64 %a, i64 %b) nounwind {
 ; CHECK-LABEL: pat4:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    bstrins.d $a0, $a1, 63, 8
-; CHECK-NEXT:    jirl $zero, $ra, 0
+; CHECK-NEXT:    ret
   %and = and i64 %a, 255
   %shl = shl i64 %b, 8
   %or = or i64 %and, %shl
@@ -110,7 +110,7 @@ define i64 @pat4_swap(i64 %a, i64 %b) nounwind {
 ; CHECK-LABEL: pat4_swap:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    bstrins.d $a0, $a1, 63, 8
-; CHECK-NEXT:    jirl $zero, $ra, 0
+; CHECK-NEXT:    ret
   %and = and i64 %a, 255
   %shl = shl i64 %b, 8
   %or = or i64 %shl, %and
@@ -127,7 +127,7 @@ define i64 @pat5(i64 %a) nounwind {
 ; CHECK-NEXT:    lu12i.w $a1, 74565
 ; CHECK-NEXT:    ori $a1, $a1, 1656
 ; CHECK-NEXT:    bstrins.d $a0, $a1, 47, 16
-; CHECK-NEXT:    jirl $zero, $ra, 0
+; CHECK-NEXT:    ret
   %and = and i64 %a, 18446462598732906495 ; 0xffff00000000ffff
   %or = or i64 %and, 20015998304256       ; 0x0000123456780000
   ret i64 %or
@@ -146,7 +146,7 @@ define i64 @pat6(i64 %c) nounwind {
 ; CHECK-NEXT:    lu52i.d $a1, $a1, 291
 ; CHECK-NEXT:    bstrins.d $a1, $a0, 39, 16
 ; CHECK-NEXT:    move $a0, $a1
-; CHECK-NEXT:    jirl $zero, $ra, 0
+; CHECK-NEXT:    ret
   %and = and i64 %c, 16777215            ; 0x0000000000ffffff
   %shl = shl i64 %and, 16
   %or = or i64 %shl, 1311767949471676570 ; 0x123456000000789a
@@ -164,7 +164,7 @@ define i64 @pat7(i64 %c) nounwind {
 ; CHECK-NEXT:    lu52i.d $a1, $a1, 291
 ; CHECK-NEXT:    bstrins.d $a1, $a0, 39, 16
 ; CHECK-NEXT:    move $a0, $a1
-; CHECK-NEXT:    jirl $zero, $ra, 0
+; CHECK-NEXT:    ret
   %shl = shl i64 %c, 16
   %and = and i64 %shl, 1099511562240     ; 0x000000ffffff0000
   %or = or i64 %and, 1311767949471676570 ; 0x123456000000789a
@@ -182,7 +182,7 @@ define i64 @pat8(i64 %c) nounwind {
 ; CHECK-NEXT:    lu32i.d $a0, 284160
 ; CHECK-NEXT:    lu52i.d $a0, $a0, 291
 ; CHECK-NEXT:    bstrins.d $a0, $a1, 39, 16
-; CHECK-NEXT:    jirl $zero, $ra, 0
+; CHECK-NEXT:    ret
   %and = and i64 %c, 1099511562240       ; 0x000000ffffff0000
   %or = or i64 %and, 1311767949471676570 ; 0x123456000000789a
   ret i64 %or
@@ -200,7 +200,7 @@ define i64 @no_bstrins_d(i64 %a) nounwind {
 ; CHECK-NEXT:    ori $a1, $a1, 4095
 ; CHECK-NEXT:    lu32i.d $a1, -60876
 ; CHECK-NEXT:    and $a0, $a0, $a1
-; CHECK-NEXT:    jirl $zero, $ra, 0
+; CHECK-NEXT:    ret
   %and = and i64 %a, 18446462598732906495 ; 0xffff00000000ffff
   %or = or i64 %and, 20015998341120       ; 0x0000123456789000
   ret i64 %or

diff --git a/llvm/test/CodeGen/LoongArch/bstrins_w.ll b/llvm/test/CodeGen/LoongArch/bstrins_w.ll
index 47c4d826c2ee5..dfbe000841cdc 100644
--- a/llvm/test/CodeGen/LoongArch/bstrins_w.ll
+++ b/llvm/test/CodeGen/LoongArch/bstrins_w.ll
@@ -13,7 +13,7 @@ define i32 @pat1(i32 %a, i32 %b) nounwind {
 ; CHECK-LABEL: pat1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    bstrins.w $a0, $a1, 19, 8
-; CHECK-NEXT:    jirl $zero, $ra, 0
+; CHECK-NEXT:    ret
   %and1 = and i32 %a, -1048321  ; 0xfff000ff
   %shl = shl i32 %b, 8
   %and2 = and i32 %shl, 1048320 ; 0x000fff00
@@ -25,7 +25,7 @@ define i32 @pat1_swap(i32 %a, i32 %b) nounwind {
 ; CHECK-LABEL: pat1_swap:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    bstrins.w $a0, $a1, 19, 8
-; CHECK-NEXT:    jirl $zero, $ra, 0
+; CHECK-NEXT:    ret
   %and1 = and i32 %a, -1048321  ; 0xfff000ff
   %shl = shl i32 %b, 8
   %and2 = and i32 %shl, 1048320 ; 0x000fff00
@@ -41,7 +41,7 @@ define i32 @pat2(i32 %a, i32 %b) nounwind {
 ; CHECK-LABEL: pat2:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    bstrins.w $a0, $a1, 19, 8
-; CHECK-NEXT:    jirl $zero, $ra, 0
+; CHECK-NEXT:    ret
   %and1 = and i32 %a, -1048321 ; 0xfff000ff
   %and2 = and i32 %b, 4095     ; 0x00000fff
   %shl = shl i32 %and2, 8
@@ -53,7 +53,7 @@ define i32 @pat2_swap(i32 %a, i32 %b) nounwind {
 ; CHECK-LABEL: pat2_swap:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    bstrins.w $a0, $a1, 19, 8
-; CHECK-NEXT:    jirl $zero, $ra, 0
+; CHECK-NEXT:    ret
   %and1 = and i32 %a, -1048321 ; 0xfff000ff
   %and2 = and i32 %b, 4095     ; 0x00000fff
   %shl = shl i32 %and2, 8
@@ -71,7 +71,7 @@ define i32 @pat3(i32 %a, i32 %b) nounwind {
 ; CHECK-NEXT:    andi $a1, $a1, 288
 ; CHECK-NEXT:    srli.w $a1, $a1, 4
 ; CHECK-NEXT:    bstrins.w $a0, $a1, 11, 4
-; CHECK-NEXT:    jirl $zero, $ra, 0
+; CHECK-NEXT:    ret
   %and1 = and i32 %a, -4081 ; 0xfffff00f
   %and2 = and i32 %b, 288   ; 0x00000120
   %or = or i32 %and1, %and2
@@ -84,7 +84,7 @@ define i32 @pat3_swap(i32 %a, i32 %b) nounwind {
 ; CHECK-NEXT:    andi $a1, $a1, 288
 ; CHECK-NEXT:    srli.w $a1, $a1, 4
 ; CHECK-NEXT:    bstrins.w $a0, $a1, 11, 4
-; CHECK-NEXT:    jirl $zero, $ra, 0
+; CHECK-NEXT:    ret
   %and1 = and i32 %a, -4081 ; 0xfffff00f
   %and2 = and i32 %b, 288   ; 0x00000120
   %or = or i32 %and2, %and1
@@ -96,7 +96,7 @@ define i32 @pat3_positive_mask0(i32 %a, i32 %b) nounwind {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    srli.w $a1, $a1, 28
 ; CHECK-NEXT:    bstrins.w $a0, $a1, 31, 28
-; CHECK-NEXT:    jirl $zero, $ra, 0
+; CHECK-NEXT:    ret
   %and1 = and i32 %a, 268435455  ; 0x0fffffff
   %and2 = and i32 %b, 4026531840 ; 0xf0000000
   %or = or i32 %and1, %and2
@@ -111,7 +111,7 @@ define i32 @pat4(i32 %a, i32 %b) nounwind {
 ; CHECK-LABEL: pat4:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    bstrins.w $a0, $a1, 31, 28
-; CHECK-NEXT:    jirl $zero, $ra, 0
+; CHECK-NEXT:    ret
   %and = and i32 %a, 268435455 ; 0x0fffffff
   %shl = shl i32 %b, 28
   %or = or i32 %and, %shl
@@ -122,7 +122,7 @@ define i32 @pat4_swap(i32 %a, i32 %b) nounwind {
 ; CHECK-LABEL: pat4_swap:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    bstrins.w $a0, $a1, 31, 28
-; CHECK-NEXT:    jirl $zero, $ra, 0
+; CHECK-NEXT:    ret
   %and = and i32 %a, 268435455 ; 0x0fffffff
   %shl = shl i32 %b, 28
   %or = or i32 %shl, %and
@@ -139,7 +139,7 @@ define i32 @pat5(i32 %a) nounwind {
 ; CHECK-NEXT:    lu12i.w $a1, 1
 ; CHECK-NEXT:    ori $a1, $a1, 564
 ; CHECK-NEXT:    bstrins.w $a0, $a1, 23, 8
-; CHECK-NEXT:    jirl $zero, $ra, 0
+; CHECK-NEXT:    ret
   %and = and i32 %a, 4278190335 ; 0xff0000ff
   %or = or i32 %and, 1192960    ; 0x00123400
   ret i32 %or
@@ -156,7 +156,7 @@ define i32 @pat6(i32 %c) nounwind {
 ; CHECK-NEXT:    ori $a1, $a1, 2
 ; CHECK-NEXT:    bstrins.w $a1, $a0, 27, 4
 ; CHECK-NEXT:    move $a0, $a1
-; CHECK-NEXT:    jirl $zero, $ra, 0
+; CHECK-NEXT:    ret
   %and = and i32 %c, 16777215  ; 0x00ffffff
   %shl = shl i32 %and, 4
   %or = or i32 %shl, 268435458 ; 0x10000002
@@ -172,7 +172,7 @@ define i32 @pat7(i32 %c) nounwind {
 ; CHECK-NEXT:    ori $a1, $a1, 2
 ; CHECK-NEXT:    bstrins.w $a1, $a0, 27, 4
 ; CHECK-NEXT:    move $a0, $a1
-; CHECK-NEXT:    jirl $zero, $ra, 0
+; CHECK-NEXT:    ret
   %shl = shl i32 %c, 4
   %and = and i32 %shl, 268435440 ; 0x0ffffff0
   %or = or i32 %and, 268435458   ; 0x10000002
@@ -188,7 +188,7 @@ define i32 @pat8(i32 %c) nounwind {
 ; CHECK-NEXT:    lu12i.w $a0, 65536
 ; CHECK-NEXT:    ori $a0, $a0, 2
 ; CHECK-NEXT:    bstrins.w $a0, $a1, 27, 4
-; CHECK-NEXT:    jirl $zero, $ra, 0
+; CHECK-NEXT:    ret
   %and = and i32 %c, 268435440 ; 0x0ffffff0
   %or = or i32 %and, 268435458 ; 0x10000002
   ret i32 %or
@@ -205,7 +205,7 @@ define i32 @no_bstrins_w(i32 %a) nounwind {
 ; CHECK-NEXT:    lu12i.w $a1, -3805
 ; CHECK-NEXT:    ori $a1, $a1, 1279
 ; CHECK-NEXT:    and $a0, $a0, $a1
-; CHECK-NEXT:    jirl $zero, $ra, 0
+; CHECK-NEXT:    ret
   %and = and i32 %a, 4278190335 ; 0xff0000ff
   %or = or i32 %and, 1193040    ; 0x00123450
   ret i32 %or

diff --git a/llvm/test/CodeGen/LoongArch/bstrpick_d.ll b/llvm/test/CodeGen/LoongArch/bstrpick_d.ll
index 51d4967dc3f51..e93c1391d463f 100644
--- a/llvm/test/CodeGen/LoongArch/bstrpick_d.ll
+++ b/llvm/test/CodeGen/LoongArch/bstrpick_d.ll
@@ -1,10 +1,11 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc --mtriple=loongarch64 < %s | FileCheck %s
 
 define i64 @lshr40_and255(i64 %a) {
 ; CHECK-LABEL: lshr40_and255:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    bstrpick.d $a0, $a0, 47, 40
-; CHECK-NEXT:    jirl $zero, $ra, 0
+; CHECK-NEXT:    ret
   %shr = lshr i64 %a, 40
   %and = and i64 %shr, 255
   ret i64 %and
@@ -14,7 +15,7 @@ define i64 @ashr50_and511(i64 %a) {
 ; CHECK-LABEL: ashr50_and511:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    bstrpick.d $a0, $a0, 58, 50
-; CHECK-NEXT:    jirl $zero, $ra, 0
+; CHECK-NEXT:    ret
   %shr = ashr i64 %a, 50
   %and = and i64 %shr, 511
   ret i64 %and
@@ -24,7 +25,7 @@ define i64 @zext_i32_to_i64(i32 %a) {
 ; CHECK-LABEL: zext_i32_to_i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    bstrpick.d $a0, $a0, 31, 0
-; CHECK-NEXT:    jirl $zero, $ra, 0
+; CHECK-NEXT:    ret
   %res = zext i32 %a to i64
   ret i64 %res
 }
@@ -33,7 +34,7 @@ define i64 @and8191(i64 %a) {
 ; CHECK-LABEL: and8191:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    bstrpick.d $a0, $a0, 12, 0
-; CHECK-NEXT:    jirl $zero, $ra, 0
+; CHECK-NEXT:    ret
   %and = and i64 %a, 8191
   ret i64 %and
 }
@@ -43,7 +44,7 @@ define i64 @and4095(i64 %a) {
 ; CHECK-LABEL: and4095:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    andi $a0, $a0, 4095
-; CHECK-NEXT:    jirl $zero, $ra, 0
+; CHECK-NEXT:    ret
   %and = and i64 %a, 4095
   ret i64 %and
 }
@@ -53,7 +54,7 @@ define i64 @and0xff0_lshr4(i64 %a) {
 ; CHECK-LABEL: and0xff0_lshr4:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    bstrpick.d $a0, $a0, 11, 4
-; CHECK-NEXT:    jirl $zero, $ra, 0
+; CHECK-NEXT:    ret
   %and = and i64 %a, 4080
   %shr = lshr i64 %and, 4
   ret i64 %shr
@@ -66,7 +67,7 @@ define i64 @and4080_ashr5(i64 %a) {
 ; CHECK-LABEL: and4080_ashr5:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    bstrpick.d $a0, $a0, 11, 5
-; CHECK-NEXT:    jirl $zero, $ra, 0
+; CHECK-NEXT:    ret
   %and = and i64 %a, 4080
   %shr = ashr i64 %and, 5
   ret i64 %shr
@@ -78,7 +79,7 @@ define i64 @and0xf30_lshr4(i64 %a) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    andi $a0, $a0, 3888
 ; CHECK-NEXT:    srli.d $a0, $a0, 4
-; CHECK-NEXT:    jirl $zero, $ra, 0
+; CHECK-NEXT:    ret
   %and = and i64 %a, 3888
   %shr = lshr i64 %and, 4
   ret i64 %shr
@@ -90,7 +91,7 @@ define i64 @and0xff0_lshr3(i64 %a) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    andi $a0, $a0, 4080
 ; CHECK-NEXT:    srli.d $a0, $a0, 3
-; CHECK-NEXT:    jirl $zero, $ra, 0
+; CHECK-NEXT:    ret
   %and = and i64 %a, 4080
   %shr = lshr i64 %and, 3
   ret i64 %shr

diff --git a/llvm/test/CodeGen/LoongArch/bstrpick_w.ll b/llvm/test/CodeGen/LoongArch/bstrpick_w.ll
index 92d79019a7e3a..f9027e1fb32df 100644
--- a/llvm/test/CodeGen/LoongArch/bstrpick_w.ll
+++ b/llvm/test/CodeGen/LoongArch/bstrpick_w.ll
@@ -1,10 +1,11 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc --mtriple=loongarch32 < %s | FileCheck %s
 
 define i32 @lshr10_and255(i32 %a) {
 ; CHECK-LABEL: lshr10_and255:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    bstrpick.w $a0, $a0, 17, 10
-; CHECK-NEXT:    jirl $zero, $ra, 0
+; CHECK-NEXT:    ret
   %shr = lshr i32 %a, 10
   %and = and i32 %shr, 255
   ret i32 %and
@@ -14,7 +15,7 @@ define i32 @ashr20_and511(i32 %a) {
 ; CHECK-LABEL: ashr20_and511:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    bstrpick.w $a0, $a0, 28, 20
-; CHECK-NEXT:    jirl $zero, $ra, 0
+; CHECK-NEXT:    ret
   %shr = ashr i32 %a, 20
   %and = and i32 %shr, 511
   ret i32 %and
@@ -24,7 +25,7 @@ define i32 @zext_i16_to_i32(i16 %a) {
 ; CHECK-LABEL: zext_i16_to_i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    bstrpick.w $a0, $a0, 15, 0
-; CHECK-NEXT:    jirl $zero, $ra, 0
+; CHECK-NEXT:    ret
   %res = zext i16 %a to i32
   ret i32 %res
 }
@@ -33,7 +34,7 @@ define i32 @and8191(i32 %a) {
 ; CHECK-LABEL: and8191:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    bstrpick.w $a0, $a0, 12, 0
-; CHECK-NEXT:    jirl $zero, $ra, 0
+; CHECK-NEXT:    ret
   %and = and i32 %a, 8191
   ret i32 %and
 }
@@ -43,7 +44,7 @@ define i32 @and4095(i32 %a) {
 ; CHECK-LABEL: and4095:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    andi $a0, $a0, 4095
-; CHECK-NEXT:    jirl $zero, $ra, 0
+; CHECK-NEXT:    ret
   %and = and i32 %a, 4095
   ret i32 %and
 }
@@ -53,7 +54,7 @@ define i32 @and0xff0_lshr4(i32 %a) {
 ; CHECK-LABEL: and0xff0_lshr4:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    bstrpick.w $a0, $a0, 11, 4
-; CHECK-NEXT:    jirl $zero, $ra, 0
+; CHECK-NEXT:    ret
   %and = and i32 %a, 4080
   %shr = lshr i32 %and, 4
   ret i32 %shr
@@ -66,7 +67,7 @@ define i32 @and4080_ashr5(i32 %a) {
 ; CHECK-LABEL: and4080_ashr5:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    bstrpick.w $a0, $a0, 11, 5
-; CHECK-NEXT:    jirl $zero, $ra, 0
+; CHECK-NEXT:    ret
   %and = and i32 %a, 4080
   %shr = ashr i32 %and, 5
   ret i32 %shr
@@ -78,7 +79,7 @@ define i32 @and0xf30_lshr4(i32 %a) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    andi $a0, $a0, 3888
 ; CHECK-NEXT:    srli.w $a0, $a0, 4
-; CHECK-NEXT:    jirl $zero, $ra, 0
+; CHECK-NEXT:    ret
   %and = and i32 %a, 3888
   %shr = lshr i32 %and, 4
   ret i32 %shr
@@ -90,7 +91,7 @@ define i32 @and0xff0_lshr3(i32 %a) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    andi $a0, $a0, 4080
 ; CHECK-NEXT:    srli.w $a0, $a0, 3
-; CHECK-NEXT:    jirl $zero, $ra, 0
+; CHECK-NEXT:    ret
   %and = and i32 %a, 4080
   %shr = lshr i32 %and, 3
   ret i32 %shr

diff --git a/llvm/test/CodeGen/LoongArch/bswap-bitreverse.ll b/llvm/test/CodeGen/LoongArch/bswap-bitreverse.ll
index 3bdb7a98047e4..c99adfbb0574f 100644
--- a/llvm/test/CodeGen/LoongArch/bswap-bitreverse.ll
+++ b/llvm/test/CodeGen/LoongArch/bswap-bitreverse.ll
@@ -17,14 +17,14 @@ define i16 @test_bswap_bitreverse_i16(i16 %a) nounwind {
 ; LA32-NEXT:    revb.2h $a0, $a0
 ; LA32-NEXT:    bitrev.w $a0, $a0
 ; LA32-NEXT:    srli.w $a0, $a0, 16
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: test_bswap_bitreverse_i16:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    revb.2h $a0, $a0
 ; LA64-NEXT:    bitrev.d $a0, $a0
 ; LA64-NEXT:    srli.d $a0, $a0, 48
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %tmp = call i16 @llvm.bswap.i16(i16 %a)
   %tmp2 = call i16 @llvm.bitreverse.i16(i16 %tmp)
   ret i16 %tmp2
@@ -34,12 +34,12 @@ define i32 @test_bswap_bitreverse_i32(i32 %a) nounwind {
 ; LA32-LABEL: test_bswap_bitreverse_i32:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    bitrev.4b $a0, $a0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: test_bswap_bitreverse_i32:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    bitrev.4b $a0, $a0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %tmp = call i32 @llvm.bswap.i32(i32 %a)
   %tmp2 = call i32 @llvm.bitreverse.i32(i32 %tmp)
   ret i32 %tmp2
@@ -50,12 +50,12 @@ define i64 @test_bswap_bitreverse_i64(i64 %a) nounwind {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    bitrev.4b $a0, $a0
 ; LA32-NEXT:    bitrev.4b $a1, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: test_bswap_bitreverse_i64:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    bitrev.8b $a0, $a0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %tmp = call i64 @llvm.bswap.i64(i64 %a)
   %tmp2 = call i64 @llvm.bitreverse.i64(i64 %tmp)
   ret i64 %tmp2
@@ -67,14 +67,14 @@ define i16 @test_bitreverse_bswap_i16(i16 %a) nounwind {
 ; LA32-NEXT:    revb.2h $a0, $a0
 ; LA32-NEXT:    bitrev.w $a0, $a0
 ; LA32-NEXT:    srli.w $a0, $a0, 16
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: test_bitreverse_bswap_i16:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    revb.2h $a0, $a0
 ; LA64-NEXT:    bitrev.d $a0, $a0
 ; LA64-NEXT:    srli.d $a0, $a0, 48
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %tmp = call i16 @llvm.bitreverse.i16(i16 %a)
   %tmp2 = call i16 @llvm.bswap.i16(i16 %tmp)
   ret i16 %tmp2
@@ -84,12 +84,12 @@ define i32 @test_bitreverse_bswap_i32(i32 %a) nounwind {
 ; LA32-LABEL: test_bitreverse_bswap_i32:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    bitrev.4b $a0, $a0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: test_bitreverse_bswap_i32:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    bitrev.4b $a0, $a0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %tmp = call i32 @llvm.bitreverse.i32(i32 %a)
   %tmp2 = call i32 @llvm.bswap.i32(i32 %tmp)
   ret i32 %tmp2
@@ -100,12 +100,12 @@ define i64 @test_bitreverse_bswap_i64(i64 %a) nounwind {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    bitrev.4b $a0, $a0
 ; LA32-NEXT:    bitrev.4b $a1, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: test_bitreverse_bswap_i64:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    bitrev.8b $a0, $a0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %tmp = call i64 @llvm.bitreverse.i64(i64 %a)
   %tmp2 = call i64 @llvm.bswap.i64(i64 %tmp)
   ret i64 %tmp2
@@ -118,7 +118,7 @@ define i32 @pr55484(i32 %0) {
 ; LA32-NEXT:    srli.w $a0, $a0, 8
 ; LA32-NEXT:    or $a0, $a0, $a1
 ; LA32-NEXT:    ext.w.h $a0, $a0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: pr55484:
 ; LA64:       # %bb.0:
@@ -126,7 +126,7 @@ define i32 @pr55484(i32 %0) {
 ; LA64-NEXT:    srli.d $a0, $a0, 8
 ; LA64-NEXT:    or $a0, $a0, $a1
 ; LA64-NEXT:    ext.w.h $a0, $a0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %2 = lshr i32 %0, 8
   %3 = shl i32 %0, 8
   %4 = or i32 %2, %3

diff --git a/llvm/test/CodeGen/LoongArch/bswap.ll b/llvm/test/CodeGen/LoongArch/bswap.ll
index 8c9faed964fd1..1ef73b4f1c0bf 100644
--- a/llvm/test/CodeGen/LoongArch/bswap.ll
+++ b/llvm/test/CodeGen/LoongArch/bswap.ll
@@ -15,12 +15,12 @@ define i16 @test_bswap_i16(i16 %a) nounwind {
 ; LA32-LABEL: test_bswap_i16:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    revb.2h $a0, $a0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: test_bswap_i16:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    revb.2h $a0, $a0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %tmp = call i16 @llvm.bswap.i16(i16 %a)
   ret i16 %tmp
 }
@@ -30,12 +30,12 @@ define i32 @test_bswap_i32(i32 %a) nounwind {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    revb.2h $a0, $a0
 ; LA32-NEXT:    rotri.w $a0, $a0, 16
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: test_bswap_i32:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    revb.2w $a0, $a0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %tmp = call i32 @llvm.bswap.i32(i32 %a)
   ret i32 %tmp
 }
@@ -48,12 +48,12 @@ define i64 @test_bswap_i64(i64 %a) nounwind {
 ; LA32-NEXT:    revb.2h $a0, $a0
 ; LA32-NEXT:    rotri.w $a1, $a0, 16
 ; LA32-NEXT:    move $a0, $a2
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: test_bswap_i64:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    revb.d $a0, $a0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %tmp = call i64 @llvm.bswap.i64(i64 %a)
   ret i64 %tmp
 }
@@ -71,13 +71,13 @@ define i48 @test_bswap_i48(i48 %a) nounwind {
 ; LA32-NEXT:    slli.w $a0, $a2, 16
 ; LA32-NEXT:    or $a0, $a1, $a0
 ; LA32-NEXT:    srli.w $a1, $a2, 16
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: test_bswap_i48:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    revb.d $a0, $a0
 ; LA64-NEXT:    srli.d $a0, $a0, 16
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %tmp = call i48 @llvm.bswap.i48(i48 %a)
   ret i48 %tmp
 }
@@ -104,7 +104,7 @@ define i80 @test_bswap_i80(i80 %a) nounwind {
 ; LA32-NEXT:    srli.w $a1, $a1, 16
 ; LA32-NEXT:    or $a1, $a1, $a2
 ; LA32-NEXT:    st.w $a1, $a0, 0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: test_bswap_i80:
 ; LA64:       # %bb.0:
@@ -114,7 +114,7 @@ define i80 @test_bswap_i80(i80 %a) nounwind {
 ; LA64-NEXT:    slli.d $a0, $a2, 16
 ; LA64-NEXT:    or $a0, $a1, $a0
 ; LA64-NEXT:    srli.d $a1, $a2, 48
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %tmp = call i80 @llvm.bswap.i80(i80 %a)
   ret i80 %tmp
 }
@@ -138,14 +138,14 @@ define i128 @test_bswap_i128(i128 %a) nounwind {
 ; LA32-NEXT:    revb.2h $a1, $a1
 ; LA32-NEXT:    rotri.w $a1, $a1, 16
 ; LA32-NEXT:    st.w $a1, $a0, 0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: test_bswap_i128:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    revb.d $a2, $a1
 ; LA64-NEXT:    revb.d $a1, $a0
 ; LA64-NEXT:    move $a0, $a2
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %tmp = call i128 @llvm.bswap.i128(i128 %a)
   ret i128 %tmp
 }

diff --git a/llvm/test/CodeGen/LoongArch/calling-conv-lp64d.ll b/llvm/test/CodeGen/LoongArch/calling-conv-lp64d.ll
index 2bcf45d4f0ebc..5876786720351 100644
--- a/llvm/test/CodeGen/LoongArch/calling-conv-lp64d.ll
+++ b/llvm/test/CodeGen/LoongArch/calling-conv-lp64d.ll
@@ -7,7 +7,7 @@ define i64 @callee_i128_in_regs(i64 %a, i128 %b) nounwind {
 ; CHECK-LABEL: callee_i128_in_regs:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    add.d $a0, $a0, $a1
-; CHECK-NEXT:    jirl $zero, $ra, 0
+; CHECK-NEXT:    ret
   %b_trunc = trunc i128 %b to i64
   %1 = add i64 %a, %b_trunc
   ret i64 %1
@@ -24,7 +24,7 @@ define i64 @caller_i128_in_regs() nounwind {
 ; CHECK-NEXT:    bl callee_i128_in_regs
 ; CHECK-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
 ; CHECK-NEXT:    addi.d $sp, $sp, 16
-; CHECK-NEXT:    jirl $zero, $ra, 0
+; CHECK-NEXT:    ret
   %1 = call i64 @callee_i128_in_regs(i64 1, i128 2)
   ret i64 %1
 }
@@ -48,7 +48,7 @@ define i64 @callee_many_scalars(i8 %a, i16 %b, i32 %c, i64 %d, i128 %e, i64 %f,
 ; CHECK-NEXT:    add.d $a0, $a0, $a6
 ; CHECK-NEXT:    ld.d $a1, $sp, 8
 ; CHECK-NEXT:    add.d $a0, $a0, $a1
-; CHECK-NEXT:    jirl $zero, $ra, 0
+; CHECK-NEXT:    ret
   %a_ext = zext i8 %a to i64
   %b_ext = zext i16 %b to i64
   %c_ext = zext i32 %c to i64
@@ -82,7 +82,7 @@ define i64 @caller_many_scalars() nounwind {
 ; CHECK-NEXT:    bl callee_many_scalars
 ; CHECK-NEXT:    ld.d $ra, $sp, 24 # 8-byte Folded Reload
 ; CHECK-NEXT:    addi.d $sp, $sp, 32
-; CHECK-NEXT:    jirl $zero, $ra, 0
+; CHECK-NEXT:    ret
   %1 = call i64 @callee_many_scalars(i8 1, i16 2, i32 3, i64 4, i128 5, i64 6, i128 7, i64 8)
   ret i64 %1
 }
@@ -108,7 +108,7 @@ define i64 @callee_large_scalars(i256 %a, i256 %b) nounwind {
 ; CHECK-NEXT:    or $a0, $a0, $a3
 ; CHECK-NEXT:    or $a0, $a0, $a2
 ; CHECK-NEXT:    sltui $a0, $a0, 1
-; CHECK-NEXT:    jirl $zero, $ra, 0
+; CHECK-NEXT:    ret
   %1 = icmp eq i256 %a, %b
   %2 = zext i1 %1 to i64
   ret i64 %2
@@ -134,7 +134,7 @@ define i64 @caller_large_scalars() nounwind {
 ; CHECK-NEXT:    bl callee_large_scalars
 ; CHECK-NEXT:    ld.d $ra, $sp, 72 # 8-byte Folded Reload
 ; CHECK-NEXT:    addi.d $sp, $sp, 80
-; CHECK-NEXT:    jirl $zero, $ra, 0
+; CHECK-NEXT:    ret
   %1 = call i64 @callee_large_scalars(i256 1, i256 2)
   ret i64 %1
 }
@@ -163,7 +163,7 @@ define i64 @callee_large_scalars_exhausted_regs(i64 %a, i64 %b, i64 %c, i64 %d,
 ; CHECK-NEXT:    or $a0, $a0, $a2
 ; CHECK-NEXT:    or $a0, $a0, $a1
 ; CHECK-NEXT:    sltui $a0, $a0, 1
-; CHECK-NEXT:    jirl $zero, $ra, 0
+; CHECK-NEXT:    ret
   %1 = icmp eq i256 %h, %j
   %2 = zext i1 %1 to i64
   ret i64 %2
@@ -199,7 +199,7 @@ define i64 @caller_large_scalars_exhausted_regs() nounwind {
 ; CHECK-NEXT:    bl callee_large_scalars_exhausted_regs
 ; CHECK-NEXT:    ld.d $ra, $sp, 88 # 8-byte Folded Reload
 ; CHECK-NEXT:    addi.d $sp, $sp, 96
-; CHECK-NEXT:    jirl $zero, $ra, 0
+; CHECK-NEXT:    ret
   %1 = call i64 @callee_large_scalars_exhausted_regs(
       i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7, i256 8, i64 9,
       i256 10)
@@ -216,7 +216,7 @@ define i64 @callee_large_struct(ptr byval(%struct.large) align 8 %a) nounwind {
 ; CHECK-NEXT:    ld.d $a1, $a0, 24
 ; CHECK-NEXT:    ld.d $a0, $a0, 0
 ; CHECK-NEXT:    add.d $a0, $a0, $a1
-; CHECK-NEXT:    jirl $zero, $ra, 0
+; CHECK-NEXT:    ret
   %1 = getelementptr inbounds %struct.large, ptr %a, i64 0, i32 0
   %2 = getelementptr inbounds %struct.large, ptr %a, i64 0, i32 3
   %3 = load i64, ptr %1
@@ -246,7 +246,7 @@ define i64 @caller_large_struct() nounwind {
 ; CHECK-NEXT:    bl callee_large_struct
 ; CHECK-NEXT:    ld.d $ra, $sp, 72 # 8-byte Folded Reload
 ; CHECK-NEXT:    addi.d $sp, $sp, 80
-; CHECK-NEXT:    jirl $zero, $ra, 0
+; CHECK-NEXT:    ret
   %ls = alloca %struct.large, align 8
   %a = getelementptr inbounds %struct.large, ptr %ls, i64 0, i32 0
   store i64 1, ptr %a
@@ -267,7 +267,7 @@ define i128 @callee_small_scalar_ret() nounwind {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    addi.w $a0, $zero, -1
 ; CHECK-NEXT:    move $a1, $a0
-; CHECK-NEXT:    jirl $zero, $ra, 0
+; CHECK-NEXT:    ret
   ret i128 -1
 }
 
@@ -283,7 +283,7 @@ define i64 @caller_small_scalar_ret() nounwind {
 ; CHECK-NEXT:    sltui $a0, $a0, 1
 ; CHECK-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
 ; CHECK-NEXT:    addi.d $sp, $sp, 16
-; CHECK-NEXT:    jirl $zero, $ra, 0
+; CHECK-NEXT:    ret
   %1 = call i128 @callee_small_scalar_ret()
   %2 = icmp eq i128 -2, %1
   %3 = zext i1 %2 to i64
@@ -299,7 +299,7 @@ define %struct.small @callee_small_struct_ret() nounwind {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ori $a0, $zero, 1
 ; CHECK-NEXT:    move $a1, $zero
-; CHECK-NEXT:    jirl $zero, $ra, 0
+; CHECK-NEXT:    ret
   ret %struct.small { i64 1, ptr null }
 }
 
@@ -312,7 +312,7 @@ define i64 @caller_small_struct_ret() nounwind {
 ; CHECK-NEXT:    add.d $a0, $a0, $a1
 ; CHECK-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
 ; CHECK-NEXT:    addi.d $sp, $sp, 16
-; CHECK-NEXT:    jirl $zero, $ra, 0
+; CHECK-NEXT:    ret
   %1 = call %struct.small @callee_small_struct_ret()
   %2 = extractvalue %struct.small %1, 0
   %3 = extractvalue %struct.small %1, 1
@@ -333,7 +333,7 @@ define i256 @callee_large_scalar_ret() nounwind {
 ; CHECK-NEXT:    lu12i.w $a1, -30141
 ; CHECK-NEXT:    ori $a1, $a1, 747
 ; CHECK-NEXT:    st.d $a1, $a0, 0
-; CHECK-NEXT:    jirl $zero, $ra, 0
+; CHECK-NEXT:    ret
   ret i256 -123456789
 }
 
@@ -346,7 +346,7 @@ define void @caller_large_scalar_ret() nounwind {
 ; CHECK-NEXT:    bl callee_large_scalar_ret
 ; CHECK-NEXT:    ld.d $ra, $sp, 40 # 8-byte Folded Reload
 ; CHECK-NEXT:    addi.d $sp, $sp, 48
-; CHECK-NEXT:    jirl $zero, $ra, 0
+; CHECK-NEXT:    ret
   %1 = call i256 @callee_large_scalar_ret()
   ret void
 }
@@ -368,7 +368,7 @@ define void @callee_large_struct_ret(ptr noalias sret(%struct.large) %agg.result
 ; CHECK-NEXT:    st.w $zero, $a0, 4
 ; CHECK-NEXT:    ori $a1, $zero, 1
 ; CHECK-NEXT:    st.w $a1, $a0, 0
-; CHECK-NEXT:    jirl $zero, $ra, 0
+; CHECK-NEXT:    ret
   %a = getelementptr inbounds %struct.large, ptr %agg.result, i64 0, i32 0
   store i64 1, ptr %a, align 4
   %b = getelementptr inbounds %struct.large, ptr %agg.result, i64 0, i32 1
@@ -392,7 +392,7 @@ define i64 @caller_large_struct_ret() nounwind {
 ; CHECK-NEXT:    add.d $a0, $a1, $a0
 ; CHECK-NEXT:    ld.d $ra, $sp, 40 # 8-byte Folded Reload
 ; CHECK-NEXT:    addi.d $sp, $sp, 48
-; CHECK-NEXT:    jirl $zero, $ra, 0
+; CHECK-NEXT:    ret
   %1 = alloca %struct.large
   call void @callee_large_struct_ret(ptr sret(%struct.large) %1)
   %2 = getelementptr inbounds %struct.large, ptr %1, i64 0, i32 0
@@ -414,7 +414,7 @@ define i64 @callee_float_in_fpr(i64 %a, float %b, double %c) nounwind {
 ; CHECK-NEXT:    ftintrz.l.d $fa0, $fa1
 ; CHECK-NEXT:    movfr2gr.d $a1, $fa0
 ; CHECK-NEXT:    add.d $a0, $a0, $a1
-; CHECK-NEXT:    jirl $zero, $ra, 0
+; CHECK-NEXT:    ret
   %b_fptosi = fptosi float %b to i64
   %c_fptosi = fptosi double %c to i64
   %1 = add i64 %a, %b_fptosi
@@ -433,7 +433,7 @@ define i64 @caller_float_in_fpr() nounwind {
 ; CHECK-NEXT:    bl callee_float_in_fpr
 ; CHECK-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
 ; CHECK-NEXT:    addi.d $sp, $sp, 16
-; CHECK-NEXT:    jirl $zero, $ra, 0
+; CHECK-NEXT:    ret
   %1 = call i64 @callee_float_in_fpr(i64 1, float 0.0, double 0.0)
   ret i64 %1
 }
@@ -450,7 +450,7 @@ define i64 @callee_double_in_gpr_exhausted_fprs(double %a, double %b, double %c,
 ; CHECK-NEXT:    ftintrz.l.d $fa0, $fa0
 ; CHECK-NEXT:    movfr2gr.d $a0, $fa0
 ; CHECK-NEXT:    add.d $a0, $a1, $a0
-; CHECK-NEXT:    jirl $zero, $ra, 0
+; CHECK-NEXT:    ret
   %h_fptosi = fptosi double %h to i64
   %i_fptosi = fptosi double %i to i64
   %1 = add i64 %h_fptosi, %i_fptosi
@@ -492,7 +492,7 @@ define i64 @caller_double_in_gpr_exhausted_fprs() nounwind {
 ; CHECK-NEXT:    bl callee_double_in_gpr_exhausted_fprs
 ; CHECK-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
 ; CHECK-NEXT:    addi.d $sp, $sp, 16
-; CHECK-NEXT:    jirl $zero, $ra, 0
+; CHECK-NEXT:    ret
   %1 = call i64 @callee_double_in_gpr_exhausted_fprs(
       double 1.0, double 2.0, double 3.0, double 4.0, double 5.0, double 6.0,
       double 7.0, double 8.0, double 9.0)
@@ -507,7 +507,7 @@ define double @callee_double_ret() nounwind {
 ; CHECK-NEXT:    addi.d $a0, $zero, 1
 ; CHECK-NEXT:    movgr2fr.d $fa0, $a0
 ; CHECK-NEXT:    ffint.d.l $fa0, $fa0
-; CHECK-NEXT:    jirl $zero, $ra, 0
+; CHECK-NEXT:    ret
   ret double 1.0
 }
 
@@ -520,7 +520,7 @@ define i64 @caller_double_ret() nounwind {
 ; CHECK-NEXT:    movfr2gr.d $a0, $fa0
 ; CHECK-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
 ; CHECK-NEXT:    addi.d $sp, $sp, 16
-; CHECK-NEXT:    jirl $zero, $ra, 0
+; CHECK-NEXT:    ret
   %1 = call double @callee_double_ret()
   %2 = bitcast double %1 to i64
   ret i64 %2

diff --git a/llvm/test/CodeGen/LoongArch/double-imm.ll b/llvm/test/CodeGen/LoongArch/double-imm.ll
index a7782cf85954d..f21deb0c00389 100644
--- a/llvm/test/CodeGen/LoongArch/double-imm.ll
+++ b/llvm/test/CodeGen/LoongArch/double-imm.ll
@@ -7,12 +7,12 @@ define double @f64_positive_zero() nounwind {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    movgr2fr.w $fa0, $zero
 ; LA32-NEXT:    movgr2frh.w $fa0, $zero
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: f64_positive_zero:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    movgr2fr.d $fa0, $zero
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   ret double 0.0
 }
 
@@ -22,13 +22,13 @@ define double @f64_negative_zero() nounwind {
 ; LA32-NEXT:    movgr2fr.w $fa0, $zero
 ; LA32-NEXT:    movgr2frh.w $fa0, $zero
 ; LA32-NEXT:    fneg.d $fa0, $fa0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: f64_negative_zero:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    movgr2fr.d $fa0, $zero
 ; LA64-NEXT:    fneg.d $fa0, $fa0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   ret double -0.0
 }
 
@@ -38,14 +38,14 @@ define double @f64_constant_pi() nounwind {
 ; LA32-NEXT:    pcalau12i $a0, .LCPI2_0
 ; LA32-NEXT:    addi.w $a0, $a0, .LCPI2_0
 ; LA32-NEXT:    fld.d $fa0, $a0, 0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: f64_constant_pi:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    pcalau12i $a0, .LCPI2_0
 ; LA64-NEXT:    addi.d $a0, $a0, .LCPI2_0
 ; LA64-NEXT:    fld.d $fa0, $a0, 0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   ret double 3.1415926535897931159979634685441851615905761718750
 }
 
@@ -57,7 +57,7 @@ define double @f64_add_fimm1(double %a) nounwind {
 ; LA32-NEXT:    ffint.s.w $fa1, $fa1
 ; LA32-NEXT:    fcvt.d.s $fa1, $fa1
 ; LA32-NEXT:    fadd.d $fa0, $fa0, $fa1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: f64_add_fimm1:
 ; LA64:       # %bb.0:
@@ -65,7 +65,7 @@ define double @f64_add_fimm1(double %a) nounwind {
 ; LA64-NEXT:    movgr2fr.d $fa1, $a0
 ; LA64-NEXT:    ffint.d.l $fa1, $fa1
 ; LA64-NEXT:    fadd.d $fa0, $fa0, $fa1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %1 = fadd double %a, 1.0
   ret double %1
 }
@@ -77,13 +77,13 @@ define double @f64_positive_fimm1() nounwind {
 ; LA32-NEXT:    movgr2fr.w $fa0, $a0
 ; LA32-NEXT:    ffint.s.w $fa0, $fa0
 ; LA32-NEXT:    fcvt.d.s $fa0, $fa0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: f64_positive_fimm1:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    addi.d $a0, $zero, 1
 ; LA64-NEXT:    movgr2fr.d $fa0, $a0
 ; LA64-NEXT:    ffint.d.l $fa0, $fa0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   ret double 1.0
 }

diff --git a/llvm/test/CodeGen/LoongArch/float-imm.ll b/llvm/test/CodeGen/LoongArch/float-imm.ll
index a6b542c29ed73..d9c184571caab 100644
--- a/llvm/test/CodeGen/LoongArch/float-imm.ll
+++ b/llvm/test/CodeGen/LoongArch/float-imm.ll
@@ -6,12 +6,12 @@ define float @f32_positive_zero() nounwind {
 ; LA32-LABEL: f32_positive_zero:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    movgr2fr.w $fa0, $zero
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: f32_positive_zero:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    movgr2fr.w $fa0, $zero
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   ret float 0.0
 }
 
@@ -20,13 +20,13 @@ define float @f32_negative_zero() nounwind {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    movgr2fr.w $fa0, $zero
 ; LA32-NEXT:    fneg.s $fa0, $fa0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: f32_negative_zero:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    movgr2fr.w $fa0, $zero
 ; LA64-NEXT:    fneg.s $fa0, $fa0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   ret float -0.0
 }
 
@@ -36,14 +36,14 @@ define float @f32_constant_pi() nounwind {
 ; LA32-NEXT:    pcalau12i $a0, .LCPI2_0
 ; LA32-NEXT:    addi.w $a0, $a0, .LCPI2_0
 ; LA32-NEXT:    fld.s $fa0, $a0, 0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: f32_constant_pi:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    pcalau12i $a0, .LCPI2_0
 ; LA64-NEXT:    addi.d $a0, $a0, .LCPI2_0
 ; LA64-NEXT:    fld.s $fa0, $a0, 0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   ret float 3.14159274101257324218750
 }
 
@@ -54,7 +54,7 @@ define float @f32_add_fimm1(float %a) nounwind {
 ; LA32-NEXT:    movgr2fr.w $fa1, $a0
 ; LA32-NEXT:    ffint.s.w $fa1, $fa1
 ; LA32-NEXT:    fadd.s $fa0, $fa0, $fa1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: f32_add_fimm1:
 ; LA64:       # %bb.0:
@@ -62,7 +62,7 @@ define float @f32_add_fimm1(float %a) nounwind {
 ; LA64-NEXT:    movgr2fr.w $fa1, $a0
 ; LA64-NEXT:    ffint.s.w $fa1, $fa1
 ; LA64-NEXT:    fadd.s $fa0, $fa0, $fa1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %1 = fadd float %a, 1.0
   ret float %1
 }
@@ -73,13 +73,13 @@ define float @f32_positive_fimm1() nounwind {
 ; LA32-NEXT:    addi.w $a0, $zero, 1
 ; LA32-NEXT:    movgr2fr.w $fa0, $a0
 ; LA32-NEXT:    ffint.s.w $fa0, $fa0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: f32_positive_fimm1:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    addi.w $a0, $zero, 1
 ; LA64-NEXT:    movgr2fr.w $fa0, $a0
 ; LA64-NEXT:    ffint.s.w $fa0, $fa0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   ret float 1.0
 }

diff --git a/llvm/test/CodeGen/LoongArch/frame.ll b/llvm/test/CodeGen/LoongArch/frame.ll
index 04f59d2827539..d80c6dd63f676 100644
--- a/llvm/test/CodeGen/LoongArch/frame.ll
+++ b/llvm/test/CodeGen/LoongArch/frame.ll
@@ -16,7 +16,7 @@ define i32 @test() nounwind {
 ; CHECK-NEXT:    move $a0, $zero
 ; CHECK-NEXT:    ld.d $ra, $sp, 24 # 8-byte Folded Reload
 ; CHECK-NEXT:    addi.d $sp, $sp, 32
-; CHECK-NEXT:    jirl $zero, $ra, 0
+; CHECK-NEXT:    ret
   %key = alloca %struct.key_t, align 4
   call void @llvm.memset.p0i8.i64(ptr %key, i8 0, i64 20, i1 false)
   %1 = getelementptr inbounds %struct.key_t, ptr %key, i64 0, i32 1, i64 0

diff --git a/llvm/test/CodeGen/LoongArch/imm.ll b/llvm/test/CodeGen/LoongArch/imm.ll
index fb0dcf21f231e..f8b7a61d60973 100644
--- a/llvm/test/CodeGen/LoongArch/imm.ll
+++ b/llvm/test/CodeGen/LoongArch/imm.ll
@@ -1,10 +1,11 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc --mtriple=loongarch64 < %s | FileCheck %s
 
 define i64 @imm0() {
 ; CHECK-LABEL: imm0:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    move $a0, $zero
-; CHECK-NEXT:    jirl $zero, $ra, 0
+; CHECK-NEXT:    ret
   ret i64 0
 }
 
@@ -12,7 +13,7 @@ define i64 @imm7ff0000000000000() {
 ; CHECK-LABEL: imm7ff0000000000000:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lu52i.d $a0, $zero, 2047
-; CHECK-NEXT:    jirl $zero, $ra, 0
+; CHECK-NEXT:    ret
   ret i64 9218868437227405312
 }
 
@@ -20,7 +21,7 @@ define i64 @imm0000000000000fff() {
 ; CHECK-LABEL: imm0000000000000fff:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ori $a0, $zero, 4095
-; CHECK-NEXT:    jirl $zero, $ra, 0
+; CHECK-NEXT:    ret
   ret i64 4095
 }
 
@@ -29,7 +30,7 @@ define i64 @imm0007ffff00000800() {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ori $a0, $zero, 2048
 ; CHECK-NEXT:    lu32i.d $a0, 524287
-; CHECK-NEXT:    jirl $zero, $ra, 0
+; CHECK-NEXT:    ret
   ret i64 2251795518720000
 }
 
@@ -38,7 +39,7 @@ define i64 @immfff0000000000fff() {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ori $a0, $zero, 4095
 ; CHECK-NEXT:    lu52i.d $a0, $a0, -1
-; CHECK-NEXT:    jirl $zero, $ra, 0
+; CHECK-NEXT:    ret
   ret i64 -4503599627366401
 }
 
@@ -48,7 +49,7 @@ define i64 @imm0008000000000fff() {
 ; CHECK-NEXT:    ori $a0, $zero, 4095
 ; CHECK-NEXT:    lu32i.d $a0, -524288
 ; CHECK-NEXT:    lu52i.d $a0, $a0, 0
-; CHECK-NEXT:    jirl $zero, $ra, 0
+; CHECK-NEXT:    ret
   ret i64 2251799813689343
 }
 
@@ -56,7 +57,7 @@ define i64 @immfffffffffffff800() {
 ; CHECK-LABEL: immfffffffffffff800:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    addi.w $a0, $zero, -2048
-; CHECK-NEXT:    jirl $zero, $ra, 0
+; CHECK-NEXT:    ret
   ret i64 -2048
 }
 
@@ -65,7 +66,7 @@ define i64 @imm00000000fffff800() {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    addi.w $a0, $zero, -2048
 ; CHECK-NEXT:    lu32i.d $a0, 0
-; CHECK-NEXT:    jirl $zero, $ra, 0
+; CHECK-NEXT:    ret
   ret i64 4294965248
 }
 
@@ -74,7 +75,7 @@ define i64 @imm000ffffffffff800() {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    addi.w $a0, $zero, -2048
 ; CHECK-NEXT:    lu52i.d $a0, $a0, 0
-; CHECK-NEXT:    jirl $zero, $ra, 0
+; CHECK-NEXT:    ret
   ret i64 4503599627368448
 }
 
@@ -84,7 +85,7 @@ define i64 @imm00080000fffff800() {
 ; CHECK-NEXT:    addi.w $a0, $zero, -2048
 ; CHECK-NEXT:    lu32i.d $a0, -524288
 ; CHECK-NEXT:    lu52i.d $a0, $a0, 0
-; CHECK-NEXT:    jirl $zero, $ra, 0
+; CHECK-NEXT:    ret
   ret i64 2251804108650496
 }
 
@@ -92,7 +93,7 @@ define i64 @imm000000007ffff000() {
 ; CHECK-LABEL: imm000000007ffff000:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lu12i.w $a0, 524287
-; CHECK-NEXT:    jirl $zero, $ra, 0
+; CHECK-NEXT:    ret
   ret i64 2147479552
 }
 
@@ -101,7 +102,7 @@ define i64 @imm0000000080000000() {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lu12i.w $a0, -524288
 ; CHECK-NEXT:    lu32i.d $a0, 0
-; CHECK-NEXT:    jirl $zero, $ra, 0
+; CHECK-NEXT:    ret
   ret i64 2147483648
 }
 
@@ -110,7 +111,7 @@ define i64 @imm000ffffffffff000() {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lu12i.w $a0, -1
 ; CHECK-NEXT:    lu52i.d $a0, $a0, 0
-; CHECK-NEXT:    jirl $zero, $ra, 0
+; CHECK-NEXT:    ret
   ret i64 4503599627366400
 }
 
@@ -120,7 +121,7 @@ define i64 @imm7ff0000080000000() {
 ; CHECK-NEXT:    lu12i.w $a0, -524288
 ; CHECK-NEXT:    lu32i.d $a0, 0
 ; CHECK-NEXT:    lu52i.d $a0, $a0, 2047
-; CHECK-NEXT:    jirl $zero, $ra, 0
+; CHECK-NEXT:    ret
   ret i64 9218868439374888960
 }
 
@@ -129,7 +130,7 @@ define i64 @immffffffff80000800() {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lu12i.w $a0, -524288
 ; CHECK-NEXT:    ori $a0, $a0, 2048
-; CHECK-NEXT:    jirl $zero, $ra, 0
+; CHECK-NEXT:    ret
   ret i64 -2147481600
 }
 
@@ -139,7 +140,7 @@ define i64 @immffffffff7ffff800() {
 ; CHECK-NEXT:    lu12i.w $a0, 524287
 ; CHECK-NEXT:    ori $a0, $a0, 2048
 ; CHECK-NEXT:    lu32i.d $a0, -1
-; CHECK-NEXT:    jirl $zero, $ra, 0
+; CHECK-NEXT:    ret
   ret i64 -2147485696
 }
 
@@ -149,7 +150,7 @@ define i64 @imm7fffffff800007ff() {
 ; CHECK-NEXT:    lu12i.w $a0, -524288
 ; CHECK-NEXT:    ori $a0, $a0, 2047
 ; CHECK-NEXT:    lu52i.d $a0, $a0, 2047
-; CHECK-NEXT:    jirl $zero, $ra, 0
+; CHECK-NEXT:    ret
   ret i64 9223372034707294207
 }
 
@@ -160,6 +161,6 @@ define i64 @imm0008000080000800() {
 ; CHECK-NEXT:    ori $a0, $a0, 2048
 ; CHECK-NEXT:    lu32i.d $a0, -524288
 ; CHECK-NEXT:    lu52i.d $a0, $a0, 0
-; CHECK-NEXT:    jirl $zero, $ra, 0
+; CHECK-NEXT:    ret
   ret i64 2251801961170944
 }

diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/add.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/add.ll
index bfa1a59756b81..d96adb250eb1d 100644
--- a/llvm/test/CodeGen/LoongArch/ir-instruction/add.ll
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/add.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc --mtriple=loongarch32 < %s | FileCheck %s --check-prefix=LA32
 ; RUN: llc --mtriple=loongarch64 < %s | FileCheck %s --check-prefix=LA64
 
@@ -7,12 +8,12 @@ define i1 @add_i1(i1 %x, i1 %y) {
 ; LA32-LABEL: add_i1:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    add.w $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: add_i1:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    add.d $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %add = add i1 %x, %y
   ret i1 %add
 }
@@ -21,12 +22,12 @@ define i8 @add_i8(i8 %x, i8 %y) {
 ; LA32-LABEL: add_i8:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    add.w $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: add_i8:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    add.d $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %add = add i8 %x, %y
   ret i8 %add
 }
@@ -35,12 +36,12 @@ define i16 @add_i16(i16 %x, i16 %y) {
 ; LA32-LABEL: add_i16:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    add.w $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: add_i16:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    add.d $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %add = add i16 %x, %y
   ret i16 %add
 }
@@ -49,12 +50,12 @@ define i32 @add_i32(i32 %x, i32 %y) {
 ; LA32-LABEL: add_i32:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    add.w $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: add_i32:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    add.d $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %add = add i32 %x, %y
   ret i32 %add
 }
@@ -65,12 +66,12 @@ define signext i32 @add_i32_sext(i32 %x, i32 %y) {
 ; LA32-LABEL: add_i32_sext:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    add.w $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: add_i32_sext:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    add.w $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %add = add i32 %x, %y
   ret i32 %add
 }
@@ -83,12 +84,12 @@ define i64 @add_i64(i64 %x, i64 %y) {
 ; LA32-NEXT:    sltu $a0, $a2, $a0
 ; LA32-NEXT:    add.w $a1, $a1, $a0
 ; LA32-NEXT:    move $a0, $a2
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: add_i64:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    add.d $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %add = add i64 %x, %y
   ret i64 %add
 }
@@ -97,12 +98,12 @@ define i1 @add_i1_3(i1 %x) {
 ; LA32-LABEL: add_i1_3:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    addi.w $a0, $a0, 1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: add_i1_3:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    addi.d $a0, $a0, 1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %add = add i1 %x, 3
   ret i1 %add
 }
@@ -111,12 +112,12 @@ define i8 @add_i8_3(i8 %x) {
 ; LA32-LABEL: add_i8_3:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    addi.w $a0, $a0, 3
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: add_i8_3:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    addi.d $a0, $a0, 3
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %add = add i8 %x, 3
   ret i8 %add
 }
@@ -125,12 +126,12 @@ define i16 @add_i16_3(i16 %x) {
 ; LA32-LABEL: add_i16_3:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    addi.w $a0, $a0, 3
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: add_i16_3:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    addi.d $a0, $a0, 3
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %add = add i16 %x, 3
   ret i16 %add
 }
@@ -139,12 +140,12 @@ define i32 @add_i32_3(i32 %x) {
 ; LA32-LABEL: add_i32_3:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    addi.w $a0, $a0, 3
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: add_i32_3:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    addi.d $a0, $a0, 3
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %add = add i32 %x, 3
   ret i32 %add
 }
@@ -155,12 +156,12 @@ define signext i32 @add_i32_3_sext(i32 %x) {
 ; LA32-LABEL: add_i32_3_sext:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    addi.w $a0, $a0, 3
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: add_i32_3_sext:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    addi.w $a0, $a0, 3
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %add = add i32 %x, 3
   ret i32 %add
 }
@@ -172,12 +173,12 @@ define i64 @add_i64_3(i64 %x) {
 ; LA32-NEXT:    sltu $a0, $a2, $a0
 ; LA32-NEXT:    add.w $a1, $a1, $a0
 ; LA32-NEXT:    move $a0, $a2
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: add_i64_3:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    addi.d $a0, $a0, 3
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %add = add i64 %x, 3
   ret i64 %add
 }

diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/and.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/and.ll
index e5c9da58c7570..47b990febe8ac 100644
--- a/llvm/test/CodeGen/LoongArch/ir-instruction/and.ll
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/and.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc --mtriple=loongarch32 < %s | FileCheck %s --check-prefix=LA32
 ; RUN: llc --mtriple=loongarch64 < %s | FileCheck %s --check-prefix=LA64
 
@@ -7,12 +8,12 @@ define i1 @and_i1(i1 %a, i1 %b) {
 ; LA32-LABEL: and_i1:
 ; LA32:       # %bb.0: # %entry
 ; LA32-NEXT:    and $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: and_i1:
 ; LA64:       # %bb.0: # %entry
 ; LA64-NEXT:    and $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
 entry:
   %r = and i1 %a, %b
   ret i1 %r
@@ -22,12 +23,12 @@ define i8 @and_i8(i8 %a, i8 %b) {
 ; LA32-LABEL: and_i8:
 ; LA32:       # %bb.0: # %entry
 ; LA32-NEXT:    and $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: and_i8:
 ; LA64:       # %bb.0: # %entry
 ; LA64-NEXT:    and $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
 entry:
   %r = and i8 %a, %b
   ret i8 %r
@@ -37,12 +38,12 @@ define i16 @and_i16(i16 %a, i16 %b) {
 ; LA32-LABEL: and_i16:
 ; LA32:       # %bb.0: # %entry
 ; LA32-NEXT:    and $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: and_i16:
 ; LA64:       # %bb.0: # %entry
 ; LA64-NEXT:    and $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
 entry:
   %r = and i16 %a, %b
   ret i16 %r
@@ -52,12 +53,12 @@ define i32 @and_i32(i32 %a, i32 %b) {
 ; LA32-LABEL: and_i32:
 ; LA32:       # %bb.0: # %entry
 ; LA32-NEXT:    and $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: and_i32:
 ; LA64:       # %bb.0: # %entry
 ; LA64-NEXT:    and $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
 entry:
   %r = and i32 %a, %b
   ret i32 %r
@@ -68,12 +69,12 @@ define i64 @and_i64(i64 %a, i64 %b) {
 ; LA32:       # %bb.0: # %entry
 ; LA32-NEXT:    and $a0, $a0, $a2
 ; LA32-NEXT:    and $a1, $a1, $a3
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: and_i64:
 ; LA64:       # %bb.0: # %entry
 ; LA64-NEXT:    and $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
 entry:
   %r = and i64 %a, %b
   ret i64 %r
@@ -83,12 +84,12 @@ define i1 @and_i1_0(i1 %b) {
 ; LA32-LABEL: and_i1_0:
 ; LA32:       # %bb.0: # %entry
 ; LA32-NEXT:    move $a0, $zero
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: and_i1_0:
 ; LA64:       # %bb.0: # %entry
 ; LA64-NEXT:    move $a0, $zero
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
 entry:
   %r = and i1 4, %b
   ret i1 %r
@@ -97,11 +98,11 @@ entry:
 define i1 @and_i1_5(i1 %b) {
 ; LA32-LABEL: and_i1_5:
 ; LA32:       # %bb.0: # %entry
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: and_i1_5:
 ; LA64:       # %bb.0: # %entry
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
 entry:
   %r = and i1 5, %b
   ret i1 %r
@@ -111,12 +112,12 @@ define i8 @and_i8_5(i8 %b) {
 ; LA32-LABEL: and_i8_5:
 ; LA32:       # %bb.0: # %entry
 ; LA32-NEXT:    andi $a0, $a0, 5
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: and_i8_5:
 ; LA64:       # %bb.0: # %entry
 ; LA64-NEXT:    andi $a0, $a0, 5
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
 entry:
   %r = and i8 5, %b
   ret i8 %r
@@ -126,12 +127,12 @@ define i8 @and_i8_257(i8 %b) {
 ; LA32-LABEL: and_i8_257:
 ; LA32:       # %bb.0: # %entry
 ; LA32-NEXT:    andi $a0, $a0, 1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: and_i8_257:
 ; LA64:       # %bb.0: # %entry
 ; LA64-NEXT:    andi $a0, $a0, 1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
 entry:
   %r = and i8 257, %b
   ret i8 %r
@@ -141,12 +142,12 @@ define i16 @and_i16_5(i16 %b) {
 ; LA32-LABEL: and_i16_5:
 ; LA32:       # %bb.0: # %entry
 ; LA32-NEXT:    andi $a0, $a0, 5
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: and_i16_5:
 ; LA64:       # %bb.0: # %entry
 ; LA64-NEXT:    andi $a0, $a0, 5
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
 entry:
   %r = and i16 5, %b
   ret i16 %r
@@ -157,13 +158,13 @@ define i16 @and_i16_0x1000(i16 %b) {
 ; LA32:       # %bb.0: # %entry
 ; LA32-NEXT:    lu12i.w $a1, 1
 ; LA32-NEXT:    and $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: and_i16_0x1000:
 ; LA64:       # %bb.0: # %entry
 ; LA64-NEXT:    lu12i.w $a1, 1
 ; LA64-NEXT:    and $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
 entry:
   %r = and i16 4096, %b
   ret i16 %r
@@ -173,12 +174,12 @@ define i16 @and_i16_0x10001(i16 %b) {
 ; LA32-LABEL: and_i16_0x10001:
 ; LA32:       # %bb.0: # %entry
 ; LA32-NEXT:    andi $a0, $a0, 1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: and_i16_0x10001:
 ; LA64:       # %bb.0: # %entry
 ; LA64-NEXT:    andi $a0, $a0, 1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
 entry:
   %r = and i16 65537, %b
   ret i16 %r
@@ -188,12 +189,12 @@ define i32 @and_i32_5(i32 %b) {
 ; LA32-LABEL: and_i32_5:
 ; LA32:       # %bb.0: # %entry
 ; LA32-NEXT:    andi $a0, $a0, 5
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: and_i32_5:
 ; LA64:       # %bb.0: # %entry
 ; LA64-NEXT:    andi $a0, $a0, 5
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
 entry:
   %r = and i32 5, %b
   ret i32 %r
@@ -204,13 +205,13 @@ define i32 @and_i32_0x1000(i32 %b) {
 ; LA32:       # %bb.0: # %entry
 ; LA32-NEXT:    lu12i.w $a1, 1
 ; LA32-NEXT:    and $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: and_i32_0x1000:
 ; LA64:       # %bb.0: # %entry
 ; LA64-NEXT:    lu12i.w $a1, 1
 ; LA64-NEXT:    and $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
 entry:
   %r = and i32 4096, %b
   ret i32 %r
@@ -220,12 +221,12 @@ define i32 @and_i32_0x100000001(i32 %b) {
 ; LA32-LABEL: and_i32_0x100000001:
 ; LA32:       # %bb.0: # %entry
 ; LA32-NEXT:    andi $a0, $a0, 1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: and_i32_0x100000001:
 ; LA64:       # %bb.0: # %entry
 ; LA64-NEXT:    andi $a0, $a0, 1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
 entry:
   %r = and i32 4294967297, %b
   ret i32 %r
@@ -236,12 +237,12 @@ define i64 @and_i64_5(i64 %b) {
 ; LA32:       # %bb.0: # %entry
 ; LA32-NEXT:    andi $a0, $a0, 5
 ; LA32-NEXT:    move $a1, $zero
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: and_i64_5:
 ; LA64:       # %bb.0: # %entry
 ; LA64-NEXT:    andi $a0, $a0, 5
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
 entry:
   %r = and i64 5, %b
   ret i64 %r
@@ -253,13 +254,13 @@ define i64 @and_i64_0x1000(i64 %b) {
 ; LA32-NEXT:    lu12i.w $a1, 1
 ; LA32-NEXT:    and $a0, $a0, $a1
 ; LA32-NEXT:    move $a1, $zero
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: and_i64_0x1000:
 ; LA64:       # %bb.0: # %entry
 ; LA64-NEXT:    lu12i.w $a1, 1
 ; LA64-NEXT:    and $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
 entry:
   %r = and i64 4096, %b
   ret i64 %r

diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/ashr.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/ashr.ll
index 1b7e8085185a0..0d8e7127d0df8 100644
--- a/llvm/test/CodeGen/LoongArch/ir-instruction/ashr.ll
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/ashr.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc --mtriple=loongarch32 < %s | FileCheck %s --check-prefix=LA32
 ; RUN: llc --mtriple=loongarch64 < %s | FileCheck %s --check-prefix=LA64
 
@@ -6,11 +7,11 @@
 define i1 @ashr_i1(i1 %x, i1 %y) {
 ; LA32-LABEL: ashr_i1:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: ashr_i1:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %ashr = ashr i1 %x, %y
   ret i1 %ashr
 }
@@ -20,13 +21,13 @@ define i8 @ashr_i8(i8 %x, i8 %y) {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    ext.w.b $a0, $a0
 ; LA32-NEXT:    sra.w $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: ashr_i8:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    ext.w.b $a0, $a0
 ; LA64-NEXT:    sra.d $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %ashr = ashr i8 %x, %y
   ret i8 %ashr
 }
@@ -36,13 +37,13 @@ define i16 @ashr_i16(i16 %x, i16 %y) {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    ext.w.h $a0, $a0
 ; LA32-NEXT:    sra.w $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: ashr_i16:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    ext.w.h $a0, $a0
 ; LA64-NEXT:    sra.d $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %ashr = ashr i16 %x, %y
   ret i16 %ashr
 }
@@ -51,12 +52,12 @@ define i32 @ashr_i32(i32 %x, i32 %y) {
 ; LA32-LABEL: ashr_i32:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    sra.w $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: ashr_i32:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    sra.w $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %ashr = ashr i32 %x, %y
   ret i32 %ashr
 }
@@ -81,12 +82,12 @@ define i64 @ashr_i64(i64 %x, i64 %y) {
 ; LA32-NEXT:    masknez $a1, $a1, $a5
 ; LA32-NEXT:    or $a0, $a0, $a1
 ; LA32-NEXT:    move $a1, $a3
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: ashr_i64:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    sra.d $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %ashr = ashr i64 %x, %y
   ret i64 %ashr
 }
@@ -94,11 +95,11 @@ define i64 @ashr_i64(i64 %x, i64 %y) {
 define i1 @ashr_i1_3(i1 %x) {
 ; LA32-LABEL: ashr_i1_3:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: ashr_i1_3:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %ashr = ashr i1 %x, 3
   ret i1 %ashr
 }
@@ -108,13 +109,13 @@ define i8 @ashr_i8_3(i8 %x) {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    ext.w.b $a0, $a0
 ; LA32-NEXT:    srai.w $a0, $a0, 3
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: ashr_i8_3:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    ext.w.b $a0, $a0
 ; LA64-NEXT:    srai.d $a0, $a0, 3
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %ashr = ashr i8 %x, 3
   ret i8 %ashr
 }
@@ -124,13 +125,13 @@ define i16 @ashr_i16_3(i16 %x) {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    ext.w.h $a0, $a0
 ; LA32-NEXT:    srai.w $a0, $a0, 3
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: ashr_i16_3:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    ext.w.h $a0, $a0
 ; LA64-NEXT:    srai.d $a0, $a0, 3
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %ashr = ashr i16 %x, 3
   ret i16 %ashr
 }
@@ -139,13 +140,13 @@ define i32 @ashr_i32_3(i32 %x) {
 ; LA32-LABEL: ashr_i32_3:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    srai.w $a0, $a0, 3
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: ashr_i32_3:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    addi.w $a0, $a0, 0
 ; LA64-NEXT:    srai.d $a0, $a0, 3
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %ashr = ashr i32 %x, 3
   ret i32 %ashr
 }
@@ -157,12 +158,12 @@ define i64 @ashr_i64_3(i64 %x) {
 ; LA32-NEXT:    slli.w $a2, $a1, 29
 ; LA32-NEXT:    or $a0, $a0, $a2
 ; LA32-NEXT:    srai.w $a1, $a1, 3
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: ashr_i64_3:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    srai.d $a0, $a0, 3
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %ashr = ashr i64 %x, 3
   ret i64 %ashr
 }

diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/br.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/br.ll
index a0074f7593966..7ab4788e6a31a 100644
--- a/llvm/test/CodeGen/LoongArch/ir-instruction/br.ll
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/br.ll
@@ -22,7 +22,7 @@ define void @foo_br_eq(i32 %a, ptr %b) nounwind {
 ; LA32-NEXT:  # %bb.1: # %test
 ; LA32-NEXT:    ld.w $a0, $a1, 0
 ; LA32-NEXT:  .LBB1_2: # %end
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: foo_br_eq:
 ; LA64:       # %bb.0:
@@ -32,7 +32,7 @@ define void @foo_br_eq(i32 %a, ptr %b) nounwind {
 ; LA64-NEXT:  # %bb.1: # %test
 ; LA64-NEXT:    ld.w $a0, $a1, 0
 ; LA64-NEXT:  .LBB1_2: # %end
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %val = load volatile i32, ptr %b
   %cc = icmp eq i32 %val, %a
   br i1 %cc, label %end, label %test
@@ -52,7 +52,7 @@ define void @foo_br_ne(i32 %a, ptr %b) nounwind {
 ; LA32-NEXT:  # %bb.1: # %test
 ; LA32-NEXT:    ld.w $a0, $a1, 0
 ; LA32-NEXT:  .LBB2_2: # %end
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: foo_br_ne:
 ; LA64:       # %bb.0:
@@ -62,7 +62,7 @@ define void @foo_br_ne(i32 %a, ptr %b) nounwind {
 ; LA64-NEXT:  # %bb.1: # %test
 ; LA64-NEXT:    ld.w $a0, $a1, 0
 ; LA64-NEXT:  .LBB2_2: # %end
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %val = load volatile i32, ptr %b
   %cc = icmp ne i32 %val, %a
   br i1 %cc, label %end, label %test
@@ -82,7 +82,7 @@ define void @foo_br_slt(i32 %a, ptr %b) nounwind {
 ; LA32-NEXT:  # %bb.1: # %test
 ; LA32-NEXT:    ld.w $a0, $a1, 0
 ; LA32-NEXT:  .LBB3_2: # %end
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: foo_br_slt:
 ; LA64:       # %bb.0:
@@ -92,7 +92,7 @@ define void @foo_br_slt(i32 %a, ptr %b) nounwind {
 ; LA64-NEXT:  # %bb.1: # %test
 ; LA64-NEXT:    ld.w $a0, $a1, 0
 ; LA64-NEXT:  .LBB3_2: # %end
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %val = load volatile i32, ptr %b
   %cc = icmp slt i32 %val, %a
   br i1 %cc, label %end, label %test
@@ -112,7 +112,7 @@ define void @foo_br_sge(i32 %a, ptr %b) nounwind {
 ; LA32-NEXT:  # %bb.1: # %test
 ; LA32-NEXT:    ld.w $a0, $a1, 0
 ; LA32-NEXT:  .LBB4_2: # %end
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: foo_br_sge:
 ; LA64:       # %bb.0:
@@ -122,7 +122,7 @@ define void @foo_br_sge(i32 %a, ptr %b) nounwind {
 ; LA64-NEXT:  # %bb.1: # %test
 ; LA64-NEXT:    ld.w $a0, $a1, 0
 ; LA64-NEXT:  .LBB4_2: # %end
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %val = load volatile i32, ptr %b
   %cc = icmp sge i32 %val, %a
   br i1 %cc, label %end, label %test
@@ -142,7 +142,7 @@ define void @foo_br_ult(i32 %a, ptr %b) nounwind {
 ; LA32-NEXT:  # %bb.1: # %test
 ; LA32-NEXT:    ld.w $a0, $a1, 0
 ; LA32-NEXT:  .LBB5_2: # %end
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: foo_br_ult:
 ; LA64:       # %bb.0:
@@ -152,7 +152,7 @@ define void @foo_br_ult(i32 %a, ptr %b) nounwind {
 ; LA64-NEXT:  # %bb.1: # %test
 ; LA64-NEXT:    ld.w $a0, $a1, 0
 ; LA64-NEXT:  .LBB5_2: # %end
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %val = load volatile i32, ptr %b
   %cc = icmp ult i32 %val, %a
   br i1 %cc, label %end, label %test
@@ -172,7 +172,7 @@ define void @foo_br_uge(i32 %a, ptr %b) nounwind {
 ; LA32-NEXT:  # %bb.1: # %test
 ; LA32-NEXT:    ld.w $a0, $a1, 0
 ; LA32-NEXT:  .LBB6_2: # %end
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: foo_br_uge:
 ; LA64:       # %bb.0:
@@ -182,7 +182,7 @@ define void @foo_br_uge(i32 %a, ptr %b) nounwind {
 ; LA64-NEXT:  # %bb.1: # %test
 ; LA64-NEXT:    ld.w $a0, $a1, 0
 ; LA64-NEXT:  .LBB6_2: # %end
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %val = load volatile i32, ptr %b
   %cc = icmp uge i32 %val, %a
   br i1 %cc, label %end, label %test
@@ -203,7 +203,7 @@ define void @foo_br_sgt(i32 %a, ptr %b) nounwind {
 ; LA32-NEXT:  # %bb.1: # %test
 ; LA32-NEXT:    ld.w $a0, $a1, 0
 ; LA32-NEXT:  .LBB7_2: # %end
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: foo_br_sgt:
 ; LA64:       # %bb.0:
@@ -213,7 +213,7 @@ define void @foo_br_sgt(i32 %a, ptr %b) nounwind {
 ; LA64-NEXT:  # %bb.1: # %test
 ; LA64-NEXT:    ld.w $a0, $a1, 0
 ; LA64-NEXT:  .LBB7_2: # %end
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %val = load volatile i32, ptr %b
   %cc = icmp sgt i32 %val, %a
   br i1 %cc, label %end, label %test
@@ -233,7 +233,7 @@ define void @foo_br_sle(i32 %a, ptr %b) nounwind {
 ; LA32-NEXT:  # %bb.1: # %test
 ; LA32-NEXT:    ld.w $a0, $a1, 0
 ; LA32-NEXT:  .LBB8_2: # %end
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: foo_br_sle:
 ; LA64:       # %bb.0:
@@ -243,7 +243,7 @@ define void @foo_br_sle(i32 %a, ptr %b) nounwind {
 ; LA64-NEXT:  # %bb.1: # %test
 ; LA64-NEXT:    ld.w $a0, $a1, 0
 ; LA64-NEXT:  .LBB8_2: # %end
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %val = load volatile i32, ptr %b
   %cc = icmp sle i32 %val, %a
   br i1 %cc, label %end, label %test
@@ -263,7 +263,7 @@ define void @foo_br_ugt(i32 %a, ptr %b) nounwind {
 ; LA32-NEXT:  # %bb.1: # %test
 ; LA32-NEXT:    ld.w $a0, $a1, 0
 ; LA32-NEXT:  .LBB9_2: # %end
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: foo_br_ugt:
 ; LA64:       # %bb.0:
@@ -273,7 +273,7 @@ define void @foo_br_ugt(i32 %a, ptr %b) nounwind {
 ; LA64-NEXT:  # %bb.1: # %test
 ; LA64-NEXT:    ld.w $a0, $a1, 0
 ; LA64-NEXT:  .LBB9_2: # %end
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %val = load volatile i32, ptr %b
   %cc = icmp ugt i32 %val, %a
   br i1 %cc, label %end, label %test
@@ -293,7 +293,7 @@ define void @foo_br_ule(i32 %a, ptr %b) nounwind {
 ; LA32-NEXT:  # %bb.1: # %test
 ; LA32-NEXT:    ld.w $a0, $a1, 0
 ; LA32-NEXT:  .LBB10_2: # %end
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: foo_br_ule:
 ; LA64:       # %bb.0:
@@ -303,7 +303,7 @@ define void @foo_br_ule(i32 %a, ptr %b) nounwind {
 ; LA64-NEXT:  # %bb.1: # %test
 ; LA64-NEXT:    ld.w $a0, $a1, 0
 ; LA64-NEXT:  .LBB10_2: # %end
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %val = load volatile i32, ptr %b
   %cc = icmp ule i32 %val, %a
   br i1 %cc, label %end, label %test
@@ -326,7 +326,7 @@ define void @foo_br_cc(ptr %a, i1 %cc) nounwind {
 ; ALL-NEXT:  # %bb.1: # %test
 ; ALL-NEXT:    ld.w $a0, $a0, 0
 ; ALL-NEXT:  .LBB11_2: # %end
-; ALL-NEXT:    jirl $zero, $ra, 0
+; ALL-NEXT:    ret
   %val = load volatile i32, ptr %a
   br i1 %cc, label %end, label %test
 test:

diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/call.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/call.ll
index 596ea22e5854e..5b499fa21690c 100644
--- a/llvm/test/CodeGen/LoongArch/ir-instruction/call.ll
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/call.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc --mtriple=loongarch32 < %s | FileCheck --check-prefix=LA32 %s
 ; RUN: llc --mtriple=loongarch64 < %s | FileCheck --check-prefix=LA64 %s
 
@@ -11,7 +12,7 @@ define i32 @test_call_external(i32 %a) nounwind {
 ; LA32-NEXT:    bl external_function
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 16
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: test_call_external:
 ; LA64:       # %bb.0:
@@ -20,7 +21,7 @@ define i32 @test_call_external(i32 %a) nounwind {
 ; LA64-NEXT:    bl external_function
 ; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
 ; LA64-NEXT:    addi.d $sp, $sp, 16
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %1 = call i32 @external_function(i32 %a)
   ret i32 %1
 }
@@ -29,12 +30,12 @@ define i32 @defined_function(i32 %a) nounwind {
 ; LA32-LABEL: defined_function:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    addi.w $a0, $a0, 1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: defined_function:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    addi.d $a0, $a0, 1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %1 = add i32 %a, 1
   ret i32 %1
 }
@@ -47,7 +48,7 @@ define i32 @test_call_defined(i32 %a) nounwind {
 ; LA32-NEXT:    bl defined_function
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 16
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: test_call_defined:
 ; LA64:       # %bb.0:
@@ -56,7 +57,7 @@ define i32 @test_call_defined(i32 %a) nounwind {
 ; LA64-NEXT:    bl defined_function
 ; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
 ; LA64-NEXT:    addi.d $sp, $sp, 16
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %1 = call i32 @defined_function(i32 %a) nounwind
   ret i32 %1
 }
@@ -71,7 +72,7 @@ define i32 @test_call_indirect(ptr %a, i32 %b) nounwind {
 ; LA32-NEXT:    jirl $ra, $a2, 0
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 16
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: test_call_indirect:
 ; LA64:       # %bb.0:
@@ -82,7 +83,7 @@ define i32 @test_call_indirect(ptr %a, i32 %b) nounwind {
 ; LA64-NEXT:    jirl $ra, $a2, 0
 ; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
 ; LA64-NEXT:    addi.d $sp, $sp, 16
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %1 = call i32 %a(i32 %b)
   ret i32 %1
 }

diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/double-convert.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/double-convert.ll
index 1be434ef182e7..d1b83cdff06bf 100644
--- a/llvm/test/CodeGen/LoongArch/ir-instruction/double-convert.ll
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/double-convert.ll
@@ -6,12 +6,12 @@ define float @convert_double_to_float(double %a) nounwind {
 ; LA32-LABEL: convert_double_to_float:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    fcvt.s.d $fa0, $fa0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: convert_double_to_float:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    fcvt.s.d $fa0, $fa0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %1 = fptrunc double %a to float
   ret float %1
 }
@@ -20,12 +20,12 @@ define double @convert_float_to_double(float %a) nounwind {
 ; LA32-LABEL: convert_float_to_double:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    fcvt.d.s $fa0, $fa0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: convert_float_to_double:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    fcvt.d.s $fa0, $fa0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %1 = fpext float %a to double
   ret double %1
 }
@@ -35,13 +35,13 @@ define double @convert_i8_to_double(i8 signext %a) nounwind {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    movgr2fr.w $fa0, $a0
 ; LA32-NEXT:    ffint.d.w $fa0, $fa0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: convert_i8_to_double:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    movgr2fr.w $fa0, $a0
 ; LA64-NEXT:    ffint.d.w $fa0, $fa0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %1 = sitofp i8 %a to double
   ret double %1
 }
@@ -51,13 +51,13 @@ define double @convert_i16_to_double(i16 signext %a) nounwind {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    movgr2fr.w $fa0, $a0
 ; LA32-NEXT:    ffint.d.w $fa0, $fa0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: convert_i16_to_double:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    movgr2fr.w $fa0, $a0
 ; LA64-NEXT:    ffint.d.w $fa0, $fa0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %1 = sitofp i16 %a to double
   ret double %1
 }
@@ -67,13 +67,13 @@ define double @convert_i32_to_double(i32 %a) nounwind {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    movgr2fr.w $fa0, $a0
 ; LA32-NEXT:    ffint.d.w $fa0, $fa0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: convert_i32_to_double:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    movgr2fr.w $fa0, $a0
 ; LA64-NEXT:    ffint.d.w $fa0, $fa0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %1 = sitofp i32 %a to double
   ret double %1
 }
@@ -86,13 +86,13 @@ define double @convert_i64_to_double(i64 %a) nounwind {
 ; LA32-NEXT:    bl __floatdidf
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 16
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: convert_i64_to_double:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    movgr2fr.d $fa0, $a0
 ; LA64-NEXT:    ffint.d.l $fa0, $fa0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %1 = sitofp i64 %a to double
   ret double %1
 }
@@ -102,13 +102,13 @@ define i32 @convert_double_to_i32(double %a) nounwind {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    ftintrz.w.d $fa0, $fa0
 ; LA32-NEXT:    movfr2gr.s $a0, $fa0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: convert_double_to_i32:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    ftintrz.w.d $fa0, $fa0
 ; LA64-NEXT:    movfr2gr.s $a0, $fa0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %1 = fptosi double %a to i32
   ret i32 %1
 }
@@ -131,13 +131,13 @@ define i32 @convert_double_to_u32(double %a) nounwind {
 ; LA32-NEXT:    movfr2gr.s $a2, $fa0
 ; LA32-NEXT:    maskeqz $a1, $a2, $a1
 ; LA32-NEXT:    or $a0, $a1, $a0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: convert_double_to_u32:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    ftintrz.l.d $fa0, $fa0
 ; LA64-NEXT:    movfr2gr.d $a0, $fa0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %1 = fptoui double %a to i32
   ret i32 %1
 }
@@ -150,13 +150,13 @@ define i64 @convert_double_to_i64(double %a) nounwind {
 ; LA32-NEXT:    bl __fixdfdi
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 16
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: convert_double_to_i64:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    ftintrz.l.d $fa0, $fa0
 ; LA64-NEXT:    movfr2gr.d $a0, $fa0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %1 = fptosi double %a to i64
   ret i64 %1
 }
@@ -169,7 +169,7 @@ define i64 @convert_double_to_u64(double %a) nounwind {
 ; LA32-NEXT:    bl __fixunsdfdi
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 16
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: convert_double_to_u64:
 ; LA64:       # %bb.0:
@@ -188,7 +188,7 @@ define i64 @convert_double_to_u64(double %a) nounwind {
 ; LA64-NEXT:    movfr2gr.d $a2, $fa0
 ; LA64-NEXT:    maskeqz $a1, $a2, $a1
 ; LA64-NEXT:    or $a0, $a1, $a0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %1 = fptoui double %a to i64
   ret i64 %1
 }
@@ -198,13 +198,13 @@ define double @convert_u8_to_double(i8 zeroext %a) nounwind {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    movgr2fr.w $fa0, $a0
 ; LA32-NEXT:    ffint.d.w $fa0, $fa0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: convert_u8_to_double:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    movgr2fr.w $fa0, $a0
 ; LA64-NEXT:    ffint.d.w $fa0, $fa0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %1 = uitofp i8 %a to double
   ret double %1
 }
@@ -214,13 +214,13 @@ define double @convert_u16_to_double(i16 zeroext %a) nounwind {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    movgr2fr.w $fa0, $a0
 ; LA32-NEXT:    ffint.d.w $fa0, $fa0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: convert_u16_to_double:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    movgr2fr.w $fa0, $a0
 ; LA64-NEXT:    ffint.d.w $fa0, $fa0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %1 = uitofp i16 %a to double
   ret double %1
 }
@@ -238,7 +238,7 @@ define double @convert_u32_to_double(i32 %a) nounwind {
 ; LA32-NEXT:    fld.d $fa1, $sp, 8
 ; LA32-NEXT:    fsub.d $fa0, $fa1, $fa0
 ; LA32-NEXT:    addi.w $sp, $sp, 16
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: convert_u32_to_double:
 ; LA64:       # %bb.0:
@@ -252,7 +252,7 @@ define double @convert_u32_to_double(i32 %a) nounwind {
 ; LA64-NEXT:    bstrins.d $a0, $a1, 63, 32
 ; LA64-NEXT:    movgr2fr.d $fa1, $a0
 ; LA64-NEXT:    fadd.d $fa0, $fa1, $fa0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %1 = uitofp i32 %a to double
   ret double %1
 }
@@ -265,7 +265,7 @@ define double @convert_u64_to_double(i64 %a) nounwind {
 ; LA32-NEXT:    bl __floatundidf
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 16
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: convert_u64_to_double:
 ; LA64:       # %bb.0:
@@ -281,7 +281,7 @@ define double @convert_u64_to_double(i64 %a) nounwind {
 ; LA64-NEXT:    bstrins.d $a0, $a1, 63, 32
 ; LA64-NEXT:    movgr2fr.d $fa1, $a0
 ; LA64-NEXT:    fadd.d $fa0, $fa1, $fa0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %1 = uitofp i64 %a to double
   ret double %1
 }
@@ -294,12 +294,12 @@ define double @bitcast_i64_to_double(i64 %a, i64 %b) nounwind {
 ; LA32-NEXT:    st.w $a0, $sp, 8
 ; LA32-NEXT:    fld.d $fa0, $sp, 8
 ; LA32-NEXT:    addi.w $sp, $sp, 16
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: bitcast_i64_to_double:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    movgr2fr.d $fa0, $a0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %1 = bitcast i64 %a to double
   ret double %1
 }
@@ -312,12 +312,12 @@ define i64 @bitcast_double_to_i64(double %a) nounwind {
 ; LA32-NEXT:    ld.w $a0, $sp, 8
 ; LA32-NEXT:    ld.w $a1, $sp, 12
 ; LA32-NEXT:    addi.w $sp, $sp, 16
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: bitcast_double_to_i64:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    movfr2gr.d $a0, $fa0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %1 = bitcast double %a to i64
   ret i64 %1
 }

diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/fadd.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/fadd.ll
index 15e1118d2e560..0c509297e2195 100644
--- a/llvm/test/CodeGen/LoongArch/ir-instruction/fadd.ll
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/fadd.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc --mtriple=loongarch32 --mattr=+d < %s | FileCheck %s --check-prefix=LA32
 ; RUN: llc --mtriple=loongarch64 --mattr=+d < %s | FileCheck %s --check-prefix=LA64
 
@@ -7,12 +8,12 @@ define float @fadd_s(float %x, float %y) {
 ; LA32-LABEL: fadd_s:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    fadd.s $fa0, $fa0, $fa1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: fadd_s:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    fadd.s $fa0, $fa0, $fa1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %add = fadd float %x, %y
   ret float %add
 }
@@ -21,12 +22,12 @@ define double @fadd_d(double %x, double %y) {
 ; LA32-LABEL: fadd_d:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    fadd.d $fa0, $fa0, $fa1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: fadd_d:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    fadd.d $fa0, $fa0, $fa1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %add = fadd double %x, %y
   ret double %add
 }

diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/fcmp-dbl.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/fcmp-dbl.ll
index bb35405abc018..9743dca47580b 100644
--- a/llvm/test/CodeGen/LoongArch/ir-instruction/fcmp-dbl.ll
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/fcmp-dbl.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc --mtriple=loongarch32 --mattr=+d < %s | FileCheck %s --check-prefix=LA32
 ; RUN: llc --mtriple=loongarch64 --mattr=+d < %s | FileCheck %s --check-prefix=LA64
 
@@ -8,12 +9,12 @@ define i1 @fcmp_false(double %a, double %b) {
 ; LA32-LABEL: fcmp_false:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    move $a0, $zero
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: fcmp_false:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    move $a0, $zero
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cmp = fcmp false double %a, %b
   ret i1 %cmp
 }
@@ -23,13 +24,13 @@ define i1 @fcmp_oeq(double %a, double %b) {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    fcmp.ceq.d $fcc0, $fa0, $fa1
 ; LA32-NEXT:    movcf2gr $a0, $fcc0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: fcmp_oeq:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    fcmp.ceq.d $fcc0, $fa0, $fa1
 ; LA64-NEXT:    movcf2gr $a0, $fcc0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cmp = fcmp oeq double %a, %b
   ret i1 %cmp
 }
@@ -39,13 +40,13 @@ define i1 @fcmp_ogt(double %a, double %b) {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    fcmp.clt.d $fcc0, $fa1, $fa0
 ; LA32-NEXT:    movcf2gr $a0, $fcc0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: fcmp_ogt:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    fcmp.clt.d $fcc0, $fa1, $fa0
 ; LA64-NEXT:    movcf2gr $a0, $fcc0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cmp = fcmp ogt double %a, %b
   ret i1 %cmp
 }
@@ -55,13 +56,13 @@ define i1 @fcmp_oge(double %a, double %b) {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    fcmp.cle.d $fcc0, $fa1, $fa0
 ; LA32-NEXT:    movcf2gr $a0, $fcc0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: fcmp_oge:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    fcmp.cle.d $fcc0, $fa1, $fa0
 ; LA64-NEXT:    movcf2gr $a0, $fcc0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cmp = fcmp oge double %a, %b
   ret i1 %cmp
 }
@@ -71,13 +72,13 @@ define i1 @fcmp_olt(double %a, double %b) {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    fcmp.clt.d $fcc0, $fa0, $fa1
 ; LA32-NEXT:    movcf2gr $a0, $fcc0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: fcmp_olt:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    fcmp.clt.d $fcc0, $fa0, $fa1
 ; LA64-NEXT:    movcf2gr $a0, $fcc0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cmp = fcmp olt double %a, %b
   ret i1 %cmp
 }
@@ -87,13 +88,13 @@ define i1 @fcmp_ole(double %a, double %b) {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    fcmp.cle.d $fcc0, $fa0, $fa1
 ; LA32-NEXT:    movcf2gr $a0, $fcc0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: fcmp_ole:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    fcmp.cle.d $fcc0, $fa0, $fa1
 ; LA64-NEXT:    movcf2gr $a0, $fcc0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cmp = fcmp ole double %a, %b
   ret i1 %cmp
 }
@@ -103,13 +104,13 @@ define i1 @fcmp_one(double %a, double %b) {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    fcmp.cne.d $fcc0, $fa0, $fa1
 ; LA32-NEXT:    movcf2gr $a0, $fcc0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: fcmp_one:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    fcmp.cne.d $fcc0, $fa0, $fa1
 ; LA64-NEXT:    movcf2gr $a0, $fcc0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cmp = fcmp one double %a, %b
   ret i1 %cmp
 }
@@ -119,13 +120,13 @@ define i1 @fcmp_ord(double %a, double %b) {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    fcmp.cor.d $fcc0, $fa0, $fa1
 ; LA32-NEXT:    movcf2gr $a0, $fcc0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: fcmp_ord:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    fcmp.cor.d $fcc0, $fa0, $fa1
 ; LA64-NEXT:    movcf2gr $a0, $fcc0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cmp = fcmp ord double %a, %b
   ret i1 %cmp
 }
@@ -135,13 +136,13 @@ define i1 @fcmp_ueq(double %a, double %b) {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    fcmp.cueq.d $fcc0, $fa0, $fa1
 ; LA32-NEXT:    movcf2gr $a0, $fcc0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: fcmp_ueq:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    fcmp.cueq.d $fcc0, $fa0, $fa1
 ; LA64-NEXT:    movcf2gr $a0, $fcc0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cmp = fcmp ueq double %a, %b
   ret i1 %cmp
 }
@@ -151,13 +152,13 @@ define i1 @fcmp_ugt(double %a, double %b) {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    fcmp.cult.d $fcc0, $fa1, $fa0
 ; LA32-NEXT:    movcf2gr $a0, $fcc0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: fcmp_ugt:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    fcmp.cult.d $fcc0, $fa1, $fa0
 ; LA64-NEXT:    movcf2gr $a0, $fcc0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cmp = fcmp ugt double %a, %b
   ret i1 %cmp
 }
@@ -167,13 +168,13 @@ define i1 @fcmp_uge(double %a, double %b) {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    fcmp.cule.d $fcc0, $fa1, $fa0
 ; LA32-NEXT:    movcf2gr $a0, $fcc0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: fcmp_uge:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    fcmp.cule.d $fcc0, $fa1, $fa0
 ; LA64-NEXT:    movcf2gr $a0, $fcc0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cmp = fcmp uge double %a, %b
   ret i1 %cmp
 }
@@ -183,13 +184,13 @@ define i1 @fcmp_ult(double %a, double %b) {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    fcmp.cult.d $fcc0, $fa0, $fa1
 ; LA32-NEXT:    movcf2gr $a0, $fcc0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: fcmp_ult:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    fcmp.cult.d $fcc0, $fa0, $fa1
 ; LA64-NEXT:    movcf2gr $a0, $fcc0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cmp = fcmp ult double %a, %b
   ret i1 %cmp
 }
@@ -199,13 +200,13 @@ define i1 @fcmp_ule(double %a, double %b) {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    fcmp.cule.d $fcc0, $fa0, $fa1
 ; LA32-NEXT:    movcf2gr $a0, $fcc0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: fcmp_ule:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    fcmp.cule.d $fcc0, $fa0, $fa1
 ; LA64-NEXT:    movcf2gr $a0, $fcc0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cmp = fcmp ule double %a, %b
   ret i1 %cmp
 }
@@ -215,13 +216,13 @@ define i1 @fcmp_une(double %a, double %b) {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    fcmp.cune.d $fcc0, $fa0, $fa1
 ; LA32-NEXT:    movcf2gr $a0, $fcc0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: fcmp_une:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    fcmp.cune.d $fcc0, $fa0, $fa1
 ; LA64-NEXT:    movcf2gr $a0, $fcc0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cmp = fcmp une double %a, %b
   ret i1 %cmp
 }
@@ -231,13 +232,13 @@ define i1 @fcmp_uno(double %a, double %b) {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    fcmp.cun.d $fcc0, $fa0, $fa1
 ; LA32-NEXT:    movcf2gr $a0, $fcc0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: fcmp_uno:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    fcmp.cun.d $fcc0, $fa0, $fa1
 ; LA64-NEXT:    movcf2gr $a0, $fcc0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cmp = fcmp uno double %a, %b
   ret i1 %cmp
 }
@@ -246,12 +247,12 @@ define i1 @fcmp_true(double %a, double %b) {
 ; LA32-LABEL: fcmp_true:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    ori $a0, $zero, 1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: fcmp_true:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    ori $a0, $zero, 1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cmp = fcmp true double %a, %b
   ret i1 %cmp
 }

diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/fcmp-flt.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/fcmp-flt.ll
index 33bdd0b50bd49..01c8b860291e8 100644
--- a/llvm/test/CodeGen/LoongArch/ir-instruction/fcmp-flt.ll
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/fcmp-flt.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc --mtriple=loongarch32 --mattr=+f,-d < %s | FileCheck %s --check-prefix=LA32
 ; RUN: llc --mtriple=loongarch64 --mattr=+f,-d < %s | FileCheck %s --check-prefix=LA64
 
@@ -8,12 +9,12 @@ define i1 @fcmp_false(float %a, float %b) {
 ; LA32-LABEL: fcmp_false:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    move $a0, $zero
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: fcmp_false:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    move $a0, $zero
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cmp = fcmp false float %a, %b
   ret i1 %cmp
 }
@@ -23,13 +24,13 @@ define i1 @fcmp_oeq(float %a, float %b) {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    fcmp.ceq.s $fcc0, $fa0, $fa1
 ; LA32-NEXT:    movcf2gr $a0, $fcc0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: fcmp_oeq:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    fcmp.ceq.s $fcc0, $fa0, $fa1
 ; LA64-NEXT:    movcf2gr $a0, $fcc0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cmp = fcmp oeq float %a, %b
   ret i1 %cmp
 }
@@ -39,13 +40,13 @@ define i1 @fcmp_ogt(float %a, float %b) {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    fcmp.clt.s $fcc0, $fa1, $fa0
 ; LA32-NEXT:    movcf2gr $a0, $fcc0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: fcmp_ogt:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    fcmp.clt.s $fcc0, $fa1, $fa0
 ; LA64-NEXT:    movcf2gr $a0, $fcc0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cmp = fcmp ogt float %a, %b
   ret i1 %cmp
 }
@@ -55,13 +56,13 @@ define i1 @fcmp_oge(float %a, float %b) {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    fcmp.cle.s $fcc0, $fa1, $fa0
 ; LA32-NEXT:    movcf2gr $a0, $fcc0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: fcmp_oge:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    fcmp.cle.s $fcc0, $fa1, $fa0
 ; LA64-NEXT:    movcf2gr $a0, $fcc0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cmp = fcmp oge float %a, %b
   ret i1 %cmp
 }
@@ -71,13 +72,13 @@ define i1 @fcmp_olt(float %a, float %b) {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    fcmp.clt.s $fcc0, $fa0, $fa1
 ; LA32-NEXT:    movcf2gr $a0, $fcc0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: fcmp_olt:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    fcmp.clt.s $fcc0, $fa0, $fa1
 ; LA64-NEXT:    movcf2gr $a0, $fcc0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cmp = fcmp olt float %a, %b
   ret i1 %cmp
 }
@@ -87,13 +88,13 @@ define i1 @fcmp_ole(float %a, float %b) {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    fcmp.cle.s $fcc0, $fa0, $fa1
 ; LA32-NEXT:    movcf2gr $a0, $fcc0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: fcmp_ole:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    fcmp.cle.s $fcc0, $fa0, $fa1
 ; LA64-NEXT:    movcf2gr $a0, $fcc0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cmp = fcmp ole float %a, %b
   ret i1 %cmp
 }
@@ -103,13 +104,13 @@ define i1 @fcmp_one(float %a, float %b) {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    fcmp.cne.s $fcc0, $fa0, $fa1
 ; LA32-NEXT:    movcf2gr $a0, $fcc0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: fcmp_one:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    fcmp.cne.s $fcc0, $fa0, $fa1
 ; LA64-NEXT:    movcf2gr $a0, $fcc0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cmp = fcmp one float %a, %b
   ret i1 %cmp
 }
@@ -119,13 +120,13 @@ define i1 @fcmp_ord(float %a, float %b) {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    fcmp.cor.s $fcc0, $fa0, $fa1
 ; LA32-NEXT:    movcf2gr $a0, $fcc0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: fcmp_ord:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    fcmp.cor.s $fcc0, $fa0, $fa1
 ; LA64-NEXT:    movcf2gr $a0, $fcc0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cmp = fcmp ord float %a, %b
   ret i1 %cmp
 }
@@ -135,13 +136,13 @@ define i1 @fcmp_ueq(float %a, float %b) {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    fcmp.cueq.s $fcc0, $fa0, $fa1
 ; LA32-NEXT:    movcf2gr $a0, $fcc0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: fcmp_ueq:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    fcmp.cueq.s $fcc0, $fa0, $fa1
 ; LA64-NEXT:    movcf2gr $a0, $fcc0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cmp = fcmp ueq float %a, %b
   ret i1 %cmp
 }
@@ -151,13 +152,13 @@ define i1 @fcmp_ugt(float %a, float %b) {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    fcmp.cult.s $fcc0, $fa1, $fa0
 ; LA32-NEXT:    movcf2gr $a0, $fcc0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: fcmp_ugt:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    fcmp.cult.s $fcc0, $fa1, $fa0
 ; LA64-NEXT:    movcf2gr $a0, $fcc0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cmp = fcmp ugt float %a, %b
   ret i1 %cmp
 }
@@ -167,13 +168,13 @@ define i1 @fcmp_uge(float %a, float %b) {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    fcmp.cule.s $fcc0, $fa1, $fa0
 ; LA32-NEXT:    movcf2gr $a0, $fcc0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: fcmp_uge:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    fcmp.cule.s $fcc0, $fa1, $fa0
 ; LA64-NEXT:    movcf2gr $a0, $fcc0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cmp = fcmp uge float %a, %b
   ret i1 %cmp
 }
@@ -183,13 +184,13 @@ define i1 @fcmp_ult(float %a, float %b) {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    fcmp.cult.s $fcc0, $fa0, $fa1
 ; LA32-NEXT:    movcf2gr $a0, $fcc0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: fcmp_ult:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    fcmp.cult.s $fcc0, $fa0, $fa1
 ; LA64-NEXT:    movcf2gr $a0, $fcc0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cmp = fcmp ult float %a, %b
   ret i1 %cmp
 }
@@ -199,13 +200,13 @@ define i1 @fcmp_ule(float %a, float %b) {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    fcmp.cule.s $fcc0, $fa0, $fa1
 ; LA32-NEXT:    movcf2gr $a0, $fcc0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: fcmp_ule:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    fcmp.cule.s $fcc0, $fa0, $fa1
 ; LA64-NEXT:    movcf2gr $a0, $fcc0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cmp = fcmp ule float %a, %b
   ret i1 %cmp
 }
@@ -215,13 +216,13 @@ define i1 @fcmp_une(float %a, float %b) {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    fcmp.cune.s $fcc0, $fa0, $fa1
 ; LA32-NEXT:    movcf2gr $a0, $fcc0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: fcmp_une:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    fcmp.cune.s $fcc0, $fa0, $fa1
 ; LA64-NEXT:    movcf2gr $a0, $fcc0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cmp = fcmp une float %a, %b
   ret i1 %cmp
 }
@@ -231,13 +232,13 @@ define i1 @fcmp_uno(float %a, float %b) {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    fcmp.cun.s $fcc0, $fa0, $fa1
 ; LA32-NEXT:    movcf2gr $a0, $fcc0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: fcmp_uno:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    fcmp.cun.s $fcc0, $fa0, $fa1
 ; LA64-NEXT:    movcf2gr $a0, $fcc0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cmp = fcmp uno float %a, %b
   ret i1 %cmp
 }
@@ -246,12 +247,12 @@ define i1 @fcmp_true(float %a, float %b) {
 ; LA32-LABEL: fcmp_true:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    ori $a0, $zero, 1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: fcmp_true:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    ori $a0, $zero, 1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cmp = fcmp true float %a, %b
   ret i1 %cmp
 }

diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/fdiv.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/fdiv.ll
index 9c3f85950d5d4..e3154122c90d8 100644
--- a/llvm/test/CodeGen/LoongArch/ir-instruction/fdiv.ll
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/fdiv.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc --mtriple=loongarch32 --mattr=+d < %s | FileCheck %s --check-prefix=LA32
 ; RUN: llc --mtriple=loongarch64 --mattr=+d < %s | FileCheck %s --check-prefix=LA64
 
@@ -7,12 +8,12 @@ define float @fdiv_s(float %x, float %y) {
 ; LA32-LABEL: fdiv_s:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    fdiv.s $fa0, $fa0, $fa1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: fdiv_s:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    fdiv.s $fa0, $fa0, $fa1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %div = fdiv float %x, %y
   ret float %div
 }
@@ -21,12 +22,12 @@ define double @fdiv_d(double %x, double %y) {
 ; LA32-LABEL: fdiv_d:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    fdiv.d $fa0, $fa0, $fa1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: fdiv_d:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    fdiv.d $fa0, $fa0, $fa1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %div = fdiv double %x, %y
   ret double %div
 }

diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/fence.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/fence.ll
index f8c98bbc71384..724639f3c6fb9 100644
--- a/llvm/test/CodeGen/LoongArch/ir-instruction/fence.ll
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/fence.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc --mtriple=loongarch32 < %s | FileCheck %s --check-prefix=LA32
 ; RUN: llc --mtriple=loongarch64 < %s | FileCheck %s --check-prefix=LA64
 
@@ -5,12 +6,12 @@ define void @fence_acquire() nounwind {
 ; LA32-LABEL: fence_acquire:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    dbar 0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: fence_acquire:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    dbar 0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   fence acquire
   ret void
 }
@@ -19,12 +20,12 @@ define void @fence_release() nounwind {
 ; LA32-LABEL: fence_release:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    dbar 0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: fence_release:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    dbar 0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   fence release
   ret void
 }
@@ -33,12 +34,12 @@ define void @fence_acq_rel() nounwind {
 ; LA32-LABEL: fence_acq_rel:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    dbar 0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: fence_acq_rel:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    dbar 0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   fence acq_rel
   ret void
 }
@@ -47,12 +48,12 @@ define void @fence_seq_cst() nounwind {
 ; LA32-LABEL: fence_seq_cst:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    dbar 0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: fence_seq_cst:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    dbar 0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   fence seq_cst
   ret void
 }

diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/float-convert.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/float-convert.ll
index 167249b026179..4a0f2ff685f3d 100644
--- a/llvm/test/CodeGen/LoongArch/ir-instruction/float-convert.ll
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/float-convert.ll
@@ -9,25 +9,25 @@ define signext i8 @convert_float_to_i8(float %a) nounwind {
 ; LA32F:       # %bb.0:
 ; LA32F-NEXT:    ftintrz.w.s $fa0, $fa0
 ; LA32F-NEXT:    movfr2gr.s $a0, $fa0
-; LA32F-NEXT:    jirl $zero, $ra, 0
+; LA32F-NEXT:    ret
 ;
 ; LA32D-LABEL: convert_float_to_i8:
 ; LA32D:       # %bb.0:
 ; LA32D-NEXT:    ftintrz.w.s $fa0, $fa0
 ; LA32D-NEXT:    movfr2gr.s $a0, $fa0
-; LA32D-NEXT:    jirl $zero, $ra, 0
+; LA32D-NEXT:    ret
 ;
 ; LA64F-LABEL: convert_float_to_i8:
 ; LA64F:       # %bb.0:
 ; LA64F-NEXT:    ftintrz.w.s $fa0, $fa0
 ; LA64F-NEXT:    movfr2gr.s $a0, $fa0
-; LA64F-NEXT:    jirl $zero, $ra, 0
+; LA64F-NEXT:    ret
 ;
 ; LA64D-LABEL: convert_float_to_i8:
 ; LA64D:       # %bb.0:
 ; LA64D-NEXT:    ftintrz.l.s $fa0, $fa0
 ; LA64D-NEXT:    movfr2gr.d $a0, $fa0
-; LA64D-NEXT:    jirl $zero, $ra, 0
+; LA64D-NEXT:    ret
   %1 = fptosi float %a to i8
   ret i8 %1
 }
@@ -37,25 +37,25 @@ define signext i16 @convert_float_to_i16(float %a) nounwind {
 ; LA32F:       # %bb.0:
 ; LA32F-NEXT:    ftintrz.w.s $fa0, $fa0
 ; LA32F-NEXT:    movfr2gr.s $a0, $fa0
-; LA32F-NEXT:    jirl $zero, $ra, 0
+; LA32F-NEXT:    ret
 ;
 ; LA32D-LABEL: convert_float_to_i16:
 ; LA32D:       # %bb.0:
 ; LA32D-NEXT:    ftintrz.w.s $fa0, $fa0
 ; LA32D-NEXT:    movfr2gr.s $a0, $fa0
-; LA32D-NEXT:    jirl $zero, $ra, 0
+; LA32D-NEXT:    ret
 ;
 ; LA64F-LABEL: convert_float_to_i16:
 ; LA64F:       # %bb.0:
 ; LA64F-NEXT:    ftintrz.w.s $fa0, $fa0
 ; LA64F-NEXT:    movfr2gr.s $a0, $fa0
-; LA64F-NEXT:    jirl $zero, $ra, 0
+; LA64F-NEXT:    ret
 ;
 ; LA64D-LABEL: convert_float_to_i16:
 ; LA64D:       # %bb.0:
 ; LA64D-NEXT:    ftintrz.l.s $fa0, $fa0
 ; LA64D-NEXT:    movfr2gr.d $a0, $fa0
-; LA64D-NEXT:    jirl $zero, $ra, 0
+; LA64D-NEXT:    ret
   %1 = fptosi float %a to i16
   ret i16 %1
 }
@@ -65,25 +65,25 @@ define i32 @convert_float_to_i32(float %a) nounwind {
 ; LA32F:       # %bb.0:
 ; LA32F-NEXT:    ftintrz.w.s $fa0, $fa0
 ; LA32F-NEXT:    movfr2gr.s $a0, $fa0
-; LA32F-NEXT:    jirl $zero, $ra, 0
+; LA32F-NEXT:    ret
 ;
 ; LA32D-LABEL: convert_float_to_i32:
 ; LA32D:       # %bb.0:
 ; LA32D-NEXT:    ftintrz.w.s $fa0, $fa0
 ; LA32D-NEXT:    movfr2gr.s $a0, $fa0
-; LA32D-NEXT:    jirl $zero, $ra, 0
+; LA32D-NEXT:    ret
 ;
 ; LA64F-LABEL: convert_float_to_i32:
 ; LA64F:       # %bb.0:
 ; LA64F-NEXT:    ftintrz.w.s $fa0, $fa0
 ; LA64F-NEXT:    movfr2gr.s $a0, $fa0
-; LA64F-NEXT:    jirl $zero, $ra, 0
+; LA64F-NEXT:    ret
 ;
 ; LA64D-LABEL: convert_float_to_i32:
 ; LA64D:       # %bb.0:
 ; LA64D-NEXT:    ftintrz.w.s $fa0, $fa0
 ; LA64D-NEXT:    movfr2gr.s $a0, $fa0
-; LA64D-NEXT:    jirl $zero, $ra, 0
+; LA64D-NEXT:    ret
   %1 = fptosi float %a to i32
   ret i32 %1
 }
@@ -96,7 +96,7 @@ define i64 @convert_float_to_i64(float %a) nounwind {
 ; LA32F-NEXT:    bl __fixsfdi
 ; LA32F-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32F-NEXT:    addi.w $sp, $sp, 16
-; LA32F-NEXT:    jirl $zero, $ra, 0
+; LA32F-NEXT:    ret
 ;
 ; LA32D-LABEL: convert_float_to_i64:
 ; LA32D:       # %bb.0:
@@ -105,19 +105,19 @@ define i64 @convert_float_to_i64(float %a) nounwind {
 ; LA32D-NEXT:    bl __fixsfdi
 ; LA32D-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32D-NEXT:    addi.w $sp, $sp, 16
-; LA32D-NEXT:    jirl $zero, $ra, 0
+; LA32D-NEXT:    ret
 ;
 ; LA64F-LABEL: convert_float_to_i64:
 ; LA64F:       # %bb.0:
 ; LA64F-NEXT:    ftintrz.w.s $fa0, $fa0
 ; LA64F-NEXT:    movfr2gr.s $a0, $fa0
-; LA64F-NEXT:    jirl $zero, $ra, 0
+; LA64F-NEXT:    ret
 ;
 ; LA64D-LABEL: convert_float_to_i64:
 ; LA64D:       # %bb.0:
 ; LA64D-NEXT:    ftintrz.l.s $fa0, $fa0
 ; LA64D-NEXT:    movfr2gr.d $a0, $fa0
-; LA64D-NEXT:    jirl $zero, $ra, 0
+; LA64D-NEXT:    ret
   %1 = fptosi float %a to i64
   ret i64 %1
 }
@@ -127,25 +127,25 @@ define zeroext i8 @convert_float_to_u8(float %a) nounwind {
 ; LA32F:       # %bb.0:
 ; LA32F-NEXT:    ftintrz.w.s $fa0, $fa0
 ; LA32F-NEXT:    movfr2gr.s $a0, $fa0
-; LA32F-NEXT:    jirl $zero, $ra, 0
+; LA32F-NEXT:    ret
 ;
 ; LA32D-LABEL: convert_float_to_u8:
 ; LA32D:       # %bb.0:
 ; LA32D-NEXT:    ftintrz.w.s $fa0, $fa0
 ; LA32D-NEXT:    movfr2gr.s $a0, $fa0
-; LA32D-NEXT:    jirl $zero, $ra, 0
+; LA32D-NEXT:    ret
 ;
 ; LA64F-LABEL: convert_float_to_u8:
 ; LA64F:       # %bb.0:
 ; LA64F-NEXT:    ftintrz.w.s $fa0, $fa0
 ; LA64F-NEXT:    movfr2gr.s $a0, $fa0
-; LA64F-NEXT:    jirl $zero, $ra, 0
+; LA64F-NEXT:    ret
 ;
 ; LA64D-LABEL: convert_float_to_u8:
 ; LA64D:       # %bb.0:
 ; LA64D-NEXT:    ftintrz.l.s $fa0, $fa0
 ; LA64D-NEXT:    movfr2gr.d $a0, $fa0
-; LA64D-NEXT:    jirl $zero, $ra, 0
+; LA64D-NEXT:    ret
   %1 = fptoui float %a to i8
   ret i8 %1
 }
@@ -155,25 +155,25 @@ define zeroext i16 @convert_float_to_u16(float %a) nounwind {
 ; LA32F:       # %bb.0:
 ; LA32F-NEXT:    ftintrz.w.s $fa0, $fa0
 ; LA32F-NEXT:    movfr2gr.s $a0, $fa0
-; LA32F-NEXT:    jirl $zero, $ra, 0
+; LA32F-NEXT:    ret
 ;
 ; LA32D-LABEL: convert_float_to_u16:
 ; LA32D:       # %bb.0:
 ; LA32D-NEXT:    ftintrz.w.s $fa0, $fa0
 ; LA32D-NEXT:    movfr2gr.s $a0, $fa0
-; LA32D-NEXT:    jirl $zero, $ra, 0
+; LA32D-NEXT:    ret
 ;
 ; LA64F-LABEL: convert_float_to_u16:
 ; LA64F:       # %bb.0:
 ; LA64F-NEXT:    ftintrz.w.s $fa0, $fa0
 ; LA64F-NEXT:    movfr2gr.s $a0, $fa0
-; LA64F-NEXT:    jirl $zero, $ra, 0
+; LA64F-NEXT:    ret
 ;
 ; LA64D-LABEL: convert_float_to_u16:
 ; LA64D:       # %bb.0:
 ; LA64D-NEXT:    ftintrz.l.s $fa0, $fa0
 ; LA64D-NEXT:    movfr2gr.d $a0, $fa0
-; LA64D-NEXT:    jirl $zero, $ra, 0
+; LA64D-NEXT:    ret
   %1 = fptoui float %a to i16
   ret i16 %1
 }
@@ -196,7 +196,7 @@ define i32 @convert_float_to_u32(float %a) nounwind {
 ; LA32F-NEXT:    movfr2gr.s $a2, $fa0
 ; LA32F-NEXT:    maskeqz $a1, $a2, $a1
 ; LA32F-NEXT:    or $a0, $a1, $a0
-; LA32F-NEXT:    jirl $zero, $ra, 0
+; LA32F-NEXT:    ret
 ;
 ; LA32D-LABEL: convert_float_to_u32:
 ; LA32D:       # %bb.0:
@@ -215,7 +215,7 @@ define i32 @convert_float_to_u32(float %a) nounwind {
 ; LA32D-NEXT:    movfr2gr.s $a2, $fa0
 ; LA32D-NEXT:    maskeqz $a1, $a2, $a1
 ; LA32D-NEXT:    or $a0, $a1, $a0
-; LA32D-NEXT:    jirl $zero, $ra, 0
+; LA32D-NEXT:    ret
 ;
 ; LA64F-LABEL: convert_float_to_u32:
 ; LA64F:       # %bb.0:
@@ -234,13 +234,13 @@ define i32 @convert_float_to_u32(float %a) nounwind {
 ; LA64F-NEXT:    movfr2gr.s $a2, $fa0
 ; LA64F-NEXT:    maskeqz $a1, $a2, $a1
 ; LA64F-NEXT:    or $a0, $a1, $a0
-; LA64F-NEXT:    jirl $zero, $ra, 0
+; LA64F-NEXT:    ret
 ;
 ; LA64D-LABEL: convert_float_to_u32:
 ; LA64D:       # %bb.0:
 ; LA64D-NEXT:    ftintrz.l.s $fa0, $fa0
 ; LA64D-NEXT:    movfr2gr.d $a0, $fa0
-; LA64D-NEXT:    jirl $zero, $ra, 0
+; LA64D-NEXT:    ret
   %1 = fptoui float %a to i32
   ret i32 %1
 }
@@ -253,7 +253,7 @@ define i64 @convert_float_to_u64(float %a) nounwind {
 ; LA32F-NEXT:    bl __fixunssfdi
 ; LA32F-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32F-NEXT:    addi.w $sp, $sp, 16
-; LA32F-NEXT:    jirl $zero, $ra, 0
+; LA32F-NEXT:    ret
 ;
 ; LA32D-LABEL: convert_float_to_u64:
 ; LA32D:       # %bb.0:
@@ -262,7 +262,7 @@ define i64 @convert_float_to_u64(float %a) nounwind {
 ; LA32D-NEXT:    bl __fixunssfdi
 ; LA32D-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32D-NEXT:    addi.w $sp, $sp, 16
-; LA32D-NEXT:    jirl $zero, $ra, 0
+; LA32D-NEXT:    ret
 ;
 ; LA64F-LABEL: convert_float_to_u64:
 ; LA64F:       # %bb.0:
@@ -281,7 +281,7 @@ define i64 @convert_float_to_u64(float %a) nounwind {
 ; LA64F-NEXT:    movfr2gr.s $a2, $fa0
 ; LA64F-NEXT:    maskeqz $a1, $a2, $a1
 ; LA64F-NEXT:    or $a0, $a1, $a0
-; LA64F-NEXT:    jirl $zero, $ra, 0
+; LA64F-NEXT:    ret
 ;
 ; LA64D-LABEL: convert_float_to_u64:
 ; LA64D:       # %bb.0:
@@ -300,7 +300,7 @@ define i64 @convert_float_to_u64(float %a) nounwind {
 ; LA64D-NEXT:    movfr2gr.d $a2, $fa0
 ; LA64D-NEXT:    maskeqz $a1, $a2, $a1
 ; LA64D-NEXT:    or $a0, $a1, $a0
-; LA64D-NEXT:    jirl $zero, $ra, 0
+; LA64D-NEXT:    ret
   %1 = fptoui float %a to i64
   ret i64 %1
 }
@@ -310,25 +310,25 @@ define float @convert_i8_to_float(i8 signext %a) nounwind {
 ; LA32F:       # %bb.0:
 ; LA32F-NEXT:    movgr2fr.w $fa0, $a0
 ; LA32F-NEXT:    ffint.s.w $fa0, $fa0
-; LA32F-NEXT:    jirl $zero, $ra, 0
+; LA32F-NEXT:    ret
 ;
 ; LA32D-LABEL: convert_i8_to_float:
 ; LA32D:       # %bb.0:
 ; LA32D-NEXT:    movgr2fr.w $fa0, $a0
 ; LA32D-NEXT:    ffint.s.w $fa0, $fa0
-; LA32D-NEXT:    jirl $zero, $ra, 0
+; LA32D-NEXT:    ret
 ;
 ; LA64F-LABEL: convert_i8_to_float:
 ; LA64F:       # %bb.0:
 ; LA64F-NEXT:    movgr2fr.w $fa0, $a0
 ; LA64F-NEXT:    ffint.s.w $fa0, $fa0
-; LA64F-NEXT:    jirl $zero, $ra, 0
+; LA64F-NEXT:    ret
 ;
 ; LA64D-LABEL: convert_i8_to_float:
 ; LA64D:       # %bb.0:
 ; LA64D-NEXT:    movgr2fr.w $fa0, $a0
 ; LA64D-NEXT:    ffint.s.w $fa0, $fa0
-; LA64D-NEXT:    jirl $zero, $ra, 0
+; LA64D-NEXT:    ret
   %1 = sitofp i8 %a to float
   ret float %1
 }
@@ -338,25 +338,25 @@ define float @convert_i16_to_float(i16 signext %a) nounwind {
 ; LA32F:       # %bb.0:
 ; LA32F-NEXT:    movgr2fr.w $fa0, $a0
 ; LA32F-NEXT:    ffint.s.w $fa0, $fa0
-; LA32F-NEXT:    jirl $zero, $ra, 0
+; LA32F-NEXT:    ret
 ;
 ; LA32D-LABEL: convert_i16_to_float:
 ; LA32D:       # %bb.0:
 ; LA32D-NEXT:    movgr2fr.w $fa0, $a0
 ; LA32D-NEXT:    ffint.s.w $fa0, $fa0
-; LA32D-NEXT:    jirl $zero, $ra, 0
+; LA32D-NEXT:    ret
 ;
 ; LA64F-LABEL: convert_i16_to_float:
 ; LA64F:       # %bb.0:
 ; LA64F-NEXT:    movgr2fr.w $fa0, $a0
 ; LA64F-NEXT:    ffint.s.w $fa0, $fa0
-; LA64F-NEXT:    jirl $zero, $ra, 0
+; LA64F-NEXT:    ret
 ;
 ; LA64D-LABEL: convert_i16_to_float:
 ; LA64D:       # %bb.0:
 ; LA64D-NEXT:    movgr2fr.w $fa0, $a0
 ; LA64D-NEXT:    ffint.s.w $fa0, $fa0
-; LA64D-NEXT:    jirl $zero, $ra, 0
+; LA64D-NEXT:    ret
   %1 = sitofp i16 %a to float
   ret float %1
 }
@@ -366,27 +366,27 @@ define float @convert_i32_to_float(i32 %a) nounwind {
 ; LA32F:       # %bb.0:
 ; LA32F-NEXT:    movgr2fr.w $fa0, $a0
 ; LA32F-NEXT:    ffint.s.w $fa0, $fa0
-; LA32F-NEXT:    jirl $zero, $ra, 0
+; LA32F-NEXT:    ret
 ;
 ; LA32D-LABEL: convert_i32_to_float:
 ; LA32D:       # %bb.0:
 ; LA32D-NEXT:    movgr2fr.w $fa0, $a0
 ; LA32D-NEXT:    ffint.s.w $fa0, $fa0
-; LA32D-NEXT:    jirl $zero, $ra, 0
+; LA32D-NEXT:    ret
 ;
 ; LA64F-LABEL: convert_i32_to_float:
 ; LA64F:       # %bb.0:
 ; LA64F-NEXT:    addi.w $a0, $a0, 0
 ; LA64F-NEXT:    movgr2fr.w $fa0, $a0
 ; LA64F-NEXT:    ffint.s.w $fa0, $fa0
-; LA64F-NEXT:    jirl $zero, $ra, 0
+; LA64F-NEXT:    ret
 ;
 ; LA64D-LABEL: convert_i32_to_float:
 ; LA64D:       # %bb.0:
 ; LA64D-NEXT:    addi.w $a0, $a0, 0
 ; LA64D-NEXT:    movgr2fr.w $fa0, $a0
 ; LA64D-NEXT:    ffint.s.w $fa0, $fa0
-; LA64D-NEXT:    jirl $zero, $ra, 0
+; LA64D-NEXT:    ret
   %1 = sitofp i32 %a to float
   ret float %1
 }
@@ -399,7 +399,7 @@ define float @convert_i64_to_float(i64 %a) nounwind {
 ; LA32F-NEXT:    bl __floatdisf
 ; LA32F-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32F-NEXT:    addi.w $sp, $sp, 16
-; LA32F-NEXT:    jirl $zero, $ra, 0
+; LA32F-NEXT:    ret
 ;
 ; LA32D-LABEL: convert_i64_to_float:
 ; LA32D:       # %bb.0:
@@ -408,19 +408,19 @@ define float @convert_i64_to_float(i64 %a) nounwind {
 ; LA32D-NEXT:    bl __floatdisf
 ; LA32D-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32D-NEXT:    addi.w $sp, $sp, 16
-; LA32D-NEXT:    jirl $zero, $ra, 0
+; LA32D-NEXT:    ret
 ;
 ; LA64F-LABEL: convert_i64_to_float:
 ; LA64F:       # %bb.0:
 ; LA64F-NEXT:    movgr2fr.w $fa0, $a0
 ; LA64F-NEXT:    ffint.s.w $fa0, $fa0
-; LA64F-NEXT:    jirl $zero, $ra, 0
+; LA64F-NEXT:    ret
 ;
 ; LA64D-LABEL: convert_i64_to_float:
 ; LA64D:       # %bb.0:
 ; LA64D-NEXT:    movgr2fr.w $fa0, $a0
 ; LA64D-NEXT:    ffint.s.w $fa0, $fa0
-; LA64D-NEXT:    jirl $zero, $ra, 0
+; LA64D-NEXT:    ret
   %1 = sitofp i64 %a to float
   ret float %1
 }
@@ -430,25 +430,25 @@ define float @convert_u8_to_float(i8 zeroext %a) nounwind {
 ; LA32F:       # %bb.0:
 ; LA32F-NEXT:    movgr2fr.w $fa0, $a0
 ; LA32F-NEXT:    ffint.s.w $fa0, $fa0
-; LA32F-NEXT:    jirl $zero, $ra, 0
+; LA32F-NEXT:    ret
 ;
 ; LA32D-LABEL: convert_u8_to_float:
 ; LA32D:       # %bb.0:
 ; LA32D-NEXT:    movgr2fr.w $fa0, $a0
 ; LA32D-NEXT:    ffint.s.w $fa0, $fa0
-; LA32D-NEXT:    jirl $zero, $ra, 0
+; LA32D-NEXT:    ret
 ;
 ; LA64F-LABEL: convert_u8_to_float:
 ; LA64F:       # %bb.0:
 ; LA64F-NEXT:    movgr2fr.w $fa0, $a0
 ; LA64F-NEXT:    ffint.s.w $fa0, $fa0
-; LA64F-NEXT:    jirl $zero, $ra, 0
+; LA64F-NEXT:    ret
 ;
 ; LA64D-LABEL: convert_u8_to_float:
 ; LA64D:       # %bb.0:
 ; LA64D-NEXT:    movgr2fr.w $fa0, $a0
 ; LA64D-NEXT:    ffint.s.w $fa0, $fa0
-; LA64D-NEXT:    jirl $zero, $ra, 0
+; LA64D-NEXT:    ret
   %1 = uitofp i8 %a to float
   ret float %1
 }
@@ -458,25 +458,25 @@ define float @convert_u16_to_float(i16 zeroext %a) nounwind {
 ; LA32F:       # %bb.0:
 ; LA32F-NEXT:    movgr2fr.w $fa0, $a0
 ; LA32F-NEXT:    ffint.s.w $fa0, $fa0
-; LA32F-NEXT:    jirl $zero, $ra, 0
+; LA32F-NEXT:    ret
 ;
 ; LA32D-LABEL: convert_u16_to_float:
 ; LA32D:       # %bb.0:
 ; LA32D-NEXT:    movgr2fr.w $fa0, $a0
 ; LA32D-NEXT:    ffint.s.w $fa0, $fa0
-; LA32D-NEXT:    jirl $zero, $ra, 0
+; LA32D-NEXT:    ret
 ;
 ; LA64F-LABEL: convert_u16_to_float:
 ; LA64F:       # %bb.0:
 ; LA64F-NEXT:    movgr2fr.w $fa0, $a0
 ; LA64F-NEXT:    ffint.s.w $fa0, $fa0
-; LA64F-NEXT:    jirl $zero, $ra, 0
+; LA64F-NEXT:    ret
 ;
 ; LA64D-LABEL: convert_u16_to_float:
 ; LA64D:       # %bb.0:
 ; LA64D-NEXT:    movgr2fr.w $fa0, $a0
 ; LA64D-NEXT:    ffint.s.w $fa0, $fa0
-; LA64D-NEXT:    jirl $zero, $ra, 0
+; LA64D-NEXT:    ret
   %1 = uitofp i16 %a to float
   ret float %1
 }
@@ -495,7 +495,7 @@ define float @convert_u32_to_float(i32 %a) nounwind {
 ; LA32F-NEXT:    movgr2fr.w $fa1, $a0
 ; LA32F-NEXT:    ffint.s.w $fa1, $fa1
 ; LA32F-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
-; LA32F-NEXT:    jirl $zero, $ra, 0
+; LA32F-NEXT:    ret
 ;
 ; LA32D-LABEL: convert_u32_to_float:
 ; LA32D:       # %bb.0:
@@ -510,7 +510,7 @@ define float @convert_u32_to_float(i32 %a) nounwind {
 ; LA32D-NEXT:    fsub.d $fa0, $fa1, $fa0
 ; LA32D-NEXT:    fcvt.s.d $fa0, $fa0
 ; LA32D-NEXT:    addi.w $sp, $sp, 16
-; LA32D-NEXT:    jirl $zero, $ra, 0
+; LA32D-NEXT:    ret
 ;
 ; LA64F-LABEL: convert_u32_to_float:
 ; LA64F:       # %bb.0:
@@ -526,7 +526,7 @@ define float @convert_u32_to_float(i32 %a) nounwind {
 ; LA64F-NEXT:    movgr2fr.w $fa1, $a0
 ; LA64F-NEXT:    ffint.s.w $fa1, $fa1
 ; LA64F-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
-; LA64F-NEXT:    jirl $zero, $ra, 0
+; LA64F-NEXT:    ret
 ;
 ; LA64D-LABEL: convert_u32_to_float:
 ; LA64D:       # %bb.0:
@@ -542,7 +542,7 @@ define float @convert_u32_to_float(i32 %a) nounwind {
 ; LA64D-NEXT:    movgr2fr.w $fa1, $a0
 ; LA64D-NEXT:    ffint.s.w $fa1, $fa1
 ; LA64D-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
-; LA64D-NEXT:    jirl $zero, $ra, 0
+; LA64D-NEXT:    ret
   %1 = uitofp i32 %a to float
   ret float %1
 }
@@ -555,7 +555,7 @@ define float @convert_u64_to_float(i64 %a) nounwind {
 ; LA32F-NEXT:    bl __floatundisf
 ; LA32F-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32F-NEXT:    addi.w $sp, $sp, 16
-; LA32F-NEXT:    jirl $zero, $ra, 0
+; LA32F-NEXT:    ret
 ;
 ; LA32D-LABEL: convert_u64_to_float:
 ; LA32D:       # %bb.0:
@@ -564,7 +564,7 @@ define float @convert_u64_to_float(i64 %a) nounwind {
 ; LA32D-NEXT:    bl __floatundisf
 ; LA32D-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32D-NEXT:    addi.w $sp, $sp, 16
-; LA32D-NEXT:    jirl $zero, $ra, 0
+; LA32D-NEXT:    ret
 ;
 ; LA64F-LABEL: convert_u64_to_float:
 ; LA64F:       # %bb.0:
@@ -579,7 +579,7 @@ define float @convert_u64_to_float(i64 %a) nounwind {
 ; LA64F-NEXT:    movgr2fr.w $fa1, $a0
 ; LA64F-NEXT:    ffint.s.w $fa1, $fa1
 ; LA64F-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
-; LA64F-NEXT:    jirl $zero, $ra, 0
+; LA64F-NEXT:    ret
 ;
 ; LA64D-LABEL: convert_u64_to_float:
 ; LA64D:       # %bb.0:
@@ -594,7 +594,7 @@ define float @convert_u64_to_float(i64 %a) nounwind {
 ; LA64D-NEXT:    movgr2fr.w $fa1, $a0
 ; LA64D-NEXT:    ffint.s.w $fa1, $fa1
 ; LA64D-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
-; LA64D-NEXT:    jirl $zero, $ra, 0
+; LA64D-NEXT:    ret
   %1 = uitofp i64 %a to float
   ret float %1
 }
@@ -603,22 +603,22 @@ define i32 @bitcast_float_to_i32(float %a) nounwind {
 ; LA32F-LABEL: bitcast_float_to_i32:
 ; LA32F:       # %bb.0:
 ; LA32F-NEXT:    movfr2gr.s $a0, $fa0
-; LA32F-NEXT:    jirl $zero, $ra, 0
+; LA32F-NEXT:    ret
 ;
 ; LA32D-LABEL: bitcast_float_to_i32:
 ; LA32D:       # %bb.0:
 ; LA32D-NEXT:    movfr2gr.s $a0, $fa0
-; LA32D-NEXT:    jirl $zero, $ra, 0
+; LA32D-NEXT:    ret
 ;
 ; LA64F-LABEL: bitcast_float_to_i32:
 ; LA64F:       # %bb.0:
 ; LA64F-NEXT:    movfr2gr.s $a0, $fa0
-; LA64F-NEXT:    jirl $zero, $ra, 0
+; LA64F-NEXT:    ret
 ;
 ; LA64D-LABEL: bitcast_float_to_i32:
 ; LA64D:       # %bb.0:
 ; LA64D-NEXT:    movfr2gr.s $a0, $fa0
-; LA64D-NEXT:    jirl $zero, $ra, 0
+; LA64D-NEXT:    ret
   %1 = bitcast float %a to i32
   ret i32 %1
 }
@@ -627,22 +627,22 @@ define float @bitcast_i32_to_float(i32 %a) nounwind {
 ; LA32F-LABEL: bitcast_i32_to_float:
 ; LA32F:       # %bb.0:
 ; LA32F-NEXT:    movgr2fr.w $fa0, $a0
-; LA32F-NEXT:    jirl $zero, $ra, 0
+; LA32F-NEXT:    ret
 ;
 ; LA32D-LABEL: bitcast_i32_to_float:
 ; LA32D:       # %bb.0:
 ; LA32D-NEXT:    movgr2fr.w $fa0, $a0
-; LA32D-NEXT:    jirl $zero, $ra, 0
+; LA32D-NEXT:    ret
 ;
 ; LA64F-LABEL: bitcast_i32_to_float:
 ; LA64F:       # %bb.0:
 ; LA64F-NEXT:    movgr2fr.w $fa0, $a0
-; LA64F-NEXT:    jirl $zero, $ra, 0
+; LA64F-NEXT:    ret
 ;
 ; LA64D-LABEL: bitcast_i32_to_float:
 ; LA64D:       # %bb.0:
 ; LA64D-NEXT:    movgr2fr.w $fa0, $a0
-; LA64D-NEXT:    jirl $zero, $ra, 0
+; LA64D-NEXT:    ret
   %1 = bitcast i32 %a to float
   ret float %1
 }

diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/fmul.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/fmul.ll
index 78ee031c13015..d3acb566c2a2e 100644
--- a/llvm/test/CodeGen/LoongArch/ir-instruction/fmul.ll
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/fmul.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc --mtriple=loongarch32 --mattr=+d < %s | FileCheck %s --check-prefix=LA32
 ; RUN: llc --mtriple=loongarch64 --mattr=+d < %s | FileCheck %s --check-prefix=LA64
 
@@ -7,12 +8,12 @@ define float @fmul_s(float %x, float %y) {
 ; LA32-LABEL: fmul_s:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    fmul.s $fa0, $fa0, $fa1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: fmul_s:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    fmul.s $fa0, $fa0, $fa1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %mul = fmul float %x, %y
   ret float %mul
 }
@@ -21,12 +22,12 @@ define double @fmul_d(double %x, double %y) {
 ; LA32-LABEL: fmul_d:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    fmul.d $fa0, $fa0, $fa1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: fmul_d:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    fmul.d $fa0, $fa0, $fa1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %mul = fmul double %x, %y
   ret double %mul
 }

diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/fneg.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/fneg.ll
index 3a8a4127d8e7a..da1952654191b 100644
--- a/llvm/test/CodeGen/LoongArch/ir-instruction/fneg.ll
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/fneg.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc --mtriple=loongarch32 --mattr=+d < %s | FileCheck %s --check-prefix=LA32
 ; RUN: llc --mtriple=loongarch64 --mattr=+d < %s | FileCheck %s --check-prefix=LA64
 
@@ -7,12 +8,12 @@ define float @fneg_s(float %x) {
 ; LA32-LABEL: fneg_s:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    fneg.s $fa0, $fa0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: fneg_s:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    fneg.s $fa0, $fa0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %neg = fneg float %x
   ret float %neg
 }
@@ -21,12 +22,12 @@ define double @fneg_d(double %x) {
 ; LA32-LABEL: fneg_d:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    fneg.d $fa0, $fa0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: fneg_d:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    fneg.d $fa0, $fa0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %neg = fneg double %x
   ret double %neg
 }

diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/fsub.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/fsub.ll
index 9ddf583d999c8..0aa0d634f183b 100644
--- a/llvm/test/CodeGen/LoongArch/ir-instruction/fsub.ll
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/fsub.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc --mtriple=loongarch32 --mattr=+d < %s | FileCheck %s --check-prefix=LA32
 ; RUN: llc --mtriple=loongarch64 --mattr=+d < %s | FileCheck %s --check-prefix=LA64
 
@@ -7,12 +8,12 @@ define float @fsub_s(float %x, float %y) {
 ; LA32-LABEL: fsub_s:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    fsub.s $fa0, $fa0, $fa1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: fsub_s:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    fsub.s $fa0, $fa0, $fa1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %sub = fsub float %x, %y
   ret float %sub
 }
@@ -21,12 +22,12 @@ define double @fsub_d(double %x, double %y) {
 ; LA32-LABEL: fsub_d:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    fsub.d $fa0, $fa0, $fa1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: fsub_d:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    fsub.d $fa0, $fa0, $fa1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %sub = fsub double %x, %y
   ret double %sub
 }
@@ -35,12 +36,12 @@ define float @fneg_s(float %x) {
 ; LA32-LABEL: fneg_s:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    fneg.s $fa0, $fa0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: fneg_s:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    fneg.s $fa0, $fa0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
     %res = fsub float -0.0, %x
     ret float %res
 }
@@ -49,12 +50,12 @@ define double @fneg_d(double %x) {
 ; LA32-LABEL: fneg_d:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    fneg.d $fa0, $fa0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: fneg_d:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    fneg.d $fa0, $fa0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
     %res = fsub double -0.0, %x
     ret double %res
 }

diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/icmp.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/icmp.ll
index 947886e6b9dc5..605b3ab293787 100644
--- a/llvm/test/CodeGen/LoongArch/ir-instruction/icmp.ll
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/icmp.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc --mtriple=loongarch32 < %s | FileCheck %s --check-prefix=LA32
 ; RUN: llc --mtriple=loongarch64 < %s | FileCheck %s --check-prefix=LA64
 
@@ -8,13 +9,13 @@ define i1 @icmp_eq(i32 signext %a, i32 signext %b) {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    xor $a0, $a0, $a1
 ; LA32-NEXT:    sltui $a0, $a0, 1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: icmp_eq:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    xor $a0, $a0, $a1
 ; LA64-NEXT:    sltui $a0, $a0, 1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %res = icmp eq i32 %a, %b
   ret i1 %res
 }
@@ -24,13 +25,13 @@ define i1 @icmp_ne(i32 signext %a, i32 signext %b) {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    xor $a0, $a0, $a1
 ; LA32-NEXT:    sltu $a0, $zero, $a0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: icmp_ne:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    xor $a0, $a0, $a1
 ; LA64-NEXT:    sltu $a0, $zero, $a0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %res = icmp ne i32 %a, %b
   ret i1 %res
 }
@@ -39,12 +40,12 @@ define i1 @icmp_ugt(i32 signext %a, i32 signext %b) {
 ; LA32-LABEL: icmp_ugt:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    sltu $a0, $a1, $a0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: icmp_ugt:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    sltu $a0, $a1, $a0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %res = icmp ugt i32 %a, %b
   ret i1 %res
 }
@@ -54,13 +55,13 @@ define i1 @icmp_uge(i32 signext %a, i32 signext %b) {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    sltu $a0, $a0, $a1
 ; LA32-NEXT:    xori $a0, $a0, 1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: icmp_uge:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    sltu $a0, $a0, $a1
 ; LA64-NEXT:    xori $a0, $a0, 1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %res = icmp uge i32 %a, %b
   ret i1 %res
 }
@@ -69,12 +70,12 @@ define i1 @icmp_ult(i32 signext %a, i32 signext %b) {
 ; LA32-LABEL: icmp_ult:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    sltu $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: icmp_ult:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    sltu $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %res = icmp ult i32 %a, %b
   ret i1 %res
 }
@@ -84,13 +85,13 @@ define i1 @icmp_ule(i32 signext %a, i32 signext %b) {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    sltu $a0, $a1, $a0
 ; LA32-NEXT:    xori $a0, $a0, 1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: icmp_ule:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    sltu $a0, $a1, $a0
 ; LA64-NEXT:    xori $a0, $a0, 1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %res = icmp ule i32 %a, %b
   ret i1 %res
 }
@@ -99,12 +100,12 @@ define i1 @icmp_sgt(i32 signext %a, i32 signext %b) {
 ; LA32-LABEL: icmp_sgt:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    slt $a0, $a1, $a0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: icmp_sgt:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    slt $a0, $a1, $a0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %res = icmp sgt i32 %a, %b
   ret i1 %res
 }
@@ -114,13 +115,13 @@ define i1 @icmp_sge(i32 signext %a, i32 signext %b) {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    slt $a0, $a0, $a1
 ; LA32-NEXT:    xori $a0, $a0, 1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: icmp_sge:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    slt $a0, $a0, $a1
 ; LA64-NEXT:    xori $a0, $a0, 1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %res = icmp sge i32 %a, %b
   ret i1 %res
 }
@@ -129,12 +130,12 @@ define i1 @icmp_slt(i32 signext %a, i32 signext %b) {
 ; LA32-LABEL: icmp_slt:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    slt $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: icmp_slt:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    slt $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %res = icmp slt i32 %a, %b
   ret i1 %res
 }
@@ -144,13 +145,13 @@ define i1 @icmp_sle(i32 signext %a, i32 signext %b) {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    slt $a0, $a1, $a0
 ; LA32-NEXT:    xori $a0, $a0, 1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: icmp_sle:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    slt $a0, $a1, $a0
 ; LA64-NEXT:    xori $a0, $a0, 1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %res = icmp sle i32 %a, %b
   ret i1 %res
 }
@@ -159,12 +160,12 @@ define i1 @icmp_slt_3(i32 signext %a) {
 ; LA32-LABEL: icmp_slt_3:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    slti $a0, $a0, 3
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: icmp_slt_3:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    slti $a0, $a0, 3
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %res = icmp slt i32 %a, 3
   ret i1 %res
 }
@@ -173,12 +174,12 @@ define i1 @icmp_ult_3(i32 signext %a) {
 ; LA32-LABEL: icmp_ult_3:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    sltui $a0, $a0, 3
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: icmp_ult_3:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    sltui $a0, $a0, 3
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %res = icmp ult i32 %a, 3
   ret i1 %res
 }
@@ -187,12 +188,12 @@ define i1 @icmp_eq_0(i32 signext %a) {
 ; LA32-LABEL: icmp_eq_0:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    sltui $a0, $a0, 1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: icmp_eq_0:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    sltui $a0, $a0, 1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %res = icmp eq i32 %a, 0
   ret i1 %res
 }
@@ -202,13 +203,13 @@ define i1 @icmp_eq_3(i32 signext %a) {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    addi.w $a0, $a0, -3
 ; LA32-NEXT:    sltui $a0, $a0, 1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: icmp_eq_3:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    addi.d $a0, $a0, -3
 ; LA64-NEXT:    sltui $a0, $a0, 1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %res = icmp eq i32 %a, 3
   ret i1 %res
 }
@@ -217,12 +218,12 @@ define i1 @icmp_ne_0(i32 signext %a) {
 ; LA32-LABEL: icmp_ne_0:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    sltu $a0, $zero, $a0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: icmp_ne_0:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    sltu $a0, $zero, $a0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %res = icmp ne i32 %a, 0
   ret i1 %res
 }
@@ -232,13 +233,13 @@ define i1 @icmp_ne_3(i32 signext %a) {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    addi.w $a0, $a0, -3
 ; LA32-NEXT:    sltu $a0, $zero, $a0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: icmp_ne_3:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    addi.d $a0, $a0, -3
 ; LA64-NEXT:    sltu $a0, $zero, $a0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %res = icmp ne i32 %a, 3
   ret i1 %res
 }

diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/indirectbr.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/indirectbr.ll
index abbd700f44f7d..cd60183a0933b 100644
--- a/llvm/test/CodeGen/LoongArch/ir-instruction/indirectbr.ll
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/indirectbr.ll
@@ -1,12 +1,13 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc --mtriple=loongarch64 < %s | FileCheck %s
 
 define i32 @indirectbr(ptr %target) nounwind {
 ; CHECK-LABEL: indirectbr:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    jirl $zero, $a0, 0
+; CHECK-NEXT:    jr $a0
 ; CHECK-NEXT:  .LBB0_1: # %test_label
 ; CHECK-NEXT:    move $a0, $zero
-; CHECK-NEXT:    jirl $zero, $ra, 0
+; CHECK-NEXT:    ret
   indirectbr ptr %target, [label %test_label]
 test_label:
   br label %ret
@@ -20,7 +21,7 @@ define i32 @indirectbr_with_offset(ptr %a) nounwind {
 ; CHECK-NEXT:    jirl $zero, $a0, 1380
 ; CHECK-NEXT:  .LBB1_1: # %test_label
 ; CHECK-NEXT:    move $a0, $zero
-; CHECK-NEXT:    jirl $zero, $ra, 0
+; CHECK-NEXT:    ret
   %target = getelementptr inbounds i8, ptr %a, i32 1380
   indirectbr ptr %target, [label %test_label]
 test_label:

diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/load-store-atomic.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/load-store-atomic.ll
index 1f06c818acf20..8dde4251fb262 100644
--- a/llvm/test/CodeGen/LoongArch/ir-instruction/load-store-atomic.ll
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/load-store-atomic.ll
@@ -7,13 +7,13 @@ define i8 @load_acquire_i8(ptr %ptr) {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    ld.b $a0, $a0, 0
 ; LA32-NEXT:    dbar 0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: load_acquire_i8:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    ld.b $a0, $a0, 0
 ; LA64-NEXT:    dbar 0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %val = load atomic i8, ptr %ptr acquire, align 1
   ret i8 %val
 }
@@ -23,13 +23,13 @@ define i16 @load_acquire_i16(ptr %ptr) {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    ld.h $a0, $a0, 0
 ; LA32-NEXT:    dbar 0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: load_acquire_i16:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    ld.h $a0, $a0, 0
 ; LA64-NEXT:    dbar 0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %val = load atomic i16, ptr %ptr acquire, align 2
   ret i16 %val
 }
@@ -39,13 +39,13 @@ define i32 @load_acquire_i32(ptr %ptr) {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    ld.w $a0, $a0, 0
 ; LA32-NEXT:    dbar 0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: load_acquire_i32:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    ld.w $a0, $a0, 0
 ; LA64-NEXT:    dbar 0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %val = load atomic i32, ptr %ptr acquire, align 4
   ret i32 %val
 }
@@ -61,13 +61,13 @@ define i64 @load_acquire_i64(ptr %ptr) {
 ; LA32-NEXT:    bl __atomic_load_8
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 16
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: load_acquire_i64:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    ld.d $a0, $a0, 0
 ; LA64-NEXT:    dbar 0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %val = load atomic i64, ptr %ptr acquire, align 8
   ret i64 %val
 }
@@ -77,13 +77,13 @@ define void @store_release_i8(ptr %ptr, i8 signext %v) {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    dbar 0
 ; LA32-NEXT:    st.b $a0, $a1, 0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: store_release_i8:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    dbar 0
 ; LA64-NEXT:    st.b $a0, $a1, 0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   store atomic i8 %v, ptr %ptr release, align 1
   ret void
 }
@@ -93,13 +93,13 @@ define void @store_release_i16(ptr %ptr, i16 signext %v) {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    dbar 0
 ; LA32-NEXT:    st.h $a0, $a1, 0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: store_release_i16:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    dbar 0
 ; LA64-NEXT:    st.h $a0, $a1, 0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   store atomic i16 %v, ptr %ptr release, align 2
   ret void
 }
@@ -109,13 +109,13 @@ define void @store_release_i32(ptr %ptr, i32 signext %v) {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    dbar 0
 ; LA32-NEXT:    st.w $a0, $a1, 0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: store_release_i32:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    dbar 0
 ; LA64-NEXT:    st.w $a0, $a1, 0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   store atomic i32 %v, ptr %ptr release, align 4
   ret void
 }
@@ -131,13 +131,13 @@ define void @store_release_i64(ptr %ptr, i64 %v) {
 ; LA32-NEXT:    bl __atomic_store_8
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 16
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: store_release_i64:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    dbar 0
 ; LA64-NEXT:    st.d $a0, $a1, 0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   store atomic i64 %v, ptr %ptr release, align 8
   ret void
 }

diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/load-store.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/load-store.ll
index e7b8769670552..12546b6a4151a 100644
--- a/llvm/test/CodeGen/LoongArch/ir-instruction/load-store.ll
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/load-store.ll
@@ -27,7 +27,7 @@ define i32 @load_store_global() nounwind {
 ; LA64-NEXT:        addi.d $a0, $a0, 1
 ; LA64-NEXT:        st.w $a0, $a1, 0
 
-; ALL-NEXT:         jirl $zero, $ra, 0
+; ALL-NEXT:         ret
 
   %v = load i32, ptr @G
   %sum = add i32 %v, 1
@@ -62,7 +62,7 @@ define i32 @load_store_global_array(i32 %a) nounwind {
 ; LA64PIC-NEXT:     st.w $a0, $a2, 36
 
 ; ALL-NEXT:         move $a0, $a1
-; ALL-NEXT:         jirl $zero, $ra, 0
+; ALL-NEXT:         ret
 
   %1 = load volatile i32, ptr @arr, align 4
   store i32 %a, ptr @arr, align 4
@@ -80,13 +80,13 @@ define i64 @ld_b(ptr %a) nounwind {
 ; LA32-NEXT:    ld.b $a1, $a0, 0
 ; LA32-NEXT:    ld.b $a0, $a0, 1
 ; LA32-NEXT:    srai.w $a1, $a0, 31
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: ld_b:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    ld.b $a1, $a0, 0
 ; LA64-NEXT:    ld.b $a0, $a0, 1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %1 = getelementptr i8, ptr %a, i64 1
   %2 = load i8, ptr %1
   %3 = sext i8 %2 to i64
@@ -100,13 +100,13 @@ define i64 @ld_h(ptr %a) nounwind {
 ; LA32-NEXT:    ld.h $a1, $a0, 0
 ; LA32-NEXT:    ld.h $a0, $a0, 4
 ; LA32-NEXT:    srai.w $a1, $a0, 31
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: ld_h:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    ld.h $a1, $a0, 0
 ; LA64-NEXT:    ld.h $a0, $a0, 4
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %1 = getelementptr i16, ptr %a, i64 2
   %2 = load i16, ptr %1
   %3 = sext i16 %2 to i64
@@ -120,13 +120,13 @@ define i64 @ld_w(ptr %a) nounwind {
 ; LA32-NEXT:    ld.w $a1, $a0, 0
 ; LA32-NEXT:    ld.w $a0, $a0, 12
 ; LA32-NEXT:    srai.w $a1, $a0, 31
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: ld_w:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    ld.w $a1, $a0, 0
 ; LA64-NEXT:    ld.w $a0, $a0, 12
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %1 = getelementptr i32, ptr %a, i64 3
   %2 = load i32, ptr %1
   %3 = sext i32 %2 to i64
@@ -141,13 +141,13 @@ define i64 @ld_d(ptr %a) nounwind {
 ; LA32-NEXT:    ld.w $a1, $a0, 0
 ; LA32-NEXT:    ld.w $a1, $a0, 28
 ; LA32-NEXT:    ld.w $a0, $a0, 24
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: ld_d:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    ld.d $a1, $a0, 0
 ; LA64-NEXT:    ld.d $a0, $a0, 24
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %1 = getelementptr i64, ptr %a, i64 3
   %2 = load i64, ptr %1
   %3 = load volatile i64, ptr %a
@@ -161,14 +161,14 @@ define i64 @ld_bu(ptr %a) nounwind {
 ; LA32-NEXT:    ld.bu $a2, $a0, 4
 ; LA32-NEXT:    add.w $a0, $a2, $a1
 ; LA32-NEXT:    sltu $a1, $a0, $a2
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: ld_bu:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    ld.bu $a1, $a0, 0
 ; LA64-NEXT:    ld.bu $a0, $a0, 4
 ; LA64-NEXT:    add.d $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %1 = getelementptr i8, ptr %a, i64 4
   %2 = load i8, ptr %1
   %3 = zext i8 %2 to i64
@@ -185,14 +185,14 @@ define i64 @ld_hu(ptr %a) nounwind {
 ; LA32-NEXT:    ld.hu $a2, $a0, 10
 ; LA32-NEXT:    add.w $a0, $a2, $a1
 ; LA32-NEXT:    sltu $a1, $a0, $a2
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: ld_hu:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    ld.hu $a1, $a0, 0
 ; LA64-NEXT:    ld.hu $a0, $a0, 10
 ; LA64-NEXT:    add.d $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %1 = getelementptr i16, ptr %a, i64 5
   %2 = load i16, ptr %1
   %3 = zext i16 %2 to i64
@@ -209,14 +209,14 @@ define i64 @ld_wu(ptr %a) nounwind {
 ; LA32-NEXT:    ld.w $a2, $a0, 20
 ; LA32-NEXT:    add.w $a0, $a2, $a1
 ; LA32-NEXT:    sltu $a1, $a0, $a2
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: ld_wu:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    ld.wu $a1, $a0, 0
 ; LA64-NEXT:    ld.wu $a0, $a0, 20
 ; LA64-NEXT:    add.d $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %1 = getelementptr i32, ptr %a, i64 5
   %2 = load i32, ptr %1
   %3 = zext i32 %2 to i64
@@ -234,14 +234,14 @@ define i64 @ldx_b(ptr %a, i64 %idx) nounwind {
 ; LA32-NEXT:    ld.b $a0, $a0, 0
 ; LA32-NEXT:    srai.w $a1, $a2, 31
 ; LA32-NEXT:    move $a0, $a2
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: ldx_b:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    ldx.b $a1, $a0, $a1
 ; LA64-NEXT:    ld.b $a0, $a0, 0
 ; LA64-NEXT:    move $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %1 = getelementptr i8, ptr %a, i64 %idx
   %2 = load i8, ptr %1
   %3 = sext i8 %2 to i64
@@ -258,7 +258,7 @@ define i64 @ldx_h(ptr %a, i64 %idx) nounwind {
 ; LA32-NEXT:    ld.h $a0, $a0, 0
 ; LA32-NEXT:    srai.w $a1, $a2, 31
 ; LA32-NEXT:    move $a0, $a2
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: ldx_h:
 ; LA64:       # %bb.0:
@@ -266,7 +266,7 @@ define i64 @ldx_h(ptr %a, i64 %idx) nounwind {
 ; LA64-NEXT:    ldx.h $a1, $a0, $a1
 ; LA64-NEXT:    ld.h $a0, $a0, 0
 ; LA64-NEXT:    move $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %1 = getelementptr i16, ptr %a, i64 %idx
   %2 = load i16, ptr %1
   %3 = sext i16 %2 to i64
@@ -283,7 +283,7 @@ define i64 @ldx_w(ptr %a, i64 %idx) nounwind {
 ; LA32-NEXT:    ld.w $a0, $a0, 0
 ; LA32-NEXT:    srai.w $a1, $a2, 31
 ; LA32-NEXT:    move $a0, $a2
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: ldx_w:
 ; LA64:       # %bb.0:
@@ -291,7 +291,7 @@ define i64 @ldx_w(ptr %a, i64 %idx) nounwind {
 ; LA64-NEXT:    ldx.w $a1, $a0, $a1
 ; LA64-NEXT:    ld.w $a0, $a0, 0
 ; LA64-NEXT:    move $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %1 = getelementptr i32, ptr %a, i64 %idx
   %2 = load i32, ptr %1
   %3 = sext i32 %2 to i64
@@ -309,7 +309,7 @@ define i64 @ldx_d(ptr %a, i64 %idx) nounwind {
 ; LA32-NEXT:    ld.w $a1, $a1, 4
 ; LA32-NEXT:    ld.w $a0, $a0, 4
 ; LA32-NEXT:    move $a0, $a2
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: ldx_d:
 ; LA64:       # %bb.0:
@@ -317,7 +317,7 @@ define i64 @ldx_d(ptr %a, i64 %idx) nounwind {
 ; LA64-NEXT:    ldx.d $a1, $a0, $a1
 ; LA64-NEXT:    ld.d $a0, $a0, 0
 ; LA64-NEXT:    move $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %1 = getelementptr i64, ptr %a, i64 %idx
   %2 = load i64, ptr %1
   %3 = load volatile i64, ptr %a
@@ -332,14 +332,14 @@ define i64 @ldx_bu(ptr %a, i64 %idx) nounwind {
 ; LA32-NEXT:    ld.bu $a0, $a0, 0
 ; LA32-NEXT:    add.w $a0, $a1, $a0
 ; LA32-NEXT:    sltu $a1, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: ldx_bu:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    ldx.bu $a1, $a0, $a1
 ; LA64-NEXT:    ld.bu $a0, $a0, 0
 ; LA64-NEXT:    add.d $a0, $a1, $a0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %1 = getelementptr i8, ptr %a, i64 %idx
   %2 = load i8, ptr %1
   %3 = zext i8 %2 to i64
@@ -358,7 +358,7 @@ define i64 @ldx_hu(ptr %a, i64 %idx) nounwind {
 ; LA32-NEXT:    ld.hu $a0, $a0, 0
 ; LA32-NEXT:    add.w $a0, $a1, $a0
 ; LA32-NEXT:    sltu $a1, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: ldx_hu:
 ; LA64:       # %bb.0:
@@ -366,7 +366,7 @@ define i64 @ldx_hu(ptr %a, i64 %idx) nounwind {
 ; LA64-NEXT:    ldx.hu $a1, $a0, $a1
 ; LA64-NEXT:    ld.hu $a0, $a0, 0
 ; LA64-NEXT:    add.d $a0, $a1, $a0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %1 = getelementptr i16, ptr %a, i64 %idx
   %2 = load i16, ptr %1
   %3 = zext i16 %2 to i64
@@ -385,7 +385,7 @@ define i64 @ldx_wu(ptr %a, i64 %idx) nounwind {
 ; LA32-NEXT:    ld.w $a0, $a0, 0
 ; LA32-NEXT:    add.w $a0, $a1, $a0
 ; LA32-NEXT:    sltu $a1, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: ldx_wu:
 ; LA64:       # %bb.0:
@@ -393,7 +393,7 @@ define i64 @ldx_wu(ptr %a, i64 %idx) nounwind {
 ; LA64-NEXT:    ldx.wu $a1, $a0, $a1
 ; LA64-NEXT:    ld.wu $a0, $a0, 0
 ; LA64-NEXT:    add.d $a0, $a1, $a0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %1 = getelementptr i32, ptr %a, i64 %idx
   %2 = load i32, ptr %1
   %3 = zext i32 %2 to i64
@@ -410,7 +410,7 @@ define void @st_b(ptr %a, i8 %b) nounwind {
 ; ALL:       # %bb.0:
 ; ALL-NEXT:    st.b $a1, $a0, 6
 ; ALL-NEXT:    st.b $a1, $a0, 0
-; ALL-NEXT:    jirl $zero, $ra, 0
+; ALL-NEXT:    ret
   store i8 %b, ptr %a
   %1 = getelementptr i8, ptr %a, i64 6
   store i8 %b, ptr %1
@@ -422,7 +422,7 @@ define void @st_h(ptr %a, i16 %b) nounwind {
 ; ALL:       # %bb.0:
 ; ALL-NEXT:    st.h $a1, $a0, 14
 ; ALL-NEXT:    st.h $a1, $a0, 0
-; ALL-NEXT:    jirl $zero, $ra, 0
+; ALL-NEXT:    ret
   store i16 %b, ptr %a
   %1 = getelementptr i16, ptr %a, i64 7
   store i16 %b, ptr %1
@@ -434,7 +434,7 @@ define void @st_w(ptr %a, i32 %b) nounwind {
 ; ALL:       # %bb.0:
 ; ALL-NEXT:    st.w $a1, $a0, 28
 ; ALL-NEXT:    st.w $a1, $a0, 0
-; ALL-NEXT:    jirl $zero, $ra, 0
+; ALL-NEXT:    ret
   store i32 %b, ptr %a
   %1 = getelementptr i32, ptr %a, i64 7
   store i32 %b, ptr %1
@@ -448,13 +448,13 @@ define void @st_d(ptr %a, i64 %b) nounwind {
 ; LA32-NEXT:    st.w $a2, $a0, 4
 ; LA32-NEXT:    st.w $a1, $a0, 64
 ; LA32-NEXT:    st.w $a1, $a0, 0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: st_d:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    st.d $a1, $a0, 64
 ; LA64-NEXT:    st.d $a1, $a0, 0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   store i64 %b, ptr %a
   %1 = getelementptr i64, ptr %a, i64 8
   store i64 %b, ptr %1
@@ -466,12 +466,12 @@ define void @stx_b(ptr %dst, i64 %idx, i8 %val) nounwind {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    add.w $a0, $a0, $a1
 ; LA32-NEXT:    st.b $a3, $a0, 0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: stx_b:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    stx.b $a2, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %1 = getelementptr i8, ptr %dst, i64 %idx
   store i8 %val, ptr %1
   ret void
@@ -483,13 +483,13 @@ define void @stx_h(ptr %dst, i64 %idx, i16 %val) nounwind {
 ; LA32-NEXT:    slli.w $a1, $a1, 1
 ; LA32-NEXT:    add.w $a0, $a0, $a1
 ; LA32-NEXT:    st.h $a3, $a0, 0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: stx_h:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    slli.d $a1, $a1, 1
 ; LA64-NEXT:    stx.h $a2, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %1 = getelementptr i16, ptr %dst, i64 %idx
   store i16 %val, ptr %1
   ret void
@@ -501,13 +501,13 @@ define void @stx_w(ptr %dst, i64 %idx, i32 %val) nounwind {
 ; LA32-NEXT:    slli.w $a1, $a1, 2
 ; LA32-NEXT:    add.w $a0, $a0, $a1
 ; LA32-NEXT:    st.w $a3, $a0, 0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: stx_w:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    slli.d $a1, $a1, 2
 ; LA64-NEXT:    stx.w $a2, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %1 = getelementptr i32, ptr %dst, i64 %idx
   store i32 %val, ptr %1
   ret void
@@ -520,13 +520,13 @@ define void @stx_d(ptr %dst, i64 %idx, i64 %val) nounwind {
 ; LA32-NEXT:    add.w $a0, $a0, $a1
 ; LA32-NEXT:    st.w $a4, $a0, 4
 ; LA32-NEXT:    st.w $a3, $a0, 0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: stx_d:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    slli.d $a1, $a1, 3
 ; LA64-NEXT:    stx.d $a2, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %1 = getelementptr i64, ptr %dst, i64 %idx
   store i64 %val, ptr %1
   ret void
@@ -543,7 +543,7 @@ define i64 @load_sext_zext_anyext_i1(ptr %a) nounwind {
 ; LA32-NEXT:    sub.w $a0, $a2, $a1
 ; LA32-NEXT:    sltu $a1, $a2, $a1
 ; LA32-NEXT:    sub.w $a1, $zero, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: load_sext_zext_anyext_i1:
 ; LA64:       # %bb.0:
@@ -551,7 +551,7 @@ define i64 @load_sext_zext_anyext_i1(ptr %a) nounwind {
 ; LA64-NEXT:    ld.bu $a1, $a0, 1
 ; LA64-NEXT:    ld.bu $a0, $a0, 2
 ; LA64-NEXT:    sub.d $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %1 = getelementptr i1, ptr %a, i64 1
   %2 = load i1, ptr %1
   %3 = sext i1 %2 to i64
@@ -573,7 +573,7 @@ define i16 @load_sext_zext_anyext_i1_i16(ptr %a) nounwind {
 ; LA32-NEXT:    ld.bu $a1, $a0, 1
 ; LA32-NEXT:    ld.bu $a0, $a0, 2
 ; LA32-NEXT:    sub.w $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: load_sext_zext_anyext_i1_i16:
 ; LA64:       # %bb.0:
@@ -581,7 +581,7 @@ define i16 @load_sext_zext_anyext_i1_i16(ptr %a) nounwind {
 ; LA64-NEXT:    ld.bu $a1, $a0, 1
 ; LA64-NEXT:    ld.bu $a0, $a0, 2
 ; LA64-NEXT:    sub.d $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %1 = getelementptr i1, ptr %a, i64 1
   %2 = load i1, ptr %1
   %3 = sext i1 %2 to i16
@@ -607,7 +607,7 @@ define i64 @ld_sd_constant(i64 %a) nounwind {
 ; LA32-NEXT:    st.w $a1, $a0, 0
 ; LA32-NEXT:    move $a0, $a2
 ; LA32-NEXT:    move $a1, $a3
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: ld_sd_constant:
 ; LA64:       # %bb.0:
@@ -618,7 +618,7 @@ define i64 @ld_sd_constant(i64 %a) nounwind {
 ; LA64-NEXT:    ld.d $a1, $a2, 0
 ; LA64-NEXT:    st.d $a0, $a2, 0
 ; LA64-NEXT:    move $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %1 = inttoptr i64 16045690984833335023 to ptr
   %2 = load volatile i64, ptr %1
   store i64 %a, ptr %1
@@ -632,7 +632,7 @@ define float @load_store_float(ptr %a, float %b) nounwind {
 ; ALL-NEXT:    fld.s $fa1, $a0, 4
 ; ALL-NEXT:    fst.s $fa0, $a0, 4
 ; ALL-NEXT:    fmov.s $fa0, $fa1
-; ALL-NEXT:    jirl $zero, $ra, 0
+; ALL-NEXT:    ret
   %1 = getelementptr float, ptr %a, i64 1
   %2 = load float, ptr %1
   store float %b, ptr %1
@@ -646,7 +646,7 @@ define double @load_store_double(ptr %a, double %b) nounwind {
 ; ALL-NEXT:    fld.d $fa1, $a0, 8
 ; ALL-NEXT:    fst.d $fa0, $a0, 8
 ; ALL-NEXT:    fmov.d $fa0, $fa1
-; ALL-NEXT:    jirl $zero, $ra, 0
+; ALL-NEXT:    ret
   %1 = getelementptr double, ptr %a, i64 1
   %2 = load double, ptr %1
   store double %b, ptr %1

diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/lshr.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/lshr.ll
index 2f63c64de8184..3916298e298f4 100644
--- a/llvm/test/CodeGen/LoongArch/ir-instruction/lshr.ll
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/lshr.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc --mtriple=loongarch32 < %s | FileCheck %s --check-prefix=LA32
 ; RUN: llc --mtriple=loongarch64 < %s | FileCheck %s --check-prefix=LA64
 
@@ -6,11 +7,11 @@
 define i1 @lshr_i1(i1 %x, i1 %y) {
 ; LA32-LABEL: lshr_i1:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: lshr_i1:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %lshr = lshr i1 %x, %y
   ret i1 %lshr
 }
@@ -20,13 +21,13 @@ define i8 @lshr_i8(i8 %x, i8 %y) {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    andi $a0, $a0, 255
 ; LA32-NEXT:    srl.w $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: lshr_i8:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    andi $a0, $a0, 255
 ; LA64-NEXT:    srl.d $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %lshr = lshr i8 %x, %y
   ret i8 %lshr
 }
@@ -36,13 +37,13 @@ define i16 @lshr_i16(i16 %x, i16 %y) {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    bstrpick.w $a0, $a0, 15, 0
 ; LA32-NEXT:    srl.w $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: lshr_i16:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    bstrpick.d $a0, $a0, 15, 0
 ; LA64-NEXT:    srl.d $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %lshr = lshr i16 %x, %y
   ret i16 %lshr
 }
@@ -51,12 +52,12 @@ define i32 @lshr_i32(i32 %x, i32 %y) {
 ; LA32-LABEL: lshr_i32:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    srl.w $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: lshr_i32:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    srl.w $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %lshr = lshr i32 %x, %y
   ret i32 %lshr
 }
@@ -78,12 +79,12 @@ define i64 @lshr_i64(i64 %x, i64 %y) {
 ; LA32-NEXT:    srl.w $a1, $a1, $a2
 ; LA32-NEXT:    srai.w $a2, $a3, 31
 ; LA32-NEXT:    and $a1, $a2, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: lshr_i64:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    srl.d $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %lshr = lshr i64 %x, %y
   ret i64 %lshr
 }
@@ -91,11 +92,11 @@ define i64 @lshr_i64(i64 %x, i64 %y) {
 define i1 @lshr_i1_3(i1 %x) {
 ; LA32-LABEL: lshr_i1_3:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: lshr_i1_3:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %lshr = lshr i1 %x, 3
   ret i1 %lshr
 }
@@ -104,12 +105,12 @@ define i8 @lshr_i8_3(i8 %x) {
 ; LA32-LABEL: lshr_i8_3:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    bstrpick.w $a0, $a0, 7, 3
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: lshr_i8_3:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    bstrpick.d $a0, $a0, 7, 3
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %lshr = lshr i8 %x, 3
   ret i8 %lshr
 }
@@ -118,12 +119,12 @@ define i16 @lshr_i16_3(i16 %x) {
 ; LA32-LABEL: lshr_i16_3:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    bstrpick.w $a0, $a0, 15, 3
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: lshr_i16_3:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    bstrpick.d $a0, $a0, 15, 3
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %lshr = lshr i16 %x, 3
   ret i16 %lshr
 }
@@ -132,12 +133,12 @@ define i32 @lshr_i32_3(i32 %x) {
 ; LA32-LABEL: lshr_i32_3:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    srli.w $a0, $a0, 3
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: lshr_i32_3:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    bstrpick.d $a0, $a0, 31, 3
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %lshr = lshr i32 %x, 3
   ret i32 %lshr
 }
@@ -149,12 +150,12 @@ define i64 @lshr_i64_3(i64 %x) {
 ; LA32-NEXT:    slli.w $a2, $a1, 29
 ; LA32-NEXT:    or $a0, $a0, $a2
 ; LA32-NEXT:    srli.w $a1, $a1, 3
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: lshr_i64_3:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    srli.d $a0, $a0, 3
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %lshr = lshr i64 %x, 3
   ret i64 %lshr
 }

diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/mul.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/mul.ll
index 0d31e790cf729..c05f136785e26 100644
--- a/llvm/test/CodeGen/LoongArch/ir-instruction/mul.ll
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/mul.ll
@@ -8,12 +8,12 @@ define i1 @mul_i1(i1 %a, i1 %b) {
 ; LA32-LABEL: mul_i1:
 ; LA32:       # %bb.0: # %entry
 ; LA32-NEXT:    mul.w $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: mul_i1:
 ; LA64:       # %bb.0: # %entry
 ; LA64-NEXT:    mul.d $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
 entry:
   %r = mul i1 %a, %b
   ret i1 %r
@@ -23,12 +23,12 @@ define i8 @mul_i8(i8 %a, i8 %b) {
 ; LA32-LABEL: mul_i8:
 ; LA32:       # %bb.0: # %entry
 ; LA32-NEXT:    mul.w $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: mul_i8:
 ; LA64:       # %bb.0: # %entry
 ; LA64-NEXT:    mul.d $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
 entry:
   %r = mul i8 %a, %b
   ret i8 %r
@@ -38,12 +38,12 @@ define i16 @mul_i16(i16 %a, i16 %b) {
 ; LA32-LABEL: mul_i16:
 ; LA32:       # %bb.0: # %entry
 ; LA32-NEXT:    mul.w $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: mul_i16:
 ; LA64:       # %bb.0: # %entry
 ; LA64-NEXT:    mul.d $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
 entry:
   %r = mul i16 %a, %b
   ret i16 %r
@@ -53,12 +53,12 @@ define i32 @mul_i32(i32 %a, i32 %b) {
 ; LA32-LABEL: mul_i32:
 ; LA32:       # %bb.0: # %entry
 ; LA32-NEXT:    mul.w $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: mul_i32:
 ; LA64:       # %bb.0: # %entry
 ; LA64-NEXT:    mul.d $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
 entry:
   %r = mul i32 %a, %b
   ret i32 %r
@@ -73,12 +73,12 @@ define i64 @mul_i64(i64 %a, i64 %b) {
 ; LA32-NEXT:    mul.w $a1, $a1, $a2
 ; LA32-NEXT:    add.w $a1, $a3, $a1
 ; LA32-NEXT:    mul.w $a0, $a0, $a2
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: mul_i64:
 ; LA64:       # %bb.0: # %entry
 ; LA64-NEXT:    mul.d $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
 entry:
   %r = mul i64 %a, %b
   ret i64 %r
@@ -91,12 +91,12 @@ define i64 @mul_pow2(i64 %a) {
 ; LA32-NEXT:    srli.w $a2, $a0, 29
 ; LA32-NEXT:    or $a1, $a1, $a2
 ; LA32-NEXT:    slli.w $a0, $a0, 3
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: mul_pow2:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    slli.d $a0, $a0, 3
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %1 = mul i64 %a, 8
   ret i64 %1
 }
@@ -109,13 +109,13 @@ define i64 @mul_p5(i64 %a) {
 ; LA32-NEXT:    mulh.wu $a3, $a0, $a2
 ; LA32-NEXT:    add.w $a1, $a3, $a1
 ; LA32-NEXT:    mul.w $a0, $a0, $a2
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: mul_p5:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    ori $a1, $zero, 5
 ; LA64-NEXT:    mul.d $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %1 = mul i64 %a, 5
   ret i64 %1
 }
@@ -124,13 +124,13 @@ define i32 @mulh_w(i32 %a, i32 %b) {
 ; LA32-LABEL: mulh_w:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    mulh.w $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: mulh_w:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    mulw.d.w $a0, $a0, $a1
 ; LA64-NEXT:    srli.d $a0, $a0, 32
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %1 = sext i32 %a to i64
   %2 = sext i32 %b to i64
   %3 = mul i64 %1, %2
@@ -143,13 +143,13 @@ define i32 @mulh_wu(i32 %a, i32 %b) {
 ; LA32-LABEL: mulh_wu:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    mulh.wu $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: mulh_wu:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    mulw.d.wu $a0, $a0, $a1
 ; LA64-NEXT:    srli.d $a0, $a0, 32
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %1 = zext i32 %a to i64
   %2 = zext i32 %b to i64
   %3 = mul i64 %1, %2
@@ -200,12 +200,12 @@ define i64 @mulh_d(i64 %a, i64 %b) {
 ; LA32-NEXT:    add.w $a0, $a4, $a2
 ; LA32-NEXT:    sltu $a2, $a0, $a4
 ; LA32-NEXT:    add.w $a1, $a1, $a2
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: mulh_d:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    mulh.d $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %1 = sext i64 %a to i128
   %2 = sext i64 %b to i128
   %3 = mul i128 %1, %2
@@ -236,12 +236,12 @@ define i64 @mulh_du(i64 %a, i64 %b) {
 ; LA32-NEXT:    add.w $a0, $a4, $a0
 ; LA32-NEXT:    sltu $a2, $a0, $a4
 ; LA32-NEXT:    add.w $a1, $a1, $a2
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: mulh_du:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    mulh.du $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %1 = zext i64 %a to i128
   %2 = zext i64 %b to i128
   %3 = mul i128 %1, %2
@@ -256,12 +256,12 @@ define i64 @mulw_d_w(i32 %a, i32 %b) {
 ; LA32-NEXT:    mul.w $a2, $a0, $a1
 ; LA32-NEXT:    mulh.w $a1, $a0, $a1
 ; LA32-NEXT:    move $a0, $a2
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: mulw_d_w:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    mulw.d.w $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %1 = sext i32 %a to i64
   %2 = sext i32 %b to i64
   %3 = mul i64 %1, %2
@@ -274,12 +274,12 @@ define i64 @mulw_d_wu(i32 %a, i32 %b) {
 ; LA32-NEXT:    mul.w $a2, $a0, $a1
 ; LA32-NEXT:    mulh.wu $a1, $a0, $a1
 ; LA32-NEXT:    move $a0, $a2
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: mulw_d_wu:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    mulw.d.wu $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %1 = zext i32 %a to i64
   %2 = zext i32 %b to i64
   %3 = mul i64 %1, %2

diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/or.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/or.ll
index 37006573244b5..ead72507d751a 100644
--- a/llvm/test/CodeGen/LoongArch/ir-instruction/or.ll
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/or.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc --mtriple=loongarch32 < %s | FileCheck %s --check-prefix=LA32
 ; RUN: llc --mtriple=loongarch64 < %s | FileCheck %s --check-prefix=LA64
 
@@ -7,12 +8,12 @@ define i1 @or_i1(i1 %a, i1 %b) {
 ; LA32-LABEL: or_i1:
 ; LA32:       # %bb.0: # %entry
 ; LA32-NEXT:    or $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: or_i1:
 ; LA64:       # %bb.0: # %entry
 ; LA64-NEXT:    or $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
 entry:
   %r = or i1 %a, %b
   ret i1 %r
@@ -22,12 +23,12 @@ define i8 @or_i8(i8 %a, i8 %b) {
 ; LA32-LABEL: or_i8:
 ; LA32:       # %bb.0: # %entry
 ; LA32-NEXT:    or $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: or_i8:
 ; LA64:       # %bb.0: # %entry
 ; LA64-NEXT:    or $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
 entry:
   %r = or i8 %a, %b
   ret i8 %r
@@ -37,12 +38,12 @@ define i16 @or_i16(i16 %a, i16 %b) {
 ; LA32-LABEL: or_i16:
 ; LA32:       # %bb.0: # %entry
 ; LA32-NEXT:    or $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: or_i16:
 ; LA64:       # %bb.0: # %entry
 ; LA64-NEXT:    or $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
 entry:
   %r = or i16 %a, %b
   ret i16 %r
@@ -52,12 +53,12 @@ define i32 @or_i32(i32 %a, i32 %b) {
 ; LA32-LABEL: or_i32:
 ; LA32:       # %bb.0: # %entry
 ; LA32-NEXT:    or $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: or_i32:
 ; LA64:       # %bb.0: # %entry
 ; LA64-NEXT:    or $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
 entry:
   %r = or i32 %a, %b
   ret i32 %r
@@ -68,12 +69,12 @@ define i64 @or_i64(i64 %a, i64 %b) {
 ; LA32:       # %bb.0: # %entry
 ; LA32-NEXT:    or $a0, $a0, $a2
 ; LA32-NEXT:    or $a1, $a1, $a3
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: or_i64:
 ; LA64:       # %bb.0: # %entry
 ; LA64-NEXT:    or $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
 entry:
   %r = or i64 %a, %b
   ret i64 %r
@@ -82,11 +83,11 @@ entry:
 define i1 @or_i1_0(i1 %b) {
 ; LA32-LABEL: or_i1_0:
 ; LA32:       # %bb.0: # %entry
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: or_i1_0:
 ; LA64:       # %bb.0: # %entry
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
 entry:
   %r = or i1 4, %b
   ret i1 %r
@@ -96,12 +97,12 @@ define i1 @or_i1_5(i1 %b) {
 ; LA32-LABEL: or_i1_5:
 ; LA32:       # %bb.0: # %entry
 ; LA32-NEXT:    ori $a0, $zero, 1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: or_i1_5:
 ; LA64:       # %bb.0: # %entry
 ; LA64-NEXT:    ori $a0, $zero, 1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
 entry:
   %r = or i1 5, %b
   ret i1 %r
@@ -111,12 +112,12 @@ define i8 @or_i8_5(i8 %b) {
 ; LA32-LABEL: or_i8_5:
 ; LA32:       # %bb.0: # %entry
 ; LA32-NEXT:    ori $a0, $a0, 5
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: or_i8_5:
 ; LA64:       # %bb.0: # %entry
 ; LA64-NEXT:    ori $a0, $a0, 5
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
 entry:
   %r = or i8 5, %b
   ret i8 %r
@@ -126,12 +127,12 @@ define i8 @or_i8_257(i8 %b) {
 ; LA32-LABEL: or_i8_257:
 ; LA32:       # %bb.0: # %entry
 ; LA32-NEXT:    ori $a0, $a0, 1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: or_i8_257:
 ; LA64:       # %bb.0: # %entry
 ; LA64-NEXT:    ori $a0, $a0, 1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
 entry:
   %r = or i8 257, %b
   ret i8 %r
@@ -141,12 +142,12 @@ define i16 @or_i16_5(i16 %b) {
 ; LA32-LABEL: or_i16_5:
 ; LA32:       # %bb.0: # %entry
 ; LA32-NEXT:    ori $a0, $a0, 5
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: or_i16_5:
 ; LA64:       # %bb.0: # %entry
 ; LA64-NEXT:    ori $a0, $a0, 5
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
 entry:
   %r = or i16 5, %b
   ret i16 %r
@@ -157,13 +158,13 @@ define i16 @or_i16_0x1000(i16 %b) {
 ; LA32:       # %bb.0: # %entry
 ; LA32-NEXT:    lu12i.w $a1, 1
 ; LA32-NEXT:    or $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: or_i16_0x1000:
 ; LA64:       # %bb.0: # %entry
 ; LA64-NEXT:    lu12i.w $a1, 1
 ; LA64-NEXT:    or $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
 entry:
   %r = or i16 4096, %b
   ret i16 %r
@@ -173,12 +174,12 @@ define i16 @or_i16_0x10001(i16 %b) {
 ; LA32-LABEL: or_i16_0x10001:
 ; LA32:       # %bb.0: # %entry
 ; LA32-NEXT:    ori $a0, $a0, 1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: or_i16_0x10001:
 ; LA64:       # %bb.0: # %entry
 ; LA64-NEXT:    ori $a0, $a0, 1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
 entry:
   %r = or i16 65537, %b
   ret i16 %r
@@ -188,12 +189,12 @@ define i32 @or_i32_5(i32 %b) {
 ; LA32-LABEL: or_i32_5:
 ; LA32:       # %bb.0: # %entry
 ; LA32-NEXT:    ori $a0, $a0, 5
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: or_i32_5:
 ; LA64:       # %bb.0: # %entry
 ; LA64-NEXT:    ori $a0, $a0, 5
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
 entry:
   %r = or i32 5, %b
   ret i32 %r
@@ -204,13 +205,13 @@ define i32 @or_i32_0x1000(i32 %b) {
 ; LA32:       # %bb.0: # %entry
 ; LA32-NEXT:    lu12i.w $a1, 1
 ; LA32-NEXT:    or $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: or_i32_0x1000:
 ; LA64:       # %bb.0: # %entry
 ; LA64-NEXT:    lu12i.w $a1, 1
 ; LA64-NEXT:    or $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
 entry:
   %r = or i32 4096, %b
   ret i32 %r
@@ -220,12 +221,12 @@ define i32 @or_i32_0x100000001(i32 %b) {
 ; LA32-LABEL: or_i32_0x100000001:
 ; LA32:       # %bb.0: # %entry
 ; LA32-NEXT:    ori $a0, $a0, 1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: or_i32_0x100000001:
 ; LA64:       # %bb.0: # %entry
 ; LA64-NEXT:    ori $a0, $a0, 1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
 entry:
   %r = or i32 4294967297, %b
   ret i32 %r
@@ -235,12 +236,12 @@ define i64 @or_i64_5(i64 %b) {
 ; LA32-LABEL: or_i64_5:
 ; LA32:       # %bb.0: # %entry
 ; LA32-NEXT:    ori $a0, $a0, 5
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: or_i64_5:
 ; LA64:       # %bb.0: # %entry
 ; LA64-NEXT:    ori $a0, $a0, 5
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
 entry:
   %r = or i64 5, %b
   ret i64 %r
@@ -251,13 +252,13 @@ define i64 @or_i64_0x1000(i64 %b) {
 ; LA32:       # %bb.0: # %entry
 ; LA32-NEXT:    lu12i.w $a2, 1
 ; LA32-NEXT:    or $a0, $a0, $a2
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: or_i64_0x1000:
 ; LA64:       # %bb.0: # %entry
 ; LA64-NEXT:    lu12i.w $a1, 1
 ; LA64-NEXT:    or $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
 entry:
   %r = or i64 4096, %b
   ret i64 %r

diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/sdiv-udiv-srem-urem.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/sdiv-udiv-srem-urem.ll
index b831cb51565ed..a5d9214597bae 100644
--- a/llvm/test/CodeGen/LoongArch/ir-instruction/sdiv-udiv-srem-urem.ll
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/sdiv-udiv-srem-urem.ll
@@ -11,19 +11,19 @@
 define i1 @sdiv_i1(i1 %a, i1 %b) {
 ; LA32-LABEL: sdiv_i1:
 ; LA32:       # %bb.0: # %entry
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: sdiv_i1:
 ; LA64:       # %bb.0: # %entry
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
 ;
 ; LA32-TRAP-LABEL: sdiv_i1:
 ; LA32-TRAP:       # %bb.0: # %entry
-; LA32-TRAP-NEXT:    jirl $zero, $ra, 0
+; LA32-TRAP-NEXT:    ret
 ;
 ; LA64-TRAP-LABEL: sdiv_i1:
 ; LA64-TRAP:       # %bb.0: # %entry
-; LA64-TRAP-NEXT:    jirl $zero, $ra, 0
+; LA64-TRAP-NEXT:    ret
 entry:
   %r = sdiv i1 %a, %b
   ret i1 %r
@@ -35,14 +35,14 @@ define i8 @sdiv_i8(i8 %a, i8 %b) {
 ; LA32-NEXT:    ext.w.b $a1, $a1
 ; LA32-NEXT:    ext.w.b $a0, $a0
 ; LA32-NEXT:    div.w $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: sdiv_i8:
 ; LA64:       # %bb.0: # %entry
 ; LA64-NEXT:    ext.w.b $a1, $a1
 ; LA64-NEXT:    ext.w.b $a0, $a0
 ; LA64-NEXT:    div.d $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
 ;
 ; LA32-TRAP-LABEL: sdiv_i8:
 ; LA32-TRAP:       # %bb.0: # %entry
@@ -53,7 +53,7 @@ define i8 @sdiv_i8(i8 %a, i8 %b) {
 ; LA32-TRAP-NEXT:  # %bb.1: # %entry
 ; LA32-TRAP-NEXT:    break 7
 ; LA32-TRAP-NEXT:  .LBB1_2: # %entry
-; LA32-TRAP-NEXT:    jirl $zero, $ra, 0
+; LA32-TRAP-NEXT:    ret
 ;
 ; LA64-TRAP-LABEL: sdiv_i8:
 ; LA64-TRAP:       # %bb.0: # %entry
@@ -64,7 +64,7 @@ define i8 @sdiv_i8(i8 %a, i8 %b) {
 ; LA64-TRAP-NEXT:  # %bb.1: # %entry
 ; LA64-TRAP-NEXT:    break 7
 ; LA64-TRAP-NEXT:  .LBB1_2: # %entry
-; LA64-TRAP-NEXT:    jirl $zero, $ra, 0
+; LA64-TRAP-NEXT:    ret
 entry:
   %r = sdiv i8 %a, %b
   ret i8 %r
@@ -76,14 +76,14 @@ define i16 @sdiv_i16(i16 %a, i16 %b) {
 ; LA32-NEXT:    ext.w.h $a1, $a1
 ; LA32-NEXT:    ext.w.h $a0, $a0
 ; LA32-NEXT:    div.w $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: sdiv_i16:
 ; LA64:       # %bb.0: # %entry
 ; LA64-NEXT:    ext.w.h $a1, $a1
 ; LA64-NEXT:    ext.w.h $a0, $a0
 ; LA64-NEXT:    div.d $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
 ;
 ; LA32-TRAP-LABEL: sdiv_i16:
 ; LA32-TRAP:       # %bb.0: # %entry
@@ -94,7 +94,7 @@ define i16 @sdiv_i16(i16 %a, i16 %b) {
 ; LA32-TRAP-NEXT:  # %bb.1: # %entry
 ; LA32-TRAP-NEXT:    break 7
 ; LA32-TRAP-NEXT:  .LBB2_2: # %entry
-; LA32-TRAP-NEXT:    jirl $zero, $ra, 0
+; LA32-TRAP-NEXT:    ret
 ;
 ; LA64-TRAP-LABEL: sdiv_i16:
 ; LA64-TRAP:       # %bb.0: # %entry
@@ -105,7 +105,7 @@ define i16 @sdiv_i16(i16 %a, i16 %b) {
 ; LA64-TRAP-NEXT:  # %bb.1: # %entry
 ; LA64-TRAP-NEXT:    break 7
 ; LA64-TRAP-NEXT:  .LBB2_2: # %entry
-; LA64-TRAP-NEXT:    jirl $zero, $ra, 0
+; LA64-TRAP-NEXT:    ret
 entry:
   %r = sdiv i16 %a, %b
   ret i16 %r
@@ -115,14 +115,14 @@ define i32 @sdiv_i32(i32 %a, i32 %b) {
 ; LA32-LABEL: sdiv_i32:
 ; LA32:       # %bb.0: # %entry
 ; LA32-NEXT:    div.w $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: sdiv_i32:
 ; LA64:       # %bb.0: # %entry
 ; LA64-NEXT:    addi.w $a1, $a1, 0
 ; LA64-NEXT:    addi.w $a0, $a0, 0
 ; LA64-NEXT:    div.d $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
 ;
 ; LA32-TRAP-LABEL: sdiv_i32:
 ; LA32-TRAP:       # %bb.0: # %entry
@@ -131,7 +131,7 @@ define i32 @sdiv_i32(i32 %a, i32 %b) {
 ; LA32-TRAP-NEXT:  # %bb.1: # %entry
 ; LA32-TRAP-NEXT:    break 7
 ; LA32-TRAP-NEXT:  .LBB3_2: # %entry
-; LA32-TRAP-NEXT:    jirl $zero, $ra, 0
+; LA32-TRAP-NEXT:    ret
 ;
 ; LA64-TRAP-LABEL: sdiv_i32:
 ; LA64-TRAP:       # %bb.0: # %entry
@@ -142,7 +142,7 @@ define i32 @sdiv_i32(i32 %a, i32 %b) {
 ; LA64-TRAP-NEXT:  # %bb.1: # %entry
 ; LA64-TRAP-NEXT:    break 7
 ; LA64-TRAP-NEXT:  .LBB3_2: # %entry
-; LA64-TRAP-NEXT:    jirl $zero, $ra, 0
+; LA64-TRAP-NEXT:    ret
 entry:
   %r = sdiv i32 %a, %b
   ret i32 %r
@@ -158,12 +158,12 @@ define i64 @sdiv_i64(i64 %a, i64 %b) {
 ; LA32-NEXT:    bl __divdi3
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 16
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: sdiv_i64:
 ; LA64:       # %bb.0: # %entry
 ; LA64-NEXT:    div.d $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
 ;
 ; LA32-TRAP-LABEL: sdiv_i64:
 ; LA32-TRAP:       # %bb.0: # %entry
@@ -174,7 +174,7 @@ define i64 @sdiv_i64(i64 %a, i64 %b) {
 ; LA32-TRAP-NEXT:    bl __divdi3
 ; LA32-TRAP-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-TRAP-NEXT:    addi.w $sp, $sp, 16
-; LA32-TRAP-NEXT:    jirl $zero, $ra, 0
+; LA32-TRAP-NEXT:    ret
 ;
 ; LA64-TRAP-LABEL: sdiv_i64:
 ; LA64-TRAP:       # %bb.0: # %entry
@@ -183,7 +183,7 @@ define i64 @sdiv_i64(i64 %a, i64 %b) {
 ; LA64-TRAP-NEXT:  # %bb.1: # %entry
 ; LA64-TRAP-NEXT:    break 7
 ; LA64-TRAP-NEXT:  .LBB4_2: # %entry
-; LA64-TRAP-NEXT:    jirl $zero, $ra, 0
+; LA64-TRAP-NEXT:    ret
 entry:
   %r = sdiv i64 %a, %b
   ret i64 %r
@@ -192,19 +192,19 @@ entry:
 define i1 @udiv_i1(i1 %a, i1 %b) {
 ; LA32-LABEL: udiv_i1:
 ; LA32:       # %bb.0: # %entry
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: udiv_i1:
 ; LA64:       # %bb.0: # %entry
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
 ;
 ; LA32-TRAP-LABEL: udiv_i1:
 ; LA32-TRAP:       # %bb.0: # %entry
-; LA32-TRAP-NEXT:    jirl $zero, $ra, 0
+; LA32-TRAP-NEXT:    ret
 ;
 ; LA64-TRAP-LABEL: udiv_i1:
 ; LA64-TRAP:       # %bb.0: # %entry
-; LA64-TRAP-NEXT:    jirl $zero, $ra, 0
+; LA64-TRAP-NEXT:    ret
 entry:
   %r = udiv i1 %a, %b
   ret i1 %r
@@ -216,14 +216,14 @@ define i8 @udiv_i8(i8 %a, i8 %b) {
 ; LA32-NEXT:    andi $a1, $a1, 255
 ; LA32-NEXT:    andi $a0, $a0, 255
 ; LA32-NEXT:    div.wu $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: udiv_i8:
 ; LA64:       # %bb.0: # %entry
 ; LA64-NEXT:    andi $a1, $a1, 255
 ; LA64-NEXT:    andi $a0, $a0, 255
 ; LA64-NEXT:    div.du $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
 ;
 ; LA32-TRAP-LABEL: udiv_i8:
 ; LA32-TRAP:       # %bb.0: # %entry
@@ -234,7 +234,7 @@ define i8 @udiv_i8(i8 %a, i8 %b) {
 ; LA32-TRAP-NEXT:  # %bb.1: # %entry
 ; LA32-TRAP-NEXT:    break 7
 ; LA32-TRAP-NEXT:  .LBB6_2: # %entry
-; LA32-TRAP-NEXT:    jirl $zero, $ra, 0
+; LA32-TRAP-NEXT:    ret
 ;
 ; LA64-TRAP-LABEL: udiv_i8:
 ; LA64-TRAP:       # %bb.0: # %entry
@@ -245,7 +245,7 @@ define i8 @udiv_i8(i8 %a, i8 %b) {
 ; LA64-TRAP-NEXT:  # %bb.1: # %entry
 ; LA64-TRAP-NEXT:    break 7
 ; LA64-TRAP-NEXT:  .LBB6_2: # %entry
-; LA64-TRAP-NEXT:    jirl $zero, $ra, 0
+; LA64-TRAP-NEXT:    ret
 entry:
   %r = udiv i8 %a, %b
   ret i8 %r
@@ -257,14 +257,14 @@ define i16 @udiv_i16(i16 %a, i16 %b) {
 ; LA32-NEXT:    bstrpick.w $a1, $a1, 15, 0
 ; LA32-NEXT:    bstrpick.w $a0, $a0, 15, 0
 ; LA32-NEXT:    div.wu $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: udiv_i16:
 ; LA64:       # %bb.0: # %entry
 ; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
 ; LA64-NEXT:    bstrpick.d $a0, $a0, 15, 0
 ; LA64-NEXT:    div.du $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
 ;
 ; LA32-TRAP-LABEL: udiv_i16:
 ; LA32-TRAP:       # %bb.0: # %entry
@@ -275,7 +275,7 @@ define i16 @udiv_i16(i16 %a, i16 %b) {
 ; LA32-TRAP-NEXT:  # %bb.1: # %entry
 ; LA32-TRAP-NEXT:    break 7
 ; LA32-TRAP-NEXT:  .LBB7_2: # %entry
-; LA32-TRAP-NEXT:    jirl $zero, $ra, 0
+; LA32-TRAP-NEXT:    ret
 ;
 ; LA64-TRAP-LABEL: udiv_i16:
 ; LA64-TRAP:       # %bb.0: # %entry
@@ -286,7 +286,7 @@ define i16 @udiv_i16(i16 %a, i16 %b) {
 ; LA64-TRAP-NEXT:  # %bb.1: # %entry
 ; LA64-TRAP-NEXT:    break 7
 ; LA64-TRAP-NEXT:  .LBB7_2: # %entry
-; LA64-TRAP-NEXT:    jirl $zero, $ra, 0
+; LA64-TRAP-NEXT:    ret
 entry:
   %r = udiv i16 %a, %b
   ret i16 %r
@@ -296,14 +296,14 @@ define i32 @udiv_i32(i32 %a, i32 %b) {
 ; LA32-LABEL: udiv_i32:
 ; LA32:       # %bb.0: # %entry
 ; LA32-NEXT:    div.wu $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: udiv_i32:
 ; LA64:       # %bb.0: # %entry
 ; LA64-NEXT:    bstrpick.d $a1, $a1, 31, 0
 ; LA64-NEXT:    bstrpick.d $a0, $a0, 31, 0
 ; LA64-NEXT:    div.du $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
 ;
 ; LA32-TRAP-LABEL: udiv_i32:
 ; LA32-TRAP:       # %bb.0: # %entry
@@ -312,7 +312,7 @@ define i32 @udiv_i32(i32 %a, i32 %b) {
 ; LA32-TRAP-NEXT:  # %bb.1: # %entry
 ; LA32-TRAP-NEXT:    break 7
 ; LA32-TRAP-NEXT:  .LBB8_2: # %entry
-; LA32-TRAP-NEXT:    jirl $zero, $ra, 0
+; LA32-TRAP-NEXT:    ret
 ;
 ; LA64-TRAP-LABEL: udiv_i32:
 ; LA64-TRAP:       # %bb.0: # %entry
@@ -323,7 +323,7 @@ define i32 @udiv_i32(i32 %a, i32 %b) {
 ; LA64-TRAP-NEXT:  # %bb.1: # %entry
 ; LA64-TRAP-NEXT:    break 7
 ; LA64-TRAP-NEXT:  .LBB8_2: # %entry
-; LA64-TRAP-NEXT:    jirl $zero, $ra, 0
+; LA64-TRAP-NEXT:    ret
 entry:
   %r = udiv i32 %a, %b
   ret i32 %r
@@ -339,12 +339,12 @@ define i64 @udiv_i64(i64 %a, i64 %b) {
 ; LA32-NEXT:    bl __udivdi3
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 16
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: udiv_i64:
 ; LA64:       # %bb.0: # %entry
 ; LA64-NEXT:    div.du $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
 ;
 ; LA32-TRAP-LABEL: udiv_i64:
 ; LA32-TRAP:       # %bb.0: # %entry
@@ -355,7 +355,7 @@ define i64 @udiv_i64(i64 %a, i64 %b) {
 ; LA32-TRAP-NEXT:    bl __udivdi3
 ; LA32-TRAP-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-TRAP-NEXT:    addi.w $sp, $sp, 16
-; LA32-TRAP-NEXT:    jirl $zero, $ra, 0
+; LA32-TRAP-NEXT:    ret
 ;
 ; LA64-TRAP-LABEL: udiv_i64:
 ; LA64-TRAP:       # %bb.0: # %entry
@@ -364,7 +364,7 @@ define i64 @udiv_i64(i64 %a, i64 %b) {
 ; LA64-TRAP-NEXT:  # %bb.1: # %entry
 ; LA64-TRAP-NEXT:    break 7
 ; LA64-TRAP-NEXT:  .LBB9_2: # %entry
-; LA64-TRAP-NEXT:    jirl $zero, $ra, 0
+; LA64-TRAP-NEXT:    ret
 entry:
   %r = udiv i64 %a, %b
   ret i64 %r
@@ -374,22 +374,22 @@ define i1 @srem_i1(i1 %a, i1 %b) {
 ; LA32-LABEL: srem_i1:
 ; LA32:       # %bb.0: # %entry
 ; LA32-NEXT:    move $a0, $zero
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: srem_i1:
 ; LA64:       # %bb.0: # %entry
 ; LA64-NEXT:    move $a0, $zero
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
 ;
 ; LA32-TRAP-LABEL: srem_i1:
 ; LA32-TRAP:       # %bb.0: # %entry
 ; LA32-TRAP-NEXT:    move $a0, $zero
-; LA32-TRAP-NEXT:    jirl $zero, $ra, 0
+; LA32-TRAP-NEXT:    ret
 ;
 ; LA64-TRAP-LABEL: srem_i1:
 ; LA64-TRAP:       # %bb.0: # %entry
 ; LA64-TRAP-NEXT:    move $a0, $zero
-; LA64-TRAP-NEXT:    jirl $zero, $ra, 0
+; LA64-TRAP-NEXT:    ret
 entry:
   %r = srem i1 %a, %b
   ret i1 %r
@@ -401,14 +401,14 @@ define i8 @srem_i8(i8 %a, i8 %b) {
 ; LA32-NEXT:    ext.w.b $a1, $a1
 ; LA32-NEXT:    ext.w.b $a0, $a0
 ; LA32-NEXT:    mod.w $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: srem_i8:
 ; LA64:       # %bb.0: # %entry
 ; LA64-NEXT:    ext.w.b $a1, $a1
 ; LA64-NEXT:    ext.w.b $a0, $a0
 ; LA64-NEXT:    mod.d $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
 ;
 ; LA32-TRAP-LABEL: srem_i8:
 ; LA32-TRAP:       # %bb.0: # %entry
@@ -419,7 +419,7 @@ define i8 @srem_i8(i8 %a, i8 %b) {
 ; LA32-TRAP-NEXT:  # %bb.1: # %entry
 ; LA32-TRAP-NEXT:    break 7
 ; LA32-TRAP-NEXT:  .LBB11_2: # %entry
-; LA32-TRAP-NEXT:    jirl $zero, $ra, 0
+; LA32-TRAP-NEXT:    ret
 ;
 ; LA64-TRAP-LABEL: srem_i8:
 ; LA64-TRAP:       # %bb.0: # %entry
@@ -430,7 +430,7 @@ define i8 @srem_i8(i8 %a, i8 %b) {
 ; LA64-TRAP-NEXT:  # %bb.1: # %entry
 ; LA64-TRAP-NEXT:    break 7
 ; LA64-TRAP-NEXT:  .LBB11_2: # %entry
-; LA64-TRAP-NEXT:    jirl $zero, $ra, 0
+; LA64-TRAP-NEXT:    ret
 entry:
   %r = srem i8 %a, %b
   ret i8 %r
@@ -442,14 +442,14 @@ define i16 @srem_i16(i16 %a, i16 %b) {
 ; LA32-NEXT:    ext.w.h $a1, $a1
 ; LA32-NEXT:    ext.w.h $a0, $a0
 ; LA32-NEXT:    mod.w $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: srem_i16:
 ; LA64:       # %bb.0: # %entry
 ; LA64-NEXT:    ext.w.h $a1, $a1
 ; LA64-NEXT:    ext.w.h $a0, $a0
 ; LA64-NEXT:    mod.d $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
 ;
 ; LA32-TRAP-LABEL: srem_i16:
 ; LA32-TRAP:       # %bb.0: # %entry
@@ -460,7 +460,7 @@ define i16 @srem_i16(i16 %a, i16 %b) {
 ; LA32-TRAP-NEXT:  # %bb.1: # %entry
 ; LA32-TRAP-NEXT:    break 7
 ; LA32-TRAP-NEXT:  .LBB12_2: # %entry
-; LA32-TRAP-NEXT:    jirl $zero, $ra, 0
+; LA32-TRAP-NEXT:    ret
 ;
 ; LA64-TRAP-LABEL: srem_i16:
 ; LA64-TRAP:       # %bb.0: # %entry
@@ -471,7 +471,7 @@ define i16 @srem_i16(i16 %a, i16 %b) {
 ; LA64-TRAP-NEXT:  # %bb.1: # %entry
 ; LA64-TRAP-NEXT:    break 7
 ; LA64-TRAP-NEXT:  .LBB12_2: # %entry
-; LA64-TRAP-NEXT:    jirl $zero, $ra, 0
+; LA64-TRAP-NEXT:    ret
 entry:
   %r = srem i16 %a, %b
   ret i16 %r
@@ -481,14 +481,14 @@ define i32 @srem_i32(i32 %a, i32 %b) {
 ; LA32-LABEL: srem_i32:
 ; LA32:       # %bb.0: # %entry
 ; LA32-NEXT:    mod.w $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: srem_i32:
 ; LA64:       # %bb.0: # %entry
 ; LA64-NEXT:    addi.w $a1, $a1, 0
 ; LA64-NEXT:    addi.w $a0, $a0, 0
 ; LA64-NEXT:    mod.d $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
 ;
 ; LA32-TRAP-LABEL: srem_i32:
 ; LA32-TRAP:       # %bb.0: # %entry
@@ -497,7 +497,7 @@ define i32 @srem_i32(i32 %a, i32 %b) {
 ; LA32-TRAP-NEXT:  # %bb.1: # %entry
 ; LA32-TRAP-NEXT:    break 7
 ; LA32-TRAP-NEXT:  .LBB13_2: # %entry
-; LA32-TRAP-NEXT:    jirl $zero, $ra, 0
+; LA32-TRAP-NEXT:    ret
 ;
 ; LA64-TRAP-LABEL: srem_i32:
 ; LA64-TRAP:       # %bb.0: # %entry
@@ -508,7 +508,7 @@ define i32 @srem_i32(i32 %a, i32 %b) {
 ; LA64-TRAP-NEXT:  # %bb.1: # %entry
 ; LA64-TRAP-NEXT:    break 7
 ; LA64-TRAP-NEXT:  .LBB13_2: # %entry
-; LA64-TRAP-NEXT:    jirl $zero, $ra, 0
+; LA64-TRAP-NEXT:    ret
 entry:
   %r = srem i32 %a, %b
   ret i32 %r
@@ -524,12 +524,12 @@ define i64 @srem_i64(i64 %a, i64 %b) {
 ; LA32-NEXT:    bl __moddi3
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 16
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: srem_i64:
 ; LA64:       # %bb.0: # %entry
 ; LA64-NEXT:    mod.d $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
 ;
 ; LA32-TRAP-LABEL: srem_i64:
 ; LA32-TRAP:       # %bb.0: # %entry
@@ -540,7 +540,7 @@ define i64 @srem_i64(i64 %a, i64 %b) {
 ; LA32-TRAP-NEXT:    bl __moddi3
 ; LA32-TRAP-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-TRAP-NEXT:    addi.w $sp, $sp, 16
-; LA32-TRAP-NEXT:    jirl $zero, $ra, 0
+; LA32-TRAP-NEXT:    ret
 ;
 ; LA64-TRAP-LABEL: srem_i64:
 ; LA64-TRAP:       # %bb.0: # %entry
@@ -549,7 +549,7 @@ define i64 @srem_i64(i64 %a, i64 %b) {
 ; LA64-TRAP-NEXT:  # %bb.1: # %entry
 ; LA64-TRAP-NEXT:    break 7
 ; LA64-TRAP-NEXT:  .LBB14_2: # %entry
-; LA64-TRAP-NEXT:    jirl $zero, $ra, 0
+; LA64-TRAP-NEXT:    ret
 entry:
   %r = srem i64 %a, %b
   ret i64 %r
@@ -559,22 +559,22 @@ define i1 @urem_i1(i1 %a, i1 %b) {
 ; LA32-LABEL: urem_i1:
 ; LA32:       # %bb.0: # %entry
 ; LA32-NEXT:    move $a0, $zero
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: urem_i1:
 ; LA64:       # %bb.0: # %entry
 ; LA64-NEXT:    move $a0, $zero
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
 ;
 ; LA32-TRAP-LABEL: urem_i1:
 ; LA32-TRAP:       # %bb.0: # %entry
 ; LA32-TRAP-NEXT:    move $a0, $zero
-; LA32-TRAP-NEXT:    jirl $zero, $ra, 0
+; LA32-TRAP-NEXT:    ret
 ;
 ; LA64-TRAP-LABEL: urem_i1:
 ; LA64-TRAP:       # %bb.0: # %entry
 ; LA64-TRAP-NEXT:    move $a0, $zero
-; LA64-TRAP-NEXT:    jirl $zero, $ra, 0
+; LA64-TRAP-NEXT:    ret
 entry:
   %r = urem i1 %a, %b
   ret i1 %r
@@ -586,14 +586,14 @@ define i8 @urem_i8(i8 %a, i8 %b) {
 ; LA32-NEXT:    andi $a1, $a1, 255
 ; LA32-NEXT:    andi $a0, $a0, 255
 ; LA32-NEXT:    mod.wu $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: urem_i8:
 ; LA64:       # %bb.0: # %entry
 ; LA64-NEXT:    andi $a1, $a1, 255
 ; LA64-NEXT:    andi $a0, $a0, 255
 ; LA64-NEXT:    mod.du $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
 ;
 ; LA32-TRAP-LABEL: urem_i8:
 ; LA32-TRAP:       # %bb.0: # %entry
@@ -604,7 +604,7 @@ define i8 @urem_i8(i8 %a, i8 %b) {
 ; LA32-TRAP-NEXT:  # %bb.1: # %entry
 ; LA32-TRAP-NEXT:    break 7
 ; LA32-TRAP-NEXT:  .LBB16_2: # %entry
-; LA32-TRAP-NEXT:    jirl $zero, $ra, 0
+; LA32-TRAP-NEXT:    ret
 ;
 ; LA64-TRAP-LABEL: urem_i8:
 ; LA64-TRAP:       # %bb.0: # %entry
@@ -615,7 +615,7 @@ define i8 @urem_i8(i8 %a, i8 %b) {
 ; LA64-TRAP-NEXT:  # %bb.1: # %entry
 ; LA64-TRAP-NEXT:    break 7
 ; LA64-TRAP-NEXT:  .LBB16_2: # %entry
-; LA64-TRAP-NEXT:    jirl $zero, $ra, 0
+; LA64-TRAP-NEXT:    ret
 entry:
   %r = urem i8 %a, %b
   ret i8 %r
@@ -627,14 +627,14 @@ define i16 @urem_i16(i16 %a, i16 %b) {
 ; LA32-NEXT:    bstrpick.w $a1, $a1, 15, 0
 ; LA32-NEXT:    bstrpick.w $a0, $a0, 15, 0
 ; LA32-NEXT:    mod.wu $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: urem_i16:
 ; LA64:       # %bb.0: # %entry
 ; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
 ; LA64-NEXT:    bstrpick.d $a0, $a0, 15, 0
 ; LA64-NEXT:    mod.du $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
 ;
 ; LA32-TRAP-LABEL: urem_i16:
 ; LA32-TRAP:       # %bb.0: # %entry
@@ -645,7 +645,7 @@ define i16 @urem_i16(i16 %a, i16 %b) {
 ; LA32-TRAP-NEXT:  # %bb.1: # %entry
 ; LA32-TRAP-NEXT:    break 7
 ; LA32-TRAP-NEXT:  .LBB17_2: # %entry
-; LA32-TRAP-NEXT:    jirl $zero, $ra, 0
+; LA32-TRAP-NEXT:    ret
 ;
 ; LA64-TRAP-LABEL: urem_i16:
 ; LA64-TRAP:       # %bb.0: # %entry
@@ -656,7 +656,7 @@ define i16 @urem_i16(i16 %a, i16 %b) {
 ; LA64-TRAP-NEXT:  # %bb.1: # %entry
 ; LA64-TRAP-NEXT:    break 7
 ; LA64-TRAP-NEXT:  .LBB17_2: # %entry
-; LA64-TRAP-NEXT:    jirl $zero, $ra, 0
+; LA64-TRAP-NEXT:    ret
 entry:
   %r = urem i16 %a, %b
   ret i16 %r
@@ -666,14 +666,14 @@ define i32 @urem_i32(i32 %a, i32 %b) {
 ; LA32-LABEL: urem_i32:
 ; LA32:       # %bb.0: # %entry
 ; LA32-NEXT:    mod.wu $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: urem_i32:
 ; LA64:       # %bb.0: # %entry
 ; LA64-NEXT:    bstrpick.d $a1, $a1, 31, 0
 ; LA64-NEXT:    bstrpick.d $a0, $a0, 31, 0
 ; LA64-NEXT:    mod.du $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
 ;
 ; LA32-TRAP-LABEL: urem_i32:
 ; LA32-TRAP:       # %bb.0: # %entry
@@ -682,7 +682,7 @@ define i32 @urem_i32(i32 %a, i32 %b) {
 ; LA32-TRAP-NEXT:  # %bb.1: # %entry
 ; LA32-TRAP-NEXT:    break 7
 ; LA32-TRAP-NEXT:  .LBB18_2: # %entry
-; LA32-TRAP-NEXT:    jirl $zero, $ra, 0
+; LA32-TRAP-NEXT:    ret
 ;
 ; LA64-TRAP-LABEL: urem_i32:
 ; LA64-TRAP:       # %bb.0: # %entry
@@ -693,7 +693,7 @@ define i32 @urem_i32(i32 %a, i32 %b) {
 ; LA64-TRAP-NEXT:  # %bb.1: # %entry
 ; LA64-TRAP-NEXT:    break 7
 ; LA64-TRAP-NEXT:  .LBB18_2: # %entry
-; LA64-TRAP-NEXT:    jirl $zero, $ra, 0
+; LA64-TRAP-NEXT:    ret
 entry:
   %r = urem i32 %a, %b
   ret i32 %r
@@ -709,12 +709,12 @@ define i64 @urem_i64(i64 %a, i64 %b) {
 ; LA32-NEXT:    bl __umoddi3
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 16
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: urem_i64:
 ; LA64:       # %bb.0: # %entry
 ; LA64-NEXT:    mod.du $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
 ;
 ; LA32-TRAP-LABEL: urem_i64:
 ; LA32-TRAP:       # %bb.0: # %entry
@@ -725,7 +725,7 @@ define i64 @urem_i64(i64 %a, i64 %b) {
 ; LA32-TRAP-NEXT:    bl __umoddi3
 ; LA32-TRAP-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-TRAP-NEXT:    addi.w $sp, $sp, 16
-; LA32-TRAP-NEXT:    jirl $zero, $ra, 0
+; LA32-TRAP-NEXT:    ret
 ;
 ; LA64-TRAP-LABEL: urem_i64:
 ; LA64-TRAP:       # %bb.0: # %entry
@@ -734,7 +734,7 @@ define i64 @urem_i64(i64 %a, i64 %b) {
 ; LA64-TRAP-NEXT:  # %bb.1: # %entry
 ; LA64-TRAP-NEXT:    break 7
 ; LA64-TRAP-NEXT:  .LBB19_2: # %entry
-; LA64-TRAP-NEXT:    jirl $zero, $ra, 0
+; LA64-TRAP-NEXT:    ret
 entry:
   %r = urem i64 %a, %b
   ret i64 %r

diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/select-bare-dbl.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/select-bare-dbl.ll
index 4c6026aba5acf..c26519de35cd9 100644
--- a/llvm/test/CodeGen/LoongArch/ir-instruction/select-bare-dbl.ll
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/select-bare-dbl.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc --mtriple=loongarch32 --mattr=+d < %s | FileCheck %s --check-prefix=LA32
 ; RUN: llc --mtriple=loongarch64 --mattr=+d < %s | FileCheck %s --check-prefix=LA64
 
@@ -10,14 +11,14 @@ define double @test(i1 %a, double %b, double %c) {
 ; LA32-NEXT:    andi $a0, $a0, 1
 ; LA32-NEXT:    movgr2cf $fcc0, $a0
 ; LA32-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: test:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    andi $a0, $a0, 1
 ; LA64-NEXT:    movgr2cf $fcc0, $a0
 ; LA64-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %res = select i1 %a, double %b, double %c
   ret double %res
 }

diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/select-bare-flt.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/select-bare-flt.ll
index af4789b522586..a625fd4789066 100644
--- a/llvm/test/CodeGen/LoongArch/ir-instruction/select-bare-flt.ll
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/select-bare-flt.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc --mtriple=loongarch32 --mattr=+f,-d < %s | FileCheck %s --check-prefix=LA32
 ; RUN: llc --mtriple=loongarch64 --mattr=+f,-d < %s | FileCheck %s --check-prefix=LA64
 
@@ -10,14 +11,14 @@ define float @test(i1 %a, float %b, float %c) {
 ; LA32-NEXT:    andi $a0, $a0, 1
 ; LA32-NEXT:    movgr2cf $fcc0, $a0
 ; LA32-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: test:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    andi $a0, $a0, 1
 ; LA64-NEXT:    movgr2cf $fcc0, $a0
 ; LA64-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %res = select i1 %a, float %b, float %c
   ret float %res
 }

diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/select-bare-int.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/select-bare-int.ll
index 3481e79b248bd..ddbc4ad719446 100644
--- a/llvm/test/CodeGen/LoongArch/ir-instruction/select-bare-int.ll
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/select-bare-int.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc --mtriple=loongarch32 < %s | FileCheck %s --check-prefix=LA32
 ; RUN: llc --mtriple=loongarch64 < %s | FileCheck %s --check-prefix=LA64
 
@@ -10,7 +11,7 @@ define i1 @bare_select_i1(i1 %a, i1 %b, i1 %c) {
 ; LA32-NEXT:    masknez $a2, $a2, $a0
 ; LA32-NEXT:    maskeqz $a0, $a1, $a0
 ; LA32-NEXT:    or $a0, $a0, $a2
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: bare_select_i1:
 ; LA64:       # %bb.0:
@@ -18,7 +19,7 @@ define i1 @bare_select_i1(i1 %a, i1 %b, i1 %c) {
 ; LA64-NEXT:    masknez $a2, $a2, $a0
 ; LA64-NEXT:    maskeqz $a0, $a1, $a0
 ; LA64-NEXT:    or $a0, $a0, $a2
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %res = select i1 %a, i1 %b, i1 %c
   ret i1 %res
 }
@@ -30,7 +31,7 @@ define i8 @bare_select_i8(i1 %a, i8 %b, i8 %c) {
 ; LA32-NEXT:    masknez $a2, $a2, $a0
 ; LA32-NEXT:    maskeqz $a0, $a1, $a0
 ; LA32-NEXT:    or $a0, $a0, $a2
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: bare_select_i8:
 ; LA64:       # %bb.0:
@@ -38,7 +39,7 @@ define i8 @bare_select_i8(i1 %a, i8 %b, i8 %c) {
 ; LA64-NEXT:    masknez $a2, $a2, $a0
 ; LA64-NEXT:    maskeqz $a0, $a1, $a0
 ; LA64-NEXT:    or $a0, $a0, $a2
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %res = select i1 %a, i8 %b, i8 %c
   ret i8 %res
 }
@@ -50,7 +51,7 @@ define i16 @bare_select_i16(i1 %a, i16 %b, i16 %c) {
 ; LA32-NEXT:    masknez $a2, $a2, $a0
 ; LA32-NEXT:    maskeqz $a0, $a1, $a0
 ; LA32-NEXT:    or $a0, $a0, $a2
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: bare_select_i16:
 ; LA64:       # %bb.0:
@@ -58,7 +59,7 @@ define i16 @bare_select_i16(i1 %a, i16 %b, i16 %c) {
 ; LA64-NEXT:    masknez $a2, $a2, $a0
 ; LA64-NEXT:    maskeqz $a0, $a1, $a0
 ; LA64-NEXT:    or $a0, $a0, $a2
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %res = select i1 %a, i16 %b, i16 %c
   ret i16 %res
 }
@@ -70,7 +71,7 @@ define i32 @bare_select_i32(i1 %a, i32 %b, i32 %c) {
 ; LA32-NEXT:    masknez $a2, $a2, $a0
 ; LA32-NEXT:    maskeqz $a0, $a1, $a0
 ; LA32-NEXT:    or $a0, $a0, $a2
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: bare_select_i32:
 ; LA64:       # %bb.0:
@@ -78,7 +79,7 @@ define i32 @bare_select_i32(i1 %a, i32 %b, i32 %c) {
 ; LA64-NEXT:    masknez $a2, $a2, $a0
 ; LA64-NEXT:    maskeqz $a0, $a1, $a0
 ; LA64-NEXT:    or $a0, $a0, $a2
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %res = select i1 %a, i32 %b, i32 %c
   ret i32 %res
 }
@@ -93,7 +94,7 @@ define i64 @bare_select_i64(i1 %a, i64 %b, i64 %c) {
 ; LA32-NEXT:    masknez $a1, $a4, $a5
 ; LA32-NEXT:    maskeqz $a2, $a2, $a5
 ; LA32-NEXT:    or $a1, $a2, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: bare_select_i64:
 ; LA64:       # %bb.0:
@@ -101,7 +102,7 @@ define i64 @bare_select_i64(i1 %a, i64 %b, i64 %c) {
 ; LA64-NEXT:    masknez $a2, $a2, $a0
 ; LA64-NEXT:    maskeqz $a0, $a1, $a0
 ; LA64-NEXT:    or $a0, $a0, $a2
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %res = select i1 %a, i64 %b, i64 %c
   ret i64 %res
 }

diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/select-fpcc-dbl.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/select-fpcc-dbl.ll
index 4397b64d927be..8d26996b86fe0 100644
--- a/llvm/test/CodeGen/LoongArch/ir-instruction/select-fpcc-dbl.ll
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/select-fpcc-dbl.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc --mtriple=loongarch32 --mattr=+d < %s | FileCheck %s --check-prefix=LA32
 ; RUN: llc --mtriple=loongarch64 --mattr=+d < %s | FileCheck %s --check-prefix=LA64
 
@@ -7,12 +8,12 @@ define double @fcmp_false(double %a, double %b, double %x, double %y) {
 ; LA32-LABEL: fcmp_false:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    fmov.d $fa0, $fa3
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: fcmp_false:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    fmov.d $fa0, $fa3
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cmp = fcmp false double %a, %b
   %res = select i1 %cmp, double %x, double %y
   ret double %res
@@ -23,13 +24,13 @@ define double @fcmp_oeq(double %a, double %b, double %x, double %y) {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    fcmp.ceq.d $fcc0, $fa0, $fa1
 ; LA32-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: fcmp_oeq:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    fcmp.ceq.d $fcc0, $fa0, $fa1
 ; LA64-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cmp = fcmp oeq double %a, %b
   %res = select i1 %cmp, double %x, double %y
   ret double %res
@@ -40,13 +41,13 @@ define double @fcmp_ogt(double %a, double %b, double %x, double %y) {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    fcmp.clt.d $fcc0, $fa1, $fa0
 ; LA32-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: fcmp_ogt:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    fcmp.clt.d $fcc0, $fa1, $fa0
 ; LA64-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cmp = fcmp ogt double %a, %b
   %res = select i1 %cmp, double %x, double %y
   ret double %res
@@ -57,13 +58,13 @@ define double @fcmp_oge(double %a, double %b, double %x, double %y) {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    fcmp.cle.d $fcc0, $fa1, $fa0
 ; LA32-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: fcmp_oge:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    fcmp.cle.d $fcc0, $fa1, $fa0
 ; LA64-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cmp = fcmp oge double %a, %b
   %res = select i1 %cmp, double %x, double %y
   ret double %res
@@ -74,13 +75,13 @@ define double @fcmp_olt(double %a, double %b, double %x, double %y) {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    fcmp.clt.d $fcc0, $fa0, $fa1
 ; LA32-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: fcmp_olt:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    fcmp.clt.d $fcc0, $fa0, $fa1
 ; LA64-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cmp = fcmp olt double %a, %b
   %res = select i1 %cmp, double %x, double %y
   ret double %res
@@ -91,13 +92,13 @@ define double @fcmp_ole(double %a, double %b, double %x, double %y) {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    fcmp.cle.d $fcc0, $fa0, $fa1
 ; LA32-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: fcmp_ole:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    fcmp.cle.d $fcc0, $fa0, $fa1
 ; LA64-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cmp = fcmp ole double %a, %b
   %res = select i1 %cmp, double %x, double %y
   ret double %res
@@ -108,13 +109,13 @@ define double @fcmp_one(double %a, double %b, double %x, double %y) {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    fcmp.cne.d $fcc0, $fa0, $fa1
 ; LA32-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: fcmp_one:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    fcmp.cne.d $fcc0, $fa0, $fa1
 ; LA64-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cmp = fcmp one double %a, %b
   %res = select i1 %cmp, double %x, double %y
   ret double %res
@@ -125,13 +126,13 @@ define double @fcmp_ord(double %a, double %b, double %x, double %y) {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    fcmp.cor.d $fcc0, $fa0, $fa1
 ; LA32-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: fcmp_ord:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    fcmp.cor.d $fcc0, $fa0, $fa1
 ; LA64-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cmp = fcmp ord double %a, %b
   %res = select i1 %cmp, double %x, double %y
   ret double %res
@@ -142,13 +143,13 @@ define double @fcmp_ueq(double %a, double %b, double %x, double %y) {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    fcmp.cueq.d $fcc0, $fa0, $fa1
 ; LA32-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: fcmp_ueq:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    fcmp.cueq.d $fcc0, $fa0, $fa1
 ; LA64-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cmp = fcmp ueq double %a, %b
   %res = select i1 %cmp, double %x, double %y
   ret double %res
@@ -159,13 +160,13 @@ define double @fcmp_ugt(double %a, double %b, double %x, double %y) {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    fcmp.cult.d $fcc0, $fa1, $fa0
 ; LA32-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: fcmp_ugt:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    fcmp.cult.d $fcc0, $fa1, $fa0
 ; LA64-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cmp = fcmp ugt double %a, %b
   %res = select i1 %cmp, double %x, double %y
   ret double %res
@@ -176,13 +177,13 @@ define double @fcmp_uge(double %a, double %b, double %x, double %y) {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    fcmp.cule.d $fcc0, $fa1, $fa0
 ; LA32-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: fcmp_uge:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    fcmp.cule.d $fcc0, $fa1, $fa0
 ; LA64-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cmp = fcmp uge double %a, %b
   %res = select i1 %cmp, double %x, double %y
   ret double %res
@@ -193,13 +194,13 @@ define double @fcmp_ult(double %a, double %b, double %x, double %y) {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    fcmp.cult.d $fcc0, $fa0, $fa1
 ; LA32-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: fcmp_ult:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    fcmp.cult.d $fcc0, $fa0, $fa1
 ; LA64-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cmp = fcmp ult double %a, %b
   %res = select i1 %cmp, double %x, double %y
   ret double %res
@@ -210,13 +211,13 @@ define double @fcmp_ule(double %a, double %b, double %x, double %y) {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    fcmp.cule.d $fcc0, $fa0, $fa1
 ; LA32-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: fcmp_ule:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    fcmp.cule.d $fcc0, $fa0, $fa1
 ; LA64-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cmp = fcmp ule double %a, %b
   %res = select i1 %cmp, double %x, double %y
   ret double %res
@@ -227,13 +228,13 @@ define double @fcmp_une(double %a, double %b, double %x, double %y) {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    fcmp.cune.d $fcc0, $fa0, $fa1
 ; LA32-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: fcmp_une:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    fcmp.cune.d $fcc0, $fa0, $fa1
 ; LA64-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cmp = fcmp une double %a, %b
   %res = select i1 %cmp, double %x, double %y
   ret double %res
@@ -244,13 +245,13 @@ define double @fcmp_uno(double %a, double %b, double %x, double %y) {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    fcmp.cun.d $fcc0, $fa0, $fa1
 ; LA32-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: fcmp_uno:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    fcmp.cun.d $fcc0, $fa0, $fa1
 ; LA64-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cmp = fcmp uno double %a, %b
   %res = select i1 %cmp, double %x, double %y
   ret double %res
@@ -260,12 +261,12 @@ define double @fcmp_true(double %a, double %b, double %x, double %y) {
 ; LA32-LABEL: fcmp_true:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    fmov.d $fa0, $fa2
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: fcmp_true:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    fmov.d $fa0, $fa2
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cmp = fcmp true double %a, %b
   %res = select i1 %cmp, double %x, double %y
   ret double %res

diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/select-fpcc-flt.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/select-fpcc-flt.ll
index 23d71493cb4be..1f6d2313ab72c 100644
--- a/llvm/test/CodeGen/LoongArch/ir-instruction/select-fpcc-flt.ll
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/select-fpcc-flt.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc --mtriple=loongarch32 --mattr=+f,-d < %s | FileCheck %s --check-prefix=LA32
 ; RUN: llc --mtriple=loongarch64 --mattr=+f,-d < %s | FileCheck %s --check-prefix=LA64
 
@@ -7,12 +8,12 @@ define float @fcmp_false(float %a, float %b, float %x, float %y) {
 ; LA32-LABEL: fcmp_false:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    fmov.s $fa0, $fa3
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: fcmp_false:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    fmov.s $fa0, $fa3
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cmp = fcmp false float %a, %b
   %res = select i1 %cmp, float %x, float %y
   ret float %res
@@ -23,13 +24,13 @@ define float @fcmp_oeq(float %a, float %b, float %x, float %y) {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    fcmp.ceq.s $fcc0, $fa0, $fa1
 ; LA32-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: fcmp_oeq:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    fcmp.ceq.s $fcc0, $fa0, $fa1
 ; LA64-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cmp = fcmp oeq float %a, %b
   %res = select i1 %cmp, float %x, float %y
   ret float %res
@@ -40,13 +41,13 @@ define float @fcmp_ogt(float %a, float %b, float %x, float %y) {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    fcmp.clt.s $fcc0, $fa1, $fa0
 ; LA32-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: fcmp_ogt:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    fcmp.clt.s $fcc0, $fa1, $fa0
 ; LA64-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cmp = fcmp ogt float %a, %b
   %res = select i1 %cmp, float %x, float %y
   ret float %res
@@ -57,13 +58,13 @@ define float @fcmp_oge(float %a, float %b, float %x, float %y) {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    fcmp.cle.s $fcc0, $fa1, $fa0
 ; LA32-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: fcmp_oge:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    fcmp.cle.s $fcc0, $fa1, $fa0
 ; LA64-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cmp = fcmp oge float %a, %b
   %res = select i1 %cmp, float %x, float %y
   ret float %res
@@ -74,13 +75,13 @@ define float @fcmp_olt(float %a, float %b, float %x, float %y) {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    fcmp.clt.s $fcc0, $fa0, $fa1
 ; LA32-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: fcmp_olt:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    fcmp.clt.s $fcc0, $fa0, $fa1
 ; LA64-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cmp = fcmp olt float %a, %b
   %res = select i1 %cmp, float %x, float %y
   ret float %res
@@ -91,13 +92,13 @@ define float @fcmp_ole(float %a, float %b, float %x, float %y) {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    fcmp.cle.s $fcc0, $fa0, $fa1
 ; LA32-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: fcmp_ole:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    fcmp.cle.s $fcc0, $fa0, $fa1
 ; LA64-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cmp = fcmp ole float %a, %b
   %res = select i1 %cmp, float %x, float %y
   ret float %res
@@ -108,13 +109,13 @@ define float @fcmp_one(float %a, float %b, float %x, float %y) {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    fcmp.cne.s $fcc0, $fa0, $fa1
 ; LA32-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: fcmp_one:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    fcmp.cne.s $fcc0, $fa0, $fa1
 ; LA64-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cmp = fcmp one float %a, %b
   %res = select i1 %cmp, float %x, float %y
   ret float %res
@@ -125,13 +126,13 @@ define float @fcmp_ord(float %a, float %b, float %x, float %y) {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    fcmp.cor.s $fcc0, $fa0, $fa1
 ; LA32-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: fcmp_ord:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    fcmp.cor.s $fcc0, $fa0, $fa1
 ; LA64-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cmp = fcmp ord float %a, %b
   %res = select i1 %cmp, float %x, float %y
   ret float %res
@@ -142,13 +143,13 @@ define float @fcmp_ueq(float %a, float %b, float %x, float %y) {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    fcmp.cueq.s $fcc0, $fa0, $fa1
 ; LA32-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: fcmp_ueq:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    fcmp.cueq.s $fcc0, $fa0, $fa1
 ; LA64-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cmp = fcmp ueq float %a, %b
   %res = select i1 %cmp, float %x, float %y
   ret float %res
@@ -159,13 +160,13 @@ define float @fcmp_ugt(float %a, float %b, float %x, float %y) {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    fcmp.cult.s $fcc0, $fa1, $fa0
 ; LA32-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: fcmp_ugt:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    fcmp.cult.s $fcc0, $fa1, $fa0
 ; LA64-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cmp = fcmp ugt float %a, %b
   %res = select i1 %cmp, float %x, float %y
   ret float %res
@@ -176,13 +177,13 @@ define float @fcmp_uge(float %a, float %b, float %x, float %y) {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    fcmp.cule.s $fcc0, $fa1, $fa0
 ; LA32-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: fcmp_uge:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    fcmp.cule.s $fcc0, $fa1, $fa0
 ; LA64-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cmp = fcmp uge float %a, %b
   %res = select i1 %cmp, float %x, float %y
   ret float %res
@@ -193,13 +194,13 @@ define float @fcmp_ult(float %a, float %b, float %x, float %y) {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    fcmp.cult.s $fcc0, $fa0, $fa1
 ; LA32-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: fcmp_ult:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    fcmp.cult.s $fcc0, $fa0, $fa1
 ; LA64-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cmp = fcmp ult float %a, %b
   %res = select i1 %cmp, float %x, float %y
   ret float %res
@@ -210,13 +211,13 @@ define float @fcmp_ule(float %a, float %b, float %x, float %y) {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    fcmp.cule.s $fcc0, $fa0, $fa1
 ; LA32-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: fcmp_ule:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    fcmp.cule.s $fcc0, $fa0, $fa1
 ; LA64-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cmp = fcmp ule float %a, %b
   %res = select i1 %cmp, float %x, float %y
   ret float %res
@@ -227,13 +228,13 @@ define float @fcmp_une(float %a, float %b, float %x, float %y) {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    fcmp.cune.s $fcc0, $fa0, $fa1
 ; LA32-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: fcmp_une:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    fcmp.cune.s $fcc0, $fa0, $fa1
 ; LA64-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cmp = fcmp une float %a, %b
   %res = select i1 %cmp, float %x, float %y
   ret float %res
@@ -244,13 +245,13 @@ define float @fcmp_uno(float %a, float %b, float %x, float %y) {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    fcmp.cun.s $fcc0, $fa0, $fa1
 ; LA32-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: fcmp_uno:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    fcmp.cun.s $fcc0, $fa0, $fa1
 ; LA64-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cmp = fcmp uno float %a, %b
   %res = select i1 %cmp, float %x, float %y
   ret float %res
@@ -260,12 +261,12 @@ define float @fcmp_true(float %a, float %b, float %x, float %y) {
 ; LA32-LABEL: fcmp_true:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    fmov.s $fa0, $fa2
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: fcmp_true:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    fmov.s $fa0, $fa2
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cmp = fcmp true float %a, %b
   %res = select i1 %cmp, float %x, float %y
   ret float %res

diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/select-fpcc-int.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/select-fpcc-int.ll
index 9e742ee576cbb..3e88181a11fe8 100644
--- a/llvm/test/CodeGen/LoongArch/ir-instruction/select-fpcc-int.ll
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/select-fpcc-int.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc --mtriple=loongarch32 --mattr=+d < %s | FileCheck %s --check-prefix=LA32
 ; RUN: llc --mtriple=loongarch64 --mattr=+d < %s | FileCheck %s --check-prefix=LA64
 
@@ -7,12 +8,12 @@ define i32 @f32_fcmp_false(float %a, float %b, i32 %x, i32 %y) {
 ; LA32-LABEL: f32_fcmp_false:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    move $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: f32_fcmp_false:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    move $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cmp = fcmp false float %a, %b
   %res = select i1 %cmp, i32 %x, i32 %y
   ret i32 %res
@@ -26,7 +27,7 @@ define i32 @f32_fcmp_oeq(float %a, float %b, i32 %x, i32 %y) {
 ; LA32-NEXT:    masknez $a1, $a1, $a2
 ; LA32-NEXT:    maskeqz $a0, $a0, $a2
 ; LA32-NEXT:    or $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: f32_fcmp_oeq:
 ; LA64:       # %bb.0:
@@ -35,7 +36,7 @@ define i32 @f32_fcmp_oeq(float %a, float %b, i32 %x, i32 %y) {
 ; LA64-NEXT:    masknez $a1, $a1, $a2
 ; LA64-NEXT:    maskeqz $a0, $a0, $a2
 ; LA64-NEXT:    or $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cmp = fcmp oeq float %a, %b
   %res = select i1 %cmp, i32 %x, i32 %y
   ret i32 %res
@@ -49,7 +50,7 @@ define i32 @f32_fcmp_ogt(float %a, float %b, i32 %x, i32 %y) {
 ; LA32-NEXT:    masknez $a1, $a1, $a2
 ; LA32-NEXT:    maskeqz $a0, $a0, $a2
 ; LA32-NEXT:    or $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: f32_fcmp_ogt:
 ; LA64:       # %bb.0:
@@ -58,7 +59,7 @@ define i32 @f32_fcmp_ogt(float %a, float %b, i32 %x, i32 %y) {
 ; LA64-NEXT:    masknez $a1, $a1, $a2
 ; LA64-NEXT:    maskeqz $a0, $a0, $a2
 ; LA64-NEXT:    or $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cmp = fcmp ogt float %a, %b
   %res = select i1 %cmp, i32 %x, i32 %y
   ret i32 %res
@@ -72,7 +73,7 @@ define i32 @f32_fcmp_oge(float %a, float %b, i32 %x, i32 %y) {
 ; LA32-NEXT:    masknez $a1, $a1, $a2
 ; LA32-NEXT:    maskeqz $a0, $a0, $a2
 ; LA32-NEXT:    or $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: f32_fcmp_oge:
 ; LA64:       # %bb.0:
@@ -81,7 +82,7 @@ define i32 @f32_fcmp_oge(float %a, float %b, i32 %x, i32 %y) {
 ; LA64-NEXT:    masknez $a1, $a1, $a2
 ; LA64-NEXT:    maskeqz $a0, $a0, $a2
 ; LA64-NEXT:    or $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cmp = fcmp oge float %a, %b
   %res = select i1 %cmp, i32 %x, i32 %y
   ret i32 %res
@@ -95,7 +96,7 @@ define i32 @f32_fcmp_olt(float %a, float %b, i32 %x, i32 %y) {
 ; LA32-NEXT:    masknez $a1, $a1, $a2
 ; LA32-NEXT:    maskeqz $a0, $a0, $a2
 ; LA32-NEXT:    or $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: f32_fcmp_olt:
 ; LA64:       # %bb.0:
@@ -104,7 +105,7 @@ define i32 @f32_fcmp_olt(float %a, float %b, i32 %x, i32 %y) {
 ; LA64-NEXT:    masknez $a1, $a1, $a2
 ; LA64-NEXT:    maskeqz $a0, $a0, $a2
 ; LA64-NEXT:    or $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cmp = fcmp olt float %a, %b
   %res = select i1 %cmp, i32 %x, i32 %y
   ret i32 %res
@@ -118,7 +119,7 @@ define i32 @f32_fcmp_ole(float %a, float %b, i32 %x, i32 %y) {
 ; LA32-NEXT:    masknez $a1, $a1, $a2
 ; LA32-NEXT:    maskeqz $a0, $a0, $a2
 ; LA32-NEXT:    or $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: f32_fcmp_ole:
 ; LA64:       # %bb.0:
@@ -127,7 +128,7 @@ define i32 @f32_fcmp_ole(float %a, float %b, i32 %x, i32 %y) {
 ; LA64-NEXT:    masknez $a1, $a1, $a2
 ; LA64-NEXT:    maskeqz $a0, $a0, $a2
 ; LA64-NEXT:    or $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cmp = fcmp ole float %a, %b
   %res = select i1 %cmp, i32 %x, i32 %y
   ret i32 %res
@@ -141,7 +142,7 @@ define i32 @f32_fcmp_one(float %a, float %b, i32 %x, i32 %y) {
 ; LA32-NEXT:    masknez $a1, $a1, $a2
 ; LA32-NEXT:    maskeqz $a0, $a0, $a2
 ; LA32-NEXT:    or $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: f32_fcmp_one:
 ; LA64:       # %bb.0:
@@ -150,7 +151,7 @@ define i32 @f32_fcmp_one(float %a, float %b, i32 %x, i32 %y) {
 ; LA64-NEXT:    masknez $a1, $a1, $a2
 ; LA64-NEXT:    maskeqz $a0, $a0, $a2
 ; LA64-NEXT:    or $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cmp = fcmp one float %a, %b
   %res = select i1 %cmp, i32 %x, i32 %y
   ret i32 %res
@@ -164,7 +165,7 @@ define i32 @f32_fcmp_ord(float %a, float %b, i32 %x, i32 %y) {
 ; LA32-NEXT:    masknez $a1, $a1, $a2
 ; LA32-NEXT:    maskeqz $a0, $a0, $a2
 ; LA32-NEXT:    or $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: f32_fcmp_ord:
 ; LA64:       # %bb.0:
@@ -173,7 +174,7 @@ define i32 @f32_fcmp_ord(float %a, float %b, i32 %x, i32 %y) {
 ; LA64-NEXT:    masknez $a1, $a1, $a2
 ; LA64-NEXT:    maskeqz $a0, $a0, $a2
 ; LA64-NEXT:    or $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cmp = fcmp ord float %a, %b
   %res = select i1 %cmp, i32 %x, i32 %y
   ret i32 %res
@@ -187,7 +188,7 @@ define i32 @f32_fcmp_ueq(float %a, float %b, i32 %x, i32 %y) {
 ; LA32-NEXT:    masknez $a1, $a1, $a2
 ; LA32-NEXT:    maskeqz $a0, $a0, $a2
 ; LA32-NEXT:    or $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: f32_fcmp_ueq:
 ; LA64:       # %bb.0:
@@ -196,7 +197,7 @@ define i32 @f32_fcmp_ueq(float %a, float %b, i32 %x, i32 %y) {
 ; LA64-NEXT:    masknez $a1, $a1, $a2
 ; LA64-NEXT:    maskeqz $a0, $a0, $a2
 ; LA64-NEXT:    or $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cmp = fcmp ueq float %a, %b
   %res = select i1 %cmp, i32 %x, i32 %y
   ret i32 %res
@@ -210,7 +211,7 @@ define i32 @f32_fcmp_ugt(float %a, float %b, i32 %x, i32 %y) {
 ; LA32-NEXT:    masknez $a1, $a1, $a2
 ; LA32-NEXT:    maskeqz $a0, $a0, $a2
 ; LA32-NEXT:    or $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: f32_fcmp_ugt:
 ; LA64:       # %bb.0:
@@ -219,7 +220,7 @@ define i32 @f32_fcmp_ugt(float %a, float %b, i32 %x, i32 %y) {
 ; LA64-NEXT:    masknez $a1, $a1, $a2
 ; LA64-NEXT:    maskeqz $a0, $a0, $a2
 ; LA64-NEXT:    or $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cmp = fcmp ugt float %a, %b
   %res = select i1 %cmp, i32 %x, i32 %y
   ret i32 %res
@@ -233,7 +234,7 @@ define i32 @f32_fcmp_uge(float %a, float %b, i32 %x, i32 %y) {
 ; LA32-NEXT:    masknez $a1, $a1, $a2
 ; LA32-NEXT:    maskeqz $a0, $a0, $a2
 ; LA32-NEXT:    or $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: f32_fcmp_uge:
 ; LA64:       # %bb.0:
@@ -242,7 +243,7 @@ define i32 @f32_fcmp_uge(float %a, float %b, i32 %x, i32 %y) {
 ; LA64-NEXT:    masknez $a1, $a1, $a2
 ; LA64-NEXT:    maskeqz $a0, $a0, $a2
 ; LA64-NEXT:    or $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cmp = fcmp uge float %a, %b
   %res = select i1 %cmp, i32 %x, i32 %y
   ret i32 %res
@@ -256,7 +257,7 @@ define i32 @f32_fcmp_ult(float %a, float %b, i32 %x, i32 %y) {
 ; LA32-NEXT:    masknez $a1, $a1, $a2
 ; LA32-NEXT:    maskeqz $a0, $a0, $a2
 ; LA32-NEXT:    or $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: f32_fcmp_ult:
 ; LA64:       # %bb.0:
@@ -265,7 +266,7 @@ define i32 @f32_fcmp_ult(float %a, float %b, i32 %x, i32 %y) {
 ; LA64-NEXT:    masknez $a1, $a1, $a2
 ; LA64-NEXT:    maskeqz $a0, $a0, $a2
 ; LA64-NEXT:    or $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cmp = fcmp ult float %a, %b
   %res = select i1 %cmp, i32 %x, i32 %y
   ret i32 %res
@@ -279,7 +280,7 @@ define i32 @f32_fcmp_ule(float %a, float %b, i32 %x, i32 %y) {
 ; LA32-NEXT:    masknez $a1, $a1, $a2
 ; LA32-NEXT:    maskeqz $a0, $a0, $a2
 ; LA32-NEXT:    or $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: f32_fcmp_ule:
 ; LA64:       # %bb.0:
@@ -288,7 +289,7 @@ define i32 @f32_fcmp_ule(float %a, float %b, i32 %x, i32 %y) {
 ; LA64-NEXT:    masknez $a1, $a1, $a2
 ; LA64-NEXT:    maskeqz $a0, $a0, $a2
 ; LA64-NEXT:    or $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cmp = fcmp ule float %a, %b
   %res = select i1 %cmp, i32 %x, i32 %y
   ret i32 %res
@@ -302,7 +303,7 @@ define i32 @f32_fcmp_une(float %a, float %b, i32 %x, i32 %y) {
 ; LA32-NEXT:    masknez $a1, $a1, $a2
 ; LA32-NEXT:    maskeqz $a0, $a0, $a2
 ; LA32-NEXT:    or $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: f32_fcmp_une:
 ; LA64:       # %bb.0:
@@ -311,7 +312,7 @@ define i32 @f32_fcmp_une(float %a, float %b, i32 %x, i32 %y) {
 ; LA64-NEXT:    masknez $a1, $a1, $a2
 ; LA64-NEXT:    maskeqz $a0, $a0, $a2
 ; LA64-NEXT:    or $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cmp = fcmp une float %a, %b
   %res = select i1 %cmp, i32 %x, i32 %y
   ret i32 %res
@@ -325,7 +326,7 @@ define i32 @f32_fcmp_uno(float %a, float %b, i32 %x, i32 %y) {
 ; LA32-NEXT:    masknez $a1, $a1, $a2
 ; LA32-NEXT:    maskeqz $a0, $a0, $a2
 ; LA32-NEXT:    or $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: f32_fcmp_uno:
 ; LA64:       # %bb.0:
@@ -334,7 +335,7 @@ define i32 @f32_fcmp_uno(float %a, float %b, i32 %x, i32 %y) {
 ; LA64-NEXT:    masknez $a1, $a1, $a2
 ; LA64-NEXT:    maskeqz $a0, $a0, $a2
 ; LA64-NEXT:    or $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cmp = fcmp uno float %a, %b
   %res = select i1 %cmp, i32 %x, i32 %y
   ret i32 %res
@@ -343,11 +344,11 @@ define i32 @f32_fcmp_uno(float %a, float %b, i32 %x, i32 %y) {
 define i32 @f32_fcmp_true(float %a, float %b, i32 %x, i32 %y) {
 ; LA32-LABEL: f32_fcmp_true:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: f32_fcmp_true:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cmp = fcmp true float %a, %b
   %res = select i1 %cmp, i32 %x, i32 %y
   ret i32 %res
@@ -357,12 +358,12 @@ define i32 @f64_fcmp_false(double %a, double %b, i32 %x, i32 %y) {
 ; LA32-LABEL: f64_fcmp_false:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    move $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: f64_fcmp_false:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    move $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cmp = fcmp false double %a, %b
   %res = select i1 %cmp, i32 %x, i32 %y
   ret i32 %res
@@ -376,7 +377,7 @@ define i32 @f64_fcmp_oeq(double %a, double %b, i32 %x, i32 %y) {
 ; LA32-NEXT:    masknez $a1, $a1, $a2
 ; LA32-NEXT:    maskeqz $a0, $a0, $a2
 ; LA32-NEXT:    or $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: f64_fcmp_oeq:
 ; LA64:       # %bb.0:
@@ -385,7 +386,7 @@ define i32 @f64_fcmp_oeq(double %a, double %b, i32 %x, i32 %y) {
 ; LA64-NEXT:    masknez $a1, $a1, $a2
 ; LA64-NEXT:    maskeqz $a0, $a0, $a2
 ; LA64-NEXT:    or $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cmp = fcmp oeq double %a, %b
   %res = select i1 %cmp, i32 %x, i32 %y
   ret i32 %res
@@ -399,7 +400,7 @@ define i32 @f64_fcmp_ogt(double %a, double %b, i32 %x, i32 %y) {
 ; LA32-NEXT:    masknez $a1, $a1, $a2
 ; LA32-NEXT:    maskeqz $a0, $a0, $a2
 ; LA32-NEXT:    or $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: f64_fcmp_ogt:
 ; LA64:       # %bb.0:
@@ -408,7 +409,7 @@ define i32 @f64_fcmp_ogt(double %a, double %b, i32 %x, i32 %y) {
 ; LA64-NEXT:    masknez $a1, $a1, $a2
 ; LA64-NEXT:    maskeqz $a0, $a0, $a2
 ; LA64-NEXT:    or $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cmp = fcmp ogt double %a, %b
   %res = select i1 %cmp, i32 %x, i32 %y
   ret i32 %res
@@ -422,7 +423,7 @@ define i32 @f64_fcmp_oge(double %a, double %b, i32 %x, i32 %y) {
 ; LA32-NEXT:    masknez $a1, $a1, $a2
 ; LA32-NEXT:    maskeqz $a0, $a0, $a2
 ; LA32-NEXT:    or $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: f64_fcmp_oge:
 ; LA64:       # %bb.0:
@@ -431,7 +432,7 @@ define i32 @f64_fcmp_oge(double %a, double %b, i32 %x, i32 %y) {
 ; LA64-NEXT:    masknez $a1, $a1, $a2
 ; LA64-NEXT:    maskeqz $a0, $a0, $a2
 ; LA64-NEXT:    or $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cmp = fcmp oge double %a, %b
   %res = select i1 %cmp, i32 %x, i32 %y
   ret i32 %res
@@ -445,7 +446,7 @@ define i32 @f64_fcmp_olt(double %a, double %b, i32 %x, i32 %y) {
 ; LA32-NEXT:    masknez $a1, $a1, $a2
 ; LA32-NEXT:    maskeqz $a0, $a0, $a2
 ; LA32-NEXT:    or $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: f64_fcmp_olt:
 ; LA64:       # %bb.0:
@@ -454,7 +455,7 @@ define i32 @f64_fcmp_olt(double %a, double %b, i32 %x, i32 %y) {
 ; LA64-NEXT:    masknez $a1, $a1, $a2
 ; LA64-NEXT:    maskeqz $a0, $a0, $a2
 ; LA64-NEXT:    or $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cmp = fcmp olt double %a, %b
   %res = select i1 %cmp, i32 %x, i32 %y
   ret i32 %res
@@ -468,7 +469,7 @@ define i32 @f64_fcmp_ole(double %a, double %b, i32 %x, i32 %y) {
 ; LA32-NEXT:    masknez $a1, $a1, $a2
 ; LA32-NEXT:    maskeqz $a0, $a0, $a2
 ; LA32-NEXT:    or $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: f64_fcmp_ole:
 ; LA64:       # %bb.0:
@@ -477,7 +478,7 @@ define i32 @f64_fcmp_ole(double %a, double %b, i32 %x, i32 %y) {
 ; LA64-NEXT:    masknez $a1, $a1, $a2
 ; LA64-NEXT:    maskeqz $a0, $a0, $a2
 ; LA64-NEXT:    or $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cmp = fcmp ole double %a, %b
   %res = select i1 %cmp, i32 %x, i32 %y
   ret i32 %res
@@ -491,7 +492,7 @@ define i32 @f64_fcmp_one(double %a, double %b, i32 %x, i32 %y) {
 ; LA32-NEXT:    masknez $a1, $a1, $a2
 ; LA32-NEXT:    maskeqz $a0, $a0, $a2
 ; LA32-NEXT:    or $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: f64_fcmp_one:
 ; LA64:       # %bb.0:
@@ -500,7 +501,7 @@ define i32 @f64_fcmp_one(double %a, double %b, i32 %x, i32 %y) {
 ; LA64-NEXT:    masknez $a1, $a1, $a2
 ; LA64-NEXT:    maskeqz $a0, $a0, $a2
 ; LA64-NEXT:    or $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cmp = fcmp one double %a, %b
   %res = select i1 %cmp, i32 %x, i32 %y
   ret i32 %res
@@ -514,7 +515,7 @@ define i32 @f64_fcmp_ord(double %a, double %b, i32 %x, i32 %y) {
 ; LA32-NEXT:    masknez $a1, $a1, $a2
 ; LA32-NEXT:    maskeqz $a0, $a0, $a2
 ; LA32-NEXT:    or $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: f64_fcmp_ord:
 ; LA64:       # %bb.0:
@@ -523,7 +524,7 @@ define i32 @f64_fcmp_ord(double %a, double %b, i32 %x, i32 %y) {
 ; LA64-NEXT:    masknez $a1, $a1, $a2
 ; LA64-NEXT:    maskeqz $a0, $a0, $a2
 ; LA64-NEXT:    or $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cmp = fcmp ord double %a, %b
   %res = select i1 %cmp, i32 %x, i32 %y
   ret i32 %res
@@ -537,7 +538,7 @@ define i32 @f64_fcmp_ueq(double %a, double %b, i32 %x, i32 %y) {
 ; LA32-NEXT:    masknez $a1, $a1, $a2
 ; LA32-NEXT:    maskeqz $a0, $a0, $a2
 ; LA32-NEXT:    or $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: f64_fcmp_ueq:
 ; LA64:       # %bb.0:
@@ -546,7 +547,7 @@ define i32 @f64_fcmp_ueq(double %a, double %b, i32 %x, i32 %y) {
 ; LA64-NEXT:    masknez $a1, $a1, $a2
 ; LA64-NEXT:    maskeqz $a0, $a0, $a2
 ; LA64-NEXT:    or $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cmp = fcmp ueq double %a, %b
   %res = select i1 %cmp, i32 %x, i32 %y
   ret i32 %res
@@ -560,7 +561,7 @@ define i32 @f64_fcmp_ugt(double %a, double %b, i32 %x, i32 %y) {
 ; LA32-NEXT:    masknez $a1, $a1, $a2
 ; LA32-NEXT:    maskeqz $a0, $a0, $a2
 ; LA32-NEXT:    or $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: f64_fcmp_ugt:
 ; LA64:       # %bb.0:
@@ -569,7 +570,7 @@ define i32 @f64_fcmp_ugt(double %a, double %b, i32 %x, i32 %y) {
 ; LA64-NEXT:    masknez $a1, $a1, $a2
 ; LA64-NEXT:    maskeqz $a0, $a0, $a2
 ; LA64-NEXT:    or $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cmp = fcmp ugt double %a, %b
   %res = select i1 %cmp, i32 %x, i32 %y
   ret i32 %res
@@ -583,7 +584,7 @@ define i32 @f64_fcmp_uge(double %a, double %b, i32 %x, i32 %y) {
 ; LA32-NEXT:    masknez $a1, $a1, $a2
 ; LA32-NEXT:    maskeqz $a0, $a0, $a2
 ; LA32-NEXT:    or $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: f64_fcmp_uge:
 ; LA64:       # %bb.0:
@@ -592,7 +593,7 @@ define i32 @f64_fcmp_uge(double %a, double %b, i32 %x, i32 %y) {
 ; LA64-NEXT:    masknez $a1, $a1, $a2
 ; LA64-NEXT:    maskeqz $a0, $a0, $a2
 ; LA64-NEXT:    or $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cmp = fcmp uge double %a, %b
   %res = select i1 %cmp, i32 %x, i32 %y
   ret i32 %res
@@ -606,7 +607,7 @@ define i32 @f64_fcmp_ult(double %a, double %b, i32 %x, i32 %y) {
 ; LA32-NEXT:    masknez $a1, $a1, $a2
 ; LA32-NEXT:    maskeqz $a0, $a0, $a2
 ; LA32-NEXT:    or $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: f64_fcmp_ult:
 ; LA64:       # %bb.0:
@@ -615,7 +616,7 @@ define i32 @f64_fcmp_ult(double %a, double %b, i32 %x, i32 %y) {
 ; LA64-NEXT:    masknez $a1, $a1, $a2
 ; LA64-NEXT:    maskeqz $a0, $a0, $a2
 ; LA64-NEXT:    or $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cmp = fcmp ult double %a, %b
   %res = select i1 %cmp, i32 %x, i32 %y
   ret i32 %res
@@ -629,7 +630,7 @@ define i32 @f64_fcmp_ule(double %a, double %b, i32 %x, i32 %y) {
 ; LA32-NEXT:    masknez $a1, $a1, $a2
 ; LA32-NEXT:    maskeqz $a0, $a0, $a2
 ; LA32-NEXT:    or $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: f64_fcmp_ule:
 ; LA64:       # %bb.0:
@@ -638,7 +639,7 @@ define i32 @f64_fcmp_ule(double %a, double %b, i32 %x, i32 %y) {
 ; LA64-NEXT:    masknez $a1, $a1, $a2
 ; LA64-NEXT:    maskeqz $a0, $a0, $a2
 ; LA64-NEXT:    or $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cmp = fcmp ule double %a, %b
   %res = select i1 %cmp, i32 %x, i32 %y
   ret i32 %res
@@ -652,7 +653,7 @@ define i32 @f64_fcmp_une(double %a, double %b, i32 %x, i32 %y) {
 ; LA32-NEXT:    masknez $a1, $a1, $a2
 ; LA32-NEXT:    maskeqz $a0, $a0, $a2
 ; LA32-NEXT:    or $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: f64_fcmp_une:
 ; LA64:       # %bb.0:
@@ -661,7 +662,7 @@ define i32 @f64_fcmp_une(double %a, double %b, i32 %x, i32 %y) {
 ; LA64-NEXT:    masknez $a1, $a1, $a2
 ; LA64-NEXT:    maskeqz $a0, $a0, $a2
 ; LA64-NEXT:    or $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cmp = fcmp une double %a, %b
   %res = select i1 %cmp, i32 %x, i32 %y
   ret i32 %res
@@ -675,7 +676,7 @@ define i32 @f64_fcmp_uno(double %a, double %b, i32 %x, i32 %y) {
 ; LA32-NEXT:    masknez $a1, $a1, $a2
 ; LA32-NEXT:    maskeqz $a0, $a0, $a2
 ; LA32-NEXT:    or $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: f64_fcmp_uno:
 ; LA64:       # %bb.0:
@@ -684,7 +685,7 @@ define i32 @f64_fcmp_uno(double %a, double %b, i32 %x, i32 %y) {
 ; LA64-NEXT:    masknez $a1, $a1, $a2
 ; LA64-NEXT:    maskeqz $a0, $a0, $a2
 ; LA64-NEXT:    or $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cmp = fcmp uno double %a, %b
   %res = select i1 %cmp, i32 %x, i32 %y
   ret i32 %res
@@ -693,11 +694,11 @@ define i32 @f64_fcmp_uno(double %a, double %b, i32 %x, i32 %y) {
 define i32 @f64_fcmp_true(double %a, double %b, i32 %x, i32 %y) {
 ; LA32-LABEL: f64_fcmp_true:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: f64_fcmp_true:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cmp = fcmp true double %a, %b
   %res = select i1 %cmp, i32 %x, i32 %y
   ret i32 %res

diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/select-icc-dbl.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/select-icc-dbl.ll
index 5ccee6b193b0d..d8b0ecfd5dac0 100644
--- a/llvm/test/CodeGen/LoongArch/ir-instruction/select-icc-dbl.ll
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/select-icc-dbl.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc --mtriple=loongarch32 --mattr=+d < %s | FileCheck %s --check-prefix=LA32
 ; RUN: llc --mtriple=loongarch64 --mattr=+d < %s | FileCheck %s --check-prefix=LA64
 
@@ -10,7 +11,7 @@ define double @select_eq(i32 signext %a, i32 signext %b, double %x, double %y) {
 ; LA32-NEXT:    sltui $a0, $a0, 1
 ; LA32-NEXT:    movgr2cf $fcc0, $a0
 ; LA32-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: select_eq:
 ; LA64:       # %bb.0:
@@ -18,7 +19,7 @@ define double @select_eq(i32 signext %a, i32 signext %b, double %x, double %y) {
 ; LA64-NEXT:    sltui $a0, $a0, 1
 ; LA64-NEXT:    movgr2cf $fcc0, $a0
 ; LA64-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cond = icmp eq i32 %a, %b
   %res = select i1 %cond, double %x, double %y
   ret double %res
@@ -31,7 +32,7 @@ define double @select_ne(i32 signext %a, i32 signext %b, double %x, double %y) {
 ; LA32-NEXT:    sltu $a0, $zero, $a0
 ; LA32-NEXT:    movgr2cf $fcc0, $a0
 ; LA32-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: select_ne:
 ; LA64:       # %bb.0:
@@ -39,7 +40,7 @@ define double @select_ne(i32 signext %a, i32 signext %b, double %x, double %y) {
 ; LA64-NEXT:    sltu $a0, $zero, $a0
 ; LA64-NEXT:    movgr2cf $fcc0, $a0
 ; LA64-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cond = icmp ne i32 %a, %b
   %res = select i1 %cond, double %x, double %y
   ret double %res
@@ -51,14 +52,14 @@ define double @select_ugt(i32 signext %a, i32 signext %b, double %x, double %y)
 ; LA32-NEXT:    sltu $a0, $a1, $a0
 ; LA32-NEXT:    movgr2cf $fcc0, $a0
 ; LA32-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: select_ugt:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    sltu $a0, $a1, $a0
 ; LA64-NEXT:    movgr2cf $fcc0, $a0
 ; LA64-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cond = icmp ugt i32 %a, %b
   %res = select i1 %cond, double %x, double %y
   ret double %res
@@ -71,7 +72,7 @@ define double @select_uge(i32 signext %a, i32 signext %b, double %x, double %y)
 ; LA32-NEXT:    xori $a0, $a0, 1
 ; LA32-NEXT:    movgr2cf $fcc0, $a0
 ; LA32-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: select_uge:
 ; LA64:       # %bb.0:
@@ -79,7 +80,7 @@ define double @select_uge(i32 signext %a, i32 signext %b, double %x, double %y)
 ; LA64-NEXT:    xori $a0, $a0, 1
 ; LA64-NEXT:    movgr2cf $fcc0, $a0
 ; LA64-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cond = icmp uge i32 %a, %b
   %res = select i1 %cond, double %x, double %y
   ret double %res
@@ -91,14 +92,14 @@ define double @select_ult(i32 signext %a, i32 signext %b, double %x, double %y)
 ; LA32-NEXT:    sltu $a0, $a0, $a1
 ; LA32-NEXT:    movgr2cf $fcc0, $a0
 ; LA32-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: select_ult:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    sltu $a0, $a0, $a1
 ; LA64-NEXT:    movgr2cf $fcc0, $a0
 ; LA64-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cond = icmp ult i32 %a, %b
   %res = select i1 %cond, double %x, double %y
   ret double %res
@@ -111,7 +112,7 @@ define double @select_ule(i32 signext %a, i32 signext %b, double %x, double %y)
 ; LA32-NEXT:    xori $a0, $a0, 1
 ; LA32-NEXT:    movgr2cf $fcc0, $a0
 ; LA32-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: select_ule:
 ; LA64:       # %bb.0:
@@ -119,7 +120,7 @@ define double @select_ule(i32 signext %a, i32 signext %b, double %x, double %y)
 ; LA64-NEXT:    xori $a0, $a0, 1
 ; LA64-NEXT:    movgr2cf $fcc0, $a0
 ; LA64-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cond = icmp ule i32 %a, %b
   %res = select i1 %cond, double %x, double %y
   ret double %res
@@ -131,14 +132,14 @@ define double @select_sgt(i32 signext %a, i32 signext %b, double %x, double %y)
 ; LA32-NEXT:    slt $a0, $a1, $a0
 ; LA32-NEXT:    movgr2cf $fcc0, $a0
 ; LA32-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: select_sgt:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    slt $a0, $a1, $a0
 ; LA64-NEXT:    movgr2cf $fcc0, $a0
 ; LA64-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cond = icmp sgt i32 %a, %b
   %res = select i1 %cond, double %x, double %y
   ret double %res
@@ -151,7 +152,7 @@ define double @select_sge(i32 signext %a, i32 signext %b, double %x, double %y)
 ; LA32-NEXT:    xori $a0, $a0, 1
 ; LA32-NEXT:    movgr2cf $fcc0, $a0
 ; LA32-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: select_sge:
 ; LA64:       # %bb.0:
@@ -159,7 +160,7 @@ define double @select_sge(i32 signext %a, i32 signext %b, double %x, double %y)
 ; LA64-NEXT:    xori $a0, $a0, 1
 ; LA64-NEXT:    movgr2cf $fcc0, $a0
 ; LA64-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cond = icmp sge i32 %a, %b
   %res = select i1 %cond, double %x, double %y
   ret double %res
@@ -171,14 +172,14 @@ define double @select_slt(i32 signext %a, i32 signext %b, double %x, double %y)
 ; LA32-NEXT:    slt $a0, $a0, $a1
 ; LA32-NEXT:    movgr2cf $fcc0, $a0
 ; LA32-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: select_slt:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    slt $a0, $a0, $a1
 ; LA64-NEXT:    movgr2cf $fcc0, $a0
 ; LA64-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cond = icmp slt i32 %a, %b
   %res = select i1 %cond, double %x, double %y
   ret double %res
@@ -191,7 +192,7 @@ define double @select_sle(i32 signext %a, i32 signext %b, double %x, double %y)
 ; LA32-NEXT:    xori $a0, $a0, 1
 ; LA32-NEXT:    movgr2cf $fcc0, $a0
 ; LA32-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: select_sle:
 ; LA64:       # %bb.0:
@@ -199,7 +200,7 @@ define double @select_sle(i32 signext %a, i32 signext %b, double %x, double %y)
 ; LA64-NEXT:    xori $a0, $a0, 1
 ; LA64-NEXT:    movgr2cf $fcc0, $a0
 ; LA64-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cond = icmp sle i32 %a, %b
   %res = select i1 %cond, double %x, double %y
   ret double %res

diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/select-icc-flt.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/select-icc-flt.ll
index 98b999776e3f6..8870e78edf6e3 100644
--- a/llvm/test/CodeGen/LoongArch/ir-instruction/select-icc-flt.ll
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/select-icc-flt.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc --mtriple=loongarch32 --mattr=+f,-d < %s | FileCheck %s --check-prefix=LA32
 ; RUN: llc --mtriple=loongarch64 --mattr=+f,-d < %s | FileCheck %s --check-prefix=LA64
 
@@ -10,7 +11,7 @@ define float @select_eq(i32 signext %a, i32 signext %b, float %x, float %y) {
 ; LA32-NEXT:    sltui $a0, $a0, 1
 ; LA32-NEXT:    movgr2cf $fcc0, $a0
 ; LA32-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: select_eq:
 ; LA64:       # %bb.0:
@@ -18,7 +19,7 @@ define float @select_eq(i32 signext %a, i32 signext %b, float %x, float %y) {
 ; LA64-NEXT:    sltui $a0, $a0, 1
 ; LA64-NEXT:    movgr2cf $fcc0, $a0
 ; LA64-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cond = icmp eq i32 %a, %b
   %res = select i1 %cond, float %x, float %y
   ret float %res
@@ -31,7 +32,7 @@ define float @select_ne(i32 signext %a, i32 signext %b, float %x, float %y) {
 ; LA32-NEXT:    sltu $a0, $zero, $a0
 ; LA32-NEXT:    movgr2cf $fcc0, $a0
 ; LA32-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: select_ne:
 ; LA64:       # %bb.0:
@@ -39,7 +40,7 @@ define float @select_ne(i32 signext %a, i32 signext %b, float %x, float %y) {
 ; LA64-NEXT:    sltu $a0, $zero, $a0
 ; LA64-NEXT:    movgr2cf $fcc0, $a0
 ; LA64-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cond = icmp ne i32 %a, %b
   %res = select i1 %cond, float %x, float %y
   ret float %res
@@ -51,14 +52,14 @@ define float @select_ugt(i32 signext %a, i32 signext %b, float %x, float %y) {
 ; LA32-NEXT:    sltu $a0, $a1, $a0
 ; LA32-NEXT:    movgr2cf $fcc0, $a0
 ; LA32-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: select_ugt:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    sltu $a0, $a1, $a0
 ; LA64-NEXT:    movgr2cf $fcc0, $a0
 ; LA64-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cond = icmp ugt i32 %a, %b
   %res = select i1 %cond, float %x, float %y
   ret float %res
@@ -71,7 +72,7 @@ define float @select_uge(i32 signext %a, i32 signext %b, float %x, float %y) {
 ; LA32-NEXT:    xori $a0, $a0, 1
 ; LA32-NEXT:    movgr2cf $fcc0, $a0
 ; LA32-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: select_uge:
 ; LA64:       # %bb.0:
@@ -79,7 +80,7 @@ define float @select_uge(i32 signext %a, i32 signext %b, float %x, float %y) {
 ; LA64-NEXT:    xori $a0, $a0, 1
 ; LA64-NEXT:    movgr2cf $fcc0, $a0
 ; LA64-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cond = icmp uge i32 %a, %b
   %res = select i1 %cond, float %x, float %y
   ret float %res
@@ -91,14 +92,14 @@ define float @select_ult(i32 signext %a, i32 signext %b, float %x, float %y) {
 ; LA32-NEXT:    sltu $a0, $a0, $a1
 ; LA32-NEXT:    movgr2cf $fcc0, $a0
 ; LA32-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: select_ult:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    sltu $a0, $a0, $a1
 ; LA64-NEXT:    movgr2cf $fcc0, $a0
 ; LA64-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cond = icmp ult i32 %a, %b
   %res = select i1 %cond, float %x, float %y
   ret float %res
@@ -111,7 +112,7 @@ define float @select_ule(i32 signext %a, i32 signext %b, float %x, float %y) {
 ; LA32-NEXT:    xori $a0, $a0, 1
 ; LA32-NEXT:    movgr2cf $fcc0, $a0
 ; LA32-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: select_ule:
 ; LA64:       # %bb.0:
@@ -119,7 +120,7 @@ define float @select_ule(i32 signext %a, i32 signext %b, float %x, float %y) {
 ; LA64-NEXT:    xori $a0, $a0, 1
 ; LA64-NEXT:    movgr2cf $fcc0, $a0
 ; LA64-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cond = icmp ule i32 %a, %b
   %res = select i1 %cond, float %x, float %y
   ret float %res
@@ -131,14 +132,14 @@ define float @select_sgt(i32 signext %a, i32 signext %b, float %x, float %y) {
 ; LA32-NEXT:    slt $a0, $a1, $a0
 ; LA32-NEXT:    movgr2cf $fcc0, $a0
 ; LA32-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: select_sgt:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    slt $a0, $a1, $a0
 ; LA64-NEXT:    movgr2cf $fcc0, $a0
 ; LA64-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cond = icmp sgt i32 %a, %b
   %res = select i1 %cond, float %x, float %y
   ret float %res
@@ -151,7 +152,7 @@ define float @select_sge(i32 signext %a, i32 signext %b, float %x, float %y) {
 ; LA32-NEXT:    xori $a0, $a0, 1
 ; LA32-NEXT:    movgr2cf $fcc0, $a0
 ; LA32-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: select_sge:
 ; LA64:       # %bb.0:
@@ -159,7 +160,7 @@ define float @select_sge(i32 signext %a, i32 signext %b, float %x, float %y) {
 ; LA64-NEXT:    xori $a0, $a0, 1
 ; LA64-NEXT:    movgr2cf $fcc0, $a0
 ; LA64-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cond = icmp sge i32 %a, %b
   %res = select i1 %cond, float %x, float %y
   ret float %res
@@ -171,14 +172,14 @@ define float @select_slt(i32 signext %a, i32 signext %b, float %x, float %y) {
 ; LA32-NEXT:    slt $a0, $a0, $a1
 ; LA32-NEXT:    movgr2cf $fcc0, $a0
 ; LA32-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: select_slt:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    slt $a0, $a0, $a1
 ; LA64-NEXT:    movgr2cf $fcc0, $a0
 ; LA64-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cond = icmp slt i32 %a, %b
   %res = select i1 %cond, float %x, float %y
   ret float %res
@@ -191,7 +192,7 @@ define float @select_sle(i32 signext %a, i32 signext %b, float %x, float %y) {
 ; LA32-NEXT:    xori $a0, $a0, 1
 ; LA32-NEXT:    movgr2cf $fcc0, $a0
 ; LA32-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: select_sle:
 ; LA64:       # %bb.0:
@@ -199,7 +200,7 @@ define float @select_sle(i32 signext %a, i32 signext %b, float %x, float %y) {
 ; LA64-NEXT:    xori $a0, $a0, 1
 ; LA64-NEXT:    movgr2cf $fcc0, $a0
 ; LA64-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cond = icmp sle i32 %a, %b
   %res = select i1 %cond, float %x, float %y
   ret float %res

diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/select-icc-int.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/select-icc-int.ll
index 3b7c2adfb868b..0acf31f8bb1ab 100644
--- a/llvm/test/CodeGen/LoongArch/ir-instruction/select-icc-int.ll
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/select-icc-int.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc --mtriple=loongarch32 < %s | FileCheck %s --check-prefix=LA32
 ; RUN: llc --mtriple=loongarch64 < %s | FileCheck %s --check-prefix=LA64
 
@@ -11,7 +12,7 @@ define i32 @select_eq(i32 signext %a, i32 signext %b, i32 %x, i32 %y) {
 ; LA32-NEXT:    masknez $a1, $a3, $a0
 ; LA32-NEXT:    maskeqz $a0, $a2, $a0
 ; LA32-NEXT:    or $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: select_eq:
 ; LA64:       # %bb.0:
@@ -20,7 +21,7 @@ define i32 @select_eq(i32 signext %a, i32 signext %b, i32 %x, i32 %y) {
 ; LA64-NEXT:    masknez $a1, $a3, $a0
 ; LA64-NEXT:    maskeqz $a0, $a2, $a0
 ; LA64-NEXT:    or $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cond = icmp eq i32 %a, %b
   %res = select i1 %cond, i32 %x, i32 %y
   ret i32 %res
@@ -34,7 +35,7 @@ define i32 @select_ne(i32 signext %a, i32 signext %b, i32 %x, i32 %y) {
 ; LA32-NEXT:    masknez $a1, $a3, $a0
 ; LA32-NEXT:    maskeqz $a0, $a2, $a0
 ; LA32-NEXT:    or $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: select_ne:
 ; LA64:       # %bb.0:
@@ -43,7 +44,7 @@ define i32 @select_ne(i32 signext %a, i32 signext %b, i32 %x, i32 %y) {
 ; LA64-NEXT:    masknez $a1, $a3, $a0
 ; LA64-NEXT:    maskeqz $a0, $a2, $a0
 ; LA64-NEXT:    or $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cond = icmp ne i32 %a, %b
   %res = select i1 %cond, i32 %x, i32 %y
   ret i32 %res
@@ -56,7 +57,7 @@ define i32 @select_ugt(i32 signext %a, i32 signext %b, i32 %x, i32 %y) {
 ; LA32-NEXT:    masknez $a1, $a3, $a0
 ; LA32-NEXT:    maskeqz $a0, $a2, $a0
 ; LA32-NEXT:    or $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: select_ugt:
 ; LA64:       # %bb.0:
@@ -64,7 +65,7 @@ define i32 @select_ugt(i32 signext %a, i32 signext %b, i32 %x, i32 %y) {
 ; LA64-NEXT:    masknez $a1, $a3, $a0
 ; LA64-NEXT:    maskeqz $a0, $a2, $a0
 ; LA64-NEXT:    or $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cond = icmp ugt i32 %a, %b
   %res = select i1 %cond, i32 %x, i32 %y
   ret i32 %res
@@ -78,7 +79,7 @@ define i32 @select_uge(i32 signext %a, i32 signext %b, i32 %x, i32 %y) {
 ; LA32-NEXT:    masknez $a1, $a3, $a0
 ; LA32-NEXT:    maskeqz $a0, $a2, $a0
 ; LA32-NEXT:    or $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: select_uge:
 ; LA64:       # %bb.0:
@@ -87,7 +88,7 @@ define i32 @select_uge(i32 signext %a, i32 signext %b, i32 %x, i32 %y) {
 ; LA64-NEXT:    masknez $a1, $a3, $a0
 ; LA64-NEXT:    maskeqz $a0, $a2, $a0
 ; LA64-NEXT:    or $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cond = icmp uge i32 %a, %b
   %res = select i1 %cond, i32 %x, i32 %y
   ret i32 %res
@@ -100,7 +101,7 @@ define i32 @select_ult(i32 signext %a, i32 signext %b, i32 %x, i32 %y) {
 ; LA32-NEXT:    masknez $a1, $a3, $a0
 ; LA32-NEXT:    maskeqz $a0, $a2, $a0
 ; LA32-NEXT:    or $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: select_ult:
 ; LA64:       # %bb.0:
@@ -108,7 +109,7 @@ define i32 @select_ult(i32 signext %a, i32 signext %b, i32 %x, i32 %y) {
 ; LA64-NEXT:    masknez $a1, $a3, $a0
 ; LA64-NEXT:    maskeqz $a0, $a2, $a0
 ; LA64-NEXT:    or $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cond = icmp ult i32 %a, %b
   %res = select i1 %cond, i32 %x, i32 %y
   ret i32 %res
@@ -122,7 +123,7 @@ define i32 @select_ule(i32 signext %a, i32 signext %b, i32 %x, i32 %y) {
 ; LA32-NEXT:    masknez $a1, $a3, $a0
 ; LA32-NEXT:    maskeqz $a0, $a2, $a0
 ; LA32-NEXT:    or $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: select_ule:
 ; LA64:       # %bb.0:
@@ -131,7 +132,7 @@ define i32 @select_ule(i32 signext %a, i32 signext %b, i32 %x, i32 %y) {
 ; LA64-NEXT:    masknez $a1, $a3, $a0
 ; LA64-NEXT:    maskeqz $a0, $a2, $a0
 ; LA64-NEXT:    or $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cond = icmp ule i32 %a, %b
   %res = select i1 %cond, i32 %x, i32 %y
   ret i32 %res
@@ -144,7 +145,7 @@ define i32 @select_sgt(i32 signext %a, i32 signext %b, i32 %x, i32 %y) {
 ; LA32-NEXT:    masknez $a1, $a3, $a0
 ; LA32-NEXT:    maskeqz $a0, $a2, $a0
 ; LA32-NEXT:    or $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: select_sgt:
 ; LA64:       # %bb.0:
@@ -152,7 +153,7 @@ define i32 @select_sgt(i32 signext %a, i32 signext %b, i32 %x, i32 %y) {
 ; LA64-NEXT:    masknez $a1, $a3, $a0
 ; LA64-NEXT:    maskeqz $a0, $a2, $a0
 ; LA64-NEXT:    or $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cond = icmp sgt i32 %a, %b
   %res = select i1 %cond, i32 %x, i32 %y
   ret i32 %res
@@ -166,7 +167,7 @@ define i32 @select_sge(i32 signext %a, i32 signext %b, i32 %x, i32 %y) {
 ; LA32-NEXT:    masknez $a1, $a3, $a0
 ; LA32-NEXT:    maskeqz $a0, $a2, $a0
 ; LA32-NEXT:    or $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: select_sge:
 ; LA64:       # %bb.0:
@@ -175,7 +176,7 @@ define i32 @select_sge(i32 signext %a, i32 signext %b, i32 %x, i32 %y) {
 ; LA64-NEXT:    masknez $a1, $a3, $a0
 ; LA64-NEXT:    maskeqz $a0, $a2, $a0
 ; LA64-NEXT:    or $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cond = icmp sge i32 %a, %b
   %res = select i1 %cond, i32 %x, i32 %y
   ret i32 %res
@@ -188,7 +189,7 @@ define i32 @select_slt(i32 signext %a, i32 signext %b, i32 %x, i32 %y) {
 ; LA32-NEXT:    masknez $a1, $a3, $a0
 ; LA32-NEXT:    maskeqz $a0, $a2, $a0
 ; LA32-NEXT:    or $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: select_slt:
 ; LA64:       # %bb.0:
@@ -196,7 +197,7 @@ define i32 @select_slt(i32 signext %a, i32 signext %b, i32 %x, i32 %y) {
 ; LA64-NEXT:    masknez $a1, $a3, $a0
 ; LA64-NEXT:    maskeqz $a0, $a2, $a0
 ; LA64-NEXT:    or $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cond = icmp slt i32 %a, %b
   %res = select i1 %cond, i32 %x, i32 %y
   ret i32 %res
@@ -210,7 +211,7 @@ define i32 @select_sle(i32 signext %a, i32 signext %b, i32 %x, i32 %y) {
 ; LA32-NEXT:    masknez $a1, $a3, $a0
 ; LA32-NEXT:    maskeqz $a0, $a2, $a0
 ; LA32-NEXT:    or $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: select_sle:
 ; LA64:       # %bb.0:
@@ -219,7 +220,7 @@ define i32 @select_sle(i32 signext %a, i32 signext %b, i32 %x, i32 %y) {
 ; LA64-NEXT:    masknez $a1, $a3, $a0
 ; LA64-NEXT:    maskeqz $a0, $a2, $a0
 ; LA64-NEXT:    or $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %cond = icmp sle i32 %a, %b
   %res = select i1 %cond, i32 %x, i32 %y
   ret i32 %res

diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/sext-zext-trunc.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/sext-zext-trunc.ll
index 911751bc65525..7053d53408961 100644
--- a/llvm/test/CodeGen/LoongArch/ir-instruction/sext-zext-trunc.ll
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/sext-zext-trunc.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc --mtriple=loongarch32 < %s | FileCheck %s --check-prefix=LA32
 ; RUN: llc --mtriple=loongarch64 < %s | FileCheck %s --check-prefix=LA64
 
@@ -8,13 +9,13 @@ define i8 @sext_i1_to_i8(i1 %a) {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    andi $a0, $a0, 1
 ; LA32-NEXT:    sub.w $a0, $zero, $a0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: sext_i1_to_i8:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    andi $a0, $a0, 1
 ; LA64-NEXT:    sub.d $a0, $zero, $a0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %1 = sext i1 %a to i8
   ret i8 %1
 }
@@ -24,13 +25,13 @@ define i16 @sext_i1_to_i16(i1 %a) {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    andi $a0, $a0, 1
 ; LA32-NEXT:    sub.w $a0, $zero, $a0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: sext_i1_to_i16:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    andi $a0, $a0, 1
 ; LA64-NEXT:    sub.d $a0, $zero, $a0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %1 = sext i1 %a to i16
   ret i16 %1
 }
@@ -40,13 +41,13 @@ define i32 @sext_i1_to_i32(i1 %a) {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    andi $a0, $a0, 1
 ; LA32-NEXT:    sub.w $a0, $zero, $a0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: sext_i1_to_i32:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    andi $a0, $a0, 1
 ; LA64-NEXT:    sub.d $a0, $zero, $a0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %1 = sext i1 %a to i32
   ret i32 %1
 }
@@ -57,13 +58,13 @@ define i64 @sext_i1_to_i64(i1 %a) {
 ; LA32-NEXT:    andi $a0, $a0, 1
 ; LA32-NEXT:    sub.w $a0, $zero, $a0
 ; LA32-NEXT:    move $a1, $a0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: sext_i1_to_i64:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    andi $a0, $a0, 1
 ; LA64-NEXT:    sub.d $a0, $zero, $a0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %1 = sext i1 %a to i64
   ret i64 %1
 }
@@ -72,12 +73,12 @@ define i16 @sext_i8_to_i16(i8 %a) {
 ; LA32-LABEL: sext_i8_to_i16:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    ext.w.b $a0, $a0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: sext_i8_to_i16:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    ext.w.b $a0, $a0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %1 = sext i8 %a to i16
   ret i16 %1
 }
@@ -86,12 +87,12 @@ define i32 @sext_i8_to_i32(i8 %a) {
 ; LA32-LABEL: sext_i8_to_i32:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    ext.w.b $a0, $a0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: sext_i8_to_i32:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    ext.w.b $a0, $a0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %1 = sext i8 %a to i32
   ret i32 %1
 }
@@ -101,12 +102,12 @@ define i64 @sext_i8_to_i64(i8 %a) {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    ext.w.b $a0, $a0
 ; LA32-NEXT:    srai.w $a1, $a0, 31
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: sext_i8_to_i64:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    ext.w.b $a0, $a0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %1 = sext i8 %a to i64
   ret i64 %1
 }
@@ -115,12 +116,12 @@ define i32 @sext_i16_to_i32(i16 %a) {
 ; LA32-LABEL: sext_i16_to_i32:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    ext.w.h $a0, $a0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: sext_i16_to_i32:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    ext.w.h $a0, $a0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %1 = sext i16 %a to i32
   ret i32 %1
 }
@@ -130,12 +131,12 @@ define i64 @sext_i16_to_i64(i16 %a) {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    ext.w.h $a0, $a0
 ; LA32-NEXT:    srai.w $a1, $a0, 31
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: sext_i16_to_i64:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    ext.w.h $a0, $a0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %1 = sext i16 %a to i64
   ret i64 %1
 }
@@ -144,12 +145,12 @@ define i64 @sext_i32_to_i64(i32 %a) {
 ; LA32-LABEL: sext_i32_to_i64:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    srai.w $a1, $a0, 31
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: sext_i32_to_i64:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    addi.w $a0, $a0, 0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %1 = sext i32 %a to i64
   ret i64 %1
 }
@@ -158,12 +159,12 @@ define i8 @zext_i1_to_i8(i1 %a) {
 ; LA32-LABEL: zext_i1_to_i8:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    andi $a0, $a0, 1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: zext_i1_to_i8:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    andi $a0, $a0, 1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %1 = zext i1 %a to i8
   ret i8 %1
 }
@@ -172,12 +173,12 @@ define i16 @zext_i1_to_i16(i1 %a) {
 ; LA32-LABEL: zext_i1_to_i16:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    andi $a0, $a0, 1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: zext_i1_to_i16:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    andi $a0, $a0, 1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %1 = zext i1 %a to i16
   ret i16 %1
 }
@@ -186,12 +187,12 @@ define i32 @zext_i1_to_i32(i1 %a) {
 ; LA32-LABEL: zext_i1_to_i32:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    andi $a0, $a0, 1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: zext_i1_to_i32:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    andi $a0, $a0, 1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %1 = zext i1 %a to i32
   ret i32 %1
 }
@@ -201,12 +202,12 @@ define i64 @zext_i1_to_i64(i1 %a) {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    andi $a0, $a0, 1
 ; LA32-NEXT:    move $a1, $zero
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: zext_i1_to_i64:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    andi $a0, $a0, 1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %1 = zext i1 %a to i64
   ret i64 %1
 }
@@ -215,12 +216,12 @@ define i16 @zext_i8_to_i16(i8 %a) {
 ; LA32-LABEL: zext_i8_to_i16:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    andi $a0, $a0, 255
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: zext_i8_to_i16:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    andi $a0, $a0, 255
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %1 = zext i8 %a to i16
   ret i16 %1
 }
@@ -229,12 +230,12 @@ define i32 @zext_i8_to_i32(i8 %a) {
 ; LA32-LABEL: zext_i8_to_i32:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    andi $a0, $a0, 255
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: zext_i8_to_i32:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    andi $a0, $a0, 255
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %1 = zext i8 %a to i32
   ret i32 %1
 }
@@ -244,12 +245,12 @@ define i64 @zext_i8_to_i64(i8 %a) {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    andi $a0, $a0, 255
 ; LA32-NEXT:    move $a1, $zero
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: zext_i8_to_i64:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    andi $a0, $a0, 255
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %1 = zext i8 %a to i64
   ret i64 %1
 }
@@ -258,12 +259,12 @@ define i32 @zext_i16_to_i32(i16 %a) {
 ; LA32-LABEL: zext_i16_to_i32:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    bstrpick.w $a0, $a0, 15, 0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: zext_i16_to_i32:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    bstrpick.d $a0, $a0, 15, 0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %1 = zext i16 %a to i32
   ret i32 %1
 }
@@ -273,12 +274,12 @@ define i64 @zext_i16_to_i64(i16 %a) {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    bstrpick.w $a0, $a0, 15, 0
 ; LA32-NEXT:    move $a1, $zero
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: zext_i16_to_i64:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    bstrpick.d $a0, $a0, 15, 0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %1 = zext i16 %a to i64
   ret i64 %1
 }
@@ -287,12 +288,12 @@ define i64 @zext_i32_to_i64(i32 %a) {
 ; LA32-LABEL: zext_i32_to_i64:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    move $a1, $zero
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: zext_i32_to_i64:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    bstrpick.d $a0, $a0, 31, 0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %1 = zext i32 %a to i64
   ret i64 %1
 }
@@ -300,11 +301,11 @@ define i64 @zext_i32_to_i64(i32 %a) {
 define i1 @trunc_i8_to_i1(i8 %a) {
 ; LA32-LABEL: trunc_i8_to_i1:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: trunc_i8_to_i1:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %1 = trunc i8 %a to i1
   ret i1 %1
 }
@@ -312,11 +313,11 @@ define i1 @trunc_i8_to_i1(i8 %a) {
 define i1 @trunc_i16_to_i1(i16 %a) {
 ; LA32-LABEL: trunc_i16_to_i1:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: trunc_i16_to_i1:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %1 = trunc i16 %a to i1
   ret i1 %1
 }
@@ -324,11 +325,11 @@ define i1 @trunc_i16_to_i1(i16 %a) {
 define i1 @trunc_i32_to_i1(i32 %a) {
 ; LA32-LABEL: trunc_i32_to_i1:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: trunc_i32_to_i1:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %1 = trunc i32 %a to i1
   ret i1 %1
 }
@@ -336,11 +337,11 @@ define i1 @trunc_i32_to_i1(i32 %a) {
 define i1 @trunc_i64_to_i1(i64 %a) {
 ; LA32-LABEL: trunc_i64_to_i1:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: trunc_i64_to_i1:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %1 = trunc i64 %a to i1
   ret i1 %1
 }
@@ -348,11 +349,11 @@ define i1 @trunc_i64_to_i1(i64 %a) {
 define i8 @trunc_i16_to_i8(i16 %a) {
 ; LA32-LABEL: trunc_i16_to_i8:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: trunc_i16_to_i8:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %1 = trunc i16 %a to i8
   ret i8 %1
 }
@@ -360,11 +361,11 @@ define i8 @trunc_i16_to_i8(i16 %a) {
 define i8 @trunc_i32_to_i8(i32 %a) {
 ; LA32-LABEL: trunc_i32_to_i8:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: trunc_i32_to_i8:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %1 = trunc i32 %a to i8
   ret i8 %1
 }
@@ -372,11 +373,11 @@ define i8 @trunc_i32_to_i8(i32 %a) {
 define i8 @trunc_i64_to_i8(i64 %a) {
 ; LA32-LABEL: trunc_i64_to_i8:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: trunc_i64_to_i8:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %1 = trunc i64 %a to i8
   ret i8 %1
 }
@@ -384,11 +385,11 @@ define i8 @trunc_i64_to_i8(i64 %a) {
 define i16 @trunc_i32_to_i16(i32 %a) {
 ; LA32-LABEL: trunc_i32_to_i16:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: trunc_i32_to_i16:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %1 = trunc i32 %a to i16
   ret i16 %1
 }
@@ -396,11 +397,11 @@ define i16 @trunc_i32_to_i16(i32 %a) {
 define i16 @trunc_i64_to_i16(i64 %a) {
 ; LA32-LABEL: trunc_i64_to_i16:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: trunc_i64_to_i16:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %1 = trunc i64 %a to i16
   ret i16 %1
 }
@@ -408,11 +409,11 @@ define i16 @trunc_i64_to_i16(i64 %a) {
 define i32 @trunc_i64_to_i32(i64 %a) {
 ; LA32-LABEL: trunc_i64_to_i32:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: trunc_i64_to_i32:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %1 = trunc i64 %a to i32
   ret i32 %1
 }

diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/shl.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/shl.ll
index de25040452b12..4baf18931dc57 100644
--- a/llvm/test/CodeGen/LoongArch/ir-instruction/shl.ll
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/shl.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc --mtriple=loongarch32 < %s | FileCheck %s --check-prefix=LA32
 ; RUN: llc --mtriple=loongarch64 < %s | FileCheck %s --check-prefix=LA64
 
@@ -6,11 +7,11 @@
 define i1 @shl_i1(i1 %x, i1 %y) {
 ; LA32-LABEL: shl_i1:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: shl_i1:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %shl = shl i1 %x, %y
   ret i1 %shl
 }
@@ -19,12 +20,12 @@ define i8 @shl_i8(i8 %x, i8 %y) {
 ; LA32-LABEL: shl_i8:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    sll.w $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: shl_i8:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    sll.d $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %shl = shl i8 %x, %y
   ret i8 %shl
 }
@@ -33,12 +34,12 @@ define i16 @shl_i16(i16 %x, i16 %y) {
 ; LA32-LABEL: shl_i16:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    sll.w $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: shl_i16:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    sll.d $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %shl = shl i16 %x, %y
   ret i16 %shl
 }
@@ -47,12 +48,12 @@ define i32 @shl_i32(i32 %x, i32 %y) {
 ; LA32-LABEL: shl_i32:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    sll.w $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: shl_i32:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    sll.w $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %shl = shl i32 %x, %y
   ret i32 %shl
 }
@@ -74,12 +75,12 @@ define i64 @shl_i64(i64 %x, i64 %y) {
 ; LA32-NEXT:    sll.w $a0, $a0, $a2
 ; LA32-NEXT:    srai.w $a2, $a3, 31
 ; LA32-NEXT:    and $a0, $a2, $a0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: shl_i64:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    sll.d $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %shl = shl i64 %x, %y
   ret i64 %shl
 }
@@ -87,11 +88,11 @@ define i64 @shl_i64(i64 %x, i64 %y) {
 define i1 @shl_i1_3(i1 %x) {
 ; LA32-LABEL: shl_i1_3:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: shl_i1_3:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %shl = shl i1 %x, 3
   ret i1 %shl
 }
@@ -100,12 +101,12 @@ define i8 @shl_i8_3(i8 %x) {
 ; LA32-LABEL: shl_i8_3:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    slli.w $a0, $a0, 3
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: shl_i8_3:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    slli.d $a0, $a0, 3
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %shl = shl i8 %x, 3
   ret i8 %shl
 }
@@ -114,12 +115,12 @@ define i16 @shl_i16_3(i16 %x) {
 ; LA32-LABEL: shl_i16_3:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    slli.w $a0, $a0, 3
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: shl_i16_3:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    slli.d $a0, $a0, 3
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %shl = shl i16 %x, 3
   ret i16 %shl
 }
@@ -128,12 +129,12 @@ define i32 @shl_i32_3(i32 %x) {
 ; LA32-LABEL: shl_i32_3:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    slli.w $a0, $a0, 3
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: shl_i32_3:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    slli.d $a0, $a0, 3
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %shl = shl i32 %x, 3
   ret i32 %shl
 }
@@ -145,12 +146,12 @@ define i64 @shl_i64_3(i64 %x) {
 ; LA32-NEXT:    srli.w $a2, $a0, 29
 ; LA32-NEXT:    or $a1, $a1, $a2
 ; LA32-NEXT:    slli.w $a0, $a0, 3
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: shl_i64_3:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    slli.d $a0, $a0, 3
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %shl = shl i64 %x, 3
   ret i64 %shl
 }

diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/sub.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/sub.ll
index dfa55c29ebaed..98357744f5219 100644
--- a/llvm/test/CodeGen/LoongArch/ir-instruction/sub.ll
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/sub.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc --mtriple=loongarch32 < %s | FileCheck %s --check-prefix=LA32
 ; RUN: llc --mtriple=loongarch64 < %s | FileCheck %s --check-prefix=LA64
 
@@ -7,12 +8,12 @@ define i1 @sub_i1(i1 %x, i1 %y) {
 ; LA32-LABEL: sub_i1:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    sub.w $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: sub_i1:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    sub.d $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %sub = sub i1 %x, %y
   ret i1 %sub
 }
@@ -21,12 +22,12 @@ define i8 @sub_i8(i8 %x, i8 %y) {
 ; LA32-LABEL: sub_i8:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    sub.w $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: sub_i8:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    sub.d $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %sub = sub i8 %x, %y
   ret i8 %sub
 }
@@ -35,12 +36,12 @@ define i16 @sub_i16(i16 %x, i16 %y) {
 ; LA32-LABEL: sub_i16:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    sub.w $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: sub_i16:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    sub.d $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %sub = sub i16 %x, %y
   ret i16 %sub
 }
@@ -49,12 +50,12 @@ define i32 @sub_i32(i32 %x, i32 %y) {
 ; LA32-LABEL: sub_i32:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    sub.w $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: sub_i32:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    sub.d $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %sub = sub i32 %x, %y
   ret i32 %sub
 }
@@ -65,12 +66,12 @@ define signext i32 @sub_i32_sext(i32 %x, i32 %y) {
 ; LA32-LABEL: sub_i32_sext:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    sub.w $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: sub_i32_sext:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    sub.w $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %sub = sub i32 %x, %y
   ret i32 %sub
 }
@@ -82,12 +83,12 @@ define i64 @sub_i64(i64 %x, i64 %y) {
 ; LA32-NEXT:    sltu $a3, $a0, $a2
 ; LA32-NEXT:    sub.w $a1, $a1, $a3
 ; LA32-NEXT:    sub.w $a0, $a0, $a2
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: sub_i64:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    sub.d $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %sub = sub i64 %x, %y
   ret i64 %sub
 }

diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/xor.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/xor.ll
index 2f85e645c04f7..373c9cf4b64e9 100644
--- a/llvm/test/CodeGen/LoongArch/ir-instruction/xor.ll
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/xor.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc --mtriple=loongarch32 < %s | FileCheck %s --check-prefix=LA32
 ; RUN: llc --mtriple=loongarch64 < %s | FileCheck %s --check-prefix=LA64
 
@@ -7,12 +8,12 @@ define i1 @xor_i1(i1 %a, i1 %b) {
 ; LA32-LABEL: xor_i1:
 ; LA32:       # %bb.0: # %entry
 ; LA32-NEXT:    xor $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: xor_i1:
 ; LA64:       # %bb.0: # %entry
 ; LA64-NEXT:    xor $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
 entry:
   %r = xor i1 %a, %b
   ret i1 %r
@@ -22,12 +23,12 @@ define i8 @xor_i8(i8 %a, i8 %b) {
 ; LA32-LABEL: xor_i8:
 ; LA32:       # %bb.0: # %entry
 ; LA32-NEXT:    xor $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: xor_i8:
 ; LA64:       # %bb.0: # %entry
 ; LA64-NEXT:    xor $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
 entry:
   %r = xor i8 %a, %b
   ret i8 %r
@@ -37,12 +38,12 @@ define i16 @xor_i16(i16 %a, i16 %b) {
 ; LA32-LABEL: xor_i16:
 ; LA32:       # %bb.0: # %entry
 ; LA32-NEXT:    xor $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: xor_i16:
 ; LA64:       # %bb.0: # %entry
 ; LA64-NEXT:    xor $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
 entry:
   %r = xor i16 %a, %b
   ret i16 %r
@@ -52,12 +53,12 @@ define i32 @xor_i32(i32 %a, i32 %b) {
 ; LA32-LABEL: xor_i32:
 ; LA32:       # %bb.0: # %entry
 ; LA32-NEXT:    xor $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: xor_i32:
 ; LA64:       # %bb.0: # %entry
 ; LA64-NEXT:    xor $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
 entry:
   %r = xor i32 %a, %b
   ret i32 %r
@@ -68,12 +69,12 @@ define i64 @xor_i64(i64 %a, i64 %b) {
 ; LA32:       # %bb.0: # %entry
 ; LA32-NEXT:    xor $a0, $a0, $a2
 ; LA32-NEXT:    xor $a1, $a1, $a3
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: xor_i64:
 ; LA64:       # %bb.0: # %entry
 ; LA64-NEXT:    xor $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
 entry:
   %r = xor i64 %a, %b
   ret i64 %r
@@ -82,11 +83,11 @@ entry:
 define i1 @xor_i1_0(i1 %b) {
 ; LA32-LABEL: xor_i1_0:
 ; LA32:       # %bb.0: # %entry
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: xor_i1_0:
 ; LA64:       # %bb.0: # %entry
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
 entry:
   %r = xor i1 4, %b
   ret i1 %r
@@ -96,12 +97,12 @@ define i1 @xor_i1_5(i1 %b) {
 ; LA32-LABEL: xor_i1_5:
 ; LA32:       # %bb.0: # %entry
 ; LA32-NEXT:    xori $a0, $a0, 1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: xor_i1_5:
 ; LA64:       # %bb.0: # %entry
 ; LA64-NEXT:    xori $a0, $a0, 1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
 entry:
   %r = xor i1 5, %b
   ret i1 %r
@@ -111,12 +112,12 @@ define i8 @xor_i8_5(i8 %b) {
 ; LA32-LABEL: xor_i8_5:
 ; LA32:       # %bb.0: # %entry
 ; LA32-NEXT:    xori $a0, $a0, 5
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: xor_i8_5:
 ; LA64:       # %bb.0: # %entry
 ; LA64-NEXT:    xori $a0, $a0, 5
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
 entry:
   %r = xor i8 5, %b
   ret i8 %r
@@ -126,12 +127,12 @@ define i8 @xor_i8_257(i8 %b) {
 ; LA32-LABEL: xor_i8_257:
 ; LA32:       # %bb.0: # %entry
 ; LA32-NEXT:    xori $a0, $a0, 1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: xor_i8_257:
 ; LA64:       # %bb.0: # %entry
 ; LA64-NEXT:    xori $a0, $a0, 1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
 entry:
   %r = xor i8 257, %b
   ret i8 %r
@@ -141,12 +142,12 @@ define i16 @xor_i16_5(i16 %b) {
 ; LA32-LABEL: xor_i16_5:
 ; LA32:       # %bb.0: # %entry
 ; LA32-NEXT:    xori $a0, $a0, 5
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: xor_i16_5:
 ; LA64:       # %bb.0: # %entry
 ; LA64-NEXT:    xori $a0, $a0, 5
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
 entry:
   %r = xor i16 5, %b
   ret i16 %r
@@ -157,13 +158,13 @@ define i16 @xor_i16_0x1000(i16 %b) {
 ; LA32:       # %bb.0: # %entry
 ; LA32-NEXT:    lu12i.w $a1, 1
 ; LA32-NEXT:    xor $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: xor_i16_0x1000:
 ; LA64:       # %bb.0: # %entry
 ; LA64-NEXT:    lu12i.w $a1, 1
 ; LA64-NEXT:    xor $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
 entry:
   %r = xor i16 4096, %b
   ret i16 %r
@@ -173,12 +174,12 @@ define i16 @xor_i16_0x10001(i16 %b) {
 ; LA32-LABEL: xor_i16_0x10001:
 ; LA32:       # %bb.0: # %entry
 ; LA32-NEXT:    xori $a0, $a0, 1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: xor_i16_0x10001:
 ; LA64:       # %bb.0: # %entry
 ; LA64-NEXT:    xori $a0, $a0, 1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
 entry:
   %r = xor i16 65537, %b
   ret i16 %r
@@ -188,12 +189,12 @@ define i32 @xor_i32_5(i32 %b) {
 ; LA32-LABEL: xor_i32_5:
 ; LA32:       # %bb.0: # %entry
 ; LA32-NEXT:    xori $a0, $a0, 5
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: xor_i32_5:
 ; LA64:       # %bb.0: # %entry
 ; LA64-NEXT:    xori $a0, $a0, 5
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
 entry:
   %r = xor i32 5, %b
   ret i32 %r
@@ -204,13 +205,13 @@ define i32 @xor_i32_0x1000(i32 %b) {
 ; LA32:       # %bb.0: # %entry
 ; LA32-NEXT:    lu12i.w $a1, 1
 ; LA32-NEXT:    xor $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: xor_i32_0x1000:
 ; LA64:       # %bb.0: # %entry
 ; LA64-NEXT:    lu12i.w $a1, 1
 ; LA64-NEXT:    xor $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
 entry:
   %r = xor i32 4096, %b
   ret i32 %r
@@ -220,12 +221,12 @@ define i32 @xor_i32_0x100000001(i32 %b) {
 ; LA32-LABEL: xor_i32_0x100000001:
 ; LA32:       # %bb.0: # %entry
 ; LA32-NEXT:    xori $a0, $a0, 1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: xor_i32_0x100000001:
 ; LA64:       # %bb.0: # %entry
 ; LA64-NEXT:    xori $a0, $a0, 1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
 entry:
   %r = xor i32 4294967297, %b
   ret i32 %r
@@ -235,12 +236,12 @@ define i64 @xor_i64_5(i64 %b) {
 ; LA32-LABEL: xor_i64_5:
 ; LA32:       # %bb.0: # %entry
 ; LA32-NEXT:    xori $a0, $a0, 5
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: xor_i64_5:
 ; LA64:       # %bb.0: # %entry
 ; LA64-NEXT:    xori $a0, $a0, 5
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
 entry:
   %r = xor i64 5, %b
   ret i64 %r
@@ -251,13 +252,13 @@ define i64 @xor_i64_0x1000(i64 %b) {
 ; LA32:       # %bb.0: # %entry
 ; LA32-NEXT:    lu12i.w $a2, 1
 ; LA32-NEXT:    xor $a0, $a0, $a2
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: xor_i64_0x1000:
 ; LA64:       # %bb.0: # %entry
 ; LA64-NEXT:    lu12i.w $a1, 1
 ; LA64-NEXT:    xor $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
 entry:
   %r = xor i64 4096, %b
   ret i64 %r

diff --git a/llvm/test/CodeGen/LoongArch/not.ll b/llvm/test/CodeGen/LoongArch/not.ll
index 932ec39e51997..b9e02bdf111d9 100644
--- a/llvm/test/CodeGen/LoongArch/not.ll
+++ b/llvm/test/CodeGen/LoongArch/not.ll
@@ -6,12 +6,12 @@ define i8 @nor_i8(i8 %a, i8 %b) nounwind {
 ; LA32-LABEL: nor_i8:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    nor $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: nor_i8:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    nor $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %or = or i8 %a, %b
   %neg = xor i8 %or, -1
   ret i8 %neg
@@ -21,12 +21,12 @@ define i16 @nor_i16(i16 %a, i16 %b) nounwind {
 ; LA32-LABEL: nor_i16:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    nor $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: nor_i16:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    nor $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %or = or i16 %a, %b
   %neg = xor i16 %or, -1
   ret i16 %neg
@@ -36,12 +36,12 @@ define i32 @nor_i32(i32 %a, i32 %b) nounwind {
 ; LA32-LABEL: nor_i32:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    nor $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: nor_i32:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    nor $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %or = or i32 %a, %b
   %neg = xor i32 %or, -1
   ret i32 %neg
@@ -52,12 +52,12 @@ define i64 @nor_i64(i64 %a, i64 %b) nounwind {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    nor $a0, $a0, $a2
 ; LA32-NEXT:    nor $a1, $a1, $a3
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: nor_i64:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    nor $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %or = or i64 %a, %b
   %neg = xor i64 %or, -1
   ret i64 %neg
@@ -67,12 +67,12 @@ define i8 @nor_zero_i8(i8 %a) nounwind {
 ; LA32-LABEL: nor_zero_i8:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    nor $a0, $a0, $zero
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: nor_zero_i8:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    nor $a0, $a0, $zero
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %neg = xor i8 %a, -1
   ret i8 %neg
 }
@@ -81,12 +81,12 @@ define i16 @nor_zero_i16(i16 %a) nounwind {
 ; LA32-LABEL: nor_zero_i16:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    nor $a0, $a0, $zero
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: nor_zero_i16:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    nor $a0, $a0, $zero
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %neg = xor i16 %a, -1
   ret i16 %neg
 }
@@ -95,12 +95,12 @@ define i32 @nor_zero_i32(i32 %a) nounwind {
 ; LA32-LABEL: nor_zero_i32:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    nor $a0, $a0, $zero
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: nor_zero_i32:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    nor $a0, $a0, $zero
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %neg = xor i32 %a, -1
   ret i32 %neg
 }
@@ -110,12 +110,12 @@ define i64 @nor_zero_i64(i64 %a) nounwind {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    nor $a0, $a0, $zero
 ; LA32-NEXT:    nor $a1, $a1, $zero
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: nor_zero_i64:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    nor $a0, $a0, $zero
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %neg = xor i64 %a, -1
   ret i64 %neg
 }
@@ -124,12 +124,12 @@ define i8 @orn_i8(i8 %a, i8 %b) nounwind {
 ; LA32-LABEL: orn_i8:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    orn $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: orn_i8:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    orn $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %neg = xor i8 %b, -1
   %or = or i8 %neg, %a
   ret i8 %or
@@ -139,12 +139,12 @@ define i16 @orn_i16(i16 %a, i16 %b) nounwind {
 ; LA32-LABEL: orn_i16:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    orn $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: orn_i16:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    orn $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %neg = xor i16 %b, -1
   %or = or i16 %neg, %a
   ret i16 %or
@@ -154,12 +154,12 @@ define i32 @orn_i32(i32 %a, i32 %b) nounwind {
 ; LA32-LABEL: orn_i32:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    orn $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: orn_i32:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    orn $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %neg = xor i32 %b, -1
   %or = or i32 %neg, %a
   ret i32 %or
@@ -170,12 +170,12 @@ define i64 @orn_i64(i64 %a, i64 %b) nounwind {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    orn $a0, $a0, $a2
 ; LA32-NEXT:    orn $a1, $a1, $a3
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: orn_i64:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    orn $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %neg = xor i64 %b, -1
   %or = or i64 %neg, %a
   ret i64 %or
@@ -185,12 +185,12 @@ define i8 @andn_i8(i8 %a, i8 %b) nounwind {
 ; LA32-LABEL: andn_i8:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    andn $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: andn_i8:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    andn $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %neg = xor i8 %b, -1
   %and = and i8 %neg, %a
   ret i8 %and
@@ -200,12 +200,12 @@ define i16 @andn_i16(i16 %a, i16 %b) nounwind {
 ; LA32-LABEL: andn_i16:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    andn $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: andn_i16:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    andn $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %neg = xor i16 %b, -1
   %and = and i16 %neg, %a
   ret i16 %and
@@ -215,12 +215,12 @@ define i32 @andn_i32(i32 %a, i32 %b) nounwind {
 ; LA32-LABEL: andn_i32:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    andn $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: andn_i32:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    andn $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %neg = xor i32 %b, -1
   %and = and i32 %neg, %a
   ret i32 %and
@@ -231,12 +231,12 @@ define i64 @andn_i64(i64 %a, i64 %b) nounwind {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    andn $a0, $a0, $a2
 ; LA32-NEXT:    andn $a1, $a1, $a3
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: andn_i64:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    andn $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %neg = xor i64 %b, -1
   %and = and i64 %neg, %a
   ret i64 %and

diff --git a/llvm/test/CodeGen/LoongArch/rotl-rotr.ll b/llvm/test/CodeGen/LoongArch/rotl-rotr.ll
index 67559b134cbd2..f54a47ae847af 100644
--- a/llvm/test/CodeGen/LoongArch/rotl-rotr.ll
+++ b/llvm/test/CodeGen/LoongArch/rotl-rotr.ll
@@ -10,7 +10,7 @@ define i32 @rotl_32(i32 %x, i32 %y) nounwind {
 ; LA32-NEXT:    ori $a2, $zero, 32
 ; LA32-NEXT:    sub.w $a1, $a2, $a1
 ; LA32-NEXT:    rotr.w $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: rotl_32:
 ; LA64:       # %bb.0:
@@ -18,7 +18,7 @@ define i32 @rotl_32(i32 %x, i32 %y) nounwind {
 ; LA64-NEXT:    sll.w $a1, $a0, $a1
 ; LA64-NEXT:    srl.w $a0, $a0, $a2
 ; LA64-NEXT:    or $a0, $a1, $a0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %z = sub i32 32, %y
   %b = shl i32 %x, %y
   %c = lshr i32 %x, %z
@@ -30,12 +30,12 @@ define i32 @rotr_32(i32 %x, i32 %y) nounwind {
 ; LA32-LABEL: rotr_32:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    rotr.w $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: rotr_32:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    rotr.w $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %z = sub i32 32, %y
   %b = lshr i32 %x, %y
   %c = shl i32 %x, %z
@@ -79,14 +79,14 @@ define i64 @rotl_64(i64 %x, i64 %y) nounwind {
 ; LA32-NEXT:    srai.w $a2, $a4, 31
 ; LA32-NEXT:    and $a0, $a2, $a0
 ; LA32-NEXT:    or $a0, $a0, $a3
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: rotl_64:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    ori $a2, $zero, 64
 ; LA64-NEXT:    sub.d $a1, $a2, $a1
 ; LA64-NEXT:    rotr.d $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %z = sub i64 64, %y
   %b = shl i64 %x, %y
   %c = lshr i64 %x, %z
@@ -130,12 +130,12 @@ define i64 @rotr_64(i64 %x, i64 %y) nounwind {
 ; LA32-NEXT:    srai.w $a2, $a4, 31
 ; LA32-NEXT:    and $a1, $a2, $a1
 ; LA32-NEXT:    or $a1, $a1, $a3
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: rotr_64:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    rotr.d $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %z = sub i64 64, %y
   %b = lshr i64 %x, %y
   %c = shl i64 %x, %z
@@ -148,7 +148,7 @@ define i32 @rotl_32_mask(i32 %x, i32 %y) nounwind {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    sub.w $a1, $zero, $a1
 ; LA32-NEXT:    rotr.w $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: rotl_32_mask:
 ; LA64:       # %bb.0:
@@ -156,7 +156,7 @@ define i32 @rotl_32_mask(i32 %x, i32 %y) nounwind {
 ; LA64-NEXT:    sll.w $a1, $a0, $a1
 ; LA64-NEXT:    srl.w $a0, $a0, $a2
 ; LA64-NEXT:    or $a0, $a1, $a0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %z = sub i32 0, %y
   %and = and i32 %z, 31
   %b = shl i32 %x, %y
@@ -170,7 +170,7 @@ define i32 @rotl_32_mask_and_63_and_31(i32 %x, i32 %y) nounwind {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    sub.w $a1, $zero, $a1
 ; LA32-NEXT:    rotr.w $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: rotl_32_mask_and_63_and_31:
 ; LA64:       # %bb.0:
@@ -178,7 +178,7 @@ define i32 @rotl_32_mask_and_63_and_31(i32 %x, i32 %y) nounwind {
 ; LA64-NEXT:    sll.w $a1, $a0, $a1
 ; LA64-NEXT:    srl.w $a0, $a0, $a2
 ; LA64-NEXT:    or $a0, $a1, $a0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %a = and i32 %y, 63
   %b = shl i32 %x, %a
   %c = sub i32 0, %y
@@ -193,7 +193,7 @@ define i32 @rotl_32_mask_or_64_or_32(i32 %x, i32 %y) nounwind {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    sub.w $a1, $zero, $a1
 ; LA32-NEXT:    rotr.w $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: rotl_32_mask_or_64_or_32:
 ; LA64:       # %bb.0:
@@ -201,7 +201,7 @@ define i32 @rotl_32_mask_or_64_or_32(i32 %x, i32 %y) nounwind {
 ; LA64-NEXT:    sll.w $a1, $a0, $a1
 ; LA64-NEXT:    srl.w $a0, $a0, $a2
 ; LA64-NEXT:    or $a0, $a1, $a0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %a = or i32 %y, 64
   %b = shl i32 %x, %a
   %c = sub i32 0, %y
@@ -215,12 +215,12 @@ define i32 @rotr_32_mask(i32 %x, i32 %y) nounwind {
 ; LA32-LABEL: rotr_32_mask:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    rotr.w $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: rotr_32_mask:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    rotr.w $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %z = sub i32 0, %y
   %and = and i32 %z, 31
   %b = lshr i32 %x, %y
@@ -233,12 +233,12 @@ define i32 @rotr_32_mask_and_63_and_31(i32 %x, i32 %y) nounwind {
 ; LA32-LABEL: rotr_32_mask_and_63_and_31:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    rotr.w $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: rotr_32_mask_and_63_and_31:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    rotr.w $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %a = and i32 %y, 63
   %b = lshr i32 %x, %a
   %c = sub i32 0, %y
@@ -252,12 +252,12 @@ define i32 @rotr_32_mask_or_64_or_32(i32 %x, i32 %y) nounwind {
 ; LA32-LABEL: rotr_32_mask_or_64_or_32:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    rotr.w $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: rotr_32_mask_or_64_or_32:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    rotr.w $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %a = or i32 %y, 64
   %b = lshr i32 %x, %a
   %c = sub i32 0, %y
@@ -303,13 +303,13 @@ define i64 @rotl_64_mask(i64 %x, i64 %y) nounwind {
 ; LA32-NEXT:    and $a0, $a2, $a0
 ; LA32-NEXT:    or $a0, $a0, $a1
 ; LA32-NEXT:    move $a1, $a3
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: rotl_64_mask:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    sub.d $a1, $zero, $a1
 ; LA64-NEXT:    rotr.d $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %z = sub i64 0, %y
   %and = and i64 %z, 63
   %b = shl i64 %x, %y
@@ -355,13 +355,13 @@ define i64 @rotl_64_mask_and_127_and_63(i64 %x, i64 %y) nounwind {
 ; LA32-NEXT:    and $a0, $a2, $a0
 ; LA32-NEXT:    or $a0, $a0, $a1
 ; LA32-NEXT:    move $a1, $a3
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: rotl_64_mask_and_127_and_63:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    sub.d $a1, $zero, $a1
 ; LA64-NEXT:    rotr.d $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %a = and i64 %y, 127
   %b = shl i64 %x, %a
   %c = sub i64 0, %y
@@ -378,13 +378,13 @@ define i64 @rotl_64_mask_or_128_or_64(i64 %x, i64 %y) nounwind {
 ; LA32-NEXT:    sub.w $a0, $zero, $a2
 ; LA32-NEXT:    srl.w $a0, $a1, $a0
 ; LA32-NEXT:    move $a1, $a3
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: rotl_64_mask_or_128_or_64:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    sub.d $a1, $zero, $a1
 ; LA64-NEXT:    rotr.d $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %a = or i64 %y, 128
   %b = shl i64 %x, %a
   %c = sub i64 0, %y
@@ -430,12 +430,12 @@ define i64 @rotr_64_mask(i64 %x, i64 %y) nounwind {
 ; LA32-NEXT:    and $a1, $a2, $a1
 ; LA32-NEXT:    or $a1, $a1, $a0
 ; LA32-NEXT:    move $a0, $a3
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: rotr_64_mask:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    rotr.d $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %z = sub i64 0, %y
   %and = and i64 %z, 63
   %b = lshr i64 %x, %y
@@ -481,12 +481,12 @@ define i64 @rotr_64_mask_and_127_and_63(i64 %x, i64 %y) nounwind {
 ; LA32-NEXT:    and $a1, $a2, $a1
 ; LA32-NEXT:    or $a1, $a1, $a0
 ; LA32-NEXT:    move $a0, $a3
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: rotr_64_mask_and_127_and_63:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    rotr.d $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %a = and i64 %y, 127
   %b = lshr i64 %x, %a
   %c = sub i64 0, %y
@@ -503,12 +503,12 @@ define i64 @rotr_64_mask_or_128_or_64(i64 %x, i64 %y) nounwind {
 ; LA32-NEXT:    sub.w $a1, $zero, $a2
 ; LA32-NEXT:    sll.w $a1, $a0, $a1
 ; LA32-NEXT:    move $a0, $a3
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: rotr_64_mask_or_128_or_64:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    rotr.d $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %a = or i64 %y, 128
   %b = lshr i64 %x, %a
   %c = sub i64 0, %y
@@ -522,12 +522,12 @@ define i32 @rotri_i32(i32 %a) nounwind {
 ; LA32-LABEL: rotri_i32:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    rotri.w $a0, $a0, 16
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: rotri_i32:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    rotri.w $a0, $a0, 16
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %shl = shl i32 %a, 16
   %shr = lshr i32 %a, 16
   %or = or i32 %shl, %shr
@@ -540,12 +540,12 @@ define i64 @rotri_i64(i64 %a) nounwind {
 ; LA32-NEXT:    move $a2, $a0
 ; LA32-NEXT:    move $a0, $a1
 ; LA32-NEXT:    move $a1, $a2
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: rotri_i64:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    rotri.d $a0, $a0, 32
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %shl = shl i64 %a, 32
   %shr = lshr i64 %a, 32
   %or = or i64 %shl, %shr
@@ -561,12 +561,12 @@ define signext i32 @rotl_i32_fshl(i32 signext %a) nounwind {
 ; LA32-LABEL: rotl_i32_fshl:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    rotri.w $a0, $a0, 20
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: rotl_i32_fshl:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    rotri.w $a0, $a0, 20
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %or = tail call i32 @llvm.fshl.i32(i32 %a, i32 %a, i32 12)
   ret i32 %or
 }
@@ -581,12 +581,12 @@ define i64 @rotl_i64_fshl(i64 %a) nounwind {
 ; LA32-NEXT:    slli.w $a1, $a1, 12
 ; LA32-NEXT:    or $a1, $a1, $a0
 ; LA32-NEXT:    move $a0, $a2
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: rotl_i64_fshl:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    rotri.d $a0, $a0, 52
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %or = tail call i64 @llvm.fshl.i64(i64 %a, i64 %a, i64 12)
   ret i64 %or
 }
@@ -595,7 +595,7 @@ define signext i32 @rotr_i32_fshr(i32 signext %a) nounwind {
 ; LA32-LABEL: rotr_i32_fshr:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    rotri.w $a0, $a0, 12
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: rotr_i32_fshr:
 ; LA64:       # %bb.0:
@@ -603,7 +603,7 @@ define signext i32 @rotr_i32_fshr(i32 signext %a) nounwind {
 ; LA64-NEXT:    bstrpick.d $a0, $a0, 31, 12
 ; LA64-NEXT:    or $a0, $a0, $a1
 ; LA64-NEXT:    addi.w $a0, $a0, 0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %or = tail call i32 @llvm.fshr.i32(i32 %a, i32 %a, i32 12)
   ret i32 %or
 }
@@ -618,12 +618,12 @@ define i64 @rotr_i64_fshr(i64 %a) nounwind {
 ; LA32-NEXT:    slli.w $a0, $a0, 20
 ; LA32-NEXT:    or $a1, $a0, $a1
 ; LA32-NEXT:    move $a0, $a2
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: rotr_i64_fshr:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    rotri.d $a0, $a0, 12
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %or = tail call i64 @llvm.fshr.i64(i64 %a, i64 %a, i64 12)
   ret i64 %or
 }

diff --git a/llvm/test/CodeGen/LoongArch/shift-masked-shamt.ll b/llvm/test/CodeGen/LoongArch/shift-masked-shamt.ll
index 1878e0ed24240..e151624d908c2 100644
--- a/llvm/test/CodeGen/LoongArch/shift-masked-shamt.ll
+++ b/llvm/test/CodeGen/LoongArch/shift-masked-shamt.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc --mtriple=loongarch32 < %s | FileCheck %s --check-prefix=LA32
 ; RUN: llc --mtriple=loongarch64 < %s | FileCheck %s --check-prefix=LA64
 
@@ -9,12 +10,12 @@ define i32 @sll_redundant_mask(i32 %a, i32 %b) {
 ; LA32-LABEL: sll_redundant_mask:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    sll.w $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: sll_redundant_mask:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    sll.w $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %1 = and i32 %b, 31
   %2 = shl i32 %a, %1
   ret i32 %2
@@ -25,13 +26,13 @@ define i32 @sll_non_redundant_mask(i32 %a, i32 %b) {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    andi $a1, $a1, 15
 ; LA32-NEXT:    sll.w $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: sll_non_redundant_mask:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    andi $a1, $a1, 15
 ; LA64-NEXT:    sll.w $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %1 = and i32 %b, 15
   %2 = shl i32 %a, %1
   ret i32 %2
@@ -41,12 +42,12 @@ define i32 @srl_redundant_mask(i32 %a, i32 %b) {
 ; LA32-LABEL: srl_redundant_mask:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    srl.w $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: srl_redundant_mask:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    srl.w $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %1 = and i32 %b, 4095
   %2 = lshr i32 %a, %1
   ret i32 %2
@@ -57,13 +58,13 @@ define i32 @srl_non_redundant_mask(i32 %a, i32 %b) {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    andi $a1, $a1, 7
 ; LA32-NEXT:    srl.w $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: srl_non_redundant_mask:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    andi $a1, $a1, 7
 ; LA64-NEXT:    srl.w $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %1 = and i32 %b, 7
   %2 = lshr i32 %a, %1
   ret i32 %2
@@ -73,12 +74,12 @@ define i32 @sra_redundant_mask(i32 %a, i32 %b) {
 ; LA32-LABEL: sra_redundant_mask:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    sra.w $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: sra_redundant_mask:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    sra.w $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %1 = and i32 %b, 65535
   %2 = ashr i32 %a, %1
   ret i32 %2
@@ -89,13 +90,13 @@ define i32 @sra_non_redundant_mask(i32 %a, i32 %b) {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    andi $a1, $a1, 32
 ; LA32-NEXT:    sra.w $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: sra_non_redundant_mask:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    andi $a1, $a1, 32
 ; LA64-NEXT:    sra.w $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %1 = and i32 %b, 32
   %2 = ashr i32 %a, %1
   ret i32 %2
@@ -106,13 +107,13 @@ define i32 @sll_redundant_mask_zeros(i32 %a, i32 %b) {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    slli.w $a1, $a1, 1
 ; LA32-NEXT:    sll.w $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: sll_redundant_mask_zeros:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    slli.d $a1, $a1, 1
 ; LA64-NEXT:    sll.w $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %1 = shl i32 %b, 1
   %2 = and i32 %1, 30
   %3 = shl i32 %a, %2
@@ -124,13 +125,13 @@ define i32 @srl_redundant_mask_zeros(i32 %a, i32 %b) {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    slli.w $a1, $a1, 2
 ; LA32-NEXT:    srl.w $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: srl_redundant_mask_zeros:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    slli.d $a1, $a1, 2
 ; LA64-NEXT:    srl.w $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %1 = shl i32 %b, 2
   %2 = and i32 %1, 28
   %3 = lshr i32 %a, %2
@@ -142,13 +143,13 @@ define i32 @sra_redundant_mask_zeros(i32 %a, i32 %b) {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    slli.w $a1, $a1, 3
 ; LA32-NEXT:    sra.w $a0, $a0, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: sra_redundant_mask_zeros:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    slli.d $a1, $a1, 3
 ; LA64-NEXT:    sra.w $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %1 = shl i32 %b, 3
   %2 = and i32 %1, 24
   %3 = ashr i32 %a, %2
@@ -174,13 +175,13 @@ define i64 @sll_redundant_mask_zeros_i64(i64 %a, i64 %b) {
 ; LA32-NEXT:    sll.w $a0, $a0, $a2
 ; LA32-NEXT:    srai.w $a2, $a3, 31
 ; LA32-NEXT:    and $a0, $a2, $a0
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: sll_redundant_mask_zeros_i64:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    slli.d $a1, $a1, 2
 ; LA64-NEXT:    sll.d $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %1 = shl i64 %b, 2
   %2 = and i64 %1, 60
   %3 = shl i64 %a, %2
@@ -206,13 +207,13 @@ define i64 @srl_redundant_mask_zeros_i64(i64 %a, i64 %b) {
 ; LA32-NEXT:    srl.w $a1, $a1, $a2
 ; LA32-NEXT:    srai.w $a2, $a3, 31
 ; LA32-NEXT:    and $a1, $a2, $a1
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: srl_redundant_mask_zeros_i64:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    slli.d $a1, $a1, 3
 ; LA64-NEXT:    srl.d $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %1 = shl i64 %b, 3
   %2 = and i64 %1, 56
   %3 = lshr i64 %a, %2
@@ -241,13 +242,13 @@ define i64 @sra_redundant_mask_zeros_i64(i64 %a, i64 %b) {
 ; LA32-NEXT:    masknez $a1, $a1, $a6
 ; LA32-NEXT:    or $a0, $a0, $a1
 ; LA32-NEXT:    move $a1, $a2
-; LA32-NEXT:    jirl $zero, $ra, 0
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: sra_redundant_mask_zeros_i64:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    slli.d $a1, $a1, 4
 ; LA64-NEXT:    sra.d $a0, $a0, $a1
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA64-NEXT:    ret
   %1 = shl i64 %b, 4
   %2 = and i64 %1, 48
   %3 = ashr i64 %a, %2

diff --git a/llvm/test/CodeGen/LoongArch/vararg.ll b/llvm/test/CodeGen/LoongArch/vararg.ll
index 5b196fe9bb86c..94c6f93cc24dc 100644
--- a/llvm/test/CodeGen/LoongArch/vararg.ll
+++ b/llvm/test/CodeGen/LoongArch/vararg.ll
@@ -26,7 +26,7 @@ define i64 @va1(ptr %fmt, ...) {
 ; LA64-FPELIM-NEXT:    st.d $a1, $sp, 8
 ; LA64-FPELIM-NEXT:    st.d $a0, $sp, 24
 ; LA64-FPELIM-NEXT:    addi.d $sp, $sp, 80
-; LA64-FPELIM-NEXT:    jirl $zero, $ra, 0
+; LA64-FPELIM-NEXT:    ret
 ;
 ; LA64-WITHFP-LABEL: va1:
 ; LA64-WITHFP:       # %bb.0:
@@ -51,7 +51,7 @@ define i64 @va1(ptr %fmt, ...) {
 ; LA64-WITHFP-NEXT:    ld.d $fp, $sp, 16 # 8-byte Folded Reload
 ; LA64-WITHFP-NEXT:    ld.d $ra, $sp, 24 # 8-byte Folded Reload
 ; LA64-WITHFP-NEXT:    addi.d $sp, $sp, 96
-; LA64-WITHFP-NEXT:    jirl $zero, $ra, 0
+; LA64-WITHFP-NEXT:    ret
   %va = alloca ptr, align 8
   call void @llvm.va_start(ptr %va)
   %argp.cur = load ptr, ptr %va, align 8
@@ -77,7 +77,7 @@ define i64 @va1_va_arg(ptr %fmt, ...) nounwind {
 ; LA64-FPELIM-NEXT:    st.d $a1, $sp, 8
 ; LA64-FPELIM-NEXT:    st.d $a0, $sp, 24
 ; LA64-FPELIM-NEXT:    addi.d $sp, $sp, 80
-; LA64-FPELIM-NEXT:    jirl $zero, $ra, 0
+; LA64-FPELIM-NEXT:    ret
 ;
 ; LA64-WITHFP-LABEL: va1_va_arg:
 ; LA64-WITHFP:       # %bb.0:
@@ -98,7 +98,7 @@ define i64 @va1_va_arg(ptr %fmt, ...) nounwind {
 ; LA64-WITHFP-NEXT:    ld.d $fp, $sp, 16 # 8-byte Folded Reload
 ; LA64-WITHFP-NEXT:    ld.d $ra, $sp, 24 # 8-byte Folded Reload
 ; LA64-WITHFP-NEXT:    addi.d $sp, $sp, 96
-; LA64-WITHFP-NEXT:    jirl $zero, $ra, 0
+; LA64-WITHFP-NEXT:    ret
   %va = alloca ptr, align 8
   call void @llvm.va_start(ptr %va)
   %1 = va_arg ptr %va, i64
@@ -139,7 +139,7 @@ define i64 @va1_va_arg_alloca(ptr %fmt, ...) nounwind {
 ; LA64-FPELIM-NEXT:    ld.d $fp, $sp, 16 # 8-byte Folded Reload
 ; LA64-FPELIM-NEXT:    ld.d $ra, $sp, 24 # 8-byte Folded Reload
 ; LA64-FPELIM-NEXT:    addi.d $sp, $sp, 96
-; LA64-FPELIM-NEXT:    jirl $zero, $ra, 0
+; LA64-FPELIM-NEXT:    ret
 ;
 ; LA64-WITHFP-LABEL: va1_va_arg_alloca:
 ; LA64-WITHFP:       # %bb.0:
@@ -170,7 +170,7 @@ define i64 @va1_va_arg_alloca(ptr %fmt, ...) nounwind {
 ; LA64-WITHFP-NEXT:    ld.d $fp, $sp, 16 # 8-byte Folded Reload
 ; LA64-WITHFP-NEXT:    ld.d $ra, $sp, 24 # 8-byte Folded Reload
 ; LA64-WITHFP-NEXT:    addi.d $sp, $sp, 96
-; LA64-WITHFP-NEXT:    jirl $zero, $ra, 0
+; LA64-WITHFP-NEXT:    ret
   %va = alloca ptr, align 8
   call void @llvm.va_start(ptr %va)
   %1 = va_arg ptr %va, i64
@@ -190,7 +190,7 @@ define void @va1_caller() nounwind {
 ; LA64-FPELIM-NEXT:    bl va1
 ; LA64-FPELIM-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
 ; LA64-FPELIM-NEXT:    addi.d $sp, $sp, 16
-; LA64-FPELIM-NEXT:    jirl $zero, $ra, 0
+; LA64-FPELIM-NEXT:    ret
 ;
 ; LA64-WITHFP-LABEL: va1_caller:
 ; LA64-WITHFP:       # %bb.0:
@@ -204,7 +204,7 @@ define void @va1_caller() nounwind {
 ; LA64-WITHFP-NEXT:    ld.d $fp, $sp, 0 # 8-byte Folded Reload
 ; LA64-WITHFP-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
 ; LA64-WITHFP-NEXT:    addi.d $sp, $sp, 16
-; LA64-WITHFP-NEXT:    jirl $zero, $ra, 0
+; LA64-WITHFP-NEXT:    ret
   %1 = call i64 (ptr, ...) @va1(ptr undef, double 1.0, i64 2)
   ret void
 }
@@ -233,7 +233,7 @@ define void @va_aligned_register_caller() nounwind {
 ; LA64-FPELIM-NEXT:    bl va_aligned_register
 ; LA64-FPELIM-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
 ; LA64-FPELIM-NEXT:    addi.d $sp, $sp, 16
-; LA64-FPELIM-NEXT:    jirl $zero, $ra, 0
+; LA64-FPELIM-NEXT:    ret
 ;
 ; LA64-WITHFP-LABEL: va_aligned_register_caller:
 ; LA64-WITHFP:       # %bb.0:
@@ -256,7 +256,7 @@ define void @va_aligned_register_caller() nounwind {
 ; LA64-WITHFP-NEXT:    ld.d $fp, $sp, 0 # 8-byte Folded Reload
 ; LA64-WITHFP-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
 ; LA64-WITHFP-NEXT:    addi.d $sp, $sp, 16
-; LA64-WITHFP-NEXT:    jirl $zero, $ra, 0
+; LA64-WITHFP-NEXT:    ret
  %1 = call i64 (i64, i128, ...) @va_aligned_register(i64 2, i128 1111,
    fp128 0xLEB851EB851EB851F400091EB851EB851)
  ret void
@@ -305,7 +305,7 @@ define void @va_aligned_stack_caller() nounwind {
 ; LA64-FPELIM-NEXT:    bl va_aligned_stack_callee
 ; LA64-FPELIM-NEXT:    ld.d $ra, $sp, 104 # 8-byte Folded Reload
 ; LA64-FPELIM-NEXT:    addi.d $sp, $sp, 112
-; LA64-FPELIM-NEXT:    jirl $zero, $ra, 0
+; LA64-FPELIM-NEXT:    ret
 ;
 ; LA64-WITHFP-LABEL: va_aligned_stack_caller:
 ; LA64-WITHFP:       # %bb.0:
@@ -347,7 +347,7 @@ define void @va_aligned_stack_caller() nounwind {
 ; LA64-WITHFP-NEXT:    ld.d $fp, $sp, 96 # 8-byte Folded Reload
 ; LA64-WITHFP-NEXT:    ld.d $ra, $sp, 104 # 8-byte Folded Reload
 ; LA64-WITHFP-NEXT:    addi.d $sp, $sp, 112
-; LA64-WITHFP-NEXT:    jirl $zero, $ra, 0
+; LA64-WITHFP-NEXT:    ret
   %1 = call i32 (i32, ...) @va_aligned_stack_callee(i32 1, i32 11,
     i256 1000, i32 12, i32 13, i128 18446744073709551616, i32 14,
     fp128 0xLEB851EB851EB851F400091EB851EB851, i64 15,

diff --git a/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/loongarch_function_name.ll.expected b/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/loongarch_function_name.ll.expected
index 692941b506b82..060a6b397457b 100644
--- a/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/loongarch_function_name.ll.expected
+++ b/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/loongarch_function_name.ll.expected
@@ -7,7 +7,7 @@ define hidden i32 @"_Z54bar$ompvariant$bar"() {
 ; CHECK-LABEL: _Z54bar$ompvariant$bar:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    ori $a0, $zero, 2
-; CHECK-NEXT:    jirl $zero, $ra, 0
+; CHECK-NEXT:    ret
 entry:
   ret i32 2
 }

diff --git a/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/loongarch_generated_funcs.ll.generated.expected b/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/loongarch_generated_funcs.ll.generated.expected
index fa82e3841cb8c..b2ba71c8292d7 100644
--- a/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/loongarch_generated_funcs.ll.generated.expected
+++ b/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/loongarch_generated_funcs.ll.generated.expected
@@ -109,7 +109,7 @@ attributes #0 = { noredzone nounwind ssp uwtable "frame-pointer"="all" }
 ; CHECK-NEXT:    ld.w $fp, $sp, 24 # 4-byte Folded Reload
 ; CHECK-NEXT:    ld.w $ra, $sp, 28 # 4-byte Folded Reload
 ; CHECK-NEXT:    addi.w $sp, $sp, 32
-; CHECK-NEXT:    jirl $zero, $ra, 0
+; CHECK-NEXT:    ret
 ;
 ; CHECK-LABEL: main:
 ; CHECK:       # %bb.0:
@@ -143,4 +143,4 @@ attributes #0 = { noredzone nounwind ssp uwtable "frame-pointer"="all" }
 ; CHECK-NEXT:    ld.w $fp, $sp, 24 # 4-byte Folded Reload
 ; CHECK-NEXT:    ld.w $ra, $sp, 28 # 4-byte Folded Reload
 ; CHECK-NEXT:    addi.w $sp, $sp, 32
-; CHECK-NEXT:    jirl $zero, $ra, 0
+; CHECK-NEXT:    ret

diff --git a/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/loongarch_generated_funcs.ll.nogenerated.expected b/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/loongarch_generated_funcs.ll.nogenerated.expected
index baf0839627c03..acccb9923b7d6 100644
--- a/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/loongarch_generated_funcs.ll.nogenerated.expected
+++ b/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/loongarch_generated_funcs.ll.nogenerated.expected
@@ -50,7 +50,7 @@ define dso_local i32 @check_boundaries() #0 {
 ; CHECK-NEXT:    ld.w $fp, $sp, 24 # 4-byte Folded Reload
 ; CHECK-NEXT:    ld.w $ra, $sp, 28 # 4-byte Folded Reload
 ; CHECK-NEXT:    addi.w $sp, $sp, 32
-; CHECK-NEXT:    jirl $zero, $ra, 0
+; CHECK-NEXT:    ret
   %1 = alloca i32, align 4
   %2 = alloca i32, align 4
   %3 = alloca i32, align 4
@@ -120,7 +120,7 @@ define dso_local i32 @main() #0 {
 ; CHECK-NEXT:    ld.w $fp, $sp, 24 # 4-byte Folded Reload
 ; CHECK-NEXT:    ld.w $ra, $sp, 28 # 4-byte Folded Reload
 ; CHECK-NEXT:    addi.w $sp, $sp, 32
-; CHECK-NEXT:    jirl $zero, $ra, 0
+; CHECK-NEXT:    ret
   %1 = alloca i32, align 4
   %2 = alloca i32, align 4
   %3 = alloca i32, align 4
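
For reference, the `ret` mnemonic appearing throughout these updated checks is an
alias for `jirl $zero, $ra, 0` (an indirect jump through $ra that discards the
link result), and `jr $rj` likewise covers the generic one-register indirect
jump `jirl $zero, $rj, 0`. A minimal sketch of how such aliases are typically
declared in an LLVM backend's .td file follows; the register class name GPR and
the register records R0/R1 (standing for $zero/$ra) are assumptions for
illustration and may not match the exact definitions in LoongArchInstrInfo.td:

  // Hypothetical sketch: map the short mnemonics onto JIRL encodings.
  // Assumes GPR is the integer register class, R0 is $zero, R1 is $ra.
  def : InstAlias<"jr $rj", (JIRL R0, GPR:$rj, 0)>;
  def : InstAlias<"ret",    (JIRL R0, R1, 0)>;

With aliases of this shape, the assembler accepts `ret` and `jr $a0` on input,
and the instruction printer renders `jirl $zero, $ra, 0` as `ret` in llc
output, which is what the regenerated FileCheck lines above verify.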