[llvm] e7c9a99 - [LoongArch] Implement isSExtCheaperThanZExt

Weining Lu via llvm-commits llvm-commits at lists.llvm.org
Mon Jul 24 18:45:50 PDT 2023


Author: WANG Rui
Date: 2023-07-25T09:41:32+08:00
New Revision: e7c9a99dfe21bac8028375a52e3ce1db5c8924f0

URL: https://github.com/llvm/llvm-project/commit/e7c9a99dfe21bac8028375a52e3ce1db5c8924f0
DIFF: https://github.com/llvm/llvm-project/commit/e7c9a99dfe21bac8028375a52e3ce1db5c8924f0.diff

LOG: [LoongArch] Implement isSExtCheaperThanZExt

Implement the TargetLowering::isSExtCheaperThanZExt hook for LoongArch: on LA64 it
returns true for i32 -> i64, so SelectionDAG prefers sign extension over zero
extension when promoting i32 values. Since 32-bit operations on LA64 already
produce sign-extended results, many of these extensions then fold away; the
updated tests replace bstrpick.d/ld.wu zero-extensions with addi.w/ld.w
sign-extensions or drop them entirely.

Signed-off-by: WANG Rui <wangrui at loongson.cn>

Differential Revision: https://reviews.llvm.org/D154919
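
The effect is easiest to see in sext-cheaper-than-zext.ll below. As a sketch,
IR along the following lines (the IR body is a reconstruction for illustration;
the test source itself is not part of this diff) used to widen its i32 operands
with bstrpick.d zero-extensions, whereas with the hook in place sign extension
is chosen and folds into the already sign-extended arguments:

  ; Sketch in the spirit of sext-cheaper-than-zext.ll; the IR body is a
  ; reconstruction, only the assembly in the comments comes from the diff below.
  define signext i32 @sext_icmp(i32 signext %x, i32 signext %y) {
    %inc = add i32 %x, 1
    %cmp = icmp ne i32 %inc, %y
    %conv = zext i1 %cmp to i32
    ret i32 %conv
  }

  ; Old LA64 codegen (zero-extending the promoted i32 values):
  ;   bstrpick.d $a1, $a1, 31, 0
  ;   addi.d     $a0, $a0, 1
  ;   bstrpick.d $a0, $a0, 31, 0
  ;   xor        $a0, $a0, $a1
  ;   sltu       $a0, $zero, $a0
  ; New LA64 codegen (sign extension folds into the signext arguments):
  ;   addi.w     $a0, $a0, 1
  ;   xor        $a0, $a0, $a1
  ;   sltu       $a0, $zero, $a0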

Added: 
    

Modified: 
    llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
    llvm/lib/Target/LoongArch/LoongArchISelLowering.h
    llvm/test/CodeGen/LoongArch/atomicrmw-uinc-udec-wrap.ll
    llvm/test/CodeGen/LoongArch/duplicate-returns-for-tailcall.ll
    llvm/test/CodeGen/LoongArch/get-setcc-result-type.ll
    llvm/test/CodeGen/LoongArch/ir-instruction/atomic-cmpxchg.ll
    llvm/test/CodeGen/LoongArch/ir-instruction/br.ll
    llvm/test/CodeGen/LoongArch/jump-table.ll
    llvm/test/CodeGen/LoongArch/nomerge.ll
    llvm/test/CodeGen/LoongArch/sext-cheaper-than-zext.ll
    llvm/test/CodeGen/LoongArch/shrinkwrap.ll
    llvm/test/CodeGen/LoongArch/spill-ra-without-kill.ll
    llvm/test/CodeGen/LoongArch/tail-calls.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
index b6e92106a9f3f7..db5961fc501a0d 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
@@ -3285,6 +3285,10 @@ bool LoongArchTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
   return TargetLowering::isZExtFree(Val, VT2);
 }
 
+bool LoongArchTargetLowering::isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const {
+  return Subtarget.is64Bit() && SrcVT == MVT::i32 && DstVT == MVT::i64;
+}
+
 bool LoongArchTargetLowering::hasAndNotCompare(SDValue Y) const {
   // TODO: Support vectors.
   if (Y.getValueType().isVector())

diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.h b/llvm/lib/Target/LoongArch/LoongArchISelLowering.h
index 8401db3c8aa554..500407493fe5a1 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.h
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.h
@@ -205,6 +205,7 @@ class LoongArchTargetLowering : public TargetLowering {
   bool isLegalICmpImmediate(int64_t Imm) const override;
   bool isLegalAddImmediate(int64_t Imm) const override;
   bool isZExtFree(SDValue Val, EVT VT2) const override;
+  bool isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const override;
 
   bool hasAndNotCompare(SDValue Y) const override;
 

diff --git a/llvm/test/CodeGen/LoongArch/atomicrmw-uinc-udec-wrap.ll b/llvm/test/CodeGen/LoongArch/atomicrmw-uinc-udec-wrap.ll
index ad264ac53fa250..f11af8fe652842 100644
--- a/llvm/test/CodeGen/LoongArch/atomicrmw-uinc-udec-wrap.ll
+++ b/llvm/test/CodeGen/LoongArch/atomicrmw-uinc-udec-wrap.ll
@@ -112,16 +112,16 @@ define i32 @atomicrmw_uinc_wrap_i32(ptr %ptr, i32 %val) {
 ; LA64-LABEL: atomicrmw_uinc_wrap_i32:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    ld.w $a3, $a0, 0
-; LA64-NEXT:    bstrpick.d $a2, $a1, 31, 0
+; LA64-NEXT:    addi.w $a2, $a1, 0
 ; LA64-NEXT:    .p2align 4, , 16
 ; LA64-NEXT:  .LBB2_1: # %atomicrmw.start
 ; LA64-NEXT:    # =>This Loop Header: Depth=1
 ; LA64-NEXT:    # Child Loop BB2_3 Depth 2
-; LA64-NEXT:    bstrpick.d $a1, $a3, 31, 0
-; LA64-NEXT:    sltu $a1, $a1, $a2
+; LA64-NEXT:    addi.w $a4, $a3, 0
+; LA64-NEXT:    sltu $a1, $a4, $a2
 ; LA64-NEXT:    xori $a1, $a1, 1
-; LA64-NEXT:    addi.d $a4, $a3, 1
-; LA64-NEXT:    masknez $a4, $a4, $a1
+; LA64-NEXT:    addi.d $a5, $a3, 1
+; LA64-NEXT:    masknez $a5, $a5, $a1
 ; LA64-NEXT:  .LBB2_3: # %atomicrmw.start
 ; LA64-NEXT:    # Parent Loop BB2_1 Depth=1
 ; LA64-NEXT:    # => This Inner Loop Header: Depth=2
@@ -130,16 +130,15 @@ define i32 @atomicrmw_uinc_wrap_i32(ptr %ptr, i32 %val) {
 ; LA64-NEXT:  # %bb.4: # %atomicrmw.start
 ; LA64-NEXT:    # in Loop: Header=BB2_3 Depth=2
 ; LA64-NEXT:    dbar 0
-; LA64-NEXT:    move $a5, $a4
-; LA64-NEXT:    sc.w $a5, $a0, 0
-; LA64-NEXT:    beqz $a5, .LBB2_3
+; LA64-NEXT:    move $a6, $a5
+; LA64-NEXT:    sc.w $a6, $a0, 0
+; LA64-NEXT:    beqz $a6, .LBB2_3
 ; LA64-NEXT:    b .LBB2_6
 ; LA64-NEXT:  .LBB2_5: # %atomicrmw.start
 ; LA64-NEXT:    # in Loop: Header=BB2_1 Depth=1
 ; LA64-NEXT:    dbar 1792
 ; LA64-NEXT:  .LBB2_6: # %atomicrmw.start
 ; LA64-NEXT:    # in Loop: Header=BB2_1 Depth=1
-; LA64-NEXT:    addi.w $a4, $a3, 0
 ; LA64-NEXT:    move $a3, $a1
 ; LA64-NEXT:    bne $a1, $a4, .LBB2_1
 ; LA64-NEXT:  # %bb.2: # %atomicrmw.end
@@ -308,21 +307,21 @@ define i32 @atomicrmw_udec_wrap_i32(ptr %ptr, i32 %val) {
 ; LA64-LABEL: atomicrmw_udec_wrap_i32:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    ld.w $a4, $a0, 0
-; LA64-NEXT:    bstrpick.d $a3, $a1, 31, 0
+; LA64-NEXT:    addi.w $a3, $a1, 0
 ; LA64-NEXT:    .p2align 4, , 16
 ; LA64-NEXT:  .LBB6_1: # %atomicrmw.start
 ; LA64-NEXT:    # =>This Loop Header: Depth=1
 ; LA64-NEXT:    # Child Loop BB6_3 Depth 2
-; LA64-NEXT:    bstrpick.d $a2, $a4, 31, 0
-; LA64-NEXT:    sltu $a5, $a3, $a2
+; LA64-NEXT:    addi.w $a5, $a4, 0
+; LA64-NEXT:    sltu $a2, $a3, $a5
 ; LA64-NEXT:    addi.d $a6, $a4, -1
-; LA64-NEXT:    masknez $a6, $a6, $a5
-; LA64-NEXT:    maskeqz $a5, $a1, $a5
-; LA64-NEXT:    or $a5, $a5, $a6
-; LA64-NEXT:    sltui $a2, $a2, 1
-; LA64-NEXT:    masknez $a5, $a5, $a2
+; LA64-NEXT:    masknez $a6, $a6, $a2
 ; LA64-NEXT:    maskeqz $a2, $a1, $a2
-; LA64-NEXT:    or $a5, $a2, $a5
+; LA64-NEXT:    or $a2, $a2, $a6
+; LA64-NEXT:    sltui $a6, $a5, 1
+; LA64-NEXT:    masknez $a2, $a2, $a6
+; LA64-NEXT:    maskeqz $a6, $a1, $a6
+; LA64-NEXT:    or $a6, $a6, $a2
 ; LA64-NEXT:  .LBB6_3: # %atomicrmw.start
 ; LA64-NEXT:    # Parent Loop BB6_1 Depth=1
 ; LA64-NEXT:    # => This Inner Loop Header: Depth=2
@@ -331,16 +330,15 @@ define i32 @atomicrmw_udec_wrap_i32(ptr %ptr, i32 %val) {
 ; LA64-NEXT:  # %bb.4: # %atomicrmw.start
 ; LA64-NEXT:    # in Loop: Header=BB6_3 Depth=2
 ; LA64-NEXT:    dbar 0
-; LA64-NEXT:    move $a6, $a5
-; LA64-NEXT:    sc.w $a6, $a0, 0
-; LA64-NEXT:    beqz $a6, .LBB6_3
+; LA64-NEXT:    move $a7, $a6
+; LA64-NEXT:    sc.w $a7, $a0, 0
+; LA64-NEXT:    beqz $a7, .LBB6_3
 ; LA64-NEXT:    b .LBB6_6
 ; LA64-NEXT:  .LBB6_5: # %atomicrmw.start
 ; LA64-NEXT:    # in Loop: Header=BB6_1 Depth=1
 ; LA64-NEXT:    dbar 1792
 ; LA64-NEXT:  .LBB6_6: # %atomicrmw.start
 ; LA64-NEXT:    # in Loop: Header=BB6_1 Depth=1
-; LA64-NEXT:    addi.w $a5, $a4, 0
 ; LA64-NEXT:    move $a4, $a2
 ; LA64-NEXT:    bne $a2, $a5, .LBB6_1
 ; LA64-NEXT:  # %bb.2: # %atomicrmw.end

diff --git a/llvm/test/CodeGen/LoongArch/duplicate-returns-for-tailcall.ll b/llvm/test/CodeGen/LoongArch/duplicate-returns-for-tailcall.ll
index 59bc9aa4566410..80e55ef9e21f41 100644
--- a/llvm/test/CodeGen/LoongArch/duplicate-returns-for-tailcall.ll
+++ b/llvm/test/CodeGen/LoongArch/duplicate-returns-for-tailcall.ll
@@ -9,14 +9,12 @@ declare i32 @test3()
 define i32 @duplicate_returns(i32 %a, i32 %b) nounwind {
 ; CHECK-LABEL: duplicate_returns:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    bstrpick.d $a2, $a0, 31, 0
-; CHECK-NEXT:    beqz $a2, .LBB0_4
-; CHECK-NEXT:  # %bb.1: # %if.else
-; CHECK-NEXT:    bstrpick.d $a2, $a1, 31, 0
-; CHECK-NEXT:    beqz $a2, .LBB0_5
-; CHECK-NEXT:  # %bb.2: # %if.else2
 ; CHECK-NEXT:    addi.w $a0, $a0, 0
+; CHECK-NEXT:    beqz $a0, .LBB0_4
+; CHECK-NEXT:  # %bb.1: # %if.else
 ; CHECK-NEXT:    addi.w $a1, $a1, 0
+; CHECK-NEXT:    beqz $a1, .LBB0_5
+; CHECK-NEXT:  # %bb.2: # %if.else2
 ; CHECK-NEXT:    bge $a1, $a0, .LBB0_6
 ; CHECK-NEXT:  # %bb.3: # %if.then3
 ; CHECK-NEXT:    b %plt(test2)

diff --git a/llvm/test/CodeGen/LoongArch/get-setcc-result-type.ll b/llvm/test/CodeGen/LoongArch/get-setcc-result-type.ll
index 34a5102b4dde17..432cedff6d8319 100644
--- a/llvm/test/CodeGen/LoongArch/get-setcc-result-type.ll
+++ b/llvm/test/CodeGen/LoongArch/get-setcc-result-type.ll
@@ -5,19 +5,19 @@
 define void @getSetCCResultType(ptr %p) {
 ; CHECK-LABEL: getSetCCResultType:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    ld.wu $a1, $a0, 12
+; CHECK-NEXT:    ld.w $a1, $a0, 12
 ; CHECK-NEXT:    sltui $a1, $a1, 1
 ; CHECK-NEXT:    sub.d $a1, $zero, $a1
 ; CHECK-NEXT:    st.w $a1, $a0, 12
-; CHECK-NEXT:    ld.wu $a1, $a0, 8
+; CHECK-NEXT:    ld.w $a1, $a0, 8
 ; CHECK-NEXT:    sltui $a1, $a1, 1
 ; CHECK-NEXT:    sub.d $a1, $zero, $a1
 ; CHECK-NEXT:    st.w $a1, $a0, 8
-; CHECK-NEXT:    ld.wu $a1, $a0, 4
+; CHECK-NEXT:    ld.w $a1, $a0, 4
 ; CHECK-NEXT:    sltui $a1, $a1, 1
 ; CHECK-NEXT:    sub.d $a1, $zero, $a1
 ; CHECK-NEXT:    st.w $a1, $a0, 4
-; CHECK-NEXT:    ld.wu $a1, $a0, 0
+; CHECK-NEXT:    ld.w $a1, $a0, 0
 ; CHECK-NEXT:    sltui $a1, $a1, 1
 ; CHECK-NEXT:    sub.d $a1, $zero, $a1
 ; CHECK-NEXT:    st.w $a1, $a0, 0

diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/atomic-cmpxchg.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/atomic-cmpxchg.ll
index b8c0cb257122a7..76e51fe7d3e850 100644
--- a/llvm/test/CodeGen/LoongArch/ir-instruction/atomic-cmpxchg.ll
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/atomic-cmpxchg.ll
@@ -233,32 +233,31 @@ define i1 @cmpxchg_i8_acquire_acquire_reti1(ptr %ptr, i8 %cmp, i8 %val) nounwind
 ; LA64-NEXT:    addi.w $a3, $zero, -4
 ; LA64-NEXT:    and $a3, $a0, $a3
 ; LA64-NEXT:    slli.d $a0, $a0, 3
-; LA64-NEXT:    andi $a1, $a1, 255
-; LA64-NEXT:    sll.w $a1, $a1, $a0
 ; LA64-NEXT:    ori $a4, $zero, 255
 ; LA64-NEXT:    sll.w $a4, $a4, $a0
+; LA64-NEXT:    andi $a1, $a1, 255
+; LA64-NEXT:    sll.w $a1, $a1, $a0
 ; LA64-NEXT:    andi $a2, $a2, 255
 ; LA64-NEXT:    sll.w $a0, $a2, $a0
 ; LA64-NEXT:    addi.w $a0, $a0, 0
+; LA64-NEXT:    addi.w $a1, $a1, 0
 ; LA64-NEXT:    addi.w $a2, $a4, 0
-; LA64-NEXT:    addi.w $a5, $a1, 0
 ; LA64-NEXT:  .LBB8_1: # =>This Inner Loop Header: Depth=1
-; LA64-NEXT:    ll.w $a6, $a3, 0
-; LA64-NEXT:    and $a7, $a6, $a2
-; LA64-NEXT:    bne $a7, $a5, .LBB8_3
+; LA64-NEXT:    ll.w $a5, $a3, 0
+; LA64-NEXT:    and $a6, $a5, $a2
+; LA64-NEXT:    bne $a6, $a1, .LBB8_3
 ; LA64-NEXT:  # %bb.2: # in Loop: Header=BB8_1 Depth=1
 ; LA64-NEXT:    dbar 0
-; LA64-NEXT:    andn $a7, $a6, $a2
-; LA64-NEXT:    or $a7, $a7, $a0
-; LA64-NEXT:    sc.w $a7, $a3, 0
-; LA64-NEXT:    beqz $a7, .LBB8_1
+; LA64-NEXT:    andn $a6, $a5, $a2
+; LA64-NEXT:    or $a6, $a6, $a0
+; LA64-NEXT:    sc.w $a6, $a3, 0
+; LA64-NEXT:    beqz $a6, .LBB8_1
 ; LA64-NEXT:    b .LBB8_4
 ; LA64-NEXT:  .LBB8_3:
 ; LA64-NEXT:    dbar 1792
 ; LA64-NEXT:  .LBB8_4:
-; LA64-NEXT:    and $a0, $a6, $a4
-; LA64-NEXT:    bstrpick.d $a0, $a0, 31, 0
-; LA64-NEXT:    bstrpick.d $a1, $a1, 31, 0
+; LA64-NEXT:    and $a0, $a5, $a4
+; LA64-NEXT:    addi.w $a0, $a0, 0
 ; LA64-NEXT:    xor $a0, $a1, $a0
 ; LA64-NEXT:    sltui $a0, $a0, 1
 ; LA64-NEXT:    ret
@@ -273,33 +272,32 @@ define i1 @cmpxchg_i16_acquire_acquire_reti1(ptr %ptr, i16 %cmp, i16 %val) nounw
 ; LA64-NEXT:    addi.w $a3, $zero, -4
 ; LA64-NEXT:    and $a3, $a0, $a3
 ; LA64-NEXT:    slli.d $a0, $a0, 3
-; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
-; LA64-NEXT:    sll.w $a1, $a1, $a0
 ; LA64-NEXT:    lu12i.w $a4, 15
 ; LA64-NEXT:    ori $a4, $a4, 4095
 ; LA64-NEXT:    sll.w $a4, $a4, $a0
+; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT:    sll.w $a1, $a1, $a0
 ; LA64-NEXT:    bstrpick.d $a2, $a2, 15, 0
 ; LA64-NEXT:    sll.w $a0, $a2, $a0
 ; LA64-NEXT:    addi.w $a0, $a0, 0
+; LA64-NEXT:    addi.w $a1, $a1, 0
 ; LA64-NEXT:    addi.w $a2, $a4, 0
-; LA64-NEXT:    addi.w $a5, $a1, 0
 ; LA64-NEXT:  .LBB9_1: # =>This Inner Loop Header: Depth=1
-; LA64-NEXT:    ll.w $a6, $a3, 0
-; LA64-NEXT:    and $a7, $a6, $a2
-; LA64-NEXT:    bne $a7, $a5, .LBB9_3
+; LA64-NEXT:    ll.w $a5, $a3, 0
+; LA64-NEXT:    and $a6, $a5, $a2
+; LA64-NEXT:    bne $a6, $a1, .LBB9_3
 ; LA64-NEXT:  # %bb.2: # in Loop: Header=BB9_1 Depth=1
 ; LA64-NEXT:    dbar 0
-; LA64-NEXT:    andn $a7, $a6, $a2
-; LA64-NEXT:    or $a7, $a7, $a0
-; LA64-NEXT:    sc.w $a7, $a3, 0
-; LA64-NEXT:    beqz $a7, .LBB9_1
+; LA64-NEXT:    andn $a6, $a5, $a2
+; LA64-NEXT:    or $a6, $a6, $a0
+; LA64-NEXT:    sc.w $a6, $a3, 0
+; LA64-NEXT:    beqz $a6, .LBB9_1
 ; LA64-NEXT:    b .LBB9_4
 ; LA64-NEXT:  .LBB9_3:
 ; LA64-NEXT:    dbar 1792
 ; LA64-NEXT:  .LBB9_4:
-; LA64-NEXT:    and $a0, $a6, $a4
-; LA64-NEXT:    bstrpick.d $a0, $a0, 31, 0
-; LA64-NEXT:    bstrpick.d $a1, $a1, 31, 0
+; LA64-NEXT:    and $a0, $a5, $a4
+; LA64-NEXT:    addi.w $a0, $a0, 0
 ; LA64-NEXT:    xor $a0, $a1, $a0
 ; LA64-NEXT:    sltui $a0, $a0, 1
 ; LA64-NEXT:    ret

diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/br.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/br.ll
index 91cf72bb2a76e4..36e39cc6d8480b 100644
--- a/llvm/test/CodeGen/LoongArch/ir-instruction/br.ll
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/br.ll
@@ -27,8 +27,8 @@ define void @foo_br_eq(i32 %a, ptr %b) nounwind {
 ;
 ; LA64-LABEL: foo_br_eq:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    ld.wu $a2, $a1, 0
-; LA64-NEXT:    bstrpick.d $a0, $a0, 31, 0
+; LA64-NEXT:    ld.w $a2, $a1, 0
+; LA64-NEXT:    addi.w $a0, $a0, 0
 ; LA64-NEXT:    beq $a2, $a0, .LBB1_2
 ; LA64-NEXT:  # %bb.1: # %test
 ; LA64-NEXT:    ld.w $a0, $a1, 0
@@ -57,8 +57,8 @@ define void @foo_br_ne(i32 %a, ptr %b) nounwind {
 ;
 ; LA64-LABEL: foo_br_ne:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    ld.wu $a2, $a1, 0
-; LA64-NEXT:    bstrpick.d $a0, $a0, 31, 0
+; LA64-NEXT:    ld.w $a2, $a1, 0
+; LA64-NEXT:    addi.w $a0, $a0, 0
 ; LA64-NEXT:    bne $a2, $a0, .LBB2_2
 ; LA64-NEXT:  # %bb.1: # %test
 ; LA64-NEXT:    ld.w $a0, $a1, 0
@@ -147,8 +147,8 @@ define void @foo_br_ult(i32 %a, ptr %b) nounwind {
 ;
 ; LA64-LABEL: foo_br_ult:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    ld.wu $a2, $a1, 0
-; LA64-NEXT:    bstrpick.d $a0, $a0, 31, 0
+; LA64-NEXT:    ld.w $a2, $a1, 0
+; LA64-NEXT:    addi.w $a0, $a0, 0
 ; LA64-NEXT:    bltu $a2, $a0, .LBB5_2
 ; LA64-NEXT:  # %bb.1: # %test
 ; LA64-NEXT:    ld.w $a0, $a1, 0
@@ -177,8 +177,8 @@ define void @foo_br_uge(i32 %a, ptr %b) nounwind {
 ;
 ; LA64-LABEL: foo_br_uge:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    ld.wu $a2, $a1, 0
-; LA64-NEXT:    bstrpick.d $a0, $a0, 31, 0
+; LA64-NEXT:    ld.w $a2, $a1, 0
+; LA64-NEXT:    addi.w $a0, $a0, 0
 ; LA64-NEXT:    bgeu $a2, $a0, .LBB6_2
 ; LA64-NEXT:  # %bb.1: # %test
 ; LA64-NEXT:    ld.w $a0, $a1, 0
@@ -268,8 +268,8 @@ define void @foo_br_ugt(i32 %a, ptr %b) nounwind {
 ;
 ; LA64-LABEL: foo_br_ugt:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    ld.wu $a2, $a1, 0
-; LA64-NEXT:    bstrpick.d $a0, $a0, 31, 0
+; LA64-NEXT:    ld.w $a2, $a1, 0
+; LA64-NEXT:    addi.w $a0, $a0, 0
 ; LA64-NEXT:    bltu $a0, $a2, .LBB9_2
 ; LA64-NEXT:  # %bb.1: # %test
 ; LA64-NEXT:    ld.w $a0, $a1, 0
@@ -298,8 +298,8 @@ define void @foo_br_ule(i32 %a, ptr %b) nounwind {
 ;
 ; LA64-LABEL: foo_br_ule:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    ld.wu $a2, $a1, 0
-; LA64-NEXT:    bstrpick.d $a0, $a0, 31, 0
+; LA64-NEXT:    ld.w $a2, $a1, 0
+; LA64-NEXT:    addi.w $a0, $a0, 0
 ; LA64-NEXT:    bgeu $a0, $a2, .LBB10_2
 ; LA64-NEXT:  # %bb.1: # %test
 ; LA64-NEXT:    ld.w $a0, $a1, 0

diff --git a/llvm/test/CodeGen/LoongArch/jump-table.ll b/llvm/test/CodeGen/LoongArch/jump-table.ll
index 8bd4c952cf1eee..0cd6ef02d8da27 100644
--- a/llvm/test/CodeGen/LoongArch/jump-table.ll
+++ b/llvm/test/CodeGen/LoongArch/jump-table.ll
@@ -48,7 +48,7 @@ define void @switch_4_arms(i32 %in, ptr %out) nounwind {
 ;
 ; LA64-LABEL: switch_4_arms:
 ; LA64:       # %bb.0: # %entry
-; LA64-NEXT:    bstrpick.d $a0, $a0, 31, 0
+; LA64-NEXT:    addi.w $a0, $a0, 0
 ; LA64-NEXT:    ori $a2, $zero, 2
 ; LA64-NEXT:    blt $a2, $a0, .LBB0_4
 ; LA64-NEXT:  # %bb.1: # %entry
@@ -105,7 +105,7 @@ define void @switch_4_arms(i32 %in, ptr %out) nounwind {
 ;
 ; LA64-JT-LABEL: switch_4_arms:
 ; LA64-JT:       # %bb.0: # %entry
-; LA64-JT-NEXT:    bstrpick.d $a0, $a0, 31, 0
+; LA64-JT-NEXT:    addi.w $a0, $a0, 0
 ; LA64-JT-NEXT:    addi.d $a2, $a0, -1
 ; LA64-JT-NEXT:    ori $a0, $zero, 3
 ; LA64-JT-NEXT:    bltu $a0, $a2, .LBB0_6

diff --git a/llvm/test/CodeGen/LoongArch/nomerge.ll b/llvm/test/CodeGen/LoongArch/nomerge.ll
index e2dfe824284ef2..a8d5116f6b67d9 100644
--- a/llvm/test/CodeGen/LoongArch/nomerge.ll
+++ b/llvm/test/CodeGen/LoongArch/nomerge.ll
@@ -6,7 +6,7 @@ define void @foo(i32 %i) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    addi.d $sp, $sp, -16
 ; CHECK-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
-; CHECK-NEXT:    bstrpick.d $a0, $a0, 31, 0
+; CHECK-NEXT:    addi.w $a0, $a0, 0
 ; CHECK-NEXT:    ori $a1, $zero, 7
 ; CHECK-NEXT:    beq $a0, $a1, .LBB0_3
 ; CHECK-NEXT:  # %bb.1: # %entry

diff --git a/llvm/test/CodeGen/LoongArch/sext-cheaper-than-zext.ll b/llvm/test/CodeGen/LoongArch/sext-cheaper-than-zext.ll
index 4e64c1af559a90..c363948a149407 100644
--- a/llvm/test/CodeGen/LoongArch/sext-cheaper-than-zext.ll
+++ b/llvm/test/CodeGen/LoongArch/sext-cheaper-than-zext.ll
@@ -4,9 +4,7 @@
 define signext i32 @sext_icmp(i32 signext %x, i32 signext %y) {
 ; CHECK-LABEL: sext_icmp:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    bstrpick.d $a1, $a1, 31, 0
-; CHECK-NEXT:    addi.d $a0, $a0, 1
-; CHECK-NEXT:    bstrpick.d $a0, $a0, 31, 0
+; CHECK-NEXT:    addi.w $a0, $a0, 1
 ; CHECK-NEXT:    xor $a0, $a0, $a1
 ; CHECK-NEXT:    sltu $a0, $zero, $a0
 ; CHECK-NEXT:    ret

diff --git a/llvm/test/CodeGen/LoongArch/shrinkwrap.ll b/llvm/test/CodeGen/LoongArch/shrinkwrap.ll
index 35f7c8c4773b96..5f15dd2e7eafa8 100644
--- a/llvm/test/CodeGen/LoongArch/shrinkwrap.ll
+++ b/llvm/test/CodeGen/LoongArch/shrinkwrap.ll
@@ -10,7 +10,7 @@ define void @eliminate_restore(i32 %n) nounwind {
 ; NOSHRINKW-NEXT:    addi.d $sp, $sp, -16
 ; NOSHRINKW-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
 ; NOSHRINKW-NEXT:    # kill: def $r5 killed $r4
-; NOSHRINKW-NEXT:    bstrpick.d $a1, $a0, 31, 0
+; NOSHRINKW-NEXT:    addi.w $a1, $a0, 0
 ; NOSHRINKW-NEXT:    ori $a0, $zero, 32
 ; NOSHRINKW-NEXT:    bltu $a0, $a1, .LBB0_2
 ; NOSHRINKW-NEXT:    b .LBB0_1
@@ -23,7 +23,7 @@ define void @eliminate_restore(i32 %n) nounwind {
 ;
 ; SHRINKW-LABEL: eliminate_restore:
 ; SHRINKW:       # %bb.0:
-; SHRINKW-NEXT:    bstrpick.d $a0, $a0, 31, 0
+; SHRINKW-NEXT:    addi.w $a0, $a0, 0
 ; SHRINKW-NEXT:    ori $a1, $zero, 32
 ; SHRINKW-NEXT:    bgeu $a1, $a0, .LBB0_2
 ; SHRINKW-NEXT:  # %bb.1: # %if.end
@@ -54,7 +54,7 @@ define void @conditional_alloca(i32 %n) nounwind {
 ; NOSHRINKW-NEXT:    addi.d $fp, $sp, 32
 ; NOSHRINKW-NEXT:    move $a1, $a0
 ; NOSHRINKW-NEXT:    st.d $a1, $fp, -24 # 8-byte Folded Spill
-; NOSHRINKW-NEXT:    bstrpick.d $a1, $a0, 31, 0
+; NOSHRINKW-NEXT:    addi.w $a1, $a0, 0
 ; NOSHRINKW-NEXT:    ori $a0, $zero, 32
 ; NOSHRINKW-NEXT:    bltu $a0, $a1, .LBB1_2
 ; NOSHRINKW-NEXT:    b .LBB1_1
@@ -78,14 +78,15 @@ define void @conditional_alloca(i32 %n) nounwind {
 ;
 ; SHRINKW-LABEL: conditional_alloca:
 ; SHRINKW:       # %bb.0:
-; SHRINKW-NEXT:    bstrpick.d $a0, $a0, 31, 0
-; SHRINKW-NEXT:    ori $a1, $zero, 32
-; SHRINKW-NEXT:    bltu $a1, $a0, .LBB1_2
+; SHRINKW-NEXT:    addi.w $a1, $a0, 0
+; SHRINKW-NEXT:    ori $a2, $zero, 32
+; SHRINKW-NEXT:    bltu $a2, $a1, .LBB1_2
 ; SHRINKW-NEXT:  # %bb.1: # %if.then
 ; SHRINKW-NEXT:    addi.d $sp, $sp, -16
 ; SHRINKW-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
 ; SHRINKW-NEXT:    st.d $fp, $sp, 0 # 8-byte Folded Spill
 ; SHRINKW-NEXT:    addi.d $fp, $sp, 16
+; SHRINKW-NEXT:    bstrpick.d $a0, $a0, 31, 0
 ; SHRINKW-NEXT:    addi.d $a0, $a0, 15
 ; SHRINKW-NEXT:    bstrpick.d $a0, $a0, 32, 4
 ; SHRINKW-NEXT:    slli.d $a0, $a0, 4

diff --git a/llvm/test/CodeGen/LoongArch/spill-ra-without-kill.ll b/llvm/test/CodeGen/LoongArch/spill-ra-without-kill.ll
index d800befef0ba7f..092da5aba2d937 100644
--- a/llvm/test/CodeGen/LoongArch/spill-ra-without-kill.ll
+++ b/llvm/test/CodeGen/LoongArch/spill-ra-without-kill.ll
@@ -22,7 +22,7 @@ define dso_local ptr @f(i32 noundef signext %i) "frame-pointer"="all" {
 ; CHECK-NEXT:    move $a1, $a0
 ; CHECK-NEXT:    pcalau12i $a0, %pc_hi20(calls)
 ; CHECK-NEXT:    addi.d $a3, $a0, %pc_lo12(calls)
-; CHECK-NEXT:    ld.wu $a0, $a3, 0
+; CHECK-NEXT:    ld.w $a0, $a3, 0
 ; CHECK-NEXT:    addi.d $a2, $a0, 1
 ; CHECK-NEXT:    st.w $a2, $a3, 0
 ; CHECK-NEXT:    st.w $a1, $fp, -28
@@ -33,7 +33,7 @@ define dso_local ptr @f(i32 noundef signext %i) "frame-pointer"="all" {
 ; CHECK-NEXT:    st.d $a0, $fp, -24
 ; CHECK-NEXT:    b .LBB0_7
 ; CHECK-NEXT:  .LBB0_2: # %if.end
-; CHECK-NEXT:    ld.wu $a0, $fp, -28
+; CHECK-NEXT:    ld.w $a0, $fp, -28
 ; CHECK-NEXT:    st.d $a0, $fp, -48 # 8-byte Folded Spill
 ; CHECK-NEXT:    beqz $a0, .LBB0_5
 ; CHECK-NEXT:    b .LBB0_3

diff --git a/llvm/test/CodeGen/LoongArch/tail-calls.ll b/llvm/test/CodeGen/LoongArch/tail-calls.ll
index ff547631792ef6..8f11e03431b922 100644
--- a/llvm/test/CodeGen/LoongArch/tail-calls.ll
+++ b/llvm/test/CodeGen/LoongArch/tail-calls.ll
@@ -35,7 +35,7 @@ declare void @callee_indirect2()
 define void @caller_indirect_tail(i32 %a) nounwind {
 ; CHECK-LABEL: caller_indirect_tail:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    bstrpick.d $a0, $a0, 31, 0
+; CHECK-NEXT:    addi.w $a0, $a0, 0
 ; CHECK-NEXT:    sltui $a0, $a0, 1
 ; CHECK-NEXT:    pcalau12i $a1, %got_pc_hi20(callee_indirect2)
 ; CHECK-NEXT:    ld.d $a1, $a1, %got_pc_lo12(callee_indirect2)



