[llvm] 8d03a37 - [llvm][LoongArch] Replace unnecessary ZERO_EXTEND with ANY_EXTEND (#162593)
via llvm-commits
llvm-commits at lists.llvm.org
Sun Oct 12 18:21:01 PDT 2025
Author: Zhaoxin Yang
Date: 2025-10-13T09:20:56+08:00
New Revision: 8d03a37b9844070b6ccd83aa7cc8b28e91f8d213
URL: https://github.com/llvm/llvm-project/commit/8d03a37b9844070b6ccd83aa7cc8b28e91f8d213
DIFF: https://github.com/llvm/llvm-project/commit/8d03a37b9844070b6ccd83aa7cc8b28e91f8d213.diff
LOG: [llvm][LoongArch] Replace unnecessary ZERO_EXTEND with ANY_EXTEND (#162593)
Added:
Modified:
llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
llvm/test/CodeGen/LoongArch/calling-conv-half.ll
llvm/test/CodeGen/LoongArch/ir-instruction/atomicrmw-fp.ll
Removed:
################################################################################
diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
index 7ddf996f53f4c..f7deeafc9ccfc 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
@@ -632,7 +632,7 @@ SDValue LoongArchTargetLowering::lowerConstantFP(SDValue Op,
case MVT::f32: {
SDValue NewVal = DAG.getConstant(INTVal, DL, MVT::i32);
if (Subtarget.is64Bit())
- NewVal = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, NewVal);
+ NewVal = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, NewVal);
return DAG.getNode(Subtarget.is64Bit() ? LoongArchISD::MOVGR2FR_W_LA64
: LoongArchISD::MOVGR2FR_W,
DL, VT, NewVal);
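The rationale, in brief: MOVGR2FR_W(_LA64) only consumes the low 32 bits of its GPR operand, so the upper bits of the extended constant are don't-care. Forcing ISD::ZERO_EXTEND made LA64 materialize a zero-extended 64-bit value, which costs an extra `lu32i.d $rX, 0` after the sign-extending `lu12i.w`; with ISD::ANY_EXTEND the upper bits are left unconstrained and that instruction drops out. A minimal sketch follows (the function name is illustrative; the instruction sequences mirror the check-line updates in the test diffs below, e.g. the -1.0f pattern 0xBF800000 from atomicrmw-fp.ll):

define float @const_neg_one() nounwind {
; The bit pattern 0xBF800000 is built with a sign-extending lu12i.w, so the
; upper 32 bits of $a0 end up all ones -- harmless, since movgr2fr.w only
; reads the low 32 bits.
;   Before:  lu12i.w $a0, -264192 ; lu32i.d $a0, 0 ; movgr2fr.w $fa0, $a0
;   After:   lu12i.w $a0, -264192 ; movgr2fr.w $fa0, $a0
  ret float -1.0
}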
diff --git a/llvm/test/CodeGen/LoongArch/calling-conv-half.ll b/llvm/test/CodeGen/LoongArch/calling-conv-half.ll
index d111cf2fcfc07..50f7d408da5c0 100644
--- a/llvm/test/CodeGen/LoongArch/calling-conv-half.ll
+++ b/llvm/test/CodeGen/LoongArch/calling-conv-half.ll
@@ -284,7 +284,6 @@ define i32 @caller_half_in_fregs() nounwind {
; LA64S-NEXT: addi.d $sp, $sp, -16
; LA64S-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
; LA64S-NEXT: lu12i.w $a0, -12
-; LA64S-NEXT: lu32i.d $a0, 0
; LA64S-NEXT: movgr2fr.w $fa0, $a0
; LA64S-NEXT: ori $a0, $zero, 1
; LA64S-NEXT: ori $a1, $zero, 2
@@ -326,7 +325,6 @@ define i32 @caller_half_in_fregs() nounwind {
; LA64F-LP64D-NEXT: addi.d $sp, $sp, -16
; LA64F-LP64D-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
; LA64F-LP64D-NEXT: lu12i.w $a0, -12
-; LA64F-LP64D-NEXT: lu32i.d $a0, 0
; LA64F-LP64D-NEXT: movgr2fr.w $fa0, $a0
; LA64F-LP64D-NEXT: ori $a0, $zero, 1
; LA64F-LP64D-NEXT: ori $a1, $zero, 2
@@ -368,7 +366,6 @@ define i32 @caller_half_in_fregs() nounwind {
; LA64D-LP64D-NEXT: addi.d $sp, $sp, -16
; LA64D-LP64D-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
; LA64D-LP64D-NEXT: lu12i.w $a0, -12
-; LA64D-LP64D-NEXT: lu32i.d $a0, 0
; LA64D-LP64D-NEXT: movgr2fr.w $fa0, $a0
; LA64D-LP64D-NEXT: ori $a0, $zero, 1
; LA64D-LP64D-NEXT: ori $a1, $zero, 2
@@ -688,32 +685,23 @@ define i32 @caller_half_in_gregs() nounwind {
; LA64S-NEXT: addi.d $sp, $sp, -16
; LA64S-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
; LA64S-NEXT: lu12i.w $a1, -12
+; LA64S-NEXT: movgr2fr.w $fa1, $a1
; LA64S-NEXT: ori $a0, $a1, 2176
+; LA64S-NEXT: lu12i.w $a2, -13
+; LA64S-NEXT: ori $a2, $a2, 3072
+; LA64S-NEXT: movgr2fr.w $fa0, $a2
; LA64S-NEXT: ori $a2, $a1, 512
-; LA64S-NEXT: ori $a3, $a1, 1024
-; LA64S-NEXT: ori $a4, $a1, 1280
-; LA64S-NEXT: ori $a5, $a1, 1536
-; LA64S-NEXT: ori $a6, $a1, 1792
-; LA64S-NEXT: ori $a7, $a1, 2048
-; LA64S-NEXT: lu32i.d $a1, 0
-; LA64S-NEXT: movgr2fr.w $fa1, $a1
-; LA64S-NEXT: lu12i.w $a1, -13
-; LA64S-NEXT: ori $a1, $a1, 3072
-; LA64S-NEXT: lu32i.d $a1, 0
-; LA64S-NEXT: movgr2fr.w $fa0, $a1
-; LA64S-NEXT: lu32i.d $a2, 0
; LA64S-NEXT: movgr2fr.w $fa2, $a2
-; LA64S-NEXT: lu32i.d $a3, 0
-; LA64S-NEXT: movgr2fr.w $fa3, $a3
-; LA64S-NEXT: lu32i.d $a4, 0
-; LA64S-NEXT: movgr2fr.w $fa4, $a4
-; LA64S-NEXT: lu32i.d $a5, 0
-; LA64S-NEXT: movgr2fr.w $fa5, $a5
-; LA64S-NEXT: lu32i.d $a0, 0
-; LA64S-NEXT: lu32i.d $a6, 0
-; LA64S-NEXT: movgr2fr.w $fa6, $a6
-; LA64S-NEXT: lu32i.d $a7, 0
-; LA64S-NEXT: movgr2fr.w $fa7, $a7
+; LA64S-NEXT: ori $a2, $a1, 1024
+; LA64S-NEXT: movgr2fr.w $fa3, $a2
+; LA64S-NEXT: ori $a2, $a1, 1280
+; LA64S-NEXT: movgr2fr.w $fa4, $a2
+; LA64S-NEXT: ori $a2, $a1, 1536
+; LA64S-NEXT: movgr2fr.w $fa5, $a2
+; LA64S-NEXT: ori $a2, $a1, 1792
+; LA64S-NEXT: movgr2fr.w $fa6, $a2
+; LA64S-NEXT: ori $a1, $a1, 2048
+; LA64S-NEXT: movgr2fr.w $fa7, $a1
; LA64S-NEXT: ori $a1, $zero, 10
; LA64S-NEXT: pcaddu18i $ra, %call36(callee_half_in_gregs)
; LA64S-NEXT: jirl $ra, $ra, 0
@@ -730,22 +718,14 @@ define i32 @caller_half_in_gregs() nounwind {
; LA64F-LP64S-NEXT: lu12i.w $a1, -12
; LA64F-LP64S-NEXT: ori $t0, $a1, 2176
; LA64F-LP64S-NEXT: lu32i.d $t0, 0
+; LA64F-LP64S-NEXT: lu12i.w $a0, -13
+; LA64F-LP64S-NEXT: ori $a0, $a0, 3072
; LA64F-LP64S-NEXT: ori $a2, $a1, 512
; LA64F-LP64S-NEXT: ori $a3, $a1, 1024
; LA64F-LP64S-NEXT: ori $a4, $a1, 1280
; LA64F-LP64S-NEXT: ori $a5, $a1, 1536
; LA64F-LP64S-NEXT: ori $a6, $a1, 1792
; LA64F-LP64S-NEXT: ori $a7, $a1, 2048
-; LA64F-LP64S-NEXT: lu32i.d $a1, 0
-; LA64F-LP64S-NEXT: lu12i.w $a0, -13
-; LA64F-LP64S-NEXT: ori $a0, $a0, 3072
-; LA64F-LP64S-NEXT: lu32i.d $a0, 0
-; LA64F-LP64S-NEXT: lu32i.d $a2, 0
-; LA64F-LP64S-NEXT: lu32i.d $a3, 0
-; LA64F-LP64S-NEXT: lu32i.d $a4, 0
-; LA64F-LP64S-NEXT: lu32i.d $a5, 0
-; LA64F-LP64S-NEXT: lu32i.d $a6, 0
-; LA64F-LP64S-NEXT: lu32i.d $a7, 0
; LA64F-LP64S-NEXT: st.w $t0, $sp, 0
; LA64F-LP64S-NEXT: pcaddu18i $ra, %call36(callee_half_in_gregs)
; LA64F-LP64S-NEXT: jirl $ra, $ra, 0
@@ -758,32 +738,23 @@ define i32 @caller_half_in_gregs() nounwind {
; LA64F-LP64D-NEXT: addi.d $sp, $sp, -16
; LA64F-LP64D-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
; LA64F-LP64D-NEXT: lu12i.w $a1, -12
+; LA64F-LP64D-NEXT: movgr2fr.w $fa1, $a1
; LA64F-LP64D-NEXT: ori $a0, $a1, 2176
+; LA64F-LP64D-NEXT: lu12i.w $a2, -13
+; LA64F-LP64D-NEXT: ori $a2, $a2, 3072
+; LA64F-LP64D-NEXT: movgr2fr.w $fa0, $a2
; LA64F-LP64D-NEXT: ori $a2, $a1, 512
-; LA64F-LP64D-NEXT: ori $a3, $a1, 1024
-; LA64F-LP64D-NEXT: ori $a4, $a1, 1280
-; LA64F-LP64D-NEXT: ori $a5, $a1, 1536
-; LA64F-LP64D-NEXT: ori $a6, $a1, 1792
-; LA64F-LP64D-NEXT: ori $a7, $a1, 2048
-; LA64F-LP64D-NEXT: lu32i.d $a1, 0
-; LA64F-LP64D-NEXT: movgr2fr.w $fa1, $a1
-; LA64F-LP64D-NEXT: lu12i.w $a1, -13
-; LA64F-LP64D-NEXT: ori $a1, $a1, 3072
-; LA64F-LP64D-NEXT: lu32i.d $a1, 0
-; LA64F-LP64D-NEXT: movgr2fr.w $fa0, $a1
-; LA64F-LP64D-NEXT: lu32i.d $a2, 0
; LA64F-LP64D-NEXT: movgr2fr.w $fa2, $a2
-; LA64F-LP64D-NEXT: lu32i.d $a3, 0
-; LA64F-LP64D-NEXT: movgr2fr.w $fa3, $a3
-; LA64F-LP64D-NEXT: lu32i.d $a4, 0
-; LA64F-LP64D-NEXT: movgr2fr.w $fa4, $a4
-; LA64F-LP64D-NEXT: lu32i.d $a5, 0
-; LA64F-LP64D-NEXT: movgr2fr.w $fa5, $a5
-; LA64F-LP64D-NEXT: lu32i.d $a0, 0
-; LA64F-LP64D-NEXT: lu32i.d $a6, 0
-; LA64F-LP64D-NEXT: movgr2fr.w $fa6, $a6
-; LA64F-LP64D-NEXT: lu32i.d $a7, 0
-; LA64F-LP64D-NEXT: movgr2fr.w $fa7, $a7
+; LA64F-LP64D-NEXT: ori $a2, $a1, 1024
+; LA64F-LP64D-NEXT: movgr2fr.w $fa3, $a2
+; LA64F-LP64D-NEXT: ori $a2, $a1, 1280
+; LA64F-LP64D-NEXT: movgr2fr.w $fa4, $a2
+; LA64F-LP64D-NEXT: ori $a2, $a1, 1536
+; LA64F-LP64D-NEXT: movgr2fr.w $fa5, $a2
+; LA64F-LP64D-NEXT: ori $a2, $a1, 1792
+; LA64F-LP64D-NEXT: movgr2fr.w $fa6, $a2
+; LA64F-LP64D-NEXT: ori $a1, $a1, 2048
+; LA64F-LP64D-NEXT: movgr2fr.w $fa7, $a1
; LA64F-LP64D-NEXT: ori $a1, $zero, 10
; LA64F-LP64D-NEXT: pcaddu18i $ra, %call36(callee_half_in_gregs)
; LA64F-LP64D-NEXT: jirl $ra, $ra, 0
@@ -800,22 +771,14 @@ define i32 @caller_half_in_gregs() nounwind {
; LA64D-LP64S-NEXT: lu12i.w $a1, -12
; LA64D-LP64S-NEXT: ori $t0, $a1, 2176
; LA64D-LP64S-NEXT: lu32i.d $t0, 0
+; LA64D-LP64S-NEXT: lu12i.w $a0, -13
+; LA64D-LP64S-NEXT: ori $a0, $a0, 3072
; LA64D-LP64S-NEXT: ori $a2, $a1, 512
; LA64D-LP64S-NEXT: ori $a3, $a1, 1024
; LA64D-LP64S-NEXT: ori $a4, $a1, 1280
; LA64D-LP64S-NEXT: ori $a5, $a1, 1536
; LA64D-LP64S-NEXT: ori $a6, $a1, 1792
; LA64D-LP64S-NEXT: ori $a7, $a1, 2048
-; LA64D-LP64S-NEXT: lu32i.d $a1, 0
-; LA64D-LP64S-NEXT: lu12i.w $a0, -13
-; LA64D-LP64S-NEXT: ori $a0, $a0, 3072
-; LA64D-LP64S-NEXT: lu32i.d $a0, 0
-; LA64D-LP64S-NEXT: lu32i.d $a2, 0
-; LA64D-LP64S-NEXT: lu32i.d $a3, 0
-; LA64D-LP64S-NEXT: lu32i.d $a4, 0
-; LA64D-LP64S-NEXT: lu32i.d $a5, 0
-; LA64D-LP64S-NEXT: lu32i.d $a6, 0
-; LA64D-LP64S-NEXT: lu32i.d $a7, 0
; LA64D-LP64S-NEXT: st.w $t0, $sp, 0
; LA64D-LP64S-NEXT: pcaddu18i $ra, %call36(callee_half_in_gregs)
; LA64D-LP64S-NEXT: jirl $ra, $ra, 0
@@ -828,32 +791,23 @@ define i32 @caller_half_in_gregs() nounwind {
; LA64D-LP64D-NEXT: addi.d $sp, $sp, -16
; LA64D-LP64D-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
; LA64D-LP64D-NEXT: lu12i.w $a1, -12
+; LA64D-LP64D-NEXT: movgr2fr.w $fa1, $a1
; LA64D-LP64D-NEXT: ori $a0, $a1, 2176
+; LA64D-LP64D-NEXT: lu12i.w $a2, -13
+; LA64D-LP64D-NEXT: ori $a2, $a2, 3072
+; LA64D-LP64D-NEXT: movgr2fr.w $fa0, $a2
; LA64D-LP64D-NEXT: ori $a2, $a1, 512
-; LA64D-LP64D-NEXT: ori $a3, $a1, 1024
-; LA64D-LP64D-NEXT: ori $a4, $a1, 1280
-; LA64D-LP64D-NEXT: ori $a5, $a1, 1536
-; LA64D-LP64D-NEXT: ori $a6, $a1, 1792
-; LA64D-LP64D-NEXT: ori $a7, $a1, 2048
-; LA64D-LP64D-NEXT: lu32i.d $a1, 0
-; LA64D-LP64D-NEXT: movgr2fr.w $fa1, $a1
-; LA64D-LP64D-NEXT: lu12i.w $a1, -13
-; LA64D-LP64D-NEXT: ori $a1, $a1, 3072
-; LA64D-LP64D-NEXT: lu32i.d $a1, 0
-; LA64D-LP64D-NEXT: movgr2fr.w $fa0, $a1
-; LA64D-LP64D-NEXT: lu32i.d $a2, 0
; LA64D-LP64D-NEXT: movgr2fr.w $fa2, $a2
-; LA64D-LP64D-NEXT: lu32i.d $a3, 0
-; LA64D-LP64D-NEXT: movgr2fr.w $fa3, $a3
-; LA64D-LP64D-NEXT: lu32i.d $a4, 0
-; LA64D-LP64D-NEXT: movgr2fr.w $fa4, $a4
-; LA64D-LP64D-NEXT: lu32i.d $a5, 0
-; LA64D-LP64D-NEXT: movgr2fr.w $fa5, $a5
-; LA64D-LP64D-NEXT: lu32i.d $a0, 0
-; LA64D-LP64D-NEXT: lu32i.d $a6, 0
-; LA64D-LP64D-NEXT: movgr2fr.w $fa6, $a6
-; LA64D-LP64D-NEXT: lu32i.d $a7, 0
-; LA64D-LP64D-NEXT: movgr2fr.w $fa7, $a7
+; LA64D-LP64D-NEXT: ori $a2, $a1, 1024
+; LA64D-LP64D-NEXT: movgr2fr.w $fa3, $a2
+; LA64D-LP64D-NEXT: ori $a2, $a1, 1280
+; LA64D-LP64D-NEXT: movgr2fr.w $fa4, $a2
+; LA64D-LP64D-NEXT: ori $a2, $a1, 1536
+; LA64D-LP64D-NEXT: movgr2fr.w $fa5, $a2
+; LA64D-LP64D-NEXT: ori $a2, $a1, 1792
+; LA64D-LP64D-NEXT: movgr2fr.w $fa6, $a2
+; LA64D-LP64D-NEXT: ori $a1, $a1, 2048
+; LA64D-LP64D-NEXT: movgr2fr.w $fa7, $a1
; LA64D-LP64D-NEXT: ori $a1, $zero, 10
; LA64D-LP64D-NEXT: pcaddu18i $ra, %call36(callee_half_in_gregs)
; LA64D-LP64D-NEXT: jirl $ra, $ra, 0
@@ -1231,28 +1185,20 @@ define i32 @caller_half_on_stack() nounwind {
; LA64S-NEXT: ori $t0, $a0, 3200
; LA64S-NEXT: lu32i.d $t0, 0
; LA64S-NEXT: ori $a1, $a0, 2304
-; LA64S-NEXT: lu32i.d $a1, 0
; LA64S-NEXT: movgr2fr.w $fa0, $a1
; LA64S-NEXT: ori $a1, $a0, 2432
-; LA64S-NEXT: lu32i.d $a1, 0
; LA64S-NEXT: movgr2fr.w $fa1, $a1
; LA64S-NEXT: ori $a1, $a0, 2560
-; LA64S-NEXT: lu32i.d $a1, 0
; LA64S-NEXT: movgr2fr.w $fa2, $a1
; LA64S-NEXT: ori $a1, $a0, 2688
-; LA64S-NEXT: lu32i.d $a1, 0
; LA64S-NEXT: movgr2fr.w $fa3, $a1
; LA64S-NEXT: ori $a1, $a0, 2816
-; LA64S-NEXT: lu32i.d $a1, 0
; LA64S-NEXT: movgr2fr.w $fa4, $a1
; LA64S-NEXT: ori $a1, $a0, 2944
-; LA64S-NEXT: lu32i.d $a1, 0
; LA64S-NEXT: movgr2fr.w $fa5, $a1
; LA64S-NEXT: ori $a1, $a0, 3072
-; LA64S-NEXT: lu32i.d $a1, 0
; LA64S-NEXT: movgr2fr.w $fa6, $a1
; LA64S-NEXT: ori $a0, $a0, 3136
-; LA64S-NEXT: lu32i.d $a0, 0
; LA64S-NEXT: movgr2fr.w $fa7, $a0
; LA64S-NEXT: ori $a0, $zero, 1
; LA64S-NEXT: ori $a1, $zero, 2
@@ -1323,28 +1269,20 @@ define i32 @caller_half_on_stack() nounwind {
; LA64F-LP64D-NEXT: ori $t0, $a0, 3200
; LA64F-LP64D-NEXT: lu32i.d $t0, 0
; LA64F-LP64D-NEXT: ori $a1, $a0, 2304
-; LA64F-LP64D-NEXT: lu32i.d $a1, 0
; LA64F-LP64D-NEXT: movgr2fr.w $fa0, $a1
; LA64F-LP64D-NEXT: ori $a1, $a0, 2432
-; LA64F-LP64D-NEXT: lu32i.d $a1, 0
; LA64F-LP64D-NEXT: movgr2fr.w $fa1, $a1
; LA64F-LP64D-NEXT: ori $a1, $a0, 2560
-; LA64F-LP64D-NEXT: lu32i.d $a1, 0
; LA64F-LP64D-NEXT: movgr2fr.w $fa2, $a1
; LA64F-LP64D-NEXT: ori $a1, $a0, 2688
-; LA64F-LP64D-NEXT: lu32i.d $a1, 0
; LA64F-LP64D-NEXT: movgr2fr.w $fa3, $a1
; LA64F-LP64D-NEXT: ori $a1, $a0, 2816
-; LA64F-LP64D-NEXT: lu32i.d $a1, 0
; LA64F-LP64D-NEXT: movgr2fr.w $fa4, $a1
; LA64F-LP64D-NEXT: ori $a1, $a0, 2944
-; LA64F-LP64D-NEXT: lu32i.d $a1, 0
; LA64F-LP64D-NEXT: movgr2fr.w $fa5, $a1
; LA64F-LP64D-NEXT: ori $a1, $a0, 3072
-; LA64F-LP64D-NEXT: lu32i.d $a1, 0
; LA64F-LP64D-NEXT: movgr2fr.w $fa6, $a1
; LA64F-LP64D-NEXT: ori $a0, $a0, 3136
-; LA64F-LP64D-NEXT: lu32i.d $a0, 0
; LA64F-LP64D-NEXT: movgr2fr.w $fa7, $a0
; LA64F-LP64D-NEXT: ori $a0, $zero, 1
; LA64F-LP64D-NEXT: ori $a1, $zero, 2
@@ -1415,28 +1353,20 @@ define i32 @caller_half_on_stack() nounwind {
; LA64D-LP64D-NEXT: ori $t0, $a0, 3200
; LA64D-LP64D-NEXT: lu32i.d $t0, 0
; LA64D-LP64D-NEXT: ori $a1, $a0, 2304
-; LA64D-LP64D-NEXT: lu32i.d $a1, 0
; LA64D-LP64D-NEXT: movgr2fr.w $fa0, $a1
; LA64D-LP64D-NEXT: ori $a1, $a0, 2432
-; LA64D-LP64D-NEXT: lu32i.d $a1, 0
; LA64D-LP64D-NEXT: movgr2fr.w $fa1, $a1
; LA64D-LP64D-NEXT: ori $a1, $a0, 2560
-; LA64D-LP64D-NEXT: lu32i.d $a1, 0
; LA64D-LP64D-NEXT: movgr2fr.w $fa2, $a1
; LA64D-LP64D-NEXT: ori $a1, $a0, 2688
-; LA64D-LP64D-NEXT: lu32i.d $a1, 0
; LA64D-LP64D-NEXT: movgr2fr.w $fa3, $a1
; LA64D-LP64D-NEXT: ori $a1, $a0, 2816
-; LA64D-LP64D-NEXT: lu32i.d $a1, 0
; LA64D-LP64D-NEXT: movgr2fr.w $fa4, $a1
; LA64D-LP64D-NEXT: ori $a1, $a0, 2944
-; LA64D-LP64D-NEXT: lu32i.d $a1, 0
; LA64D-LP64D-NEXT: movgr2fr.w $fa5, $a1
; LA64D-LP64D-NEXT: ori $a1, $a0, 3072
-; LA64D-LP64D-NEXT: lu32i.d $a1, 0
; LA64D-LP64D-NEXT: movgr2fr.w $fa6, $a1
; LA64D-LP64D-NEXT: ori $a0, $a0, 3136
-; LA64D-LP64D-NEXT: lu32i.d $a0, 0
; LA64D-LP64D-NEXT: movgr2fr.w $fa7, $a0
; LA64D-LP64D-NEXT: ori $a0, $zero, 1
; LA64D-LP64D-NEXT: ori $a1, $zero, 2
@@ -1493,7 +1423,6 @@ define half @callee_half_ret() nounwind {
; LA64S: # %bb.0:
; LA64S-NEXT: lu12i.w $a0, -13
; LA64S-NEXT: ori $a0, $a0, 3072
-; LA64S-NEXT: lu32i.d $a0, 0
; LA64S-NEXT: movgr2fr.w $fa0, $a0
; LA64S-NEXT: ret
;
@@ -1501,14 +1430,12 @@ define half @callee_half_ret() nounwind {
; LA64F-LP64S: # %bb.0:
; LA64F-LP64S-NEXT: lu12i.w $a0, -13
; LA64F-LP64S-NEXT: ori $a0, $a0, 3072
-; LA64F-LP64S-NEXT: lu32i.d $a0, 0
; LA64F-LP64S-NEXT: ret
;
; LA64F-LP64D-LABEL: callee_half_ret:
; LA64F-LP64D: # %bb.0:
; LA64F-LP64D-NEXT: lu12i.w $a0, -13
; LA64F-LP64D-NEXT: ori $a0, $a0, 3072
-; LA64F-LP64D-NEXT: lu32i.d $a0, 0
; LA64F-LP64D-NEXT: movgr2fr.w $fa0, $a0
; LA64F-LP64D-NEXT: ret
;
@@ -1516,14 +1443,12 @@ define half @callee_half_ret() nounwind {
; LA64D-LP64S: # %bb.0:
; LA64D-LP64S-NEXT: lu12i.w $a0, -13
; LA64D-LP64S-NEXT: ori $a0, $a0, 3072
-; LA64D-LP64S-NEXT: lu32i.d $a0, 0
; LA64D-LP64S-NEXT: ret
;
; LA64D-LP64D-LABEL: callee_half_ret:
; LA64D-LP64D: # %bb.0:
; LA64D-LP64D-NEXT: lu12i.w $a0, -13
; LA64D-LP64D-NEXT: ori $a0, $a0, 3072
-; LA64D-LP64D-NEXT: lu32i.d $a0, 0
; LA64D-LP64D-NEXT: movgr2fr.w $fa0, $a0
; LA64D-LP64D-NEXT: ret
ret half 1.0
diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/atomicrmw-fp.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/atomicrmw-fp.ll
index a6e3f790943aa..0d0fb213539ab 100644
--- a/llvm/test/CodeGen/LoongArch/ir-instruction/atomicrmw-fp.ll
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/atomicrmw-fp.ll
@@ -76,7 +76,6 @@ define float @float_fsub_acquire(ptr %p) nounwind {
; LA64F: # %bb.0:
; LA64F-NEXT: fld.s $fa0, $a0, 0
; LA64F-NEXT: lu12i.w $a1, -264192
-; LA64F-NEXT: lu32i.d $a1, 0
; LA64F-NEXT: movgr2fr.w $fa1, $a1
; LA64F-NEXT: .p2align 4, , 16
; LA64F-NEXT: .LBB1_1: # %atomicrmw.start
@@ -641,7 +640,6 @@ define float @float_fsub_release(ptr %p) nounwind {
; LA64F: # %bb.0:
; LA64F-NEXT: fld.s $fa0, $a0, 0
; LA64F-NEXT: lu12i.w $a1, -264192
-; LA64F-NEXT: lu32i.d $a1, 0
; LA64F-NEXT: movgr2fr.w $fa1, $a1
; LA64F-NEXT: .p2align 4, , 16
; LA64F-NEXT: .LBB9_1: # %atomicrmw.start
@@ -1206,7 +1204,6 @@ define float @float_fsub_acq_rel(ptr %p) nounwind {
; LA64F: # %bb.0:
; LA64F-NEXT: fld.s $fa0, $a0, 0
; LA64F-NEXT: lu12i.w $a1, -264192
-; LA64F-NEXT: lu32i.d $a1, 0
; LA64F-NEXT: movgr2fr.w $fa1, $a1
; LA64F-NEXT: .p2align 4, , 16
; LA64F-NEXT: .LBB17_1: # %atomicrmw.start
@@ -1771,7 +1768,6 @@ define float @float_fsub_seq_cst(ptr %p) nounwind {
; LA64F: # %bb.0:
; LA64F-NEXT: fld.s $fa0, $a0, 0
; LA64F-NEXT: lu12i.w $a1, -264192
-; LA64F-NEXT: lu32i.d $a1, 0
; LA64F-NEXT: movgr2fr.w $fa1, $a1
; LA64F-NEXT: .p2align 4, , 16
; LA64F-NEXT: .LBB25_1: # %atomicrmw.start
@@ -2336,7 +2332,6 @@ define float @float_fsub_monotonic(ptr %p) nounwind {
; LA64F: # %bb.0:
; LA64F-NEXT: fld.s $fa0, $a0, 0
; LA64F-NEXT: lu12i.w $a1, -264192
-; LA64F-NEXT: lu32i.d $a1, 0
; LA64F-NEXT: movgr2fr.w $fa1, $a1
; LA64F-NEXT: .p2align 4, , 16
; LA64F-NEXT: .LBB33_1: # %atomicrmw.start