[llvm] 3ad4f92 - [DAG] More aggressively fold (extract_vector_elt (build_vector x, y), c) iff the element is a zero constant

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Tue Jul 18 09:32:01 PDT 2023


Author: Simon Pilgrim
Date: 2023-07-18T17:31:34+01:00
New Revision: 3ad4f92f83b7a6a79957c9413dfa3a2667c236a6

URL: https://github.com/llvm/llvm-project/commit/3ad4f92f83b7a6a79957c9413dfa3a2667c236a6
DIFF: https://github.com/llvm/llvm-project/commit/3ad4f92f83b7a6a79957c9413dfa3a2667c236a6.diff

LOG: [DAG] More aggressively fold (extract_vector_elt (build_vector x, y), c) iff the element is a zero constant

We currently don't extract vector elements from multi-use build vectors unless TLI.aggressivelyPreferBuildVectorSources accepts them, which seems a little extreme for constant build vectors (especially as in some cases ComputeKnownBits will indirectly extract the data for us anyway).

This is causing a few regressions in some upcoming SimplifyDemandedBits work I'm looking at, all of which just need to know that the extracted element is zero, so I've tweaked the fold to also accept zero-constant elements, which typically fold away very easily.
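To make the new gating concrete, here is a rough standalone sketch of the condition after this change. It is not the LLVM API: the BuildVector struct, Lane alias and foldExtractElt helper are invented for illustration, and the real logic lives in DAGCombiner::visitEXTRACT_VECTOR_ELT as shown in the diff below.

    #include <cstdint>
    #include <optional>
    #include <vector>

    // Hypothetical model: a "build vector" is a list of known scalar lanes
    // (std::nullopt stands for a non-constant lane) plus a use count.
    using Lane = std::optional<int64_t>;

    struct BuildVector {
      std::vector<Lane> Lanes;
      unsigned NumUses;
    };

    // Mirrors the relaxed guard: fold extract_vector_elt (build_vector ...), Idx
    // to the scalar lane if the node has a single use, if the target aggressively
    // prefers build_vector sources, or if the extracted lane is the constant zero
    // (the isNullConstant(Elt) case added here). The real code additionally
    // requires the lane's scalar type to match the result type.
    std::optional<int64_t>
    foldExtractElt(const BuildVector &Vec, unsigned Idx,
                   bool AggressivelyPreferBuildVectorSources) {
      if (Idx >= Vec.Lanes.size())
        return std::nullopt;               // out-of-range index: leave the node alone
      const Lane &Elt = Vec.Lanes[Idx];
      bool IsZeroConst = Elt && *Elt == 0;
      if (Vec.NumUses == 1 || AggressivelyPreferBuildVectorSources || IsZeroConst)
        return Elt;                        // extract folds to the scalar lane
      return std::nullopt;                 // multi-use, non-zero lane: keep it
    }

    // e.g. for BuildVector V{{6, 0}, /*NumUses=*/2}:
    //   foldExtractElt(V, 1, false) -> 0            (zero lane now folds)
    //   foldExtractElt(V, 0, false) -> std::nullopt (still blocked as before)

Since the folded value is just the constant zero, downstream combines usually simplify it away for free, which is what the zero stores and xzr/zero-register moves in the test diffs below reflect.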

Differential Revision: https://reviews.llvm.org/D155582

Added: 
    

Modified: 
    llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
    llvm/test/CodeGen/AArch64/aarch64-neon-vector-insert-uaddlv.ll
    llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-ext-loads.ll
    llvm/test/CodeGen/Mips/cconv/vector.ll
    llvm/test/CodeGen/SPARC/float-constants.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 579af998083f35..9f222268fc1277 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -21702,8 +21702,7 @@ SDValue DAGCombiner::visitEXTRACT_VECTOR_ELT(SDNode *N) {
   // extract_vector_elt (build_vector x, y), 1 -> y
   if (((IndexC && VecOp.getOpcode() == ISD::BUILD_VECTOR) ||
        VecOp.getOpcode() == ISD::SPLAT_VECTOR) &&
-      TLI.isTypeLegal(VecVT) &&
-      (VecOp.hasOneUse() || TLI.aggressivelyPreferBuildVectorSources(VecVT))) {
+      TLI.isTypeLegal(VecVT)) {
     assert((VecOp.getOpcode() != ISD::BUILD_VECTOR ||
             VecVT.isFixedLengthVector()) &&
            "BUILD_VECTOR used for scalable vectors");
@@ -21712,12 +21711,15 @@ SDValue DAGCombiner::visitEXTRACT_VECTOR_ELT(SDNode *N) {
     SDValue Elt = VecOp.getOperand(IndexVal);
     EVT InEltVT = Elt.getValueType();
 
-    // Sometimes build_vector's scalar input types do not match result type.
-    if (ScalarVT == InEltVT)
-      return Elt;
+    if (VecOp.hasOneUse() || TLI.aggressivelyPreferBuildVectorSources(VecVT) ||
+        isNullConstant(Elt)) {
+      // Sometimes build_vector's scalar input types do not match result type.
+      if (ScalarVT == InEltVT)
+        return Elt;
 
-    // TODO: It may be useful to truncate if free if the build_vector implicitly
-    // converts.
+      // TODO: It may be useful to truncate if free if the build_vector
+      // implicitly converts.
+    }
   }
 
   if (SDValue BO = scalarizeExtractedBinop(N, DAG, LegalOperations))

diff --git a/llvm/test/CodeGen/AArch64/aarch64-neon-vector-insert-uaddlv.ll b/llvm/test/CodeGen/AArch64/aarch64-neon-vector-insert-uaddlv.ll
index 2f5c89a4b04200..5d4df94807d063 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-neon-vector-insert-uaddlv.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-neon-vector-insert-uaddlv.ll
@@ -277,8 +277,7 @@ define void @insert_vec_v16i8_uaddlv_from_v8i8(ptr %0) {
 ; CHECK-NEXT:    movi.2d v1, #0000000000000000
 ; CHECK-NEXT:    uaddlv.8b h2, v0
 ; CHECK-NEXT:    stp q0, q0, [x0, #32]
-; CHECK-NEXT:    mov.b v1[0], v2[0]
-; CHECK-NEXT:    zip1.8b v1, v1, v0
+; CHECK-NEXT:    mov.h v1[0], v2[0]
 ; CHECK-NEXT:    bic.4h v1, #255, lsl #8
 ; CHECK-NEXT:    ushll.4s v1, v1, #0
 ; CHECK-NEXT:    ucvtf.4s v1, v1
@@ -452,14 +451,13 @@ define void @insert_vec_v16i8_uaddlv_from_v4i32(ptr %0) {
 ; CHECK-NEXT:    movi.2d v0, #0000000000000000
 ; CHECK-NEXT:    movi.2d v1, #0000000000000000
 ; CHECK-NEXT:    uaddlv.4s d0, v0
-; CHECK-NEXT:    mov.b v1[0], v0[0]
-; CHECK-NEXT:    zip1.8b v0, v1, v0
-; CHECK-NEXT:    movi.2d v1, #0000000000000000
-; CHECK-NEXT:    bic.4h v0, #255, lsl #8
-; CHECK-NEXT:    ushll.4s v0, v0, #0
-; CHECK-NEXT:    stp q1, q1, [x0, #32]
-; CHECK-NEXT:    ucvtf.4s v0, v0
-; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    mov.h v1[0], v0[0]
+; CHECK-NEXT:    movi.2d v0, #0000000000000000
+; CHECK-NEXT:    bic.4h v1, #255, lsl #8
+; CHECK-NEXT:    ushll.4s v1, v1, #0
+; CHECK-NEXT:    stp q0, q0, [x0, #32]
+; CHECK-NEXT:    ucvtf.4s v1, v1
+; CHECK-NEXT:    stp q1, q0, [x0]
 ; CHECK-NEXT:    ret
 
 entry:

diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-ext-loads.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-ext-loads.ll
index cefe83639e1486..cc0d76dcbbf43b 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-ext-loads.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-ext-loads.ll
@@ -42,18 +42,16 @@ define <2 x i64> @load_zext_v2i32i64(ptr %ap) #0 {
 define <2 x i256> @load_zext_v2i64i256(ptr %ap) #0 {
 ; CHECK-LABEL: load_zext_v2i64i256:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov z0.d, #0 // =0x0
-; CHECK-NEXT:    ldr q1, [x0]
-; CHECK-NEXT:    mov z2.d, z0.d[1]
-; CHECK-NEXT:    fmov x2, d0
-; CHECK-NEXT:    fmov x3, d2
+; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    mov x1, xzr
-; CHECK-NEXT:    mov z0.d, z1.d[1]
-; CHECK-NEXT:    fmov x0, d1
-; CHECK-NEXT:    fmov x4, d0
+; CHECK-NEXT:    mov x2, xzr
+; CHECK-NEXT:    mov x3, xzr
 ; CHECK-NEXT:    mov x5, xzr
-; CHECK-NEXT:    mov x6, x2
-; CHECK-NEXT:    mov x7, x3
+; CHECK-NEXT:    mov x6, xzr
+; CHECK-NEXT:    mov z1.d, z0.d[1]
+; CHECK-NEXT:    fmov x0, d0
+; CHECK-NEXT:    fmov x4, d1
+; CHECK-NEXT:    mov x7, xzr
 ; CHECK-NEXT:    ret
   %a = load <2 x i64>, ptr %ap
   %val = zext <2 x i64> %a to <2 x i256>

diff --git a/llvm/test/CodeGen/Mips/cconv/vector.ll b/llvm/test/CodeGen/Mips/cconv/vector.ll
index 6a56c861cdd378..f027c540fe304b 100644
--- a/llvm/test/CodeGen/Mips/cconv/vector.ll
+++ b/llvm/test/CodeGen/Mips/cconv/vector.ll
@@ -5118,43 +5118,41 @@ define void @calli64_2() {
 ; MIPS64-NEXT:    jr $ra
 ; MIPS64-NEXT:    nop
 ;
-; MIPS32R5-LABEL: calli64_2:
-; MIPS32R5:       # %bb.0: # %entry
-; MIPS32R5-NEXT:    addiu $sp, $sp, -40
-; MIPS32R5-NEXT:    .cfi_def_cfa_offset 40
-; MIPS32R5-NEXT:    sw $ra, 36($sp) # 4-byte Folded Spill
-; MIPS32R5-NEXT:    .cfi_offset 31, -4
-; MIPS32R5-NEXT:    lui $1, %hi($CPI36_0)
-; MIPS32R5-NEXT:    addiu $1, $1, %lo($CPI36_0)
-; MIPS32R5-NEXT:    ld.w $w0, 0($1)
-; MIPS32R5-NEXT:    copy_s.w $4, $w0[0]
-; MIPS32R5-NEXT:    copy_s.w $5, $w0[1]
-; MIPS32R5-NEXT:    copy_s.w $6, $w0[2]
-; MIPS32R5-NEXT:    copy_s.w $7, $w0[3]
-; MIPS32R5-NEXT:    lui $1, %hi($CPI36_1)
-; MIPS32R5-NEXT:    addiu $1, $1, %lo($CPI36_1)
-; MIPS32R5-NEXT:    ld.w $w0, 0($1)
-; MIPS32R5-NEXT:    copy_s.w $1, $w0[0]
-; MIPS32R5-NEXT:    copy_s.w $2, $w0[1]
-; MIPS32R5-NEXT:    copy_s.w $3, $w0[2]
-; MIPS32R5-NEXT:    copy_s.w $8, $w0[3]
-; MIPS32R5-NEXT:    sw $8, 28($sp)
-; MIPS32R5-NEXT:    sw $3, 24($sp)
-; MIPS32R5-NEXT:    sw $2, 20($sp)
-; MIPS32R5-NEXT:    sw $1, 16($sp)
-; MIPS32R5-NEXT:    jal i64_2
-; MIPS32R5-NEXT:    nop
-; MIPS32R5-NEXT:    lui $1, %hi(gv2i64)
-; MIPS32R5-NEXT:    insert.w $w0[0], $2
-; MIPS32R5-NEXT:    insert.w $w0[1], $3
-; MIPS32R5-NEXT:    addiu $1, $1, %lo(gv2i64)
-; MIPS32R5-NEXT:    insert.w $w0[2], $4
-; MIPS32R5-NEXT:    insert.w $w0[3], $5
-; MIPS32R5-NEXT:    st.w $w0, 0($1)
-; MIPS32R5-NEXT:    lw $ra, 36($sp) # 4-byte Folded Reload
-; MIPS32R5-NEXT:    addiu $sp, $sp, 40
-; MIPS32R5-NEXT:    jr $ra
-; MIPS32R5-NEXT:    nop
+; MIPS32R5EB-LABEL: calli64_2:
+; MIPS32R5EB:       # %bb.0: # %entry
+; MIPS32R5EB-NEXT:    addiu $sp, $sp, -40
+; MIPS32R5EB-NEXT:    .cfi_def_cfa_offset 40
+; MIPS32R5EB-NEXT:    sw $ra, 36($sp) # 4-byte Folded Spill
+; MIPS32R5EB-NEXT:    .cfi_offset 31, -4
+; MIPS32R5EB-NEXT:    lui $1, %hi($CPI36_0)
+; MIPS32R5EB-NEXT:    addiu $1, $1, %lo($CPI36_0)
+; MIPS32R5EB-NEXT:    ld.w $w0, 0($1)
+; MIPS32R5EB-NEXT:    copy_s.w $5, $w0[1]
+; MIPS32R5EB-NEXT:    copy_s.w $7, $w0[3]
+; MIPS32R5EB-NEXT:    lui $1, %hi($CPI36_1)
+; MIPS32R5EB-NEXT:    addiu $1, $1, %lo($CPI36_1)
+; MIPS32R5EB-NEXT:    ld.w $w0, 0($1)
+; MIPS32R5EB-NEXT:    copy_s.w $1, $w0[1]
+; MIPS32R5EB-NEXT:    copy_s.w $2, $w0[3]
+; MIPS32R5EB-NEXT:    sw $2, 28($sp)
+; MIPS32R5EB-NEXT:    sw $1, 20($sp)
+; MIPS32R5EB-NEXT:    sw $zero, 24($sp)
+; MIPS32R5EB-NEXT:    sw $zero, 16($sp)
+; MIPS32R5EB-NEXT:    addiu $4, $zero, 0
+; MIPS32R5EB-NEXT:    addiu $6, $zero, 0
+; MIPS32R5EB-NEXT:    jal i64_2
+; MIPS32R5EB-NEXT:    nop
+; MIPS32R5EB-NEXT:    lui $1, %hi(gv2i64)
+; MIPS32R5EB-NEXT:    insert.w $w0[0], $2
+; MIPS32R5EB-NEXT:    insert.w $w0[1], $3
+; MIPS32R5EB-NEXT:    addiu $1, $1, %lo(gv2i64)
+; MIPS32R5EB-NEXT:    insert.w $w0[2], $4
+; MIPS32R5EB-NEXT:    insert.w $w0[3], $5
+; MIPS32R5EB-NEXT:    st.w $w0, 0($1)
+; MIPS32R5EB-NEXT:    lw $ra, 36($sp) # 4-byte Folded Reload
+; MIPS32R5EB-NEXT:    addiu $sp, $sp, 40
+; MIPS32R5EB-NEXT:    jr $ra
+; MIPS32R5EB-NEXT:    nop
 ;
 ; MIPS64R5-LABEL: calli64_2:
 ; MIPS64R5:       # %bb.0: # %entry
@@ -5212,6 +5210,42 @@ define void @calli64_2() {
 ; MIPS32EL-NEXT:    addiu $sp, $sp, 40
 ; MIPS32EL-NEXT:    jr $ra
 ; MIPS32EL-NEXT:    nop
+;
+; MIPS32R5EL-LABEL: calli64_2:
+; MIPS32R5EL:       # %bb.0: # %entry
+; MIPS32R5EL-NEXT:    addiu $sp, $sp, -40
+; MIPS32R5EL-NEXT:    .cfi_def_cfa_offset 40
+; MIPS32R5EL-NEXT:    sw $ra, 36($sp) # 4-byte Folded Spill
+; MIPS32R5EL-NEXT:    .cfi_offset 31, -4
+; MIPS32R5EL-NEXT:    lui $1, %hi($CPI36_0)
+; MIPS32R5EL-NEXT:    addiu $1, $1, %lo($CPI36_0)
+; MIPS32R5EL-NEXT:    ld.w $w0, 0($1)
+; MIPS32R5EL-NEXT:    copy_s.w $4, $w0[0]
+; MIPS32R5EL-NEXT:    copy_s.w $6, $w0[2]
+; MIPS32R5EL-NEXT:    lui $1, %hi($CPI36_1)
+; MIPS32R5EL-NEXT:    addiu $1, $1, %lo($CPI36_1)
+; MIPS32R5EL-NEXT:    ld.w $w0, 0($1)
+; MIPS32R5EL-NEXT:    copy_s.w $1, $w0[0]
+; MIPS32R5EL-NEXT:    copy_s.w $2, $w0[2]
+; MIPS32R5EL-NEXT:    sw $2, 24($sp)
+; MIPS32R5EL-NEXT:    sw $1, 16($sp)
+; MIPS32R5EL-NEXT:    sw $zero, 28($sp)
+; MIPS32R5EL-NEXT:    sw $zero, 20($sp)
+; MIPS32R5EL-NEXT:    addiu $5, $zero, 0
+; MIPS32R5EL-NEXT:    addiu $7, $zero, 0
+; MIPS32R5EL-NEXT:    jal i64_2
+; MIPS32R5EL-NEXT:    nop
+; MIPS32R5EL-NEXT:    lui $1, %hi(gv2i64)
+; MIPS32R5EL-NEXT:    insert.w $w0[0], $2
+; MIPS32R5EL-NEXT:    insert.w $w0[1], $3
+; MIPS32R5EL-NEXT:    addiu $1, $1, %lo(gv2i64)
+; MIPS32R5EL-NEXT:    insert.w $w0[2], $4
+; MIPS32R5EL-NEXT:    insert.w $w0[3], $5
+; MIPS32R5EL-NEXT:    st.w $w0, 0($1)
+; MIPS32R5EL-NEXT:    lw $ra, 36($sp) # 4-byte Folded Reload
+; MIPS32R5EL-NEXT:    addiu $sp, $sp, 40
+; MIPS32R5EL-NEXT:    jr $ra
+; MIPS32R5EL-NEXT:    nop
 entry:
   %0 = call <2 x i64> @i64_2(<2 x i64> <i64 6, i64 7>, <2 x i64> <i64 12, i64 8>)
   store <2 x i64> %0, ptr @gv2i64
@@ -5465,7 +5499,6 @@ define void @callfloat_4() {
 ; MIPS32R5-NEXT:    lui $1, %hi($CPI38_0)
 ; MIPS32R5-NEXT:    addiu $1, $1, %lo($CPI38_0)
 ; MIPS32R5-NEXT:    ld.w $w0, 0($1)
-; MIPS32R5-NEXT:    copy_s.w $6, $w0[0]
 ; MIPS32R5-NEXT:    copy_s.w $7, $w0[1]
 ; MIPS32R5-NEXT:    copy_s.w $1, $w0[2]
 ; MIPS32R5-NEXT:    copy_s.w $2, $w0[3]
@@ -5475,14 +5508,15 @@ define void @callfloat_4() {
 ; MIPS32R5-NEXT:    copy_s.w $3, $w0[0]
 ; MIPS32R5-NEXT:    copy_s.w $4, $w0[1]
 ; MIPS32R5-NEXT:    copy_s.w $5, $w0[2]
-; MIPS32R5-NEXT:    copy_s.w $8, $w0[3]
-; MIPS32R5-NEXT:    sw $8, 36($sp)
+; MIPS32R5-NEXT:    copy_s.w $6, $w0[3]
+; MIPS32R5-NEXT:    sw $6, 36($sp)
 ; MIPS32R5-NEXT:    sw $5, 32($sp)
 ; MIPS32R5-NEXT:    sw $4, 28($sp)
 ; MIPS32R5-NEXT:    sw $3, 24($sp)
 ; MIPS32R5-NEXT:    sw $2, 20($sp)
 ; MIPS32R5-NEXT:    sw $1, 16($sp)
 ; MIPS32R5-NEXT:    addiu $4, $sp, 48
+; MIPS32R5-NEXT:    addiu $6, $zero, 0
 ; MIPS32R5-NEXT:    jal float4_extern
 ; MIPS32R5-NEXT:    nop
 ; MIPS32R5-NEXT:    lui $1, %hi(gv4f32)
@@ -5642,51 +5676,48 @@ define void @calldouble_2() {
 ; MIPS64-NEXT:    jr $ra
 ; MIPS64-NEXT:    nop
 ;
-; MIPS32R5-LABEL: calldouble_2:
-; MIPS32R5:       # %bb.0: # %entry
-; MIPS32R5-NEXT:    addiu $sp, $sp, -80
-; MIPS32R5-NEXT:    .cfi_def_cfa_offset 80
-; MIPS32R5-NEXT:    sw $ra, 76($sp) # 4-byte Folded Spill
-; MIPS32R5-NEXT:    sw $fp, 72($sp) # 4-byte Folded Spill
-; MIPS32R5-NEXT:    .cfi_offset 31, -4
-; MIPS32R5-NEXT:    .cfi_offset 30, -8
-; MIPS32R5-NEXT:    move $fp, $sp
-; MIPS32R5-NEXT:    .cfi_def_cfa_register 30
-; MIPS32R5-NEXT:    addiu $1, $zero, -16
-; MIPS32R5-NEXT:    and $sp, $sp, $1
-; MIPS32R5-NEXT:    lui $1, %hi($CPI39_0)
-; MIPS32R5-NEXT:    addiu $1, $1, %lo($CPI39_0)
-; MIPS32R5-NEXT:    ld.w $w0, 0($1)
-; MIPS32R5-NEXT:    copy_s.w $6, $w0[0]
-; MIPS32R5-NEXT:    copy_s.w $7, $w0[1]
-; MIPS32R5-NEXT:    copy_s.w $1, $w0[2]
-; MIPS32R5-NEXT:    copy_s.w $2, $w0[3]
-; MIPS32R5-NEXT:    lui $3, %hi($CPI39_1)
-; MIPS32R5-NEXT:    addiu $3, $3, %lo($CPI39_1)
-; MIPS32R5-NEXT:    ld.w $w0, 0($3)
-; MIPS32R5-NEXT:    copy_s.w $3, $w0[0]
-; MIPS32R5-NEXT:    copy_s.w $4, $w0[1]
-; MIPS32R5-NEXT:    copy_s.w $5, $w0[2]
-; MIPS32R5-NEXT:    copy_s.w $8, $w0[3]
-; MIPS32R5-NEXT:    sw $8, 36($sp)
-; MIPS32R5-NEXT:    sw $5, 32($sp)
-; MIPS32R5-NEXT:    sw $4, 28($sp)
-; MIPS32R5-NEXT:    sw $3, 24($sp)
-; MIPS32R5-NEXT:    sw $2, 20($sp)
-; MIPS32R5-NEXT:    sw $1, 16($sp)
-; MIPS32R5-NEXT:    addiu $4, $sp, 48
-; MIPS32R5-NEXT:    jal double2_extern
-; MIPS32R5-NEXT:    nop
-; MIPS32R5-NEXT:    lui $1, %hi(gv2f64)
-; MIPS32R5-NEXT:    addiu $1, $1, %lo(gv2f64)
-; MIPS32R5-NEXT:    ld.d $w0, 48($sp)
-; MIPS32R5-NEXT:    st.d $w0, 0($1)
-; MIPS32R5-NEXT:    move $sp, $fp
-; MIPS32R5-NEXT:    lw $fp, 72($sp) # 4-byte Folded Reload
-; MIPS32R5-NEXT:    lw $ra, 76($sp) # 4-byte Folded Reload
-; MIPS32R5-NEXT:    addiu $sp, $sp, 80
-; MIPS32R5-NEXT:    jr $ra
-; MIPS32R5-NEXT:    nop
+; MIPS32R5EB-LABEL: calldouble_2:
+; MIPS32R5EB:       # %bb.0: # %entry
+; MIPS32R5EB-NEXT:    addiu $sp, $sp, -80
+; MIPS32R5EB-NEXT:    .cfi_def_cfa_offset 80
+; MIPS32R5EB-NEXT:    sw $ra, 76($sp) # 4-byte Folded Spill
+; MIPS32R5EB-NEXT:    sw $fp, 72($sp) # 4-byte Folded Spill
+; MIPS32R5EB-NEXT:    .cfi_offset 31, -4
+; MIPS32R5EB-NEXT:    .cfi_offset 30, -8
+; MIPS32R5EB-NEXT:    move $fp, $sp
+; MIPS32R5EB-NEXT:    .cfi_def_cfa_register 30
+; MIPS32R5EB-NEXT:    addiu $1, $zero, -16
+; MIPS32R5EB-NEXT:    and $sp, $sp, $1
+; MIPS32R5EB-NEXT:    lui $1, %hi($CPI39_0)
+; MIPS32R5EB-NEXT:    addiu $1, $1, %lo($CPI39_0)
+; MIPS32R5EB-NEXT:    ld.w $w0, 0($1)
+; MIPS32R5EB-NEXT:    copy_s.w $1, $w0[2]
+; MIPS32R5EB-NEXT:    lui $2, %hi($CPI39_1)
+; MIPS32R5EB-NEXT:    addiu $2, $2, %lo($CPI39_1)
+; MIPS32R5EB-NEXT:    ld.w $w0, 0($2)
+; MIPS32R5EB-NEXT:    copy_s.w $2, $w0[0]
+; MIPS32R5EB-NEXT:    copy_s.w $3, $w0[2]
+; MIPS32R5EB-NEXT:    sw $3, 32($sp)
+; MIPS32R5EB-NEXT:    sw $2, 24($sp)
+; MIPS32R5EB-NEXT:    sw $1, 16($sp)
+; MIPS32R5EB-NEXT:    sw $zero, 36($sp)
+; MIPS32R5EB-NEXT:    sw $zero, 28($sp)
+; MIPS32R5EB-NEXT:    sw $zero, 20($sp)
+; MIPS32R5EB-NEXT:    addiu $4, $sp, 48
+; MIPS32R5EB-NEXT:    addiu $6, $zero, 0
+; MIPS32R5EB-NEXT:    addiu $7, $zero, 0
+; MIPS32R5EB-NEXT:    jal double2_extern
+; MIPS32R5EB-NEXT:    nop
+; MIPS32R5EB-NEXT:    lui $1, %hi(gv2f64)
+; MIPS32R5EB-NEXT:    addiu $1, $1, %lo(gv2f64)
+; MIPS32R5EB-NEXT:    ld.d $w0, 48($sp)
+; MIPS32R5EB-NEXT:    st.d $w0, 0($1)
+; MIPS32R5EB-NEXT:    move $sp, $fp
+; MIPS32R5EB-NEXT:    lw $fp, 72($sp) # 4-byte Folded Reload
+; MIPS32R5EB-NEXT:    lw $ra, 76($sp) # 4-byte Folded Reload
+; MIPS32R5EB-NEXT:    addiu $sp, $sp, 80
+; MIPS32R5EB-NEXT:    jr $ra
+; MIPS32R5EB-NEXT:    nop
 ;
 ; MIPS64R5-LABEL: calldouble_2:
 ; MIPS64R5:       # %bb.0: # %entry
@@ -5702,7 +5733,6 @@ define void @calldouble_2() {
 ; MIPS64R5-NEXT:    ld $1, %got_page(.LCPI39_0)($gp)
 ; MIPS64R5-NEXT:    daddiu $1, $1, %got_ofst(.LCPI39_0)
 ; MIPS64R5-NEXT:    ld.d $w0, 0($1)
-; MIPS64R5-NEXT:    copy_s.d $4, $w0[0]
 ; MIPS64R5-NEXT:    copy_s.d $5, $w0[1]
 ; MIPS64R5-NEXT:    ld $1, %got_page(.LCPI39_1)($gp)
 ; MIPS64R5-NEXT:    daddiu $1, $1, %got_ofst(.LCPI39_1)
@@ -5710,6 +5740,7 @@ define void @calldouble_2() {
 ; MIPS64R5-NEXT:    copy_s.d $6, $w0[0]
 ; MIPS64R5-NEXT:    copy_s.d $7, $w0[1]
 ; MIPS64R5-NEXT:    ld $25, %call16(double2_extern)($gp)
+; MIPS64R5-NEXT:    daddiu $4, $zero, 0
 ; MIPS64R5-NEXT:    jalr $25
 ; MIPS64R5-NEXT:    nop
 ; MIPS64R5-NEXT:    insert.d $w0[0], $2
@@ -5760,6 +5791,49 @@ define void @calldouble_2() {
 ; MIPS32EL-NEXT:    addiu $sp, $sp, 80
 ; MIPS32EL-NEXT:    jr $ra
 ; MIPS32EL-NEXT:    nop
+;
+; MIPS32R5EL-LABEL: calldouble_2:
+; MIPS32R5EL:       # %bb.0: # %entry
+; MIPS32R5EL-NEXT:    addiu $sp, $sp, -80
+; MIPS32R5EL-NEXT:    .cfi_def_cfa_offset 80
+; MIPS32R5EL-NEXT:    sw $ra, 76($sp) # 4-byte Folded Spill
+; MIPS32R5EL-NEXT:    sw $fp, 72($sp) # 4-byte Folded Spill
+; MIPS32R5EL-NEXT:    .cfi_offset 31, -4
+; MIPS32R5EL-NEXT:    .cfi_offset 30, -8
+; MIPS32R5EL-NEXT:    move $fp, $sp
+; MIPS32R5EL-NEXT:    .cfi_def_cfa_register 30
+; MIPS32R5EL-NEXT:    addiu $1, $zero, -16
+; MIPS32R5EL-NEXT:    and $sp, $sp, $1
+; MIPS32R5EL-NEXT:    lui $1, %hi($CPI39_0)
+; MIPS32R5EL-NEXT:    addiu $1, $1, %lo($CPI39_0)
+; MIPS32R5EL-NEXT:    ld.w $w0, 0($1)
+; MIPS32R5EL-NEXT:    copy_s.w $1, $w0[3]
+; MIPS32R5EL-NEXT:    lui $2, %hi($CPI39_1)
+; MIPS32R5EL-NEXT:    addiu $2, $2, %lo($CPI39_1)
+; MIPS32R5EL-NEXT:    ld.w $w0, 0($2)
+; MIPS32R5EL-NEXT:    copy_s.w $2, $w0[1]
+; MIPS32R5EL-NEXT:    copy_s.w $3, $w0[3]
+; MIPS32R5EL-NEXT:    sw $3, 36($sp)
+; MIPS32R5EL-NEXT:    sw $2, 28($sp)
+; MIPS32R5EL-NEXT:    sw $1, 20($sp)
+; MIPS32R5EL-NEXT:    sw $zero, 32($sp)
+; MIPS32R5EL-NEXT:    sw $zero, 24($sp)
+; MIPS32R5EL-NEXT:    sw $zero, 16($sp)
+; MIPS32R5EL-NEXT:    addiu $4, $sp, 48
+; MIPS32R5EL-NEXT:    addiu $6, $zero, 0
+; MIPS32R5EL-NEXT:    addiu $7, $zero, 0
+; MIPS32R5EL-NEXT:    jal double2_extern
+; MIPS32R5EL-NEXT:    nop
+; MIPS32R5EL-NEXT:    lui $1, %hi(gv2f64)
+; MIPS32R5EL-NEXT:    addiu $1, $1, %lo(gv2f64)
+; MIPS32R5EL-NEXT:    ld.d $w0, 48($sp)
+; MIPS32R5EL-NEXT:    st.d $w0, 0($1)
+; MIPS32R5EL-NEXT:    move $sp, $fp
+; MIPS32R5EL-NEXT:    lw $fp, 72($sp) # 4-byte Folded Reload
+; MIPS32R5EL-NEXT:    lw $ra, 76($sp) # 4-byte Folded Reload
+; MIPS32R5EL-NEXT:    addiu $sp, $sp, 80
+; MIPS32R5EL-NEXT:    jr $ra
+; MIPS32R5EL-NEXT:    nop
 entry:
   %0 = call <2 x double> @double2_extern(<2 x double> <double 0.0, double -1.0>, <2 x double> <double 12.0, double 14.0>)
   store <2 x double> %0, ptr @gv2f64

diff --git a/llvm/test/CodeGen/SPARC/float-constants.ll b/llvm/test/CodeGen/SPARC/float-constants.ll
index 873c1f98bdc4b6..8424bf216dd4ef 100644
--- a/llvm/test/CodeGen/SPARC/float-constants.ll
+++ b/llvm/test/CodeGen/SPARC/float-constants.ll
@@ -14,9 +14,9 @@ define <2 x i32> @bitcast() nounwind {
 ;
 ; CHECK-LE-LABEL: bitcast:
 ; CHECK-LE:       ! %bb.0:
-; CHECK-LE-NEXT:    mov %g0, %o0
-; CHECK-LE-NEXT:    retl
 ; CHECK-LE-NEXT:    sethi 1049856, %o1
+; CHECK-LE-NEXT:    retl
+; CHECK-LE-NEXT:    mov %g0, %o0
   %1 = bitcast double 5.0 to <2 x i32>
   ret <2 x i32> %1
 }
