[llvm] 0d6e647 - [PowerPC] Update P10 vector insert patterns to use refactored load/stores, and update handling of v4f32 vector insert.

Amy Kwan via llvm-commits llvm-commits at lists.llvm.org
Tue Feb 1 06:48:46 PST 2022


Author: Amy Kwan
Date: 2022-02-01T08:48:37-06:00
New Revision: 0d6e64755acf0334f9a3958c254b32ac95aa859b

URL: https://github.com/llvm/llvm-project/commit/0d6e64755acf0334f9a3958c254b32ac95aa859b
DIFF: https://github.com/llvm/llvm-project/commit/0d6e64755acf0334f9a3958c254b32ac95aa859b.diff

LOG: [PowerPC] Update P10 vector insert patterns to use refactored load/stores, and update handling of v4f32 vector insert.

This patch updates the P10 patterns with a load feeding into an insertelt to
utilize the refactored load and store infrastructure, as well as updating any
tests that exhibit any codegen changes.

Furthermore, custom legalization is added for v4f32 on Power9 and above, not
only to assist with adjusting the refactored load/stores for the P10 vector
insert, but also to enable the utilization of direct moves.

Differential Revision: https://reviews.llvm.org/D115691

Added: 
    

Modified: 
    llvm/lib/Target/PowerPC/PPCISelLowering.cpp
    llvm/lib/Target/PowerPC/PPCInstrPrefix.td
    llvm/test/CodeGen/PowerPC/aix-vec_insert_elt.ll
    llvm/test/CodeGen/PowerPC/scalar_vector_test_4.ll
    llvm/test/CodeGen/PowerPC/vec_insert_elt.ll

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index 90479eea1ad64..cbeae0ab03b83 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -1252,7 +1252,6 @@ PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM,
         setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Legal);
         setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Legal);
         setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Legal);
-        setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Legal);
       } else {
         setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
         setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom);
@@ -10763,6 +10762,26 @@ SDValue PPCTargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
   if (VT == MVT::v2f64 && C)
     return Op;
 
+  if (Subtarget.hasP9Vector()) {
+    // A f32 load feeding into a v4f32 insert_vector_elt is handled in this way
+    // because on P10, it allows this specific insert_vector_elt load pattern to
+    // utilize the refactored load and store infrastructure in order to exploit
+    // prefixed loads.
+    // On targets with inexpensive direct moves (Power9 and up), a
+    // (insert_vector_elt v4f32:$vec, (f32 load)) is always better as an integer
+    // load since a single precision load will involve conversion to double
+    // precision on the load followed by another conversion to single precision.
+    if ((VT == MVT::v4f32) && (V2.getValueType() == MVT::f32) &&
+        (isa<LoadSDNode>(V2))) {
+      SDValue BitcastVector = DAG.getBitcast(MVT::v4i32, V1);
+      SDValue BitcastLoad = DAG.getBitcast(MVT::i32, V2);
+      SDValue InsVecElt =
+          DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v4i32, BitcastVector,
+                      BitcastLoad, Op.getOperand(2));
+      return DAG.getBitcast(MVT::v4f32, InsVecElt);
+    }
+  }
+
   if (Subtarget.isISA3_1()) {
     if ((VT == MVT::v2i64 || VT == MVT::v2f64) && !Subtarget.isPPC64())
       return SDValue();

diff  --git a/llvm/lib/Target/PowerPC/PPCInstrPrefix.td b/llvm/lib/Target/PowerPC/PPCInstrPrefix.td
index fe354208533ba..ff43426dd1ef3 100644
--- a/llvm/lib/Target/PowerPC/PPCInstrPrefix.td
+++ b/llvm/lib/Target/PowerPC/PPCInstrPrefix.td
@@ -2816,32 +2816,20 @@ let Predicates = [IsISA3_1, HasVSX, IsLittleEndian] in {
 
   def : Pat<(v4f32 (insertelt v4f32:$vDi, f32:$rA, i64:$rB)),
             (VINSWVRX $vDi, InsertEltShift.Sub32Left2, (XSCVDPSPN $rA))>;
-  def : Pat<(v4f32 (insertelt v4f32:$vDi, (f32 (load iaddr:$rA)), i64:$rB)),
-            (VINSWRX $vDi, InsertEltShift.Sub32Left2, (LWZ memri:$rA))>;
-  def : Pat<(v4f32 (insertelt v4f32:$vDi, (f32 (load iaddrX34:$rA)), i64:$rB)),
-            (VINSWRX $vDi, InsertEltShift.Sub32Left2, (PLWZ memri34:$rA))>;
-  def : Pat<(v4f32 (insertelt v4f32:$vDi, (f32 (load xaddr:$rA)), i64:$rB)),
-            (VINSWRX $vDi, InsertEltShift.Sub32Left2, (LWZX memrr:$rA))>;
 
   def : Pat<(v2f64 (insertelt v2f64:$vDi,  f64:$A, i64:$rB)),
             (VINSDRX $vDi, InsertEltShift.Left3, Bitcast.DblToLong)>;
-  def : Pat<(v2f64 (insertelt v2f64:$vDi, (f64 (load iaddrX4:$rA)), i64:$rB)),
+  def : Pat<(v2f64 (insertelt v2f64:$vDi, (f64 (load DSForm:$rA)), i64:$rB)),
             (VINSDRX $vDi, InsertEltShift.Left3, (LD memrix:$rA))>;
-  def : Pat<(v2f64 (insertelt v2f64:$vDi, (f64 (load iaddrX34:$rA)), i64:$rB)),
+  def : Pat<(v2f64 (insertelt v2f64:$vDi, (f64 (load PDForm:$rA)), i64:$rB)),
             (VINSDRX $vDi, InsertEltShift.Left3, (PLD memri34:$rA))>;
-  def : Pat<(v2f64 (insertelt v2f64:$vDi, (f64 (load xaddrX4:$rA)), i64:$rB)),
+  def : Pat<(v2f64 (insertelt v2f64:$vDi, (f64 (load XForm:$rA)), i64:$rB)),
             (VINSDRX $vDi, InsertEltShift.Left3, (LDX memrr:$rA))>;
   let AddedComplexity = 400 in {
     // Immediate vector insert element
     foreach Idx = [0, 1, 2, 3] in {
       def : Pat<(v4i32 (insertelt v4i32:$vDi, i32:$rA, Idx)),
                 (VINSW $vDi, !mul(!sub(3, Idx), 4), $rA)>;
-      def : Pat<(v4f32 (insertelt v4f32:$vDi, (f32 (load iaddr:$rA)), Idx)),
-                (VINSW $vDi, !mul(!sub(3, Idx), 4), (LWZ memri:$rA))>;
-      def : Pat<(v4f32 (insertelt v4f32:$vDi, (f32 (load iaddrX34:$rA)), Idx)),
-                (VINSW $vDi, !mul(!sub(3, Idx), 4), (PLWZ memri34:$rA))>;
-      def : Pat<(v4f32 (insertelt v4f32:$vDi, (f32 (load xaddr:$rA)), Idx)),
-                (VINSW $vDi, !mul(!sub(3, Idx), 4), (LWZX memrr:$rA))>;
     }
     foreach i = [0, 1] in
      def : Pat<(v2i64 (insertelt v2i64:$vDi, i64:$rA, (i64 i))),
@@ -2860,12 +2848,6 @@ let Predicates = [IsISA3_1, HasVSX, IsBigEndian, IsPPC32] in {
 
   def : Pat<(v4f32 (insertelt v4f32:$vDi,  f32:$rA, i32:$rB)),
             (VINSWVLX $vDi, InsertEltShift.Left2, (XSCVDPSPN $rA))>;
-  def : Pat<(v4f32 (insertelt v4f32:$vDi, (f32 (load iaddr:$rA)), i32:$rB)),
-            (VINSWLX v4f32:$vDi, InsertEltShift.Left2, (LWZ memri:$rA))>;
-  def : Pat<(v4f32 (insertelt v4f32:$vDi, (f32 (load iaddrX34:$rA)), i32:$rB)),
-            (VINSWLX v4f32:$vDi, InsertEltShift.Left2, (PLWZ memri34:$rA))>;
-  def: Pat<(v4f32(insertelt v4f32 : $vDi, (f32(load xaddr : $rA)), i32 : $rB)),
-           (VINSWLX v4f32 : $vDi, InsertEltShift.Left2, (LWZX memrr : $rA))>;
 }
 
 let Predicates = [IsISA3_1, HasVSX, IsBigEndian, IsPPC64] in {
@@ -2881,20 +2863,14 @@ let Predicates = [IsISA3_1, HasVSX, IsBigEndian, IsPPC64] in {
 
   def : Pat<(v4f32 (insertelt v4f32:$vDi,  f32:$rA, i64:$rB)),
             (VINSWVLX $vDi, InsertEltShift.Sub32Left2, (XSCVDPSPN $rA))>;
-  def : Pat<(v4f32 (insertelt v4f32:$vDi, (f32 (load iaddr:$rA)), i64:$rB)),
-            (VINSWLX $vDi, InsertEltShift.Sub32Left2, (LWZ memri:$rA))>;
-  def : Pat<(v4f32 (insertelt v4f32:$vDi, (f32 (load iaddrX34:$rA)), i64:$rB)),
-            (VINSWLX $vDi, InsertEltShift.Sub32Left2, (PLWZ memri34:$rA))>;
-  def : Pat<(v4f32 (insertelt v4f32:$vDi, (f32 (load xaddr:$rA)), i64:$rB)),
-            (VINSWLX $vDi, InsertEltShift.Sub32Left2, (LWZX memrr:$rA))>;
 
   def : Pat<(v2f64 (insertelt v2f64:$vDi,  f64:$A, i64:$rB)),
             (VINSDLX $vDi, InsertEltShift.Left3, Bitcast.DblToLong)>;
-  def : Pat<(v2f64 (insertelt v2f64:$vDi, (f64 (load iaddrX4:$rA)), i64:$rB)),
+  def : Pat<(v2f64 (insertelt v2f64:$vDi, (f64 (load DSForm:$rA)), i64:$rB)),
             (VINSDLX $vDi, InsertEltShift.Left3, (LD memrix:$rA))>;
-  def : Pat<(v2f64 (insertelt v2f64:$vDi, (f64 (load iaddrX34:$rA)), i64:$rB)),
+  def : Pat<(v2f64 (insertelt v2f64:$vDi, (f64 (load PDForm:$rA)), i64:$rB)),
             (VINSDLX $vDi, InsertEltShift.Left3, (PLD memri34:$rA))>;
-  def : Pat<(v2f64 (insertelt v2f64:$vDi, (f64 (load xaddrX4:$rA)), i64:$rB)),
+  def : Pat<(v2f64 (insertelt v2f64:$vDi, (f64 (load XForm:$rA)), i64:$rB)),
             (VINSDLX $vDi, InsertEltShift.Left3, (LDX memrr:$rA))>;
 }
 
@@ -2904,15 +2880,6 @@ let AddedComplexity = 400, Predicates = [IsISA3_1, HasVSX, IsBigEndian] in {
     foreach Idx = [0, 1, 2, 3] in {
       def : Pat<(v4i32 (insertelt v4i32:$vDi, i32:$rA, (Ty Idx))),
                (VINSW $vDi, !mul(Idx, 4), $rA)>;
-      def : Pat<(v4f32 (insertelt v4f32:$vDi, (f32 (load iaddr:$rA)),
-                                  (Ty Idx))),
-               (VINSW $vDi, !mul(Idx, 4), (LWZ memri:$rA))>;
-      def : Pat<(v4f32 (insertelt v4f32:$vDi, (f32 (load iaddrX34:$rA)),
-                                  (Ty Idx))),
-               (VINSW $vDi, !mul(Idx, 4), (PLWZ memri34:$rA))>;
-      def : Pat<(v4f32 (insertelt v4f32:$vDi, (f32 (load xaddr:$rA)),
-                                  (Ty Idx))),
-               (VINSW $vDi, !mul(Idx, 4), (LWZX memrr:$rA))>;
     }
   }
 

diff  --git a/llvm/test/CodeGen/PowerPC/aix-vec_insert_elt.ll b/llvm/test/CodeGen/PowerPC/aix-vec_insert_elt.ll
index e62c8285fe893..f5ebc75b067c1 100644
--- a/llvm/test/CodeGen/PowerPC/aix-vec_insert_elt.ll
+++ b/llvm/test/CodeGen/PowerPC/aix-vec_insert_elt.ll
@@ -306,32 +306,32 @@ define <4 x float> @testFloat2(<4 x float> %a, i8* %b, i32 zeroext %idx1, i32 ze
 ; CHECK-64:       # %bb.0: # %entry
 ; CHECK-64-NEXT:    lwz 6, 0(3)
 ; CHECK-64-DAG:     rlwinm 4, 4, 2, 28, 29
-; CHECK-64-DAG:     addi 7, 1, -32
-; CHECK-64-NEXT:    stxv 34, -32(1)
+; CHECK-64-DAG:     addi 7, 1, -16
+; CHECK-64-NEXT:    stxv 34, -16(1)
 ; CHECK-64-NEXT:    stwx 6, 7, 4
 ; CHECK-64-NEXT:    rlwinm 4, 5, 2, 28, 29
-; CHECK-64-NEXT:    addi 5, 1, -16
-; CHECK-64-NEXT:    lxv 0, -32(1)
+; CHECK-64-NEXT:    addi 5, 1, -32
+; CHECK-64-NEXT:    lxv 0, -16(1)
 ; CHECK-64-NEXT:    lwz 3, 1(3)
-; CHECK-64-NEXT:    stxv 0, -16(1)
+; CHECK-64-NEXT:    stxv 0, -32(1)
 ; CHECK-64-NEXT:    stwx 3, 5, 4
-; CHECK-64-NEXT:    lxv 34, -16(1)
+; CHECK-64-NEXT:    lxv 34, -32(1)
 ; CHECK-64-NEXT:    blr
 ;
 ; CHECK-32-LABEL: testFloat2:
 ; CHECK-32:       # %bb.0: # %entry
 ; CHECK-32-NEXT:    lwz 6, 0(3)
-; CHECK-32-NEXT:    addi 7, 1, -32
+; CHECK-32-NEXT:    addi 7, 1, -16
 ; CHECK-32-NEXT:    rlwinm 4, 4, 2, 28, 29
-; CHECK-32-NEXT:    stxv 34, -32(1)
+; CHECK-32-NEXT:    stxv 34, -16(1)
 ; CHECK-32-NEXT:    rlwinm 5, 5, 2, 28, 29
 ; CHECK-32-NEXT:    stwx 6, 7, 4
-; CHECK-32-NEXT:    addi 4, 1, -16
-; CHECK-32-NEXT:    lxv 0, -32(1)
+; CHECK-32-NEXT:    addi 4, 1, -48
+; CHECK-32-NEXT:    lxv 0, -16(1)
 ; CHECK-32-NEXT:    lwz 3, 1(3)
-; CHECK-32-NEXT:    stxv 0, -16(1)
+; CHECK-32-NEXT:    stxv 0, -48(1)
 ; CHECK-32-NEXT:    stwx 3, 4, 5
-; CHECK-32-NEXT:    lxv 34, -16(1)
+; CHECK-32-NEXT:    lxv 34, -48(1)
 ; CHECK-32-NEXT:    blr
 ;
 ; CHECK-64-P10-LABEL: testFloat2:
@@ -371,36 +371,36 @@ define <4 x float> @testFloat3(<4 x float> %a, i8* %b, i32 zeroext %idx1, i32 ze
 ; CHECK-64:       # %bb.0: # %entry
 ; CHECK-64-NEXT:    lis 6, 1
 ; CHECK-64-DAG:         rlwinm 4, 4, 2, 28, 29
-; CHECK-64-DAG:    addi 7, 1, -32
+; CHECK-64-DAG:    addi 7, 1, -16
 ; CHECK-64-NEXT:    lwzx 6, 3, 6
-; CHECK-64-NEXT:    stxv 34, -32(1)
+; CHECK-64-NEXT:    stxv 34, -16(1)
 ; CHECK-64-NEXT:    stwx 6, 7, 4
 ; CHECK-64-NEXT:    li 4, 1
-; CHECK-64-NEXT:    lxv 0, -32(1)
+; CHECK-64-NEXT:    lxv 0, -16(1)
 ; CHECK-64-NEXT:    rldic 4, 4, 36, 27
 ; CHECK-64-NEXT:    lwzx 3, 3, 4
 ; CHECK-64-NEXT:    rlwinm 4, 5, 2, 28, 29
-; CHECK-64-NEXT:    addi 5, 1, -16
-; CHECK-64-NEXT:    stxv 0, -16(1)
+; CHECK-64-NEXT:    addi 5, 1, -32
+; CHECK-64-NEXT:    stxv 0, -32(1)
 ; CHECK-64-NEXT:    stwx 3, 5, 4
-; CHECK-64-NEXT:    lxv 34, -16(1)
+; CHECK-64-NEXT:    lxv 34, -32(1)
 ; CHECK-64-NEXT:    blr
 ;
 ; CHECK-32-LABEL: testFloat3:
 ; CHECK-32:       # %bb.0: # %entry
 ; CHECK-32-NEXT:    lis 6, 1
-; CHECK-32-NEXT:    addi 7, 1, -32
+; CHECK-32-NEXT:    addi 7, 1, -16
 ; CHECK-32-NEXT:    rlwinm 4, 4, 2, 28, 29
 ; CHECK-32-NEXT:    rlwinm 5, 5, 2, 28, 29
 ; CHECK-32-NEXT:    lwzx 6, 3, 6
-; CHECK-32-NEXT:    stxv 34, -32(1)
+; CHECK-32-NEXT:    stxv 34, -16(1)
 ; CHECK-32-NEXT:    stwx 6, 7, 4
-; CHECK-32-NEXT:    addi 4, 1, -16
-; CHECK-32-NEXT:    lxv 0, -32(1)
+; CHECK-32-NEXT:    addi 4, 1, -48
+; CHECK-32-NEXT:    lxv 0, -16(1)
 ; CHECK-32-NEXT:    lwz 3, 0(3)
-; CHECK-32-NEXT:    stxv 0, -16(1)
+; CHECK-32-NEXT:    stxv 0, -48(1)
 ; CHECK-32-NEXT:    stwx 3, 4, 5
-; CHECK-32-NEXT:    lxv 34, -16(1)
+; CHECK-32-NEXT:    lxv 34, -48(1)
 ; CHECK-32-NEXT:    blr
 ;
 ; CHECK-64-P10-LABEL: testFloat3:
@@ -419,10 +419,9 @@ define <4 x float> @testFloat3(<4 x float> %a, i8* %b, i32 zeroext %idx1, i32 ze
 ;
 ; CHECK-32-P10-LABEL: testFloat3:
 ; CHECK-32-P10:       # %bb.0: # %entry
-; CHECK-32-P10-NEXT:    lis 6, 1
-; CHECK-32-P10-NEXT:    slwi 4, 4, 2
-; CHECK-32-P10-NEXT:    lwzx 6, 3, 6
+; CHECK-32-P10-NEXT:    plwz 6, 65536(3), 0
 ; CHECK-32-P10-NEXT:    lwz 3, 0(3)
+; CHECK-32-P10-NEXT:    slwi 4, 4, 2
 ; CHECK-32-P10-NEXT:    vinswlx 2, 4, 6
 ; CHECK-32-P10-NEXT:    slwi 4, 5, 2
 ; CHECK-32-P10-NEXT:    vinswlx 2, 4, 3
@@ -478,21 +477,21 @@ entry:
 define <4 x float> @testFloatImm2(<4 x float> %a, i32* %b) {
 ; CHECK-64-LABEL: testFloatImm2:
 ; CHECK-64:       # %bb.0: # %entry
-; CHECK-64-NEXT:    lfs 0, 0(3)
-; CHECK-64-NEXT:    xscvdpspn 0, 0
+; CHECK-64-NEXT:    lwz 4, 0(3)
+; CHECK-64-NEXT:    lwz 3, 4(3)
+; CHECK-64-NEXT:    mtfprwz 0, 4
 ; CHECK-64-NEXT:    xxinsertw 34, 0, 0
-; CHECK-64-NEXT:    lfs 0, 4(3)
-; CHECK-64-NEXT:    xscvdpspn 0, 0
+; CHECK-64-NEXT:    mtfprwz 0, 3
 ; CHECK-64-NEXT:    xxinsertw 34, 0, 8
 ; CHECK-64-NEXT:    blr
 ;
 ; CHECK-32-LABEL: testFloatImm2:
 ; CHECK-32:       # %bb.0: # %entry
-; CHECK-32-NEXT:    lfs 0, 0(3)
-; CHECK-32-NEXT:    xscvdpspn 0, 0
+; CHECK-32-NEXT:    lwz 4, 0(3)
+; CHECK-32-NEXT:    lwz 3, 4(3)
+; CHECK-32-NEXT:    mtfprwz 0, 4
 ; CHECK-32-NEXT:    xxinsertw 34, 0, 0
-; CHECK-32-NEXT:    lfs 0, 4(3)
-; CHECK-32-NEXT:    xscvdpspn 0, 0
+; CHECK-32-NEXT:    mtfprwz 0, 3
 ; CHECK-32-NEXT:    xxinsertw 34, 0, 8
 ; CHECK-32-NEXT:    blr
 ;
@@ -526,24 +525,24 @@ define <4 x float> @testFloatImm3(<4 x float> %a, i32* %b) {
 ; CHECK-64-LABEL: testFloatImm3:
 ; CHECK-64:       # %bb.0: # %entry
 ; CHECK-64-NEXT:    lis 4, 4
-; CHECK-64-NEXT:    lfsx 0, 3, 4
+; CHECK-64-NEXT:    lwzx 4, 3, 4
+; CHECK-64-NEXT:    mtfprwz 0, 4
 ; CHECK-64-NEXT:    li 4, 1
 ; CHECK-64-NEXT:    rldic 4, 4, 38, 25
-; CHECK-64-NEXT:    xscvdpspn 0, 0
 ; CHECK-64-NEXT:    xxinsertw 34, 0, 0
-; CHECK-64-NEXT:    lfsx 0, 3, 4
-; CHECK-64-NEXT:    xscvdpspn 0, 0
+; CHECK-64-NEXT:    lwzx 3, 3, 4
+; CHECK-64-NEXT:    mtfprwz 0, 3
 ; CHECK-64-NEXT:    xxinsertw 34, 0, 8
 ; CHECK-64-NEXT:    blr
 ;
 ; CHECK-32-LABEL: testFloatImm3:
 ; CHECK-32:       # %bb.0: # %entry
 ; CHECK-32-NEXT:    lis 4, 4
-; CHECK-32-NEXT:    lfsx 0, 3, 4
-; CHECK-32-NEXT:    xscvdpspn 0, 0
+; CHECK-32-NEXT:    lwzx 4, 3, 4
+; CHECK-32-NEXT:    lwz 3, 0(3)
+; CHECK-32-NEXT:    mtfprwz 0, 4
 ; CHECK-32-NEXT:    xxinsertw 34, 0, 0
-; CHECK-32-NEXT:    lfs 0, 0(3)
-; CHECK-32-NEXT:    xscvdpspn 0, 0
+; CHECK-32-NEXT:    mtfprwz 0, 3
 ; CHECK-32-NEXT:    xxinsertw 34, 0, 8
 ; CHECK-32-NEXT:    blr
 ;
@@ -559,8 +558,7 @@ define <4 x float> @testFloatImm3(<4 x float> %a, i32* %b) {
 ;
 ; CHECK-32-P10-LABEL: testFloatImm3:
 ; CHECK-32-P10:       # %bb.0: # %entry
-; CHECK-32-P10-NEXT:    lis 4, 4
-; CHECK-32-P10-NEXT:    lwzx 4, 3, 4
+; CHECK-32-P10-NEXT:    plwz 4, 262144(3), 0
 ; CHECK-32-P10-NEXT:    lwz 3, 0(3)
 ; CHECK-32-P10-NEXT:    vinsw 2, 4, 0
 ; CHECK-32-P10-NEXT:    vinsw 2, 3, 8

diff  --git a/llvm/test/CodeGen/PowerPC/scalar_vector_test_4.ll b/llvm/test/CodeGen/PowerPC/scalar_vector_test_4.ll
index 9a893bed82aac..e6427880c5e8a 100644
--- a/llvm/test/CodeGen/PowerPC/scalar_vector_test_4.ll
+++ b/llvm/test/CodeGen/PowerPC/scalar_vector_test_4.ll
@@ -214,15 +214,15 @@ entry:
 define <4 x float> @s2v_test_f1(float* nocapture readonly %f64, <4 x float> %vec)  {
 ; P9LE-LABEL: s2v_test_f1:
 ; P9LE:       # %bb.0: # %entry
-; P9LE-NEXT:    lfs f0, 0(r3)
-; P9LE-NEXT:    xscvdpspn vs0, f0
+; P9LE-NEXT:    lwz r3, 0(r3)
+; P9LE-NEXT:    mtfprwz f0, r3
 ; P9LE-NEXT:    xxinsertw v2, vs0, 12
 ; P9LE-NEXT:    blr
 ;
 ; P9BE-LABEL: s2v_test_f1:
 ; P9BE:       # %bb.0: # %entry
-; P9BE-NEXT:    lfs f0, 0(r3)
-; P9BE-NEXT:    xscvdpspn vs0, f0
+; P9BE-NEXT:    lwz r3, 0(r3)
+; P9BE-NEXT:    mtfprwz f0, r3
 ; P9BE-NEXT:    xxinsertw v2, vs0, 0
 ; P9BE-NEXT:    blr
 ;

diff  --git a/llvm/test/CodeGen/PowerPC/vec_insert_elt.ll b/llvm/test/CodeGen/PowerPC/vec_insert_elt.ll
index 18e8eda1209c0..9bbb09ac3a966 100644
--- a/llvm/test/CodeGen/PowerPC/vec_insert_elt.ll
+++ b/llvm/test/CodeGen/PowerPC/vec_insert_elt.ll
@@ -255,16 +255,16 @@ define <4 x float> @testFloat2(<4 x float> %a, i8* %b, i32 zeroext %idx1, i32 ze
 ; CHECK-P9:       # %bb.0: # %entry
 ; CHECK-P9-NEXT:    lwz r3, 0(r5)
 ; CHECK-P9-NEXT:    rlwinm r4, r6, 2, 28, 29
-; CHECK-P9-NEXT:    addi r6, r1, -32
-; CHECK-P9-NEXT:    stxv v2, -32(r1)
+; CHECK-P9-NEXT:    addi r6, r1, -16
+; CHECK-P9-NEXT:    stxv v2, -16(r1)
 ; CHECK-P9-NEXT:    stwx r3, r6, r4
 ; CHECK-P9-NEXT:    rlwinm r4, r7, 2, 28, 29
-; CHECK-P9-NEXT:    lxv vs0, -32(r1)
+; CHECK-P9-NEXT:    lxv vs0, -16(r1)
 ; CHECK-P9-NEXT:    lwz r3, 1(r5)
-; CHECK-P9-NEXT:    addi r5, r1, -16
-; CHECK-P9-NEXT:    stxv vs0, -16(r1)
+; CHECK-P9-NEXT:    addi r5, r1, -32
+; CHECK-P9-NEXT:    stxv vs0, -32(r1)
 ; CHECK-P9-NEXT:    stwx r3, r5, r4
-; CHECK-P9-NEXT:    lxv v2, -16(r1)
+; CHECK-P9-NEXT:    lxv v2, -32(r1)
 ; CHECK-P9-NEXT:    blr
 entry:
   %0 = bitcast i8* %b to float*
@@ -310,19 +310,19 @@ define <4 x float> @testFloat3(<4 x float> %a, i8* %b, i32 zeroext %idx1, i32 ze
 ; CHECK-P9:       # %bb.0: # %entry
 ; CHECK-P9-NEXT:    lis r3, 1
 ; CHECK-P9-NEXT:    rlwinm r4, r6, 2, 28, 29
-; CHECK-P9-NEXT:    addi r6, r1, -32
+; CHECK-P9-NEXT:    addi r6, r1, -16
 ; CHECK-P9-NEXT:    lwzx r3, r5, r3
-; CHECK-P9-NEXT:    stxv v2, -32(r1)
+; CHECK-P9-NEXT:    stxv v2, -16(r1)
 ; CHECK-P9-NEXT:    stwx r3, r6, r4
 ; CHECK-P9-NEXT:    li r3, 1
 ; CHECK-P9-NEXT:    rlwinm r4, r7, 2, 28, 29
-; CHECK-P9-NEXT:    lxv vs0, -32(r1)
+; CHECK-P9-NEXT:    lxv vs0, -16(r1)
 ; CHECK-P9-NEXT:    rldic r3, r3, 36, 27
 ; CHECK-P9-NEXT:    lwzx r3, r5, r3
-; CHECK-P9-NEXT:    addi r5, r1, -16
-; CHECK-P9-NEXT:    stxv vs0, -16(r1)
+; CHECK-P9-NEXT:    addi r5, r1, -32
+; CHECK-P9-NEXT:    stxv vs0, -32(r1)
 ; CHECK-P9-NEXT:    stwx r3, r5, r4
-; CHECK-P9-NEXT:    lxv v2, -16(r1)
+; CHECK-P9-NEXT:    lxv v2, -32(r1)
 ; CHECK-P9-NEXT:    blr
 entry:
   %add.ptr = getelementptr inbounds i8, i8* %b, i64 65536
@@ -384,11 +384,11 @@ define <4 x float> @testFloatImm2(<4 x float> %a, i32* %b) {
 ;
 ; CHECK-P9-LABEL: testFloatImm2:
 ; CHECK-P9:       # %bb.0: # %entry
-; CHECK-P9-NEXT:    lfs f0, 0(r5)
-; CHECK-P9-NEXT:    xscvdpspn vs0, f0
+; CHECK-P9-NEXT:    lwz r3, 0(r5)
+; CHECK-P9-NEXT:    mtfprwz f0, r3
+; CHECK-P9-NEXT:    lwz r3, 4(r5)
 ; CHECK-P9-NEXT:    xxinsertw v2, vs0, 0
-; CHECK-P9-NEXT:    lfs f0, 4(r5)
-; CHECK-P9-NEXT:    xscvdpspn vs0, f0
+; CHECK-P9-NEXT:    mtfprwz f0, r3
 ; CHECK-P9-NEXT:    xxinsertw v2, vs0, 8
 ; CHECK-P9-NEXT:    blr
 entry:
@@ -426,13 +426,13 @@ define <4 x float> @testFloatImm3(<4 x float> %a, i32* %b) {
 ; CHECK-P9-LABEL: testFloatImm3:
 ; CHECK-P9:       # %bb.0: # %entry
 ; CHECK-P9-NEXT:    lis r3, 4
-; CHECK-P9-NEXT:    lfsx f0, r5, r3
+; CHECK-P9-NEXT:    lwzx r3, r5, r3
+; CHECK-P9-NEXT:    mtfprwz f0, r3
 ; CHECK-P9-NEXT:    li r3, 1
 ; CHECK-P9-NEXT:    rldic r3, r3, 38, 25
-; CHECK-P9-NEXT:    xscvdpspn vs0, f0
 ; CHECK-P9-NEXT:    xxinsertw v2, vs0, 0
-; CHECK-P9-NEXT:    lfsx f0, r5, r3
-; CHECK-P9-NEXT:    xscvdpspn vs0, f0
+; CHECK-P9-NEXT:    lwzx r3, r5, r3
+; CHECK-P9-NEXT:    mtfprwz f0, r3
 ; CHECK-P9-NEXT:    xxinsertw v2, vs0, 8
 ; CHECK-P9-NEXT:    blr
 entry:


        


More information about the llvm-commits mailing list