[llvm] 8f48625 - Revert "[PowerPC] extend smaller splats into bigger splats (#141282)"

Hubert Tong via llvm-commits <llvm-commits@lists.llvm.org>
Wed May 28 22:11:01 PDT 2025


Author: Hubert Tong
Date: 2025-05-29T01:10:55-04:00
New Revision: 8f486254e4fb03e30c0140aa207b10140ccbdd32

URL: https://github.com/llvm/llvm-project/commit/8f486254e4fb03e30c0140aa207b10140ccbdd32
DIFF: https://github.com/llvm/llvm-project/commit/8f486254e4fb03e30c0140aa207b10140ccbdd32.diff

LOG: Revert "[PowerPC] extend smaller splats into bigger splats (#141282)"

The subject commit causes the build to ICE on AIX:
https://lab.llvm.org/buildbot/#/builders/64/builds/3890/steps/5/logs/stdio

This reverts commit 7fa365843d9f99e75c38a6107e8511b324950e74.

Added: 
    

Modified: 
    llvm/lib/Target/PowerPC/PPCISelLowering.cpp
    llvm/test/CodeGen/PowerPC/build-vector-tests.ll
    llvm/test/CodeGen/PowerPC/mul-const-vector.ll
    llvm/test/CodeGen/PowerPC/p10-splatImm-CPload-pcrel.ll
    llvm/test/CodeGen/PowerPC/pre-inc-disable.ll
    llvm/test/CodeGen/PowerPC/vec_add_sub_doubleword.ll
    llvm/test/CodeGen/PowerPC/vector-extend-sign.ll
    llvm/test/CodeGen/PowerPC/vector-popcnt-128-ult-ugt.ll

Removed: 
    llvm/test/CodeGen/PowerPC/splat-extend.ll


################################################################################
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index fa54b114afe8c..0c2a506005604 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -9667,25 +9667,7 @@ SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op,
     }
   }
 
-  bool IsSplat64 = false;
-  uint64_t SplatBits = 0;
-  int32_t SextVal = 0;
-  if (BVNIsConstantSplat) {
-    if (SplatBitSize <= 32) {
-      SplatBits = APSplatBits.getZExtValue();
-      SextVal = SignExtend32(SplatBits, SplatBitSize);
-    } else if (SplatBitSize == 64) {
-      int64_t Splat64Val = APSplatBits.getSExtValue();
-      SplatBits = (uint64_t)Splat64Val;
-      SextVal = (int32_t)SplatBits;
-      bool P9Vector = Subtarget.hasP9Vector();
-      int32_t Hi = P9Vector ? 127 : 15;
-      int32_t Lo = P9Vector ? -128 : -16;
-      IsSplat64 = Splat64Val >= Lo && Splat64Val <= Hi;
-    }
-  }
-
-  if (!BVNIsConstantSplat || (SplatBitSize > 32 && !IsSplat64)) {
+  if (!BVNIsConstantSplat || SplatBitSize > 32) {
     unsigned NewOpcode = PPCISD::LD_SPLAT;
 
     // Handle load-and-splat patterns as we have instructions that will do this
@@ -9771,6 +9753,7 @@ SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op,
     return SDValue();
   }
 
+  uint64_t SplatBits = APSplatBits.getZExtValue();
   uint64_t SplatUndef = APSplatUndef.getZExtValue();
   unsigned SplatSize = SplatBitSize / 8;
 
@@ -9805,37 +9788,13 @@ SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op,
                                   dl);
 
   // If the sign extended value is in the range [-16,15], use VSPLTI[bhw].
-  // Use VSPLTIW/VUPKLSW for v2i64 in range [-16,15].
-  if (SextVal >= -16 && SextVal <= 15) {
-    unsigned UseSize = SplatSize == 8 ? 4 : SplatSize;
-    SDValue Res =
-        getCanonicalConstSplat(SextVal, UseSize, Op.getValueType(), DAG, dl);
-    if (SplatSize != 8)
-      return Res;
-    return BuildIntrinsicOp(Intrinsic::ppc_altivec_vupklsw, Res, DAG, dl);
-  }
+  int32_t SextVal = SignExtend32(SplatBits, SplatBitSize);
+  if (SextVal >= -16 && SextVal <= 15)
+    return getCanonicalConstSplat(SextVal, SplatSize, Op.getValueType(), DAG,
+                                  dl);
 
   // Two instruction sequences.
 
-  if (Subtarget.hasP9Vector() && SextVal >= -128 && SextVal <= 127) {
-    SDValue C = DAG.getConstant((unsigned char)SextVal, dl, MVT::i32);
-    SmallVector<SDValue, 16> Ops(16, C);
-    SDValue BV = DAG.getBuildVector(MVT::v16i8, dl, Ops);
-    assert((SplatSize == 2 || SplatSize == 4 || SplatSize == 8) &&
-           "Unexpected type for vector constant.");
-    unsigned IID;
-    if (SplatSize == 2) {
-      IID = Intrinsic::ppc_altivec_vupklsb;
-    } else if (SplatSize == 4) {
-      IID = Intrinsic::ppc_altivec_vextsb2w;
-    } else { // SplatSize == 8
-      IID = Intrinsic::ppc_altivec_vextsb2d;
-    }
-    SDValue Extend = BuildIntrinsicOp(IID, BV, DAG, dl);
-    return DAG.getBitcast(Op->getValueType(0), Extend);
-  }
-  assert(!IsSplat64 && "Unhandled 64-bit splat pattern");
-
   // If this value is in the range [-32,30] and is even, use:
   //     VSPLTI[bhw](val/2) + VSPLTI[bhw](val/2)
   // If this value is in the range [17,31] and is odd, use:

diff --git a/llvm/test/CodeGen/PowerPC/build-vector-tests.ll b/llvm/test/CodeGen/PowerPC/build-vector-tests.ll
index 9dd0fbe4474b1..91431ed15f6a7 100644
--- a/llvm/test/CodeGen/PowerPC/build-vector-tests.ll
+++ b/llvm/test/CodeGen/PowerPC/build-vector-tests.ll
@@ -3713,26 +3713,30 @@ entry:
 define <2 x i64> @spltConst1ll() {
 ; P9BE-LABEL: spltConst1ll:
 ; P9BE:       # %bb.0: # %entry
-; P9BE-NEXT:    vspltisw v2, 1
-; P9BE-NEXT:    vupklsw v2, v2
+; P9BE-NEXT:    addis r3, r2, .LCPI65_0@toc@ha
+; P9BE-NEXT:    addi r3, r3, .LCPI65_0@toc@l
+; P9BE-NEXT:    lxv v2, 0(r3)
 ; P9BE-NEXT:    blr
 ;
 ; P9LE-LABEL: spltConst1ll:
 ; P9LE:       # %bb.0: # %entry
-; P9LE-NEXT:    vspltisw v2, 1
-; P9LE-NEXT:    vupklsw v2, v2
+; P9LE-NEXT:    addis r3, r2, .LCPI65_0@toc@ha
+; P9LE-NEXT:    addi r3, r3, .LCPI65_0@toc@l
+; P9LE-NEXT:    lxv v2, 0(r3)
 ; P9LE-NEXT:    blr
 ;
 ; P8BE-LABEL: spltConst1ll:
 ; P8BE:       # %bb.0: # %entry
-; P8BE-NEXT:    vspltisw v2, 1
-; P8BE-NEXT:    vupklsw v2, v2
+; P8BE-NEXT:    addis r3, r2, .LCPI65_0@toc@ha
+; P8BE-NEXT:    addi r3, r3, .LCPI65_0@toc@l
+; P8BE-NEXT:    lxvd2x v2, 0, r3
 ; P8BE-NEXT:    blr
 ;
 ; P8LE-LABEL: spltConst1ll:
 ; P8LE:       # %bb.0: # %entry
-; P8LE-NEXT:    vspltisw v2, 1
-; P8LE-NEXT:    vupklsw v2, v2
+; P8LE-NEXT:    addis r3, r2, .LCPI65_0@toc@ha
+; P8LE-NEXT:    addi r3, r3, .LCPI65_0@toc@l
+; P8LE-NEXT:    lxvd2x v2, 0, r3
 ; P8LE-NEXT:    blr
 entry:
   ret <2 x i64> <i64 1, i64 1>
@@ -4169,26 +4173,30 @@ entry:
 define <2 x i64> @spltCnstConvftoll() {
 ; P9BE-LABEL: spltCnstConvftoll:
 ; P9BE:       # %bb.0: # %entry
-; P9BE-NEXT:    vspltisw v2, 4
-; P9BE-NEXT:    vupklsw v2, v2
+; P9BE-NEXT:    addis r3, r2, .LCPI78_0@toc@ha
+; P9BE-NEXT:    addi r3, r3, .LCPI78_0@toc@l
+; P9BE-NEXT:    lxv v2, 0(r3)
 ; P9BE-NEXT:    blr
 ;
 ; P9LE-LABEL: spltCnstConvftoll:
 ; P9LE:       # %bb.0: # %entry
-; P9LE-NEXT:    vspltisw v2, 4
-; P9LE-NEXT:    vupklsw v2, v2
+; P9LE-NEXT:    addis r3, r2, .LCPI78_0@toc@ha
+; P9LE-NEXT:    addi r3, r3, .LCPI78_0@toc@l
+; P9LE-NEXT:    lxv v2, 0(r3)
 ; P9LE-NEXT:    blr
 ;
 ; P8BE-LABEL: spltCnstConvftoll:
 ; P8BE:       # %bb.0: # %entry
-; P8BE-NEXT:    vspltisw v2, 4
-; P8BE-NEXT:    vupklsw v2, v2
+; P8BE-NEXT:    addis r3, r2, .LCPI78_0@toc@ha
+; P8BE-NEXT:    addi r3, r3, .LCPI78_0@toc@l
+; P8BE-NEXT:    lxvd2x v2, 0, r3
 ; P8BE-NEXT:    blr
 ;
 ; P8LE-LABEL: spltCnstConvftoll:
 ; P8LE:       # %bb.0: # %entry
-; P8LE-NEXT:    vspltisw v2, 4
-; P8LE-NEXT:    vupklsw v2, v2
+; P8LE-NEXT:    addis r3, r2, .LCPI78_0@toc@ha
+; P8LE-NEXT:    addi r3, r3, .LCPI78_0@toc@l
+; P8LE-NEXT:    lxvd2x v2, 0, r3
 ; P8LE-NEXT:    blr
 entry:
   ret <2 x i64> <i64 4, i64 4>
@@ -4518,26 +4526,30 @@ entry:
 define <2 x i64> @spltCnstConvdtoll() {
 ; P9BE-LABEL: spltCnstConvdtoll:
 ; P9BE:       # %bb.0: # %entry
-; P9BE-NEXT:    vspltisw v2, 4
-; P9BE-NEXT:    vupklsw v2, v2
+; P9BE-NEXT:    addis r3, r2, .LCPI87_0@toc@ha
+; P9BE-NEXT:    addi r3, r3, .LCPI87_0@toc@l
+; P9BE-NEXT:    lxv v2, 0(r3)
 ; P9BE-NEXT:    blr
 ;
 ; P9LE-LABEL: spltCnstConvdtoll:
 ; P9LE:       # %bb.0: # %entry
-; P9LE-NEXT:    vspltisw v2, 4
-; P9LE-NEXT:    vupklsw v2, v2
+; P9LE-NEXT:    addis r3, r2, .LCPI87_0@toc@ha
+; P9LE-NEXT:    addi r3, r3, .LCPI87_0@toc@l
+; P9LE-NEXT:    lxv v2, 0(r3)
 ; P9LE-NEXT:    blr
 ;
 ; P8BE-LABEL: spltCnstConvdtoll:
 ; P8BE:       # %bb.0: # %entry
-; P8BE-NEXT:    vspltisw v2, 4
-; P8BE-NEXT:    vupklsw v2, v2
+; P8BE-NEXT:    addis r3, r2, .LCPI87_0@toc@ha
+; P8BE-NEXT:    addi r3, r3, .LCPI87_0@toc@l
+; P8BE-NEXT:    lxvd2x v2, 0, r3
 ; P8BE-NEXT:    blr
 ;
 ; P8LE-LABEL: spltCnstConvdtoll:
 ; P8LE:       # %bb.0: # %entry
-; P8LE-NEXT:    vspltisw v2, 4
-; P8LE-NEXT:    vupklsw v2, v2
+; P8LE-NEXT:    addis r3, r2, .LCPI87_0@toc@ha
+; P8LE-NEXT:    addi r3, r3, .LCPI87_0@toc@l
+; P8LE-NEXT:    lxvd2x v2, 0, r3
 ; P8LE-NEXT:    blr
 entry:
   ret <2 x i64> <i64 4, i64 4>
@@ -4867,26 +4879,30 @@ entry:
 define <2 x i64> @spltConst1ull() {
 ; P9BE-LABEL: spltConst1ull:
 ; P9BE:       # %bb.0: # %entry
-; P9BE-NEXT:    vspltisw v2, 1
-; P9BE-NEXT:    vupklsw v2, v2
+; P9BE-NEXT:    addis r3, r2, .LCPI97_0@toc@ha
+; P9BE-NEXT:    addi r3, r3, .LCPI97_0@toc@l
+; P9BE-NEXT:    lxv v2, 0(r3)
 ; P9BE-NEXT:    blr
 ;
 ; P9LE-LABEL: spltConst1ull:
 ; P9LE:       # %bb.0: # %entry
-; P9LE-NEXT:    vspltisw v2, 1
-; P9LE-NEXT:    vupklsw v2, v2
+; P9LE-NEXT:    addis r3, r2, .LCPI97_0@toc@ha
+; P9LE-NEXT:    addi r3, r3, .LCPI97_0@toc@l
+; P9LE-NEXT:    lxv v2, 0(r3)
 ; P9LE-NEXT:    blr
 ;
 ; P8BE-LABEL: spltConst1ull:
 ; P8BE:       # %bb.0: # %entry
-; P8BE-NEXT:    vspltisw v2, 1
-; P8BE-NEXT:    vupklsw v2, v2
+; P8BE-NEXT:    addis r3, r2, .LCPI97_0@toc@ha
+; P8BE-NEXT:    addi r3, r3, .LCPI97_0@toc@l
+; P8BE-NEXT:    lxvd2x v2, 0, r3
 ; P8BE-NEXT:    blr
 ;
 ; P8LE-LABEL: spltConst1ull:
 ; P8LE:       # %bb.0: # %entry
-; P8LE-NEXT:    vspltisw v2, 1
-; P8LE-NEXT:    vupklsw v2, v2
+; P8LE-NEXT:    addis r3, r2, .LCPI97_0@toc@ha
+; P8LE-NEXT:    addi r3, r3, .LCPI97_0@toc@l
+; P8LE-NEXT:    lxvd2x v2, 0, r3
 ; P8LE-NEXT:    blr
 entry:
   ret <2 x i64> <i64 1, i64 1>
@@ -5323,26 +5339,30 @@ entry:
 define <2 x i64> @spltCnstConvftoull() {
 ; P9BE-LABEL: spltCnstConvftoull:
 ; P9BE:       # %bb.0: # %entry
-; P9BE-NEXT:    vspltisw v2, 4
-; P9BE-NEXT:    vupklsw v2, v2
+; P9BE-NEXT:    addis r3, r2, .LCPI110_0@toc@ha
+; P9BE-NEXT:    addi r3, r3, .LCPI110_0@toc@l
+; P9BE-NEXT:    lxv v2, 0(r3)
 ; P9BE-NEXT:    blr
 ;
 ; P9LE-LABEL: spltCnstConvftoull:
 ; P9LE:       # %bb.0: # %entry
-; P9LE-NEXT:    vspltisw v2, 4
-; P9LE-NEXT:    vupklsw v2, v2
+; P9LE-NEXT:    addis r3, r2, .LCPI110_0@toc@ha
+; P9LE-NEXT:    addi r3, r3, .LCPI110_0@toc@l
+; P9LE-NEXT:    lxv v2, 0(r3)
 ; P9LE-NEXT:    blr
 ;
 ; P8BE-LABEL: spltCnstConvftoull:
 ; P8BE:       # %bb.0: # %entry
-; P8BE-NEXT:    vspltisw v2, 4
-; P8BE-NEXT:    vupklsw v2, v2
+; P8BE-NEXT:    addis r3, r2, .LCPI110_0@toc@ha
+; P8BE-NEXT:    addi r3, r3, .LCPI110_0@toc@l
+; P8BE-NEXT:    lxvd2x v2, 0, r3
 ; P8BE-NEXT:    blr
 ;
 ; P8LE-LABEL: spltCnstConvftoull:
 ; P8LE:       # %bb.0: # %entry
-; P8LE-NEXT:    vspltisw v2, 4
-; P8LE-NEXT:    vupklsw v2, v2
+; P8LE-NEXT:    addis r3, r2, .LCPI110_0@toc@ha
+; P8LE-NEXT:    addi r3, r3, .LCPI110_0@toc@l
+; P8LE-NEXT:    lxvd2x v2, 0, r3
 ; P8LE-NEXT:    blr
 entry:
   ret <2 x i64> <i64 4, i64 4>
@@ -5672,26 +5692,30 @@ entry:
 define <2 x i64> @spltCnstConvdtoull() {
 ; P9BE-LABEL: spltCnstConvdtoull:
 ; P9BE:       # %bb.0: # %entry
-; P9BE-NEXT:    vspltisw v2, 4
-; P9BE-NEXT:    vupklsw v2, v2
+; P9BE-NEXT:    addis r3, r2, .LCPI119_0@toc@ha
+; P9BE-NEXT:    addi r3, r3, .LCPI119_0@toc@l
+; P9BE-NEXT:    lxv v2, 0(r3)
 ; P9BE-NEXT:    blr
 ;
 ; P9LE-LABEL: spltCnstConvdtoull:
 ; P9LE:       # %bb.0: # %entry
-; P9LE-NEXT:    vspltisw v2, 4
-; P9LE-NEXT:    vupklsw v2, v2
+; P9LE-NEXT:    addis r3, r2, .LCPI119_0@toc@ha
+; P9LE-NEXT:    addi r3, r3, .LCPI119_0@toc@l
+; P9LE-NEXT:    lxv v2, 0(r3)
 ; P9LE-NEXT:    blr
 ;
 ; P8BE-LABEL: spltCnstConvdtoull:
 ; P8BE:       # %bb.0: # %entry
-; P8BE-NEXT:    vspltisw v2, 4
-; P8BE-NEXT:    vupklsw v2, v2
+; P8BE-NEXT:    addis r3, r2, .LCPI119_0@toc@ha
+; P8BE-NEXT:    addi r3, r3, .LCPI119_0@toc@l
+; P8BE-NEXT:    lxvd2x v2, 0, r3
 ; P8BE-NEXT:    blr
 ;
 ; P8LE-LABEL: spltCnstConvdtoull:
 ; P8LE:       # %bb.0: # %entry
-; P8LE-NEXT:    vspltisw v2, 4
-; P8LE-NEXT:    vupklsw v2, v2
+; P8LE-NEXT:    addis r3, r2, .LCPI119_0@toc@ha
+; P8LE-NEXT:    addi r3, r3, .LCPI119_0@toc@l
+; P8LE-NEXT:    lxvd2x v2, 0, r3
 ; P8LE-NEXT:    blr
 entry:
   ret <2 x i64> <i64 4, i64 4>

diff --git a/llvm/test/CodeGen/PowerPC/mul-const-vector.ll b/llvm/test/CodeGen/PowerPC/mul-const-vector.ll
index 2d67de0d4477c..e3d231adf734f 100644
--- a/llvm/test/CodeGen/PowerPC/mul-const-vector.ll
+++ b/llvm/test/CodeGen/PowerPC/mul-const-vector.ll
@@ -271,7 +271,8 @@ define <2 x i64> @test1_v2i64(<2 x i64> %a) {
         ret <2 x i64> %tmp.1
 }
 ; CHECK-LABEL: test1_v2i64:
-; CHECK: vupklsw v[[REG1:[0-9]+]], v{{[0-9]+}}
+; CHECK-P8: lxvd2x v[[REG1:[0-9]+]], 0, r{{[0-9]+}}
+; CHECK-P9: lxv v[[REG2:[0-9]+]], 0(r{{[0-9]+}})
 ; CHECK-NOT: vmul
 ; CHECK-NEXT: vsld v{{[0-9]+}}, v2, v[[REG2]]
 
@@ -281,7 +282,8 @@ define <2 x i64> @test2_v2i64(<2 x i64> %a) {
 }
 
 ; CHECK-LABEL: test2_v2i64:
-; CHECK: vupklsw v[[REG1:[0-9]+]], v{{[0-9]+}}
+; CHECK-P8: lxvd2x v[[REG1:[0-9]+]], 0, r{{[0-9]+}}
+; CHECK-P9: lxv v[[REG2:[0-9]+]], 0(r{{[0-9]+}})
 ; CHECK-NOT: vmul
 ; CHECK-NEXT: vsld v[[REG3:[0-9]+]], v2, v[[REG2]]
 ; CHECK-NEXT: vaddudm v{{[0-9]+}}, v2, v[[REG3]]
@@ -292,7 +294,8 @@ define <2 x i64> @test3_v2i64(<2 x i64> %a) {
 }
 
 ; CHECK-LABEL: test3_v2i64:
-; CHECK: vupklsw v[[REG1:[0-9]+]], v{{[0-9]+}}
+; CHECK-P8: lxvd2x v[[REG1:[0-9]+]], 0, r{{[0-9]+}}
+; CHECK-P9: lxv v[[REG2:[0-9]+]], 0(r{{[0-9]+}})
 ; CHECK-NOT: vmul
 ; CHECK-NEXT: vsld v[[REG3:[0-9]+]], v2, v[[REG2]]
 ; CHECK-NEXT: vsubudm v{{[0-9]+}}, v[[REG3]], v2
@@ -305,7 +308,8 @@ define <2 x i64> @test4_v2i64(<2 x i64> %a) {
 }
 
 ; CHECK-LABEL: test4_v2i64:
-; CHECK: vupklsw v[[REG1:[0-9]+]], v{{[0-9]+}}
+; CHECK-P8: lxvd2x v[[REG1:[0-9]+]], 0, r{{[0-9]+}}
+; CHECK-P9: lxv v[[REG2:[0-9]+]], 0(r{{[0-9]+}})
 ; CHECK-NOT: vmul
 ; CHECK-NEXT: vsld v[[REG3:[0-9]+]], v2, v[[REG2]]
 ; CHECK-P8-NEXT: xxlxor v[[REG4:[0-9]+]],
@@ -318,7 +322,8 @@ define <2 x i64> @test5_v2i64(<2 x i64> %a) {
 }
 
 ; CHECK-LABEL: test5_v2i64:
-; CHECK: vupklsw v[[REG1:[0-9]+]], v{{[0-9]+}}
+; CHECK-P8: lxvd2x v[[REG1:[0-9]+]], 0, r{{[0-9]+}}
+; CHECK-P9: lxv v[[REG2:[0-9]+]], 0(r{{[0-9]+}})
 ; CHECK-NOT: vmul
 ; CHECK-NEXT: vsld v[[REG3:[0-9]+]], v2, v[[REG2]]
 ; CHECK-NEXT: vaddudm v[[REG4:[0-9]+]], v2, v[[REG3]]
@@ -332,7 +337,8 @@ define <2 x i64> @test6_v2i64(<2 x i64> %a) {
 }
 
 ; CHECK-LABEL: test6_v2i64:
-; CHECK: vupklsw v[[REG1:[0-9]+]], v{{[0-9]+}}
+; CHECK-P8: lxvd2x v[[REG1:[0-9]+]], 0, r{{[0-9]+}}
+; CHECK-P9: lxv v[[REG2:[0-9]+]], 0(r{{[0-9]+}})
 ; CHECK-NOT: vmul
 ; CHECK-NEXT: vsld v[[REG3:[0-9]+]], v2, v[[REG2]]
 ; CHECK-NEXT: vsubudm v{{[0-9]+}}, v2, v[[REG3]]

diff --git a/llvm/test/CodeGen/PowerPC/p10-splatImm-CPload-pcrel.ll b/llvm/test/CodeGen/PowerPC/p10-splatImm-CPload-pcrel.ll
index 1ab74e6cb1cee..842cb929541cf 100644
--- a/llvm/test/CodeGen/PowerPC/p10-splatImm-CPload-pcrel.ll
+++ b/llvm/test/CodeGen/PowerPC/p10-splatImm-CPload-pcrel.ll
@@ -105,8 +105,9 @@ define dso_local <2 x double> @testDoubleToDoubleNaNFail() local_unnamed_addr {
 ;
 ; CHECK-NOPREFIX-LABEL: testDoubleToDoubleNaNFail:
 ; CHECK-NOPREFIX:       # %bb.0: # %entry
-; CHECK-NOPREFIX-NEXT:    vspltisw v2, -16
-; CHECK-NOPREFIX-NEXT:    vupklsw v2, v2
+; CHECK-NOPREFIX-NEXT:    addis r3, r2, .LCPI2_0@toc@ha
+; CHECK-NOPREFIX-NEXT:    addi r3, r3, .LCPI2_0@toc@l
+; CHECK-NOPREFIX-NEXT:    lxv vs34, 0(r3)
 ; CHECK-NOPREFIX-NEXT:    blr
 ;
 ; CHECK-BE-LABEL: testDoubleToDoubleNaNFail:

diff --git a/llvm/test/CodeGen/PowerPC/pre-inc-disable.ll b/llvm/test/CodeGen/PowerPC/pre-inc-disable.ll
index 6b29c780de600..4435484ae0b94 100644
--- a/llvm/test/CodeGen/PowerPC/pre-inc-disable.ll
+++ b/llvm/test/CodeGen/PowerPC/pre-inc-disable.ll
@@ -22,10 +22,10 @@ define void @test64(ptr nocapture readonly %pix2, i32 signext %i_pix2) {
 ; P9LE-NEXT:    lfdx 0, 3, 4
 ; P9LE-NEXT:    addis 3, 2, .LCPI0_0@toc@ha
 ; P9LE-NEXT:    xxlxor 2, 2, 2
-; P9LE-NEXT:    xxspltib 4, 16
+; P9LE-NEXT:    vspltisw 4, 8
 ; P9LE-NEXT:    lxsd 3, 4(5)
 ; P9LE-NEXT:    addi 3, 3, .LCPI0_0@toc@l
-; P9LE-NEXT:    vextsb2w 4, 4
+; P9LE-NEXT:    vadduwm 4, 4, 4
 ; P9LE-NEXT:    lxv 1, 0(3)
 ; P9LE-NEXT:    addis 3, 2, .LCPI0_1@toc@ha
 ; P9LE-NEXT:    addi 3, 3, .LCPI0_1@toc@l
@@ -45,10 +45,10 @@ define void @test64(ptr nocapture readonly %pix2, i32 signext %i_pix2) {
 ; P9BE-NEXT:    lxsdx 2, 3, 4
 ; P9BE-NEXT:    addis 3, 2, .LCPI0_0@toc@ha
 ; P9BE-NEXT:    xxlxor 1, 1, 1
-; P9BE-NEXT:    xxspltib 4, 16
+; P9BE-NEXT:    vspltisw 4, 8
 ; P9BE-NEXT:    lxsd 3, 4(5)
 ; P9BE-NEXT:    addi 3, 3, .LCPI0_0@toc@l
-; P9BE-NEXT:    vextsb2w 4, 4
+; P9BE-NEXT:    vadduwm 4, 4, 4
 ; P9BE-NEXT:    lxv 0, 0(3)
 ; P9BE-NEXT:    addis 3, 2, .LCPI0_1@toc@ha
 ; P9BE-NEXT:    addi 3, 3, .LCPI0_1@toc@l
@@ -68,11 +68,11 @@ define void @test64(ptr nocapture readonly %pix2, i32 signext %i_pix2) {
 ; P9BE-AIX-NEXT:    lxsdx 2, 3, 4
 ; P9BE-AIX-NEXT:    ld 3, L..C0(2) # %const.0
 ; P9BE-AIX-NEXT:    xxlxor 1, 1, 1
-; P9BE-AIX-NEXT:    xxspltib 4, 16
+; P9BE-AIX-NEXT:    vspltisw 4, 8
 ; P9BE-AIX-NEXT:    lxsd 3, 4(5)
 ; P9BE-AIX-NEXT:    lxv 0, 0(3)
 ; P9BE-AIX-NEXT:    ld 3, L..C1(2) # %const.1
-; P9BE-AIX-NEXT:    vextsb2w 4, 4
+; P9BE-AIX-NEXT:    vadduwm 4, 4, 4
 ; P9BE-AIX-NEXT:    xxperm 2, 1, 0
 ; P9BE-AIX-NEXT:    lxv 0, 0(3)
 ; P9BE-AIX-NEXT:    xxperm 3, 3, 0
@@ -89,10 +89,10 @@ define void @test64(ptr nocapture readonly %pix2, i32 signext %i_pix2) {
 ; P9BE-AIX32-NEXT:    lxvwsx 0, 3, 4
 ; P9BE-AIX32-NEXT:    li 3, 4
 ; P9BE-AIX32-NEXT:    xxlxor 2, 2, 2
-; P9BE-AIX32-NEXT:    xxspltib 4, 16
+; P9BE-AIX32-NEXT:    vspltisw 4, 8
 ; P9BE-AIX32-NEXT:    lxvwsx 1, 5, 3
 ; P9BE-AIX32-NEXT:    lwz 3, L..C0(2) # %const.0
-; P9BE-AIX32-NEXT:    vextsb2w 4, 4
+; P9BE-AIX32-NEXT:    vadduwm 4, 4, 4
 ; P9BE-AIX32-NEXT:    xxmrghw 2, 0, 1
 ; P9BE-AIX32-NEXT:    lxv 0, 0(3)
 ; P9BE-AIX32-NEXT:    li 3, 8
@@ -137,11 +137,11 @@ define void @test32(ptr nocapture readonly %pix2, i32 signext %i_pix2) {
 ; P9LE-NEXT:    lxsiwzx 2, 3, 4
 ; P9LE-NEXT:    addis 3, 2, .LCPI1_0@toc@ha
 ; P9LE-NEXT:    xxlxor 0, 0, 0
-; P9LE-NEXT:    xxspltib 4, 16
+; P9LE-NEXT:    vspltisw 4, 8
 ; P9LE-NEXT:    addi 3, 3, .LCPI1_0@toc@l
 ; P9LE-NEXT:    lxv 1, 0(3)
 ; P9LE-NEXT:    li 3, 4
-; P9LE-NEXT:    vextsb2w 4, 4
+; P9LE-NEXT:    vadduwm 4, 4, 4
 ; P9LE-NEXT:    lxsiwzx 3, 5, 3
 ; P9LE-NEXT:    xxperm 2, 0, 1
 ; P9LE-NEXT:    xxperm 3, 0, 1
@@ -158,11 +158,11 @@ define void @test32(ptr nocapture readonly %pix2, i32 signext %i_pix2) {
 ; P9BE-NEXT:    lxsiwzx 2, 3, 4
 ; P9BE-NEXT:    addis 3, 2, .LCPI1_0@toc@ha
 ; P9BE-NEXT:    xxlxor 0, 0, 0
-; P9BE-NEXT:    xxspltib 4, 16
+; P9BE-NEXT:    vspltisw 4, 8
 ; P9BE-NEXT:    addi 3, 3, .LCPI1_0@toc@l
 ; P9BE-NEXT:    lxv 1, 0(3)
 ; P9BE-NEXT:    li 3, 4
-; P9BE-NEXT:    vextsb2w 4, 4
+; P9BE-NEXT:    vadduwm 4, 4, 4
 ; P9BE-NEXT:    lxsiwzx 3, 5, 3
 ; P9BE-NEXT:    xxperm 2, 0, 1
 ; P9BE-NEXT:    xxperm 3, 0, 1
@@ -179,10 +179,10 @@ define void @test32(ptr nocapture readonly %pix2, i32 signext %i_pix2) {
 ; P9BE-AIX-NEXT:    lxsiwzx 2, 3, 4
 ; P9BE-AIX-NEXT:    ld 3, L..C2(2) # %const.0
 ; P9BE-AIX-NEXT:    xxlxor 0, 0, 0
-; P9BE-AIX-NEXT:    xxspltib 4, 16
+; P9BE-AIX-NEXT:    vspltisw 4, 8
 ; P9BE-AIX-NEXT:    lxv 1, 0(3)
 ; P9BE-AIX-NEXT:    li 3, 4
-; P9BE-AIX-NEXT:    vextsb2w 4, 4
+; P9BE-AIX-NEXT:    vadduwm 4, 4, 4
 ; P9BE-AIX-NEXT:    lxsiwzx 3, 5, 3
 ; P9BE-AIX-NEXT:    xxperm 2, 0, 1
 ; P9BE-AIX-NEXT:    xxperm 3, 0, 1
@@ -199,10 +199,10 @@ define void @test32(ptr nocapture readonly %pix2, i32 signext %i_pix2) {
 ; P9BE-AIX32-NEXT:    lxsiwzx 2, 3, 4
 ; P9BE-AIX32-NEXT:    lwz 3, L..C2(2) # %const.0
 ; P9BE-AIX32-NEXT:    xxlxor 0, 0, 0
-; P9BE-AIX32-NEXT:    xxspltib 4, 16
+; P9BE-AIX32-NEXT:    vspltisw 4, 8
 ; P9BE-AIX32-NEXT:    lxv 1, 0(3)
 ; P9BE-AIX32-NEXT:    li 3, 4
-; P9BE-AIX32-NEXT:    vextsb2w 4, 4
+; P9BE-AIX32-NEXT:    vadduwm 4, 4, 4
 ; P9BE-AIX32-NEXT:    lxsiwzx 3, 5, 3
 ; P9BE-AIX32-NEXT:    xxperm 2, 0, 1
 ; P9BE-AIX32-NEXT:    xxperm 3, 0, 1

diff --git a/llvm/test/CodeGen/PowerPC/splat-extend.ll b/llvm/test/CodeGen/PowerPC/splat-extend.ll
deleted file mode 100644
index 4be55468a25dc..0000000000000
--- a/llvm/test/CodeGen/PowerPC/splat-extend.ll
+++ /dev/null
@@ -1,50 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc -verify-machineinstrs -mtriple=powerpc64le-unknown-linux-gnu \
-; RUN:   -mcpu=pwr9 -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr < %s | \
-; RUN:   FileCheck %s
-; RUN: llc -verify-machineinstrs  -mtriple=powerpc64-aix-xcoff \
-; RUN:   -mcpu=pwr9 -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr < %s | \
-; RUN:   FileCheck %s
-; RUN: llc -verify-machineinstrs  -mtriple=powerpc-aix-xcoff \
-; RUN:   -mcpu=pwr9 -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr < %s | \
-; RUN:   FileCheck %s
-
-define dso_local noundef <8 x i16> @v103s() local_unnamed_addr #0 {
-; CHECK-LABEL: v103s:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    xxspltib v2, 103
-; CHECK-NEXT:    vupklsb v2, v2
-; CHECK-NEXT:    blr
-entry:
-  ret <8 x i16> splat (i16 103)
-}
-
-define dso_local noundef <2 x i64> @v103l() local_unnamed_addr #0 {
-; CHECK-LABEL: v103l:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    xxspltib v2, 103
-; CHECK-NEXT:    vextsb2d v2, v2
-; CHECK-NEXT:    blr
-entry:
-  ret <2 x i64> splat (i64 103)
-}
-
-define dso_local noundef <4 x i32> @v103i() local_unnamed_addr #0 {
-; CHECK-LABEL: v103i:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    xxspltib v2, 103
-; CHECK-NEXT:    vextsb2w v2, v2
-; CHECK-NEXT:    blr
-entry:
-  ret <4 x i32> splat (i32 103)
-}
-
-define dso_local noundef <2 x i64> @v11l() local_unnamed_addr #0 {
-; CHECK-LABEL: v11l:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vspltisw v2, -11
-; CHECK-NEXT:    vupklsw v2, v2
-; CHECK-NEXT:    blr
-entry:
-  ret <2 x i64> splat (i64 -11)
-}

diff --git a/llvm/test/CodeGen/PowerPC/vec_add_sub_doubleword.ll b/llvm/test/CodeGen/PowerPC/vec_add_sub_doubleword.ll
index 210aee13486c3..3f7e0b694ce4d 100644
--- a/llvm/test/CodeGen/PowerPC/vec_add_sub_doubleword.ll
+++ b/llvm/test/CodeGen/PowerPC/vec_add_sub_doubleword.ll
@@ -16,8 +16,9 @@ define <2 x i64> @test_add(<2 x i64> %x, <2 x i64> %y) nounwind {
 define <2 x i64> @increment_by_one(<2 x i64> %x) nounwind {
 ; VSX-LABEL: increment_by_one:
 ; VSX:       # %bb.0:
-; VSX-NEXT:    vspltisw 3, 1
-; VSX-NEXT:    vupklsw 3, 3
+; VSX-NEXT:    addis 3, 2, .LCPI1_0@toc@ha
+; VSX-NEXT:    addi 3, 3, .LCPI1_0@toc@l
+; VSX-NEXT:    lxvd2x 35, 0, 3
 ; VSX-NEXT:    vaddudm 2, 2, 3
 ; VSX-NEXT:    blr
 ;

diff --git a/llvm/test/CodeGen/PowerPC/vector-extend-sign.ll b/llvm/test/CodeGen/PowerPC/vector-extend-sign.ll
index ef3988e20e1f0..540a00fa84c5d 100644
--- a/llvm/test/CodeGen/PowerPC/vector-extend-sign.ll
+++ b/llvm/test/CodeGen/PowerPC/vector-extend-sign.ll
@@ -144,8 +144,9 @@ entry:
 define <2 x i64> @test_none(<2 x i64> %m) {
 ; CHECK-P9-LABEL: test_none:
 ; CHECK-P9:       # %bb.0: # %entry
-; CHECK-P9-NEXT:    xxspltib 35, 16
-; CHECK-P9-NEXT:    vextsb2d 3, 3
+; CHECK-P9-NEXT:    addis 3, 2, .LCPI5_0@toc@ha
+; CHECK-P9-NEXT:    addi 3, 3, .LCPI5_0@toc@l
+; CHECK-P9-NEXT:    lxv 35, 0(3)
 ; CHECK-P9-NEXT:    vsld 2, 2, 3
 ; CHECK-P9-NEXT:    vsrad 2, 2, 3
 ; CHECK-P9-NEXT:    blr

diff --git a/llvm/test/CodeGen/PowerPC/vector-popcnt-128-ult-ugt.ll b/llvm/test/CodeGen/PowerPC/vector-popcnt-128-ult-ugt.ll
index 43cbc62e0bb1c..04351346745b3 100644
--- a/llvm/test/CodeGen/PowerPC/vector-popcnt-128-ult-ugt.ll
+++ b/llvm/test/CodeGen/PowerPC/vector-popcnt-128-ult-ugt.ll
@@ -7838,9 +7838,9 @@ define <4 x i32> @ult_16_v4i32(<4 x i32> %0) {
 ;
 ; PWR9-LABEL: ult_16_v4i32:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 16
+; PWR9-NEXT:    vspltisw 3, 8
 ; PWR9-NEXT:    vpopcntw 2, 2
-; PWR9-NEXT:    vextsb2w 3, 3
+; PWR9-NEXT:    vadduwm 3, 3, 3
 ; PWR9-NEXT:    vcmpgtuw 2, 3, 2
 ; PWR9-NEXT:    blr
   %2 = tail call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %0)
@@ -7974,9 +7974,9 @@ define <4 x i32> @ugt_16_v4i32(<4 x i32> %0) {
 ;
 ; PWR9-LABEL: ugt_16_v4i32:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 16
+; PWR9-NEXT:    vspltisw 3, 8
 ; PWR9-NEXT:    vpopcntw 2, 2
-; PWR9-NEXT:    vextsb2w 3, 3
+; PWR9-NEXT:    vadduwm 3, 3, 3
 ; PWR9-NEXT:    vcmpgtuw 2, 2, 3
 ; PWR9-NEXT:    blr
   %2 = tail call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %0)
@@ -8108,9 +8108,10 @@ define <4 x i32> @ult_17_v4i32(<4 x i32> %0) {
 ;
 ; PWR9-LABEL: ult_17_v4i32:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 17
+; PWR9-NEXT:    vspltisw 3, -16
+; PWR9-NEXT:    vspltisw 4, 1
 ; PWR9-NEXT:    vpopcntw 2, 2
-; PWR9-NEXT:    vextsb2w 3, 3
+; PWR9-NEXT:    vsubuwm 3, 4, 3
 ; PWR9-NEXT:    vcmpgtuw 2, 3, 2
 ; PWR9-NEXT:    blr
   %2 = tail call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %0)
@@ -8242,9 +8243,10 @@ define <4 x i32> @ugt_17_v4i32(<4 x i32> %0) {
 ;
 ; PWR9-LABEL: ugt_17_v4i32:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 17
+; PWR9-NEXT:    vspltisw 3, -16
+; PWR9-NEXT:    vspltisw 4, 1
 ; PWR9-NEXT:    vpopcntw 2, 2
-; PWR9-NEXT:    vextsb2w 3, 3
+; PWR9-NEXT:    vsubuwm 3, 4, 3
 ; PWR9-NEXT:    vcmpgtuw 2, 2, 3
 ; PWR9-NEXT:    blr
   %2 = tail call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %0)
@@ -8378,9 +8380,9 @@ define <4 x i32> @ult_18_v4i32(<4 x i32> %0) {
 ;
 ; PWR9-LABEL: ult_18_v4i32:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 18
+; PWR9-NEXT:    vspltisw 3, 9
 ; PWR9-NEXT:    vpopcntw 2, 2
-; PWR9-NEXT:    vextsb2w 3, 3
+; PWR9-NEXT:    vadduwm 3, 3, 3
 ; PWR9-NEXT:    vcmpgtuw 2, 3, 2
 ; PWR9-NEXT:    blr
   %2 = tail call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %0)
@@ -8514,9 +8516,9 @@ define <4 x i32> @ugt_18_v4i32(<4 x i32> %0) {
 ;
 ; PWR9-LABEL: ugt_18_v4i32:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 18
+; PWR9-NEXT:    vspltisw 3, 9
 ; PWR9-NEXT:    vpopcntw 2, 2
-; PWR9-NEXT:    vextsb2w 3, 3
+; PWR9-NEXT:    vadduwm 3, 3, 3
 ; PWR9-NEXT:    vcmpgtuw 2, 2, 3
 ; PWR9-NEXT:    blr
   %2 = tail call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %0)
@@ -8651,9 +8653,10 @@ define <4 x i32> @ult_19_v4i32(<4 x i32> %0) {
 ;
 ; PWR9-LABEL: ult_19_v4i32:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 19
+; PWR9-NEXT:    vspltisw 3, -16
+; PWR9-NEXT:    vspltisw 4, 3
 ; PWR9-NEXT:    vpopcntw 2, 2
-; PWR9-NEXT:    vextsb2w 3, 3
+; PWR9-NEXT:    vsubuwm 3, 4, 3
 ; PWR9-NEXT:    vcmpgtuw 2, 3, 2
 ; PWR9-NEXT:    blr
   %2 = tail call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %0)
@@ -8788,9 +8791,10 @@ define <4 x i32> @ugt_19_v4i32(<4 x i32> %0) {
 ;
 ; PWR9-LABEL: ugt_19_v4i32:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 19
+; PWR9-NEXT:    vspltisw 3, -16
+; PWR9-NEXT:    vspltisw 4, 3
 ; PWR9-NEXT:    vpopcntw 2, 2
-; PWR9-NEXT:    vextsb2w 3, 3
+; PWR9-NEXT:    vsubuwm 3, 4, 3
 ; PWR9-NEXT:    vcmpgtuw 2, 2, 3
 ; PWR9-NEXT:    blr
   %2 = tail call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %0)
@@ -8924,9 +8928,9 @@ define <4 x i32> @ult_20_v4i32(<4 x i32> %0) {
 ;
 ; PWR9-LABEL: ult_20_v4i32:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 20
+; PWR9-NEXT:    vspltisw 3, 10
 ; PWR9-NEXT:    vpopcntw 2, 2
-; PWR9-NEXT:    vextsb2w 3, 3
+; PWR9-NEXT:    vadduwm 3, 3, 3
 ; PWR9-NEXT:    vcmpgtuw 2, 3, 2
 ; PWR9-NEXT:    blr
   %2 = tail call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %0)
@@ -9060,9 +9064,9 @@ define <4 x i32> @ugt_20_v4i32(<4 x i32> %0) {
 ;
 ; PWR9-LABEL: ugt_20_v4i32:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 20
+; PWR9-NEXT:    vspltisw 3, 10
 ; PWR9-NEXT:    vpopcntw 2, 2
-; PWR9-NEXT:    vextsb2w 3, 3
+; PWR9-NEXT:    vadduwm 3, 3, 3
 ; PWR9-NEXT:    vcmpgtuw 2, 2, 3
 ; PWR9-NEXT:    blr
   %2 = tail call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %0)
@@ -9197,9 +9201,10 @@ define <4 x i32> @ult_21_v4i32(<4 x i32> %0) {
 ;
 ; PWR9-LABEL: ult_21_v4i32:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 21
+; PWR9-NEXT:    vspltisw 3, -16
+; PWR9-NEXT:    vspltisw 4, 5
 ; PWR9-NEXT:    vpopcntw 2, 2
-; PWR9-NEXT:    vextsb2w 3, 3
+; PWR9-NEXT:    vsubuwm 3, 4, 3
 ; PWR9-NEXT:    vcmpgtuw 2, 3, 2
 ; PWR9-NEXT:    blr
   %2 = tail call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %0)
@@ -9334,9 +9339,10 @@ define <4 x i32> @ugt_21_v4i32(<4 x i32> %0) {
 ;
 ; PWR9-LABEL: ugt_21_v4i32:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 21
+; PWR9-NEXT:    vspltisw 3, -16
+; PWR9-NEXT:    vspltisw 4, 5
 ; PWR9-NEXT:    vpopcntw 2, 2
-; PWR9-NEXT:    vextsb2w 3, 3
+; PWR9-NEXT:    vsubuwm 3, 4, 3
 ; PWR9-NEXT:    vcmpgtuw 2, 2, 3
 ; PWR9-NEXT:    blr
   %2 = tail call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %0)
@@ -9470,9 +9476,9 @@ define <4 x i32> @ult_22_v4i32(<4 x i32> %0) {
 ;
 ; PWR9-LABEL: ult_22_v4i32:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 22
+; PWR9-NEXT:    vspltisw 3, 11
 ; PWR9-NEXT:    vpopcntw 2, 2
-; PWR9-NEXT:    vextsb2w 3, 3
+; PWR9-NEXT:    vadduwm 3, 3, 3
 ; PWR9-NEXT:    vcmpgtuw 2, 3, 2
 ; PWR9-NEXT:    blr
   %2 = tail call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %0)
@@ -9606,9 +9612,9 @@ define <4 x i32> @ugt_22_v4i32(<4 x i32> %0) {
 ;
 ; PWR9-LABEL: ugt_22_v4i32:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 22
+; PWR9-NEXT:    vspltisw 3, 11
 ; PWR9-NEXT:    vpopcntw 2, 2
-; PWR9-NEXT:    vextsb2w 3, 3
+; PWR9-NEXT:    vadduwm 3, 3, 3
 ; PWR9-NEXT:    vcmpgtuw 2, 2, 3
 ; PWR9-NEXT:    blr
   %2 = tail call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %0)
@@ -9743,9 +9749,10 @@ define <4 x i32> @ult_23_v4i32(<4 x i32> %0) {
 ;
 ; PWR9-LABEL: ult_23_v4i32:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 23
+; PWR9-NEXT:    vspltisw 3, -16
+; PWR9-NEXT:    vspltisw 4, 7
 ; PWR9-NEXT:    vpopcntw 2, 2
-; PWR9-NEXT:    vextsb2w 3, 3
+; PWR9-NEXT:    vsubuwm 3, 4, 3
 ; PWR9-NEXT:    vcmpgtuw 2, 3, 2
 ; PWR9-NEXT:    blr
   %2 = tail call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %0)
@@ -9880,9 +9887,10 @@ define <4 x i32> @ugt_23_v4i32(<4 x i32> %0) {
 ;
 ; PWR9-LABEL: ugt_23_v4i32:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 23
+; PWR9-NEXT:    vspltisw 3, -16
+; PWR9-NEXT:    vspltisw 4, 7
 ; PWR9-NEXT:    vpopcntw 2, 2
-; PWR9-NEXT:    vextsb2w 3, 3
+; PWR9-NEXT:    vsubuwm 3, 4, 3
 ; PWR9-NEXT:    vcmpgtuw 2, 2, 3
 ; PWR9-NEXT:    blr
   %2 = tail call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %0)
@@ -10010,9 +10018,9 @@ define <4 x i32> @ult_24_v4i32(<4 x i32> %0) {
 ;
 ; PWR9-LABEL: ult_24_v4i32:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 24
+; PWR9-NEXT:    vspltisw 3, 12
 ; PWR9-NEXT:    vpopcntw 2, 2
-; PWR9-NEXT:    vextsb2w 3, 3
+; PWR9-NEXT:    vadduwm 3, 3, 3
 ; PWR9-NEXT:    vcmpgtuw 2, 3, 2
 ; PWR9-NEXT:    blr
   %2 = tail call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %0)
@@ -10140,9 +10148,9 @@ define <4 x i32> @ugt_24_v4i32(<4 x i32> %0) {
 ;
 ; PWR9-LABEL: ugt_24_v4i32:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 24
+; PWR9-NEXT:    vspltisw 3, 12
 ; PWR9-NEXT:    vpopcntw 2, 2
-; PWR9-NEXT:    vextsb2w 3, 3
+; PWR9-NEXT:    vadduwm 3, 3, 3
 ; PWR9-NEXT:    vcmpgtuw 2, 2, 3
 ; PWR9-NEXT:    blr
   %2 = tail call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %0)
@@ -10277,9 +10285,10 @@ define <4 x i32> @ult_25_v4i32(<4 x i32> %0) {
 ;
 ; PWR9-LABEL: ult_25_v4i32:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 25
+; PWR9-NEXT:    vspltisw 3, -16
+; PWR9-NEXT:    vspltisw 4, 9
 ; PWR9-NEXT:    vpopcntw 2, 2
-; PWR9-NEXT:    vextsb2w 3, 3
+; PWR9-NEXT:    vsubuwm 3, 4, 3
 ; PWR9-NEXT:    vcmpgtuw 2, 3, 2
 ; PWR9-NEXT:    blr
   %2 = tail call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %0)
@@ -10414,9 +10423,10 @@ define <4 x i32> @ugt_25_v4i32(<4 x i32> %0) {
 ;
 ; PWR9-LABEL: ugt_25_v4i32:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 25
+; PWR9-NEXT:    vspltisw 3, -16
+; PWR9-NEXT:    vspltisw 4, 9
 ; PWR9-NEXT:    vpopcntw 2, 2
-; PWR9-NEXT:    vextsb2w 3, 3
+; PWR9-NEXT:    vsubuwm 3, 4, 3
 ; PWR9-NEXT:    vcmpgtuw 2, 2, 3
 ; PWR9-NEXT:    blr
   %2 = tail call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %0)
@@ -10550,9 +10560,9 @@ define <4 x i32> @ult_26_v4i32(<4 x i32> %0) {
 ;
 ; PWR9-LABEL: ult_26_v4i32:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 26
+; PWR9-NEXT:    vspltisw 3, 13
 ; PWR9-NEXT:    vpopcntw 2, 2
-; PWR9-NEXT:    vextsb2w 3, 3
+; PWR9-NEXT:    vadduwm 3, 3, 3
 ; PWR9-NEXT:    vcmpgtuw 2, 3, 2
 ; PWR9-NEXT:    blr
   %2 = tail call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %0)
@@ -10686,9 +10696,9 @@ define <4 x i32> @ugt_26_v4i32(<4 x i32> %0) {
 ;
 ; PWR9-LABEL: ugt_26_v4i32:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 26
+; PWR9-NEXT:    vspltisw 3, 13
 ; PWR9-NEXT:    vpopcntw 2, 2
-; PWR9-NEXT:    vextsb2w 3, 3
+; PWR9-NEXT:    vadduwm 3, 3, 3
 ; PWR9-NEXT:    vcmpgtuw 2, 2, 3
 ; PWR9-NEXT:    blr
   %2 = tail call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %0)
@@ -10823,9 +10833,10 @@ define <4 x i32> @ult_27_v4i32(<4 x i32> %0) {
 ;
 ; PWR9-LABEL: ult_27_v4i32:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 27
+; PWR9-NEXT:    vspltisw 3, -16
+; PWR9-NEXT:    vspltisw 4, 11
 ; PWR9-NEXT:    vpopcntw 2, 2
-; PWR9-NEXT:    vextsb2w 3, 3
+; PWR9-NEXT:    vsubuwm 3, 4, 3
 ; PWR9-NEXT:    vcmpgtuw 2, 3, 2
 ; PWR9-NEXT:    blr
   %2 = tail call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %0)
@@ -10960,9 +10971,10 @@ define <4 x i32> @ugt_27_v4i32(<4 x i32> %0) {
 ;
 ; PWR9-LABEL: ugt_27_v4i32:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 27
+; PWR9-NEXT:    vspltisw 3, -16
+; PWR9-NEXT:    vspltisw 4, 11
 ; PWR9-NEXT:    vpopcntw 2, 2
-; PWR9-NEXT:    vextsb2w 3, 3
+; PWR9-NEXT:    vsubuwm 3, 4, 3
 ; PWR9-NEXT:    vcmpgtuw 2, 2, 3
 ; PWR9-NEXT:    blr
   %2 = tail call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %0)
@@ -11096,9 +11108,9 @@ define <4 x i32> @ult_28_v4i32(<4 x i32> %0) {
 ;
 ; PWR9-LABEL: ult_28_v4i32:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 28
+; PWR9-NEXT:    vspltisw 3, 14
 ; PWR9-NEXT:    vpopcntw 2, 2
-; PWR9-NEXT:    vextsb2w 3, 3
+; PWR9-NEXT:    vadduwm 3, 3, 3
 ; PWR9-NEXT:    vcmpgtuw 2, 3, 2
 ; PWR9-NEXT:    blr
   %2 = tail call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %0)
@@ -11232,9 +11244,9 @@ define <4 x i32> @ugt_28_v4i32(<4 x i32> %0) {
 ;
 ; PWR9-LABEL: ugt_28_v4i32:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 28
+; PWR9-NEXT:    vspltisw 3, 14
 ; PWR9-NEXT:    vpopcntw 2, 2
-; PWR9-NEXT:    vextsb2w 3, 3
+; PWR9-NEXT:    vadduwm 3, 3, 3
 ; PWR9-NEXT:    vcmpgtuw 2, 2, 3
 ; PWR9-NEXT:    blr
   %2 = tail call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %0)
@@ -11369,9 +11381,10 @@ define <4 x i32> @ult_29_v4i32(<4 x i32> %0) {
 ;
 ; PWR9-LABEL: ult_29_v4i32:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 29
+; PWR9-NEXT:    vspltisw 3, -16
+; PWR9-NEXT:    vspltisw 4, 13
 ; PWR9-NEXT:    vpopcntw 2, 2
-; PWR9-NEXT:    vextsb2w 3, 3
+; PWR9-NEXT:    vsubuwm 3, 4, 3
 ; PWR9-NEXT:    vcmpgtuw 2, 3, 2
 ; PWR9-NEXT:    blr
   %2 = tail call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %0)
@@ -11506,9 +11519,10 @@ define <4 x i32> @ugt_29_v4i32(<4 x i32> %0) {
 ;
 ; PWR9-LABEL: ugt_29_v4i32:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 29
+; PWR9-NEXT:    vspltisw 3, -16
+; PWR9-NEXT:    vspltisw 4, 13
 ; PWR9-NEXT:    vpopcntw 2, 2
-; PWR9-NEXT:    vextsb2w 3, 3
+; PWR9-NEXT:    vsubuwm 3, 4, 3
 ; PWR9-NEXT:    vcmpgtuw 2, 2, 3
 ; PWR9-NEXT:    blr
   %2 = tail call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %0)
@@ -11642,9 +11656,9 @@ define <4 x i32> @ult_30_v4i32(<4 x i32> %0) {
 ;
 ; PWR9-LABEL: ult_30_v4i32:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 30
+; PWR9-NEXT:    vspltisw 3, 15
 ; PWR9-NEXT:    vpopcntw 2, 2
-; PWR9-NEXT:    vextsb2w 3, 3
+; PWR9-NEXT:    vadduwm 3, 3, 3
 ; PWR9-NEXT:    vcmpgtuw 2, 3, 2
 ; PWR9-NEXT:    blr
   %2 = tail call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %0)
@@ -11778,9 +11792,9 @@ define <4 x i32> @ugt_30_v4i32(<4 x i32> %0) {
 ;
 ; PWR9-LABEL: ugt_30_v4i32:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 30
+; PWR9-NEXT:    vspltisw 3, 15
 ; PWR9-NEXT:    vpopcntw 2, 2
-; PWR9-NEXT:    vextsb2w 3, 3
+; PWR9-NEXT:    vadduwm 3, 3, 3
 ; PWR9-NEXT:    vcmpgtuw 2, 2, 3
 ; PWR9-NEXT:    blr
   %2 = tail call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %0)
@@ -11915,9 +11929,10 @@ define <4 x i32> @ult_31_v4i32(<4 x i32> %0) {
 ;
 ; PWR9-LABEL: ult_31_v4i32:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 31
+; PWR9-NEXT:    vspltisw 3, -16
+; PWR9-NEXT:    vspltisw 4, 15
 ; PWR9-NEXT:    vpopcntw 2, 2
-; PWR9-NEXT:    vextsb2w 3, 3
+; PWR9-NEXT:    vsubuwm 3, 4, 3
 ; PWR9-NEXT:    vcmpgtuw 2, 3, 2
 ; PWR9-NEXT:    blr
   %2 = tail call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %0)
@@ -11976,17 +11991,19 @@ define <2 x i64> @ugt_1_v2i64(<2 x i64> %0) {
 ;
 ; PWR8-LABEL: ugt_1_v2i64:
 ; PWR8:       # %bb.0:
-; PWR8-NEXT:    vspltisw 3, 1
+; PWR8-NEXT:    addis 3, 2, .LCPI100_0 at toc@ha
 ; PWR8-NEXT:    vpopcntd 2, 2
-; PWR8-NEXT:    vupklsw 3, 3
+; PWR8-NEXT:    addi 3, 3, .LCPI100_0 at toc@l
+; PWR8-NEXT:    lxvd2x 35, 0, 3
 ; PWR8-NEXT:    vcmpgtud 2, 2, 3
 ; PWR8-NEXT:    blr
 ;
 ; PWR9-LABEL: ugt_1_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    vspltisw 3, 1
+; PWR9-NEXT:    addis 3, 2, .LCPI100_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vupklsw 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI100_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 2, 3
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -12044,17 +12061,19 @@ define <2 x i64> @ult_2_v2i64(<2 x i64> %0) {
 ;
 ; PWR8-LABEL: ult_2_v2i64:
 ; PWR8:       # %bb.0:
-; PWR8-NEXT:    vspltisw 3, 2
+; PWR8-NEXT:    addis 3, 2, .LCPI101_0 at toc@ha
 ; PWR8-NEXT:    vpopcntd 2, 2
-; PWR8-NEXT:    vupklsw 3, 3
+; PWR8-NEXT:    addi 3, 3, .LCPI101_0 at toc@l
+; PWR8-NEXT:    lxvd2x 35, 0, 3
 ; PWR8-NEXT:    vcmpgtud 2, 3, 2
 ; PWR8-NEXT:    blr
 ;
 ; PWR9-LABEL: ult_2_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    vspltisw 3, 2
+; PWR9-NEXT:    addis 3, 2, .LCPI101_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vupklsw 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI101_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 3, 2
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -12176,17 +12195,19 @@ define <2 x i64> @ugt_2_v2i64(<2 x i64> %0) {
 ;
 ; PWR8-LABEL: ugt_2_v2i64:
 ; PWR8:       # %bb.0:
-; PWR8-NEXT:    vspltisw 3, 2
+; PWR8-NEXT:    addis 3, 2, .LCPI102_0 at toc@ha
 ; PWR8-NEXT:    vpopcntd 2, 2
-; PWR8-NEXT:    vupklsw 3, 3
+; PWR8-NEXT:    addi 3, 3, .LCPI102_0 at toc@l
+; PWR8-NEXT:    lxvd2x 35, 0, 3
 ; PWR8-NEXT:    vcmpgtud 2, 2, 3
 ; PWR8-NEXT:    blr
 ;
 ; PWR9-LABEL: ugt_2_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    vspltisw 3, 2
+; PWR9-NEXT:    addis 3, 2, .LCPI102_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vupklsw 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI102_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 2, 3
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -12308,17 +12329,19 @@ define <2 x i64> @ult_3_v2i64(<2 x i64> %0) {
 ;
 ; PWR8-LABEL: ult_3_v2i64:
 ; PWR8:       # %bb.0:
-; PWR8-NEXT:    vspltisw 3, 3
+; PWR8-NEXT:    addis 3, 2, .LCPI103_0 at toc@ha
 ; PWR8-NEXT:    vpopcntd 2, 2
-; PWR8-NEXT:    vupklsw 3, 3
+; PWR8-NEXT:    addi 3, 3, .LCPI103_0 at toc@l
+; PWR8-NEXT:    lxvd2x 35, 0, 3
 ; PWR8-NEXT:    vcmpgtud 2, 3, 2
 ; PWR8-NEXT:    blr
 ;
 ; PWR9-LABEL: ult_3_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    vspltisw 3, 3
+; PWR9-NEXT:    addis 3, 2, .LCPI103_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vupklsw 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI103_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 3, 2
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -12440,17 +12463,19 @@ define <2 x i64> @ugt_3_v2i64(<2 x i64> %0) {
 ;
 ; PWR8-LABEL: ugt_3_v2i64:
 ; PWR8:       # %bb.0:
-; PWR8-NEXT:    vspltisw 3, 3
+; PWR8-NEXT:    addis 3, 2, .LCPI104_0 at toc@ha
 ; PWR8-NEXT:    vpopcntd 2, 2
-; PWR8-NEXT:    vupklsw 3, 3
+; PWR8-NEXT:    addi 3, 3, .LCPI104_0 at toc@l
+; PWR8-NEXT:    lxvd2x 35, 0, 3
 ; PWR8-NEXT:    vcmpgtud 2, 2, 3
 ; PWR8-NEXT:    blr
 ;
 ; PWR9-LABEL: ugt_3_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    vspltisw 3, 3
+; PWR9-NEXT:    addis 3, 2, .LCPI104_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vupklsw 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI104_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 2, 3
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -12572,17 +12597,19 @@ define <2 x i64> @ult_4_v2i64(<2 x i64> %0) {
 ;
 ; PWR8-LABEL: ult_4_v2i64:
 ; PWR8:       # %bb.0:
-; PWR8-NEXT:    vspltisw 3, 4
+; PWR8-NEXT:    addis 3, 2, .LCPI105_0 at toc@ha
 ; PWR8-NEXT:    vpopcntd 2, 2
-; PWR8-NEXT:    vupklsw 3, 3
+; PWR8-NEXT:    addi 3, 3, .LCPI105_0 at toc@l
+; PWR8-NEXT:    lxvd2x 35, 0, 3
 ; PWR8-NEXT:    vcmpgtud 2, 3, 2
 ; PWR8-NEXT:    blr
 ;
 ; PWR9-LABEL: ult_4_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    vspltisw 3, 4
+; PWR9-NEXT:    addis 3, 2, .LCPI105_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vupklsw 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI105_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 3, 2
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -12704,17 +12731,19 @@ define <2 x i64> @ugt_4_v2i64(<2 x i64> %0) {
 ;
 ; PWR8-LABEL: ugt_4_v2i64:
 ; PWR8:       # %bb.0:
-; PWR8-NEXT:    vspltisw 3, 4
+; PWR8-NEXT:    addis 3, 2, .LCPI106_0 at toc@ha
 ; PWR8-NEXT:    vpopcntd 2, 2
-; PWR8-NEXT:    vupklsw 3, 3
+; PWR8-NEXT:    addi 3, 3, .LCPI106_0 at toc@l
+; PWR8-NEXT:    lxvd2x 35, 0, 3
 ; PWR8-NEXT:    vcmpgtud 2, 2, 3
 ; PWR8-NEXT:    blr
 ;
 ; PWR9-LABEL: ugt_4_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    vspltisw 3, 4
+; PWR9-NEXT:    addis 3, 2, .LCPI106_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vupklsw 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI106_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 2, 3
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -12836,17 +12865,19 @@ define <2 x i64> @ult_5_v2i64(<2 x i64> %0) {
 ;
 ; PWR8-LABEL: ult_5_v2i64:
 ; PWR8:       # %bb.0:
-; PWR8-NEXT:    vspltisw 3, 5
+; PWR8-NEXT:    addis 3, 2, .LCPI107_0 at toc@ha
 ; PWR8-NEXT:    vpopcntd 2, 2
-; PWR8-NEXT:    vupklsw 3, 3
+; PWR8-NEXT:    addi 3, 3, .LCPI107_0 at toc@l
+; PWR8-NEXT:    lxvd2x 35, 0, 3
 ; PWR8-NEXT:    vcmpgtud 2, 3, 2
 ; PWR8-NEXT:    blr
 ;
 ; PWR9-LABEL: ult_5_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    vspltisw 3, 5
+; PWR9-NEXT:    addis 3, 2, .LCPI107_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vupklsw 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI107_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 3, 2
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -12968,17 +12999,19 @@ define <2 x i64> @ugt_5_v2i64(<2 x i64> %0) {
 ;
 ; PWR8-LABEL: ugt_5_v2i64:
 ; PWR8:       # %bb.0:
-; PWR8-NEXT:    vspltisw 3, 5
+; PWR8-NEXT:    addis 3, 2, .LCPI108_0 at toc@ha
 ; PWR8-NEXT:    vpopcntd 2, 2
-; PWR8-NEXT:    vupklsw 3, 3
+; PWR8-NEXT:    addi 3, 3, .LCPI108_0 at toc@l
+; PWR8-NEXT:    lxvd2x 35, 0, 3
 ; PWR8-NEXT:    vcmpgtud 2, 2, 3
 ; PWR8-NEXT:    blr
 ;
 ; PWR9-LABEL: ugt_5_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    vspltisw 3, 5
+; PWR9-NEXT:    addis 3, 2, .LCPI108_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vupklsw 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI108_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 2, 3
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -13100,17 +13133,19 @@ define <2 x i64> @ult_6_v2i64(<2 x i64> %0) {
 ;
 ; PWR8-LABEL: ult_6_v2i64:
 ; PWR8:       # %bb.0:
-; PWR8-NEXT:    vspltisw 3, 6
+; PWR8-NEXT:    addis 3, 2, .LCPI109_0 at toc@ha
 ; PWR8-NEXT:    vpopcntd 2, 2
-; PWR8-NEXT:    vupklsw 3, 3
+; PWR8-NEXT:    addi 3, 3, .LCPI109_0 at toc@l
+; PWR8-NEXT:    lxvd2x 35, 0, 3
 ; PWR8-NEXT:    vcmpgtud 2, 3, 2
 ; PWR8-NEXT:    blr
 ;
 ; PWR9-LABEL: ult_6_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    vspltisw 3, 6
+; PWR9-NEXT:    addis 3, 2, .LCPI109_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vupklsw 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI109_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 3, 2
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -13232,17 +13267,19 @@ define <2 x i64> @ugt_6_v2i64(<2 x i64> %0) {
 ;
 ; PWR8-LABEL: ugt_6_v2i64:
 ; PWR8:       # %bb.0:
-; PWR8-NEXT:    vspltisw 3, 6
+; PWR8-NEXT:    addis 3, 2, .LCPI110_0 at toc@ha
 ; PWR8-NEXT:    vpopcntd 2, 2
-; PWR8-NEXT:    vupklsw 3, 3
+; PWR8-NEXT:    addi 3, 3, .LCPI110_0 at toc@l
+; PWR8-NEXT:    lxvd2x 35, 0, 3
 ; PWR8-NEXT:    vcmpgtud 2, 2, 3
 ; PWR8-NEXT:    blr
 ;
 ; PWR9-LABEL: ugt_6_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    vspltisw 3, 6
+; PWR9-NEXT:    addis 3, 2, .LCPI110_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vupklsw 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI110_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 2, 3
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -13364,17 +13401,19 @@ define <2 x i64> @ult_7_v2i64(<2 x i64> %0) {
 ;
 ; PWR8-LABEL: ult_7_v2i64:
 ; PWR8:       # %bb.0:
-; PWR8-NEXT:    vspltisw 3, 7
+; PWR8-NEXT:    addis 3, 2, .LCPI111_0 at toc@ha
 ; PWR8-NEXT:    vpopcntd 2, 2
-; PWR8-NEXT:    vupklsw 3, 3
+; PWR8-NEXT:    addi 3, 3, .LCPI111_0 at toc@l
+; PWR8-NEXT:    lxvd2x 35, 0, 3
 ; PWR8-NEXT:    vcmpgtud 2, 3, 2
 ; PWR8-NEXT:    blr
 ;
 ; PWR9-LABEL: ult_7_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    vspltisw 3, 7
+; PWR9-NEXT:    addis 3, 2, .LCPI111_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vupklsw 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI111_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 3, 2
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -13496,17 +13535,19 @@ define <2 x i64> @ugt_7_v2i64(<2 x i64> %0) {
 ;
 ; PWR8-LABEL: ugt_7_v2i64:
 ; PWR8:       # %bb.0:
-; PWR8-NEXT:    vspltisw 3, 7
+; PWR8-NEXT:    addis 3, 2, .LCPI112_0 at toc@ha
 ; PWR8-NEXT:    vpopcntd 2, 2
-; PWR8-NEXT:    vupklsw 3, 3
+; PWR8-NEXT:    addi 3, 3, .LCPI112_0 at toc@l
+; PWR8-NEXT:    lxvd2x 35, 0, 3
 ; PWR8-NEXT:    vcmpgtud 2, 2, 3
 ; PWR8-NEXT:    blr
 ;
 ; PWR9-LABEL: ugt_7_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    vspltisw 3, 7
+; PWR9-NEXT:    addis 3, 2, .LCPI112_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vupklsw 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI112_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 2, 3
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -13628,17 +13669,19 @@ define <2 x i64> @ult_8_v2i64(<2 x i64> %0) {
 ;
 ; PWR8-LABEL: ult_8_v2i64:
 ; PWR8:       # %bb.0:
-; PWR8-NEXT:    vspltisw 3, 8
+; PWR8-NEXT:    addis 3, 2, .LCPI113_0 at toc@ha
 ; PWR8-NEXT:    vpopcntd 2, 2
-; PWR8-NEXT:    vupklsw 3, 3
+; PWR8-NEXT:    addi 3, 3, .LCPI113_0 at toc@l
+; PWR8-NEXT:    lxvd2x 35, 0, 3
 ; PWR8-NEXT:    vcmpgtud 2, 3, 2
 ; PWR8-NEXT:    blr
 ;
 ; PWR9-LABEL: ult_8_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    vspltisw 3, 8
+; PWR9-NEXT:    addis 3, 2, .LCPI113_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vupklsw 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI113_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 3, 2
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -13760,17 +13803,19 @@ define <2 x i64> @ugt_8_v2i64(<2 x i64> %0) {
 ;
 ; PWR8-LABEL: ugt_8_v2i64:
 ; PWR8:       # %bb.0:
-; PWR8-NEXT:    vspltisw 3, 8
+; PWR8-NEXT:    addis 3, 2, .LCPI114_0 at toc@ha
 ; PWR8-NEXT:    vpopcntd 2, 2
-; PWR8-NEXT:    vupklsw 3, 3
+; PWR8-NEXT:    addi 3, 3, .LCPI114_0 at toc@l
+; PWR8-NEXT:    lxvd2x 35, 0, 3
 ; PWR8-NEXT:    vcmpgtud 2, 2, 3
 ; PWR8-NEXT:    blr
 ;
 ; PWR9-LABEL: ugt_8_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    vspltisw 3, 8
+; PWR9-NEXT:    addis 3, 2, .LCPI114_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vupklsw 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI114_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 2, 3
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -13892,17 +13937,19 @@ define <2 x i64> @ult_9_v2i64(<2 x i64> %0) {
 ;
 ; PWR8-LABEL: ult_9_v2i64:
 ; PWR8:       # %bb.0:
-; PWR8-NEXT:    vspltisw 3, 9
+; PWR8-NEXT:    addis 3, 2, .LCPI115_0 at toc@ha
 ; PWR8-NEXT:    vpopcntd 2, 2
-; PWR8-NEXT:    vupklsw 3, 3
+; PWR8-NEXT:    addi 3, 3, .LCPI115_0 at toc@l
+; PWR8-NEXT:    lxvd2x 35, 0, 3
 ; PWR8-NEXT:    vcmpgtud 2, 3, 2
 ; PWR8-NEXT:    blr
 ;
 ; PWR9-LABEL: ult_9_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    vspltisw 3, 9
+; PWR9-NEXT:    addis 3, 2, .LCPI115_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vupklsw 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI115_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 3, 2
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -14024,17 +14071,19 @@ define <2 x i64> @ugt_9_v2i64(<2 x i64> %0) {
 ;
 ; PWR8-LABEL: ugt_9_v2i64:
 ; PWR8:       # %bb.0:
-; PWR8-NEXT:    vspltisw 3, 9
+; PWR8-NEXT:    addis 3, 2, .LCPI116_0 at toc@ha
 ; PWR8-NEXT:    vpopcntd 2, 2
-; PWR8-NEXT:    vupklsw 3, 3
+; PWR8-NEXT:    addi 3, 3, .LCPI116_0 at toc@l
+; PWR8-NEXT:    lxvd2x 35, 0, 3
 ; PWR8-NEXT:    vcmpgtud 2, 2, 3
 ; PWR8-NEXT:    blr
 ;
 ; PWR9-LABEL: ugt_9_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    vspltisw 3, 9
+; PWR9-NEXT:    addis 3, 2, .LCPI116_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vupklsw 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI116_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 2, 3
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -14156,17 +14205,19 @@ define <2 x i64> @ult_10_v2i64(<2 x i64> %0) {
 ;
 ; PWR8-LABEL: ult_10_v2i64:
 ; PWR8:       # %bb.0:
-; PWR8-NEXT:    vspltisw 3, 10
+; PWR8-NEXT:    addis 3, 2, .LCPI117_0 at toc@ha
 ; PWR8-NEXT:    vpopcntd 2, 2
-; PWR8-NEXT:    vupklsw 3, 3
+; PWR8-NEXT:    addi 3, 3, .LCPI117_0 at toc@l
+; PWR8-NEXT:    lxvd2x 35, 0, 3
 ; PWR8-NEXT:    vcmpgtud 2, 3, 2
 ; PWR8-NEXT:    blr
 ;
 ; PWR9-LABEL: ult_10_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    vspltisw 3, 10
+; PWR9-NEXT:    addis 3, 2, .LCPI117_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vupklsw 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI117_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 3, 2
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -14288,17 +14339,19 @@ define <2 x i64> @ugt_10_v2i64(<2 x i64> %0) {
 ;
 ; PWR8-LABEL: ugt_10_v2i64:
 ; PWR8:       # %bb.0:
-; PWR8-NEXT:    vspltisw 3, 10
+; PWR8-NEXT:    addis 3, 2, .LCPI118_0 at toc@ha
 ; PWR8-NEXT:    vpopcntd 2, 2
-; PWR8-NEXT:    vupklsw 3, 3
+; PWR8-NEXT:    addi 3, 3, .LCPI118_0 at toc@l
+; PWR8-NEXT:    lxvd2x 35, 0, 3
 ; PWR8-NEXT:    vcmpgtud 2, 2, 3
 ; PWR8-NEXT:    blr
 ;
 ; PWR9-LABEL: ugt_10_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    vspltisw 3, 10
+; PWR9-NEXT:    addis 3, 2, .LCPI118_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vupklsw 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI118_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 2, 3
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -14420,17 +14473,19 @@ define <2 x i64> @ult_11_v2i64(<2 x i64> %0) {
 ;
 ; PWR8-LABEL: ult_11_v2i64:
 ; PWR8:       # %bb.0:
-; PWR8-NEXT:    vspltisw 3, 11
+; PWR8-NEXT:    addis 3, 2, .LCPI119_0 at toc@ha
 ; PWR8-NEXT:    vpopcntd 2, 2
-; PWR8-NEXT:    vupklsw 3, 3
+; PWR8-NEXT:    addi 3, 3, .LCPI119_0 at toc@l
+; PWR8-NEXT:    lxvd2x 35, 0, 3
 ; PWR8-NEXT:    vcmpgtud 2, 3, 2
 ; PWR8-NEXT:    blr
 ;
 ; PWR9-LABEL: ult_11_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    vspltisw 3, 11
+; PWR9-NEXT:    addis 3, 2, .LCPI119_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vupklsw 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI119_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 3, 2
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -14552,17 +14607,19 @@ define <2 x i64> @ugt_11_v2i64(<2 x i64> %0) {
 ;
 ; PWR8-LABEL: ugt_11_v2i64:
 ; PWR8:       # %bb.0:
-; PWR8-NEXT:    vspltisw 3, 11
+; PWR8-NEXT:    addis 3, 2, .LCPI120_0 at toc@ha
 ; PWR8-NEXT:    vpopcntd 2, 2
-; PWR8-NEXT:    vupklsw 3, 3
+; PWR8-NEXT:    addi 3, 3, .LCPI120_0 at toc@l
+; PWR8-NEXT:    lxvd2x 35, 0, 3
 ; PWR8-NEXT:    vcmpgtud 2, 2, 3
 ; PWR8-NEXT:    blr
 ;
 ; PWR9-LABEL: ugt_11_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    vspltisw 3, 11
+; PWR9-NEXT:    addis 3, 2, .LCPI120_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vupklsw 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI120_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 2, 3
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -14684,17 +14741,19 @@ define <2 x i64> @ult_12_v2i64(<2 x i64> %0) {
 ;
 ; PWR8-LABEL: ult_12_v2i64:
 ; PWR8:       # %bb.0:
-; PWR8-NEXT:    vspltisw 3, 12
+; PWR8-NEXT:    addis 3, 2, .LCPI121_0 at toc@ha
 ; PWR8-NEXT:    vpopcntd 2, 2
-; PWR8-NEXT:    vupklsw 3, 3
+; PWR8-NEXT:    addi 3, 3, .LCPI121_0 at toc@l
+; PWR8-NEXT:    lxvd2x 35, 0, 3
 ; PWR8-NEXT:    vcmpgtud 2, 3, 2
 ; PWR8-NEXT:    blr
 ;
 ; PWR9-LABEL: ult_12_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    vspltisw 3, 12
+; PWR9-NEXT:    addis 3, 2, .LCPI121_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vupklsw 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI121_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 3, 2
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -14816,17 +14875,19 @@ define <2 x i64> @ugt_12_v2i64(<2 x i64> %0) {
 ;
 ; PWR8-LABEL: ugt_12_v2i64:
 ; PWR8:       # %bb.0:
-; PWR8-NEXT:    vspltisw 3, 12
+; PWR8-NEXT:    addis 3, 2, .LCPI122_0 at toc@ha
 ; PWR8-NEXT:    vpopcntd 2, 2
-; PWR8-NEXT:    vupklsw 3, 3
+; PWR8-NEXT:    addi 3, 3, .LCPI122_0 at toc@l
+; PWR8-NEXT:    lxvd2x 35, 0, 3
 ; PWR8-NEXT:    vcmpgtud 2, 2, 3
 ; PWR8-NEXT:    blr
 ;
 ; PWR9-LABEL: ugt_12_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    vspltisw 3, 12
+; PWR9-NEXT:    addis 3, 2, .LCPI122_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vupklsw 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI122_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 2, 3
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -14948,17 +15009,19 @@ define <2 x i64> @ult_13_v2i64(<2 x i64> %0) {
 ;
 ; PWR8-LABEL: ult_13_v2i64:
 ; PWR8:       # %bb.0:
-; PWR8-NEXT:    vspltisw 3, 13
+; PWR8-NEXT:    addis 3, 2, .LCPI123_0 at toc@ha
 ; PWR8-NEXT:    vpopcntd 2, 2
-; PWR8-NEXT:    vupklsw 3, 3
+; PWR8-NEXT:    addi 3, 3, .LCPI123_0 at toc@l
+; PWR8-NEXT:    lxvd2x 35, 0, 3
 ; PWR8-NEXT:    vcmpgtud 2, 3, 2
 ; PWR8-NEXT:    blr
 ;
 ; PWR9-LABEL: ult_13_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    vspltisw 3, 13
+; PWR9-NEXT:    addis 3, 2, .LCPI123_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vupklsw 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI123_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 3, 2
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -15080,17 +15143,19 @@ define <2 x i64> @ugt_13_v2i64(<2 x i64> %0) {
 ;
 ; PWR8-LABEL: ugt_13_v2i64:
 ; PWR8:       # %bb.0:
-; PWR8-NEXT:    vspltisw 3, 13
+; PWR8-NEXT:    addis 3, 2, .LCPI124_0 at toc@ha
 ; PWR8-NEXT:    vpopcntd 2, 2
-; PWR8-NEXT:    vupklsw 3, 3
+; PWR8-NEXT:    addi 3, 3, .LCPI124_0 at toc@l
+; PWR8-NEXT:    lxvd2x 35, 0, 3
 ; PWR8-NEXT:    vcmpgtud 2, 2, 3
 ; PWR8-NEXT:    blr
 ;
 ; PWR9-LABEL: ugt_13_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    vspltisw 3, 13
+; PWR9-NEXT:    addis 3, 2, .LCPI124_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vupklsw 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI124_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 2, 3
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -15212,17 +15277,19 @@ define <2 x i64> @ult_14_v2i64(<2 x i64> %0) {
 ;
 ; PWR8-LABEL: ult_14_v2i64:
 ; PWR8:       # %bb.0:
-; PWR8-NEXT:    vspltisw 3, 14
+; PWR8-NEXT:    addis 3, 2, .LCPI125_0 at toc@ha
 ; PWR8-NEXT:    vpopcntd 2, 2
-; PWR8-NEXT:    vupklsw 3, 3
+; PWR8-NEXT:    addi 3, 3, .LCPI125_0 at toc@l
+; PWR8-NEXT:    lxvd2x 35, 0, 3
 ; PWR8-NEXT:    vcmpgtud 2, 3, 2
 ; PWR8-NEXT:    blr
 ;
 ; PWR9-LABEL: ult_14_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    vspltisw 3, 14
+; PWR9-NEXT:    addis 3, 2, .LCPI125_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vupklsw 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI125_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 3, 2
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -15344,17 +15411,19 @@ define <2 x i64> @ugt_14_v2i64(<2 x i64> %0) {
 ;
 ; PWR8-LABEL: ugt_14_v2i64:
 ; PWR8:       # %bb.0:
-; PWR8-NEXT:    vspltisw 3, 14
+; PWR8-NEXT:    addis 3, 2, .LCPI126_0 at toc@ha
 ; PWR8-NEXT:    vpopcntd 2, 2
-; PWR8-NEXT:    vupklsw 3, 3
+; PWR8-NEXT:    addi 3, 3, .LCPI126_0 at toc@l
+; PWR8-NEXT:    lxvd2x 35, 0, 3
 ; PWR8-NEXT:    vcmpgtud 2, 2, 3
 ; PWR8-NEXT:    blr
 ;
 ; PWR9-LABEL: ugt_14_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    vspltisw 3, 14
+; PWR9-NEXT:    addis 3, 2, .LCPI126_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vupklsw 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI126_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 2, 3
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -15476,17 +15545,19 @@ define <2 x i64> @ult_15_v2i64(<2 x i64> %0) {
 ;
 ; PWR8-LABEL: ult_15_v2i64:
 ; PWR8:       # %bb.0:
-; PWR8-NEXT:    vspltisw 3, 15
+; PWR8-NEXT:    addis 3, 2, .LCPI127_0 at toc@ha
 ; PWR8-NEXT:    vpopcntd 2, 2
-; PWR8-NEXT:    vupklsw 3, 3
+; PWR8-NEXT:    addi 3, 3, .LCPI127_0 at toc@l
+; PWR8-NEXT:    lxvd2x 35, 0, 3
 ; PWR8-NEXT:    vcmpgtud 2, 3, 2
 ; PWR8-NEXT:    blr
 ;
 ; PWR9-LABEL: ult_15_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    vspltisw 3, 15
+; PWR9-NEXT:    addis 3, 2, .LCPI127_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vupklsw 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI127_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 3, 2
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -15608,17 +15679,19 @@ define <2 x i64> @ugt_15_v2i64(<2 x i64> %0) {
 ;
 ; PWR8-LABEL: ugt_15_v2i64:
 ; PWR8:       # %bb.0:
-; PWR8-NEXT:    vspltisw 3, 15
+; PWR8-NEXT:    addis 3, 2, .LCPI128_0 at toc@ha
 ; PWR8-NEXT:    vpopcntd 2, 2
-; PWR8-NEXT:    vupklsw 3, 3
+; PWR8-NEXT:    addi 3, 3, .LCPI128_0 at toc@l
+; PWR8-NEXT:    lxvd2x 35, 0, 3
 ; PWR8-NEXT:    vcmpgtud 2, 2, 3
 ; PWR8-NEXT:    blr
 ;
 ; PWR9-LABEL: ugt_15_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    vspltisw 3, 15
+; PWR9-NEXT:    addis 3, 2, .LCPI128_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vupklsw 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI128_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 2, 3
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -15749,9 +15822,10 @@ define <2 x i64> @ult_16_v2i64(<2 x i64> %0) {
 ;
 ; PWR9-LABEL: ult_16_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 16
+; PWR9-NEXT:    addis 3, 2, .LCPI129_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vextsb2d 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI129_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 3, 2
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -15882,9 +15956,10 @@ define <2 x i64> @ugt_16_v2i64(<2 x i64> %0) {
 ;
 ; PWR9-LABEL: ugt_16_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 16
+; PWR9-NEXT:    addis 3, 2, .LCPI130_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vextsb2d 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI130_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 2, 3
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -16015,9 +16090,10 @@ define <2 x i64> @ult_17_v2i64(<2 x i64> %0) {
 ;
 ; PWR9-LABEL: ult_17_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 17
+; PWR9-NEXT:    addis 3, 2, .LCPI131_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vextsb2d 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI131_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 3, 2
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -16148,9 +16224,10 @@ define <2 x i64> @ugt_17_v2i64(<2 x i64> %0) {
 ;
 ; PWR9-LABEL: ugt_17_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 17
+; PWR9-NEXT:    addis 3, 2, .LCPI132_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vextsb2d 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI132_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 2, 3
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -16281,9 +16358,10 @@ define <2 x i64> @ult_18_v2i64(<2 x i64> %0) {
 ;
 ; PWR9-LABEL: ult_18_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 18
+; PWR9-NEXT:    addis 3, 2, .LCPI133_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vextsb2d 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI133_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 3, 2
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -16414,9 +16492,10 @@ define <2 x i64> @ugt_18_v2i64(<2 x i64> %0) {
 ;
 ; PWR9-LABEL: ugt_18_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 18
+; PWR9-NEXT:    addis 3, 2, .LCPI134_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vextsb2d 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI134_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 2, 3
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -16547,9 +16626,10 @@ define <2 x i64> @ult_19_v2i64(<2 x i64> %0) {
 ;
 ; PWR9-LABEL: ult_19_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 19
+; PWR9-NEXT:    addis 3, 2, .LCPI135_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vextsb2d 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI135_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 3, 2
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -16680,9 +16760,10 @@ define <2 x i64> @ugt_19_v2i64(<2 x i64> %0) {
 ;
 ; PWR9-LABEL: ugt_19_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 19
+; PWR9-NEXT:    addis 3, 2, .LCPI136_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vextsb2d 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI136_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 2, 3
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -16813,9 +16894,10 @@ define <2 x i64> @ult_20_v2i64(<2 x i64> %0) {
 ;
 ; PWR9-LABEL: ult_20_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 20
+; PWR9-NEXT:    addis 3, 2, .LCPI137_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vextsb2d 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI137_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 3, 2
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -16946,9 +17028,10 @@ define <2 x i64> @ugt_20_v2i64(<2 x i64> %0) {
 ;
 ; PWR9-LABEL: ugt_20_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 20
+; PWR9-NEXT:    addis 3, 2, .LCPI138_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vextsb2d 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI138_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 2, 3
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -17079,9 +17162,10 @@ define <2 x i64> @ult_21_v2i64(<2 x i64> %0) {
 ;
 ; PWR9-LABEL: ult_21_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 21
+; PWR9-NEXT:    addis 3, 2, .LCPI139_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vextsb2d 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI139_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 3, 2
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -17212,9 +17296,10 @@ define <2 x i64> @ugt_21_v2i64(<2 x i64> %0) {
 ;
 ; PWR9-LABEL: ugt_21_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 21
+; PWR9-NEXT:    addis 3, 2, .LCPI140_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vextsb2d 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI140_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 2, 3
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -17345,9 +17430,10 @@ define <2 x i64> @ult_22_v2i64(<2 x i64> %0) {
 ;
 ; PWR9-LABEL: ult_22_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 22
+; PWR9-NEXT:    addis 3, 2, .LCPI141_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vextsb2d 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI141_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 3, 2
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -17478,9 +17564,10 @@ define <2 x i64> @ugt_22_v2i64(<2 x i64> %0) {
 ;
 ; PWR9-LABEL: ugt_22_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 22
+; PWR9-NEXT:    addis 3, 2, .LCPI142_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vextsb2d 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI142_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 2, 3
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -17611,9 +17698,10 @@ define <2 x i64> @ult_23_v2i64(<2 x i64> %0) {
 ;
 ; PWR9-LABEL: ult_23_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 23
+; PWR9-NEXT:    addis 3, 2, .LCPI143_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vextsb2d 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI143_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 3, 2
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -17744,9 +17832,10 @@ define <2 x i64> @ugt_23_v2i64(<2 x i64> %0) {
 ;
 ; PWR9-LABEL: ugt_23_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 23
+; PWR9-NEXT:    addis 3, 2, .LCPI144_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vextsb2d 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI144_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 2, 3
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -17877,9 +17966,10 @@ define <2 x i64> @ult_24_v2i64(<2 x i64> %0) {
 ;
 ; PWR9-LABEL: ult_24_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 24
+; PWR9-NEXT:    addis 3, 2, .LCPI145_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vextsb2d 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI145_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 3, 2
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -18010,9 +18100,10 @@ define <2 x i64> @ugt_24_v2i64(<2 x i64> %0) {
 ;
 ; PWR9-LABEL: ugt_24_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 24
+; PWR9-NEXT:    addis 3, 2, .LCPI146_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vextsb2d 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI146_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 2, 3
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -18143,9 +18234,10 @@ define <2 x i64> @ult_25_v2i64(<2 x i64> %0) {
 ;
 ; PWR9-LABEL: ult_25_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 25
+; PWR9-NEXT:    addis 3, 2, .LCPI147_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vextsb2d 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI147_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 3, 2
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -18276,9 +18368,10 @@ define <2 x i64> @ugt_25_v2i64(<2 x i64> %0) {
 ;
 ; PWR9-LABEL: ugt_25_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 25
+; PWR9-NEXT:    addis 3, 2, .LCPI148_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vextsb2d 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI148_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 2, 3
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -18409,9 +18502,10 @@ define <2 x i64> @ult_26_v2i64(<2 x i64> %0) {
 ;
 ; PWR9-LABEL: ult_26_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 26
+; PWR9-NEXT:    addis 3, 2, .LCPI149_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vextsb2d 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI149_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 3, 2
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -18542,9 +18636,10 @@ define <2 x i64> @ugt_26_v2i64(<2 x i64> %0) {
 ;
 ; PWR9-LABEL: ugt_26_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 26
+; PWR9-NEXT:    addis 3, 2, .LCPI150_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vextsb2d 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI150_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 2, 3
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -18675,9 +18770,10 @@ define <2 x i64> @ult_27_v2i64(<2 x i64> %0) {
 ;
 ; PWR9-LABEL: ult_27_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 27
+; PWR9-NEXT:    addis 3, 2, .LCPI151_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vextsb2d 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI151_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 3, 2
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -18808,9 +18904,10 @@ define <2 x i64> @ugt_27_v2i64(<2 x i64> %0) {
 ;
 ; PWR9-LABEL: ugt_27_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 27
+; PWR9-NEXT:    addis 3, 2, .LCPI152_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vextsb2d 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI152_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 2, 3
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -18941,9 +19038,10 @@ define <2 x i64> @ult_28_v2i64(<2 x i64> %0) {
 ;
 ; PWR9-LABEL: ult_28_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 28
+; PWR9-NEXT:    addis 3, 2, .LCPI153_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vextsb2d 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI153_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 3, 2
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -19074,9 +19172,10 @@ define <2 x i64> @ugt_28_v2i64(<2 x i64> %0) {
 ;
 ; PWR9-LABEL: ugt_28_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 28
+; PWR9-NEXT:    addis 3, 2, .LCPI154_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vextsb2d 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI154_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 2, 3
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -19207,9 +19306,10 @@ define <2 x i64> @ult_29_v2i64(<2 x i64> %0) {
 ;
 ; PWR9-LABEL: ult_29_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 29
+; PWR9-NEXT:    addis 3, 2, .LCPI155_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vextsb2d 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI155_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 3, 2
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -19340,9 +19440,10 @@ define <2 x i64> @ugt_29_v2i64(<2 x i64> %0) {
 ;
 ; PWR9-LABEL: ugt_29_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 29
+; PWR9-NEXT:    addis 3, 2, .LCPI156_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vextsb2d 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI156_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 2, 3
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -19473,9 +19574,10 @@ define <2 x i64> @ult_30_v2i64(<2 x i64> %0) {
 ;
 ; PWR9-LABEL: ult_30_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 30
+; PWR9-NEXT:    addis 3, 2, .LCPI157_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vextsb2d 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI157_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 3, 2
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -19606,9 +19708,10 @@ define <2 x i64> @ugt_30_v2i64(<2 x i64> %0) {
 ;
 ; PWR9-LABEL: ugt_30_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 30
+; PWR9-NEXT:    addis 3, 2, .LCPI158_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vextsb2d 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI158_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 2, 3
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -19739,9 +19842,10 @@ define <2 x i64> @ult_31_v2i64(<2 x i64> %0) {
 ;
 ; PWR9-LABEL: ult_31_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 31
+; PWR9-NEXT:    addis 3, 2, .LCPI159_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vextsb2d 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI159_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 3, 2
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -19872,9 +19976,10 @@ define <2 x i64> @ugt_31_v2i64(<2 x i64> %0) {
 ;
 ; PWR9-LABEL: ugt_31_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 31
+; PWR9-NEXT:    addis 3, 2, .LCPI160_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vextsb2d 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI160_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 2, 3
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -20005,9 +20110,10 @@ define <2 x i64> @ult_32_v2i64(<2 x i64> %0) {
 ;
 ; PWR9-LABEL: ult_32_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 32
+; PWR9-NEXT:    addis 3, 2, .LCPI161_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vextsb2d 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI161_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 3, 2
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -20138,9 +20244,10 @@ define <2 x i64> @ugt_32_v2i64(<2 x i64> %0) {
 ;
 ; PWR9-LABEL: ugt_32_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 32
+; PWR9-NEXT:    addis 3, 2, .LCPI162_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vextsb2d 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI162_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 2, 3
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -20271,9 +20378,10 @@ define <2 x i64> @ult_33_v2i64(<2 x i64> %0) {
 ;
 ; PWR9-LABEL: ult_33_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 33
+; PWR9-NEXT:    addis 3, 2, .LCPI163_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vextsb2d 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI163_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 3, 2
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -20404,9 +20512,10 @@ define <2 x i64> @ugt_33_v2i64(<2 x i64> %0) {
 ;
 ; PWR9-LABEL: ugt_33_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 33
+; PWR9-NEXT:    addis 3, 2, .LCPI164_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vextsb2d 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI164_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 2, 3
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -20537,9 +20646,10 @@ define <2 x i64> @ult_34_v2i64(<2 x i64> %0) {
 ;
 ; PWR9-LABEL: ult_34_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 34
+; PWR9-NEXT:    addis 3, 2, .LCPI165_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vextsb2d 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI165_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 3, 2
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -20670,9 +20780,10 @@ define <2 x i64> @ugt_34_v2i64(<2 x i64> %0) {
 ;
 ; PWR9-LABEL: ugt_34_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 34
+; PWR9-NEXT:    addis 3, 2, .LCPI166_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vextsb2d 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI166_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 2, 3
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -20803,9 +20914,10 @@ define <2 x i64> @ult_35_v2i64(<2 x i64> %0) {
 ;
 ; PWR9-LABEL: ult_35_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 35
+; PWR9-NEXT:    addis 3, 2, .LCPI167_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vextsb2d 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI167_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 3, 2
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -20936,9 +21048,10 @@ define <2 x i64> @ugt_35_v2i64(<2 x i64> %0) {
 ;
 ; PWR9-LABEL: ugt_35_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 35
+; PWR9-NEXT:    addis 3, 2, .LCPI168_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vextsb2d 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI168_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 2, 3
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -21069,9 +21182,10 @@ define <2 x i64> @ult_36_v2i64(<2 x i64> %0) {
 ;
 ; PWR9-LABEL: ult_36_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 36
+; PWR9-NEXT:    addis 3, 2, .LCPI169_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vextsb2d 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI169_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 3, 2
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -21202,9 +21316,10 @@ define <2 x i64> @ugt_36_v2i64(<2 x i64> %0) {
 ;
 ; PWR9-LABEL: ugt_36_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 36
+; PWR9-NEXT:    addis 3, 2, .LCPI170_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vextsb2d 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI170_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 2, 3
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -21335,9 +21450,10 @@ define <2 x i64> @ult_37_v2i64(<2 x i64> %0) {
 ;
 ; PWR9-LABEL: ult_37_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 37
+; PWR9-NEXT:    addis 3, 2, .LCPI171_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vextsb2d 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI171_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 3, 2
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -21468,9 +21584,10 @@ define <2 x i64> @ugt_37_v2i64(<2 x i64> %0) {
 ;
 ; PWR9-LABEL: ugt_37_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 37
+; PWR9-NEXT:    addis 3, 2, .LCPI172_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vextsb2d 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI172_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 2, 3
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -21601,9 +21718,10 @@ define <2 x i64> @ult_38_v2i64(<2 x i64> %0) {
 ;
 ; PWR9-LABEL: ult_38_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 38
+; PWR9-NEXT:    addis 3, 2, .LCPI173_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vextsb2d 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI173_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 3, 2
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -21734,9 +21852,10 @@ define <2 x i64> @ugt_38_v2i64(<2 x i64> %0) {
 ;
 ; PWR9-LABEL: ugt_38_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 38
+; PWR9-NEXT:    addis 3, 2, .LCPI174_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vextsb2d 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI174_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 2, 3
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -21867,9 +21986,10 @@ define <2 x i64> @ult_39_v2i64(<2 x i64> %0) {
 ;
 ; PWR9-LABEL: ult_39_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 39
+; PWR9-NEXT:    addis 3, 2, .LCPI175_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vextsb2d 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI175_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 3, 2
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -22000,9 +22120,10 @@ define <2 x i64> @ugt_39_v2i64(<2 x i64> %0) {
 ;
 ; PWR9-LABEL: ugt_39_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 39
+; PWR9-NEXT:    addis 3, 2, .LCPI176_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vextsb2d 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI176_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 2, 3
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -22133,9 +22254,10 @@ define <2 x i64> @ult_40_v2i64(<2 x i64> %0) {
 ;
 ; PWR9-LABEL: ult_40_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 40
+; PWR9-NEXT:    addis 3, 2, .LCPI177_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vextsb2d 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI177_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 3, 2
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -22266,9 +22388,10 @@ define <2 x i64> @ugt_40_v2i64(<2 x i64> %0) {
 ;
 ; PWR9-LABEL: ugt_40_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 40
+; PWR9-NEXT:    addis 3, 2, .LCPI178_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vextsb2d 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI178_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 2, 3
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -22399,9 +22522,10 @@ define <2 x i64> @ult_41_v2i64(<2 x i64> %0) {
 ;
 ; PWR9-LABEL: ult_41_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 41
+; PWR9-NEXT:    addis 3, 2, .LCPI179_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vextsb2d 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI179_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 3, 2
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -22532,9 +22656,10 @@ define <2 x i64> @ugt_41_v2i64(<2 x i64> %0) {
 ;
 ; PWR9-LABEL: ugt_41_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 41
+; PWR9-NEXT:    addis 3, 2, .LCPI180_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vextsb2d 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI180_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 2, 3
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -22665,9 +22790,10 @@ define <2 x i64> @ult_42_v2i64(<2 x i64> %0) {
 ;
 ; PWR9-LABEL: ult_42_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 42
+; PWR9-NEXT:    addis 3, 2, .LCPI181_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vextsb2d 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI181_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 3, 2
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -22798,9 +22924,10 @@ define <2 x i64> @ugt_42_v2i64(<2 x i64> %0) {
 ;
 ; PWR9-LABEL: ugt_42_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 42
+; PWR9-NEXT:    addis 3, 2, .LCPI182_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vextsb2d 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI182_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 2, 3
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -22931,9 +23058,10 @@ define <2 x i64> @ult_43_v2i64(<2 x i64> %0) {
 ;
 ; PWR9-LABEL: ult_43_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 43
+; PWR9-NEXT:    addis 3, 2, .LCPI183_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vextsb2d 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI183_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 3, 2
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -23064,9 +23192,10 @@ define <2 x i64> @ugt_43_v2i64(<2 x i64> %0) {
 ;
 ; PWR9-LABEL: ugt_43_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 43
+; PWR9-NEXT:    addis 3, 2, .LCPI184_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vextsb2d 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI184_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 2, 3
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -23197,9 +23326,10 @@ define <2 x i64> @ult_44_v2i64(<2 x i64> %0) {
 ;
 ; PWR9-LABEL: ult_44_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 44
+; PWR9-NEXT:    addis 3, 2, .LCPI185_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vextsb2d 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI185_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 3, 2
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -23330,9 +23460,10 @@ define <2 x i64> @ugt_44_v2i64(<2 x i64> %0) {
 ;
 ; PWR9-LABEL: ugt_44_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 44
+; PWR9-NEXT:    addis 3, 2, .LCPI186_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vextsb2d 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI186_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 2, 3
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -23463,9 +23594,10 @@ define <2 x i64> @ult_45_v2i64(<2 x i64> %0) {
 ;
 ; PWR9-LABEL: ult_45_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 45
+; PWR9-NEXT:    addis 3, 2, .LCPI187_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vextsb2d 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI187_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 3, 2
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -23596,9 +23728,10 @@ define <2 x i64> @ugt_45_v2i64(<2 x i64> %0) {
 ;
 ; PWR9-LABEL: ugt_45_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 45
+; PWR9-NEXT:    addis 3, 2, .LCPI188_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vextsb2d 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI188_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 2, 3
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -23729,9 +23862,10 @@ define <2 x i64> @ult_46_v2i64(<2 x i64> %0) {
 ;
 ; PWR9-LABEL: ult_46_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 46
+; PWR9-NEXT:    addis 3, 2, .LCPI189_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vextsb2d 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI189_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 3, 2
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -23862,9 +23996,10 @@ define <2 x i64> @ugt_46_v2i64(<2 x i64> %0) {
 ;
 ; PWR9-LABEL: ugt_46_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 46
+; PWR9-NEXT:    addis 3, 2, .LCPI190_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vextsb2d 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI190_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 2, 3
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -23995,9 +24130,10 @@ define <2 x i64> @ult_47_v2i64(<2 x i64> %0) {
 ;
 ; PWR9-LABEL: ult_47_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 47
+; PWR9-NEXT:    addis 3, 2, .LCPI191_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vextsb2d 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI191_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 3, 2
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -24128,9 +24264,10 @@ define <2 x i64> @ugt_47_v2i64(<2 x i64> %0) {
 ;
 ; PWR9-LABEL: ugt_47_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 47
+; PWR9-NEXT:    addis 3, 2, .LCPI192_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vextsb2d 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI192_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 2, 3
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -24261,9 +24398,10 @@ define <2 x i64> @ult_48_v2i64(<2 x i64> %0) {
 ;
 ; PWR9-LABEL: ult_48_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 48
+; PWR9-NEXT:    addis 3, 2, .LCPI193_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vextsb2d 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI193_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 3, 2
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -24394,9 +24532,10 @@ define <2 x i64> @ugt_48_v2i64(<2 x i64> %0) {
 ;
 ; PWR9-LABEL: ugt_48_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 48
+; PWR9-NEXT:    addis 3, 2, .LCPI194_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vextsb2d 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI194_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 2, 3
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -24527,9 +24666,10 @@ define <2 x i64> @ult_49_v2i64(<2 x i64> %0) {
 ;
 ; PWR9-LABEL: ult_49_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 49
+; PWR9-NEXT:    addis 3, 2, .LCPI195_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vextsb2d 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI195_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 3, 2
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -24660,9 +24800,10 @@ define <2 x i64> @ugt_49_v2i64(<2 x i64> %0) {
 ;
 ; PWR9-LABEL: ugt_49_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 49
+; PWR9-NEXT:    addis 3, 2, .LCPI196_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vextsb2d 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI196_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 2, 3
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -24793,9 +24934,10 @@ define <2 x i64> @ult_50_v2i64(<2 x i64> %0) {
 ;
 ; PWR9-LABEL: ult_50_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 50
+; PWR9-NEXT:    addis 3, 2, .LCPI197_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vextsb2d 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI197_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 3, 2
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -24926,9 +25068,10 @@ define <2 x i64> @ugt_50_v2i64(<2 x i64> %0) {
 ;
 ; PWR9-LABEL: ugt_50_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 50
+; PWR9-NEXT:    addis 3, 2, .LCPI198_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vextsb2d 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI198_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 2, 3
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -25059,9 +25202,10 @@ define <2 x i64> @ult_51_v2i64(<2 x i64> %0) {
 ;
 ; PWR9-LABEL: ult_51_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 51
+; PWR9-NEXT:    addis 3, 2, .LCPI199_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vextsb2d 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI199_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 3, 2
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -25192,9 +25336,10 @@ define <2 x i64> @ugt_51_v2i64(<2 x i64> %0) {
 ;
 ; PWR9-LABEL: ugt_51_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 51
+; PWR9-NEXT:    addis 3, 2, .LCPI200_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vextsb2d 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI200_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 2, 3
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -25325,9 +25470,10 @@ define <2 x i64> @ult_52_v2i64(<2 x i64> %0) {
 ;
 ; PWR9-LABEL: ult_52_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 52
+; PWR9-NEXT:    addis 3, 2, .LCPI201_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vextsb2d 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI201_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 3, 2
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -25458,9 +25604,10 @@ define <2 x i64> @ugt_52_v2i64(<2 x i64> %0) {
 ;
 ; PWR9-LABEL: ugt_52_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 52
+; PWR9-NEXT:    addis 3, 2, .LCPI202_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vextsb2d 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI202_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 2, 3
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -25591,9 +25738,10 @@ define <2 x i64> @ult_53_v2i64(<2 x i64> %0) {
 ;
 ; PWR9-LABEL: ult_53_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 53
+; PWR9-NEXT:    addis 3, 2, .LCPI203_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vextsb2d 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI203_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 3, 2
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -25724,9 +25872,10 @@ define <2 x i64> @ugt_53_v2i64(<2 x i64> %0) {
 ;
 ; PWR9-LABEL: ugt_53_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 53
+; PWR9-NEXT:    addis 3, 2, .LCPI204_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vextsb2d 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI204_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 2, 3
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -25857,9 +26006,10 @@ define <2 x i64> @ult_54_v2i64(<2 x i64> %0) {
 ;
 ; PWR9-LABEL: ult_54_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 54
+; PWR9-NEXT:    addis 3, 2, .LCPI205_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vextsb2d 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI205_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 3, 2
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -25990,9 +26140,10 @@ define <2 x i64> @ugt_54_v2i64(<2 x i64> %0) {
 ;
 ; PWR9-LABEL: ugt_54_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 54
+; PWR9-NEXT:    addis 3, 2, .LCPI206_0 at toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vextsb2d 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI206_0 at toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 2, 3
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -26123,9 +26274,10 @@ define <2 x i64> @ult_55_v2i64(<2 x i64> %0) {
 ;
 ; PWR9-LABEL: ult_55_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 55
+; PWR9-NEXT:    addis 3, 2, .LCPI207_0@toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vextsb2d 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI207_0@toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 3, 2
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -26256,9 +26408,10 @@ define <2 x i64> @ugt_55_v2i64(<2 x i64> %0) {
 ;
 ; PWR9-LABEL: ugt_55_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 55
+; PWR9-NEXT:    addis 3, 2, .LCPI208_0@toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vextsb2d 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI208_0@toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 2, 3
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -26389,9 +26542,10 @@ define <2 x i64> @ult_56_v2i64(<2 x i64> %0) {
 ;
 ; PWR9-LABEL: ult_56_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 56
+; PWR9-NEXT:    addis 3, 2, .LCPI209_0@toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vextsb2d 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI209_0@toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 3, 2
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -26522,9 +26676,10 @@ define <2 x i64> @ugt_56_v2i64(<2 x i64> %0) {
 ;
 ; PWR9-LABEL: ugt_56_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 56
+; PWR9-NEXT:    addis 3, 2, .LCPI210_0@toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vextsb2d 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI210_0@toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 2, 3
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -26655,9 +26810,10 @@ define <2 x i64> @ult_57_v2i64(<2 x i64> %0) {
 ;
 ; PWR9-LABEL: ult_57_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 57
+; PWR9-NEXT:    addis 3, 2, .LCPI211_0@toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vextsb2d 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI211_0@toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 3, 2
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -26788,9 +26944,10 @@ define <2 x i64> @ugt_57_v2i64(<2 x i64> %0) {
 ;
 ; PWR9-LABEL: ugt_57_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 57
+; PWR9-NEXT:    addis 3, 2, .LCPI212_0@toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vextsb2d 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI212_0@toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 2, 3
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -26921,9 +27078,10 @@ define <2 x i64> @ult_58_v2i64(<2 x i64> %0) {
 ;
 ; PWR9-LABEL: ult_58_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 58
+; PWR9-NEXT:    addis 3, 2, .LCPI213_0@toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vextsb2d 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI213_0@toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 3, 2
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -27054,9 +27212,10 @@ define <2 x i64> @ugt_58_v2i64(<2 x i64> %0) {
 ;
 ; PWR9-LABEL: ugt_58_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 58
+; PWR9-NEXT:    addis 3, 2, .LCPI214_0@toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vextsb2d 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI214_0@toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 2, 3
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -27187,9 +27346,10 @@ define <2 x i64> @ult_59_v2i64(<2 x i64> %0) {
 ;
 ; PWR9-LABEL: ult_59_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 59
+; PWR9-NEXT:    addis 3, 2, .LCPI215_0@toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vextsb2d 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI215_0@toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 3, 2
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -27320,9 +27480,10 @@ define <2 x i64> @ugt_59_v2i64(<2 x i64> %0) {
 ;
 ; PWR9-LABEL: ugt_59_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 59
+; PWR9-NEXT:    addis 3, 2, .LCPI216_0@toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vextsb2d 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI216_0@toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 2, 3
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -27453,9 +27614,10 @@ define <2 x i64> @ult_60_v2i64(<2 x i64> %0) {
 ;
 ; PWR9-LABEL: ult_60_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 60
+; PWR9-NEXT:    addis 3, 2, .LCPI217_0@toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vextsb2d 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI217_0@toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 3, 2
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -27586,9 +27748,10 @@ define <2 x i64> @ugt_60_v2i64(<2 x i64> %0) {
 ;
 ; PWR9-LABEL: ugt_60_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 60
+; PWR9-NEXT:    addis 3, 2, .LCPI218_0@toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vextsb2d 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI218_0@toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 2, 3
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -27719,9 +27882,10 @@ define <2 x i64> @ult_61_v2i64(<2 x i64> %0) {
 ;
 ; PWR9-LABEL: ult_61_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 61
+; PWR9-NEXT:    addis 3, 2, .LCPI219_0@toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vextsb2d 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI219_0@toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 3, 2
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -27852,9 +28016,10 @@ define <2 x i64> @ugt_61_v2i64(<2 x i64> %0) {
 ;
 ; PWR9-LABEL: ugt_61_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 61
+; PWR9-NEXT:    addis 3, 2, .LCPI220_0@toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vextsb2d 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI220_0@toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 2, 3
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -27985,9 +28150,10 @@ define <2 x i64> @ult_62_v2i64(<2 x i64> %0) {
 ;
 ; PWR9-LABEL: ult_62_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 62
+; PWR9-NEXT:    addis 3, 2, .LCPI221_0@toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vextsb2d 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI221_0@toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 3, 2
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -28118,9 +28284,10 @@ define <2 x i64> @ugt_62_v2i64(<2 x i64> %0) {
 ;
 ; PWR9-LABEL: ugt_62_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 62
+; PWR9-NEXT:    addis 3, 2, .LCPI222_0@toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vextsb2d 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI222_0@toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 2, 3
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -28251,9 +28418,10 @@ define <2 x i64> @ult_63_v2i64(<2 x i64> %0) {
 ;
 ; PWR9-LABEL: ult_63_v2i64:
 ; PWR9:       # %bb.0:
-; PWR9-NEXT:    xxspltib 35, 63
+; PWR9-NEXT:    addis 3, 2, .LCPI223_0@toc@ha
 ; PWR9-NEXT:    vpopcntd 2, 2
-; PWR9-NEXT:    vextsb2d 3, 3
+; PWR9-NEXT:    addi 3, 3, .LCPI223_0@toc@l
+; PWR9-NEXT:    lxv 35, 0(3)
 ; PWR9-NEXT:    vcmpgtud 2, 3, 2
 ; PWR9-NEXT:    blr
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)


        


More information about the llvm-commits mailing list