[llvm] [PowerPC] extend smaller splats into bigger splats (PR #141282)
via llvm-commits
llvm-commits at lists.llvm.org
Fri May 23 12:00:53 PDT 2025
https://github.com/RolandF77 created https://github.com/llvm/llvm-project/pull/141282
For pwr9, xxspltib is a byte splat with a range of -128 to 127; it can be followed by a vector sign-extend instruction (vextsb2w/vextsb2d) or vupklsw to make splats of i16, i32, or i64 element size. For pwr8, vspltisw followed by vupklsw (vector unpack low signed word) can be used to make splats of i64 elements in the range -16 to 15.
>From 3d6a32bfded09b4cb04ac66785eb98f1e90d5921 Mon Sep 17 00:00:00 2001
From: Roland Froese <froese at ca.ibm.com>
Date: Fri, 23 May 2025 18:50:44 +0000
Subject: [PATCH] extend smaller splats into bigger splats
---
llvm/lib/Target/PowerPC/PPCISelLowering.cpp | 52 +-
.../CodeGen/PowerPC/build-vector-tests.ll | 120 +--
llvm/test/CodeGen/PowerPC/mul-const-vector.ll | 18 +-
.../PowerPC/p10-splatImm-CPload-pcrel.ll | 5 +-
llvm/test/CodeGen/PowerPC/pre-inc-disable.ll | 32 +-
.../CodeGen/PowerPC/vec_add_sub_doubleword.ll | 5 +-
.../CodeGen/PowerPC/vector-extend-sign.ll | 5 +-
.../PowerPC/vector-popcnt-128-ult-ugt.ll | 904 +++++++-----------
8 files changed, 490 insertions(+), 651 deletions(-)
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index c39b9d55cc212..5bc2ea0cfe90e 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -9664,7 +9664,25 @@ SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op,
}
}
- if (!BVNIsConstantSplat || SplatBitSize > 32) {
+ bool IsSplat64 = false;
+ uint64_t SplatBits = 0;
+ int32_t SextVal = 0;
+ if (BVNIsConstantSplat) {
+ if (SplatBitSize <= 32) {
+ SplatBits = APSplatBits.getZExtValue();
+ SextVal = SignExtend32(SplatBits, SplatBitSize);
+ } else if (SplatBitSize == 64) {
+ int64_t Splat64Val = APSplatBits.getSExtValue();
+ SplatBits = (uint64_t) Splat64Val;
+ SextVal = (int32_t) SplatBits;
+ bool P9Vector = Subtarget.hasP9Vector();
+ int32_t Hi = P9Vector ? 127 : 15;
+ int32_t Lo = P9Vector ? -128 : -16;
+ IsSplat64 = Splat64Val >= Lo && Splat64Val <= Hi;
+ }
+ }
+
+ if (!BVNIsConstantSplat || (SplatBitSize > 32 && !IsSplat64)) {
unsigned NewOpcode = PPCISD::LD_SPLAT;
// Handle load-and-splat patterns as we have instructions that will do this
@@ -9750,7 +9768,6 @@ SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op,
return SDValue();
}
- uint64_t SplatBits = APSplatBits.getZExtValue();
uint64_t SplatUndef = APSplatUndef.getZExtValue();
unsigned SplatSize = SplatBitSize / 8;
@@ -9785,13 +9802,36 @@ SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op,
dl);
// If the sign extended value is in the range [-16,15], use VSPLTI[bhw].
- int32_t SextVal = SignExtend32(SplatBits, SplatBitSize);
- if (SextVal >= -16 && SextVal <= 15)
- return getCanonicalConstSplat(SextVal, SplatSize, Op.getValueType(), DAG,
- dl);
+ if (SextVal >= -16 && SextVal <= 15) {
+ unsigned UseSize = SplatSize == 8 ? 4 : SplatSize;
+ SDValue Res =
+ getCanonicalConstSplat(SextVal, UseSize, Op.getValueType(), DAG, dl);
+ if (SplatSize != 8)
+ return Res;
+ return BuildIntrinsicOp(Intrinsic::ppc_altivec_vupklsw, Res, DAG, dl);
+ }
// Two instruction sequences.
+ if (Subtarget.hasP9Vector() && SextVal >= -128 && SextVal <= 127) {
+ SDValue C = DAG.getConstant((unsigned char) SextVal, dl, MVT::i32);
+ SmallVector<SDValue, 16> Ops(16, C);
+ SDValue BV = DAG.getBuildVector(MVT::v16i8, dl, Ops);
+ assert((SplatSize == 2 || SplatSize == 4 || SplatSize == 8) &&
+ "Unexpected type for vector constant.");
+ unsigned IID;
+ if (SplatSize == 2) {
+ IID = Intrinsic::ppc_altivec_vupklsb;
+ } else if (SplatSize == 4) {
+ IID = Intrinsic::ppc_altivec_vextsb2w;
+ } else { // SplatSize == 8
+ IID = Intrinsic::ppc_altivec_vextsb2d;
+ }
+ SDValue Extend = BuildIntrinsicOp(IID, BV, DAG, dl);
+ return DAG.getBitcast(Op->getValueType(0), Extend);
+ }
+ assert(!IsSplat64 && "Unhandled 64-bit splat pattern");
+
// If this value is in the range [-32,30] and is even, use:
// VSPLTI[bhw](val/2) + VSPLTI[bhw](val/2)
// If this value is in the range [17,31] and is odd, use:
diff --git a/llvm/test/CodeGen/PowerPC/build-vector-tests.ll b/llvm/test/CodeGen/PowerPC/build-vector-tests.ll
index 91431ed15f6a7..9dd0fbe4474b1 100644
--- a/llvm/test/CodeGen/PowerPC/build-vector-tests.ll
+++ b/llvm/test/CodeGen/PowerPC/build-vector-tests.ll
@@ -3713,30 +3713,26 @@ entry:
define <2 x i64> @spltConst1ll() {
; P9BE-LABEL: spltConst1ll:
; P9BE: # %bb.0: # %entry
-; P9BE-NEXT: addis r3, r2, .LCPI65_0 at toc@ha
-; P9BE-NEXT: addi r3, r3, .LCPI65_0 at toc@l
-; P9BE-NEXT: lxv v2, 0(r3)
+; P9BE-NEXT: vspltisw v2, 1
+; P9BE-NEXT: vupklsw v2, v2
; P9BE-NEXT: blr
;
; P9LE-LABEL: spltConst1ll:
; P9LE: # %bb.0: # %entry
-; P9LE-NEXT: addis r3, r2, .LCPI65_0 at toc@ha
-; P9LE-NEXT: addi r3, r3, .LCPI65_0 at toc@l
-; P9LE-NEXT: lxv v2, 0(r3)
+; P9LE-NEXT: vspltisw v2, 1
+; P9LE-NEXT: vupklsw v2, v2
; P9LE-NEXT: blr
;
; P8BE-LABEL: spltConst1ll:
; P8BE: # %bb.0: # %entry
-; P8BE-NEXT: addis r3, r2, .LCPI65_0 at toc@ha
-; P8BE-NEXT: addi r3, r3, .LCPI65_0 at toc@l
-; P8BE-NEXT: lxvd2x v2, 0, r3
+; P8BE-NEXT: vspltisw v2, 1
+; P8BE-NEXT: vupklsw v2, v2
; P8BE-NEXT: blr
;
; P8LE-LABEL: spltConst1ll:
; P8LE: # %bb.0: # %entry
-; P8LE-NEXT: addis r3, r2, .LCPI65_0 at toc@ha
-; P8LE-NEXT: addi r3, r3, .LCPI65_0 at toc@l
-; P8LE-NEXT: lxvd2x v2, 0, r3
+; P8LE-NEXT: vspltisw v2, 1
+; P8LE-NEXT: vupklsw v2, v2
; P8LE-NEXT: blr
entry:
ret <2 x i64> <i64 1, i64 1>
@@ -4173,30 +4169,26 @@ entry:
define <2 x i64> @spltCnstConvftoll() {
; P9BE-LABEL: spltCnstConvftoll:
; P9BE: # %bb.0: # %entry
-; P9BE-NEXT: addis r3, r2, .LCPI78_0 at toc@ha
-; P9BE-NEXT: addi r3, r3, .LCPI78_0 at toc@l
-; P9BE-NEXT: lxv v2, 0(r3)
+; P9BE-NEXT: vspltisw v2, 4
+; P9BE-NEXT: vupklsw v2, v2
; P9BE-NEXT: blr
;
; P9LE-LABEL: spltCnstConvftoll:
; P9LE: # %bb.0: # %entry
-; P9LE-NEXT: addis r3, r2, .LCPI78_0 at toc@ha
-; P9LE-NEXT: addi r3, r3, .LCPI78_0 at toc@l
-; P9LE-NEXT: lxv v2, 0(r3)
+; P9LE-NEXT: vspltisw v2, 4
+; P9LE-NEXT: vupklsw v2, v2
; P9LE-NEXT: blr
;
; P8BE-LABEL: spltCnstConvftoll:
; P8BE: # %bb.0: # %entry
-; P8BE-NEXT: addis r3, r2, .LCPI78_0 at toc@ha
-; P8BE-NEXT: addi r3, r3, .LCPI78_0 at toc@l
-; P8BE-NEXT: lxvd2x v2, 0, r3
+; P8BE-NEXT: vspltisw v2, 4
+; P8BE-NEXT: vupklsw v2, v2
; P8BE-NEXT: blr
;
; P8LE-LABEL: spltCnstConvftoll:
; P8LE: # %bb.0: # %entry
-; P8LE-NEXT: addis r3, r2, .LCPI78_0 at toc@ha
-; P8LE-NEXT: addi r3, r3, .LCPI78_0 at toc@l
-; P8LE-NEXT: lxvd2x v2, 0, r3
+; P8LE-NEXT: vspltisw v2, 4
+; P8LE-NEXT: vupklsw v2, v2
; P8LE-NEXT: blr
entry:
ret <2 x i64> <i64 4, i64 4>
@@ -4526,30 +4518,26 @@ entry:
define <2 x i64> @spltCnstConvdtoll() {
; P9BE-LABEL: spltCnstConvdtoll:
; P9BE: # %bb.0: # %entry
-; P9BE-NEXT: addis r3, r2, .LCPI87_0 at toc@ha
-; P9BE-NEXT: addi r3, r3, .LCPI87_0 at toc@l
-; P9BE-NEXT: lxv v2, 0(r3)
+; P9BE-NEXT: vspltisw v2, 4
+; P9BE-NEXT: vupklsw v2, v2
; P9BE-NEXT: blr
;
; P9LE-LABEL: spltCnstConvdtoll:
; P9LE: # %bb.0: # %entry
-; P9LE-NEXT: addis r3, r2, .LCPI87_0 at toc@ha
-; P9LE-NEXT: addi r3, r3, .LCPI87_0 at toc@l
-; P9LE-NEXT: lxv v2, 0(r3)
+; P9LE-NEXT: vspltisw v2, 4
+; P9LE-NEXT: vupklsw v2, v2
; P9LE-NEXT: blr
;
; P8BE-LABEL: spltCnstConvdtoll:
; P8BE: # %bb.0: # %entry
-; P8BE-NEXT: addis r3, r2, .LCPI87_0 at toc@ha
-; P8BE-NEXT: addi r3, r3, .LCPI87_0 at toc@l
-; P8BE-NEXT: lxvd2x v2, 0, r3
+; P8BE-NEXT: vspltisw v2, 4
+; P8BE-NEXT: vupklsw v2, v2
; P8BE-NEXT: blr
;
; P8LE-LABEL: spltCnstConvdtoll:
; P8LE: # %bb.0: # %entry
-; P8LE-NEXT: addis r3, r2, .LCPI87_0 at toc@ha
-; P8LE-NEXT: addi r3, r3, .LCPI87_0 at toc@l
-; P8LE-NEXT: lxvd2x v2, 0, r3
+; P8LE-NEXT: vspltisw v2, 4
+; P8LE-NEXT: vupklsw v2, v2
; P8LE-NEXT: blr
entry:
ret <2 x i64> <i64 4, i64 4>
@@ -4879,30 +4867,26 @@ entry:
define <2 x i64> @spltConst1ull() {
; P9BE-LABEL: spltConst1ull:
; P9BE: # %bb.0: # %entry
-; P9BE-NEXT: addis r3, r2, .LCPI97_0 at toc@ha
-; P9BE-NEXT: addi r3, r3, .LCPI97_0 at toc@l
-; P9BE-NEXT: lxv v2, 0(r3)
+; P9BE-NEXT: vspltisw v2, 1
+; P9BE-NEXT: vupklsw v2, v2
; P9BE-NEXT: blr
;
; P9LE-LABEL: spltConst1ull:
; P9LE: # %bb.0: # %entry
-; P9LE-NEXT: addis r3, r2, .LCPI97_0 at toc@ha
-; P9LE-NEXT: addi r3, r3, .LCPI97_0 at toc@l
-; P9LE-NEXT: lxv v2, 0(r3)
+; P9LE-NEXT: vspltisw v2, 1
+; P9LE-NEXT: vupklsw v2, v2
; P9LE-NEXT: blr
;
; P8BE-LABEL: spltConst1ull:
; P8BE: # %bb.0: # %entry
-; P8BE-NEXT: addis r3, r2, .LCPI97_0 at toc@ha
-; P8BE-NEXT: addi r3, r3, .LCPI97_0 at toc@l
-; P8BE-NEXT: lxvd2x v2, 0, r3
+; P8BE-NEXT: vspltisw v2, 1
+; P8BE-NEXT: vupklsw v2, v2
; P8BE-NEXT: blr
;
; P8LE-LABEL: spltConst1ull:
; P8LE: # %bb.0: # %entry
-; P8LE-NEXT: addis r3, r2, .LCPI97_0 at toc@ha
-; P8LE-NEXT: addi r3, r3, .LCPI97_0 at toc@l
-; P8LE-NEXT: lxvd2x v2, 0, r3
+; P8LE-NEXT: vspltisw v2, 1
+; P8LE-NEXT: vupklsw v2, v2
; P8LE-NEXT: blr
entry:
ret <2 x i64> <i64 1, i64 1>
@@ -5339,30 +5323,26 @@ entry:
define <2 x i64> @spltCnstConvftoull() {
; P9BE-LABEL: spltCnstConvftoull:
; P9BE: # %bb.0: # %entry
-; P9BE-NEXT: addis r3, r2, .LCPI110_0 at toc@ha
-; P9BE-NEXT: addi r3, r3, .LCPI110_0 at toc@l
-; P9BE-NEXT: lxv v2, 0(r3)
+; P9BE-NEXT: vspltisw v2, 4
+; P9BE-NEXT: vupklsw v2, v2
; P9BE-NEXT: blr
;
; P9LE-LABEL: spltCnstConvftoull:
; P9LE: # %bb.0: # %entry
-; P9LE-NEXT: addis r3, r2, .LCPI110_0 at toc@ha
-; P9LE-NEXT: addi r3, r3, .LCPI110_0 at toc@l
-; P9LE-NEXT: lxv v2, 0(r3)
+; P9LE-NEXT: vspltisw v2, 4
+; P9LE-NEXT: vupklsw v2, v2
; P9LE-NEXT: blr
;
; P8BE-LABEL: spltCnstConvftoull:
; P8BE: # %bb.0: # %entry
-; P8BE-NEXT: addis r3, r2, .LCPI110_0 at toc@ha
-; P8BE-NEXT: addi r3, r3, .LCPI110_0 at toc@l
-; P8BE-NEXT: lxvd2x v2, 0, r3
+; P8BE-NEXT: vspltisw v2, 4
+; P8BE-NEXT: vupklsw v2, v2
; P8BE-NEXT: blr
;
; P8LE-LABEL: spltCnstConvftoull:
; P8LE: # %bb.0: # %entry
-; P8LE-NEXT: addis r3, r2, .LCPI110_0 at toc@ha
-; P8LE-NEXT: addi r3, r3, .LCPI110_0 at toc@l
-; P8LE-NEXT: lxvd2x v2, 0, r3
+; P8LE-NEXT: vspltisw v2, 4
+; P8LE-NEXT: vupklsw v2, v2
; P8LE-NEXT: blr
entry:
ret <2 x i64> <i64 4, i64 4>
@@ -5692,30 +5672,26 @@ entry:
define <2 x i64> @spltCnstConvdtoull() {
; P9BE-LABEL: spltCnstConvdtoull:
; P9BE: # %bb.0: # %entry
-; P9BE-NEXT: addis r3, r2, .LCPI119_0 at toc@ha
-; P9BE-NEXT: addi r3, r3, .LCPI119_0 at toc@l
-; P9BE-NEXT: lxv v2, 0(r3)
+; P9BE-NEXT: vspltisw v2, 4
+; P9BE-NEXT: vupklsw v2, v2
; P9BE-NEXT: blr
;
; P9LE-LABEL: spltCnstConvdtoull:
; P9LE: # %bb.0: # %entry
-; P9LE-NEXT: addis r3, r2, .LCPI119_0 at toc@ha
-; P9LE-NEXT: addi r3, r3, .LCPI119_0 at toc@l
-; P9LE-NEXT: lxv v2, 0(r3)
+; P9LE-NEXT: vspltisw v2, 4
+; P9LE-NEXT: vupklsw v2, v2
; P9LE-NEXT: blr
;
; P8BE-LABEL: spltCnstConvdtoull:
; P8BE: # %bb.0: # %entry
-; P8BE-NEXT: addis r3, r2, .LCPI119_0 at toc@ha
-; P8BE-NEXT: addi r3, r3, .LCPI119_0 at toc@l
-; P8BE-NEXT: lxvd2x v2, 0, r3
+; P8BE-NEXT: vspltisw v2, 4
+; P8BE-NEXT: vupklsw v2, v2
; P8BE-NEXT: blr
;
; P8LE-LABEL: spltCnstConvdtoull:
; P8LE: # %bb.0: # %entry
-; P8LE-NEXT: addis r3, r2, .LCPI119_0 at toc@ha
-; P8LE-NEXT: addi r3, r3, .LCPI119_0 at toc@l
-; P8LE-NEXT: lxvd2x v2, 0, r3
+; P8LE-NEXT: vspltisw v2, 4
+; P8LE-NEXT: vupklsw v2, v2
; P8LE-NEXT: blr
entry:
ret <2 x i64> <i64 4, i64 4>
diff --git a/llvm/test/CodeGen/PowerPC/mul-const-vector.ll b/llvm/test/CodeGen/PowerPC/mul-const-vector.ll
index e3d231adf734f..2d67de0d4477c 100644
--- a/llvm/test/CodeGen/PowerPC/mul-const-vector.ll
+++ b/llvm/test/CodeGen/PowerPC/mul-const-vector.ll
@@ -271,8 +271,7 @@ define <2 x i64> @test1_v2i64(<2 x i64> %a) {
ret <2 x i64> %tmp.1
}
; CHECK-LABEL: test1_v2i64:
-; CHECK-P8: lxvd2x v[[REG1:[0-9]+]], 0, r{{[0-9]+}}
-; CHECK-P9: lxv v[[REG2:[0-9]+]], 0(r{{[0-9]+}})
+; CHECK: vupklsw v[[REG2:[0-9]+]], v{{[0-9]+}}
; CHECK-NOT: vmul
; CHECK-NEXT: vsld v{{[0-9]+}}, v2, v[[REG2]]
@@ -282,8 +281,7 @@ define <2 x i64> @test2_v2i64(<2 x i64> %a) {
}
; CHECK-LABEL: test2_v2i64:
-; CHECK-P8: lxvd2x v[[REG1:[0-9]+]], 0, r{{[0-9]+}}
-; CHECK-P9: lxv v[[REG2:[0-9]+]], 0(r{{[0-9]+}})
+; CHECK: vupklsw v[[REG2:[0-9]+]], v{{[0-9]+}}
; CHECK-NOT: vmul
; CHECK-NEXT: vsld v[[REG3:[0-9]+]], v2, v[[REG2]]
; CHECK-NEXT: vaddudm v{{[0-9]+}}, v2, v[[REG3]]
@@ -294,8 +292,7 @@ define <2 x i64> @test3_v2i64(<2 x i64> %a) {
}
; CHECK-LABEL: test3_v2i64:
-; CHECK-P8: lxvd2x v[[REG1:[0-9]+]], 0, r{{[0-9]+}}
-; CHECK-P9: lxv v[[REG2:[0-9]+]], 0(r{{[0-9]+}})
+; CHECK: vupklsw v[[REG2:[0-9]+]], v{{[0-9]+}}
; CHECK-NOT: vmul
; CHECK-NEXT: vsld v[[REG3:[0-9]+]], v2, v[[REG2]]
; CHECK-NEXT: vsubudm v{{[0-9]+}}, v[[REG3]], v2
@@ -308,8 +305,7 @@ define <2 x i64> @test4_v2i64(<2 x i64> %a) {
}
; CHECK-LABEL: test4_v2i64:
-; CHECK-P8: lxvd2x v[[REG1:[0-9]+]], 0, r{{[0-9]+}}
-; CHECK-P9: lxv v[[REG2:[0-9]+]], 0(r{{[0-9]+}})
+; CHECK: vupklsw v[[REG2:[0-9]+]], v{{[0-9]+}}
; CHECK-NOT: vmul
; CHECK-NEXT: vsld v[[REG3:[0-9]+]], v2, v[[REG2]]
; CHECK-P8-NEXT: xxlxor v[[REG4:[0-9]+]],
@@ -322,8 +318,7 @@ define <2 x i64> @test5_v2i64(<2 x i64> %a) {
}
; CHECK-LABEL: test5_v2i64:
-; CHECK-P8: lxvd2x v[[REG1:[0-9]+]], 0, r{{[0-9]+}}
-; CHECK-P9: lxv v[[REG2:[0-9]+]], 0(r{{[0-9]+}})
+; CHECK: vupklsw v[[REG2:[0-9]+]], v{{[0-9]+}}
; CHECK-NOT: vmul
; CHECK-NEXT: vsld v[[REG3:[0-9]+]], v2, v[[REG2]]
; CHECK-NEXT: vaddudm v[[REG4:[0-9]+]], v2, v[[REG3]]
@@ -337,8 +332,7 @@ define <2 x i64> @test6_v2i64(<2 x i64> %a) {
}
; CHECK-LABEL: test6_v2i64:
-; CHECK-P8: lxvd2x v[[REG1:[0-9]+]], 0, r{{[0-9]+}}
-; CHECK-P9: lxv v[[REG2:[0-9]+]], 0(r{{[0-9]+}})
+; CHECK: vupklsw v[[REG2:[0-9]+]], v{{[0-9]+}}
; CHECK-NOT: vmul
; CHECK-NEXT: vsld v[[REG3:[0-9]+]], v2, v[[REG2]]
; CHECK-NEXT: vsubudm v{{[0-9]+}}, v2, v[[REG3]]
diff --git a/llvm/test/CodeGen/PowerPC/p10-splatImm-CPload-pcrel.ll b/llvm/test/CodeGen/PowerPC/p10-splatImm-CPload-pcrel.ll
index 842cb929541cf..1ab74e6cb1cee 100644
--- a/llvm/test/CodeGen/PowerPC/p10-splatImm-CPload-pcrel.ll
+++ b/llvm/test/CodeGen/PowerPC/p10-splatImm-CPload-pcrel.ll
@@ -105,9 +105,8 @@ define dso_local <2 x double> @testDoubleToDoubleNaNFail() local_unnamed_addr {
;
; CHECK-NOPREFIX-LABEL: testDoubleToDoubleNaNFail:
; CHECK-NOPREFIX: # %bb.0: # %entry
-; CHECK-NOPREFIX-NEXT: addis r3, r2, .LCPI2_0 at toc@ha
-; CHECK-NOPREFIX-NEXT: addi r3, r3, .LCPI2_0 at toc@l
-; CHECK-NOPREFIX-NEXT: lxv vs34, 0(r3)
+; CHECK-NOPREFIX-NEXT: vspltisw v2, -16
+; CHECK-NOPREFIX-NEXT: vupklsw v2, v2
; CHECK-NOPREFIX-NEXT: blr
;
; CHECK-BE-LABEL: testDoubleToDoubleNaNFail:
diff --git a/llvm/test/CodeGen/PowerPC/pre-inc-disable.ll b/llvm/test/CodeGen/PowerPC/pre-inc-disable.ll
index 4435484ae0b94..6b29c780de600 100644
--- a/llvm/test/CodeGen/PowerPC/pre-inc-disable.ll
+++ b/llvm/test/CodeGen/PowerPC/pre-inc-disable.ll
@@ -22,10 +22,10 @@ define void @test64(ptr nocapture readonly %pix2, i32 signext %i_pix2) {
; P9LE-NEXT: lfdx 0, 3, 4
; P9LE-NEXT: addis 3, 2, .LCPI0_0 at toc@ha
; P9LE-NEXT: xxlxor 2, 2, 2
-; P9LE-NEXT: vspltisw 4, 8
+; P9LE-NEXT: xxspltib 4, 16
; P9LE-NEXT: lxsd 3, 4(5)
; P9LE-NEXT: addi 3, 3, .LCPI0_0 at toc@l
-; P9LE-NEXT: vadduwm 4, 4, 4
+; P9LE-NEXT: vextsb2w 4, 4
; P9LE-NEXT: lxv 1, 0(3)
; P9LE-NEXT: addis 3, 2, .LCPI0_1 at toc@ha
; P9LE-NEXT: addi 3, 3, .LCPI0_1 at toc@l
@@ -45,10 +45,10 @@ define void @test64(ptr nocapture readonly %pix2, i32 signext %i_pix2) {
; P9BE-NEXT: lxsdx 2, 3, 4
; P9BE-NEXT: addis 3, 2, .LCPI0_0 at toc@ha
; P9BE-NEXT: xxlxor 1, 1, 1
-; P9BE-NEXT: vspltisw 4, 8
+; P9BE-NEXT: xxspltib 4, 16
; P9BE-NEXT: lxsd 3, 4(5)
; P9BE-NEXT: addi 3, 3, .LCPI0_0 at toc@l
-; P9BE-NEXT: vadduwm 4, 4, 4
+; P9BE-NEXT: vextsb2w 4, 4
; P9BE-NEXT: lxv 0, 0(3)
; P9BE-NEXT: addis 3, 2, .LCPI0_1 at toc@ha
; P9BE-NEXT: addi 3, 3, .LCPI0_1 at toc@l
@@ -68,11 +68,11 @@ define void @test64(ptr nocapture readonly %pix2, i32 signext %i_pix2) {
; P9BE-AIX-NEXT: lxsdx 2, 3, 4
; P9BE-AIX-NEXT: ld 3, L..C0(2) # %const.0
; P9BE-AIX-NEXT: xxlxor 1, 1, 1
-; P9BE-AIX-NEXT: vspltisw 4, 8
+; P9BE-AIX-NEXT: xxspltib 4, 16
; P9BE-AIX-NEXT: lxsd 3, 4(5)
; P9BE-AIX-NEXT: lxv 0, 0(3)
; P9BE-AIX-NEXT: ld 3, L..C1(2) # %const.1
-; P9BE-AIX-NEXT: vadduwm 4, 4, 4
+; P9BE-AIX-NEXT: vextsb2w 4, 4
; P9BE-AIX-NEXT: xxperm 2, 1, 0
; P9BE-AIX-NEXT: lxv 0, 0(3)
; P9BE-AIX-NEXT: xxperm 3, 3, 0
@@ -89,10 +89,10 @@ define void @test64(ptr nocapture readonly %pix2, i32 signext %i_pix2) {
; P9BE-AIX32-NEXT: lxvwsx 0, 3, 4
; P9BE-AIX32-NEXT: li 3, 4
; P9BE-AIX32-NEXT: xxlxor 2, 2, 2
-; P9BE-AIX32-NEXT: vspltisw 4, 8
+; P9BE-AIX32-NEXT: xxspltib 4, 16
; P9BE-AIX32-NEXT: lxvwsx 1, 5, 3
; P9BE-AIX32-NEXT: lwz 3, L..C0(2) # %const.0
-; P9BE-AIX32-NEXT: vadduwm 4, 4, 4
+; P9BE-AIX32-NEXT: vextsb2w 4, 4
; P9BE-AIX32-NEXT: xxmrghw 2, 0, 1
; P9BE-AIX32-NEXT: lxv 0, 0(3)
; P9BE-AIX32-NEXT: li 3, 8
@@ -137,11 +137,11 @@ define void @test32(ptr nocapture readonly %pix2, i32 signext %i_pix2) {
; P9LE-NEXT: lxsiwzx 2, 3, 4
; P9LE-NEXT: addis 3, 2, .LCPI1_0 at toc@ha
; P9LE-NEXT: xxlxor 0, 0, 0
-; P9LE-NEXT: vspltisw 4, 8
+; P9LE-NEXT: xxspltib 4, 16
; P9LE-NEXT: addi 3, 3, .LCPI1_0 at toc@l
; P9LE-NEXT: lxv 1, 0(3)
; P9LE-NEXT: li 3, 4
-; P9LE-NEXT: vadduwm 4, 4, 4
+; P9LE-NEXT: vextsb2w 4, 4
; P9LE-NEXT: lxsiwzx 3, 5, 3
; P9LE-NEXT: xxperm 2, 0, 1
; P9LE-NEXT: xxperm 3, 0, 1
@@ -158,11 +158,11 @@ define void @test32(ptr nocapture readonly %pix2, i32 signext %i_pix2) {
; P9BE-NEXT: lxsiwzx 2, 3, 4
; P9BE-NEXT: addis 3, 2, .LCPI1_0 at toc@ha
; P9BE-NEXT: xxlxor 0, 0, 0
-; P9BE-NEXT: vspltisw 4, 8
+; P9BE-NEXT: xxspltib 4, 16
; P9BE-NEXT: addi 3, 3, .LCPI1_0 at toc@l
; P9BE-NEXT: lxv 1, 0(3)
; P9BE-NEXT: li 3, 4
-; P9BE-NEXT: vadduwm 4, 4, 4
+; P9BE-NEXT: vextsb2w 4, 4
; P9BE-NEXT: lxsiwzx 3, 5, 3
; P9BE-NEXT: xxperm 2, 0, 1
; P9BE-NEXT: xxperm 3, 0, 1
@@ -179,10 +179,10 @@ define void @test32(ptr nocapture readonly %pix2, i32 signext %i_pix2) {
; P9BE-AIX-NEXT: lxsiwzx 2, 3, 4
; P9BE-AIX-NEXT: ld 3, L..C2(2) # %const.0
; P9BE-AIX-NEXT: xxlxor 0, 0, 0
-; P9BE-AIX-NEXT: vspltisw 4, 8
+; P9BE-AIX-NEXT: xxspltib 4, 16
; P9BE-AIX-NEXT: lxv 1, 0(3)
; P9BE-AIX-NEXT: li 3, 4
-; P9BE-AIX-NEXT: vadduwm 4, 4, 4
+; P9BE-AIX-NEXT: vextsb2w 4, 4
; P9BE-AIX-NEXT: lxsiwzx 3, 5, 3
; P9BE-AIX-NEXT: xxperm 2, 0, 1
; P9BE-AIX-NEXT: xxperm 3, 0, 1
@@ -199,10 +199,10 @@ define void @test32(ptr nocapture readonly %pix2, i32 signext %i_pix2) {
; P9BE-AIX32-NEXT: lxsiwzx 2, 3, 4
; P9BE-AIX32-NEXT: lwz 3, L..C2(2) # %const.0
; P9BE-AIX32-NEXT: xxlxor 0, 0, 0
-; P9BE-AIX32-NEXT: vspltisw 4, 8
+; P9BE-AIX32-NEXT: xxspltib 4, 16
; P9BE-AIX32-NEXT: lxv 1, 0(3)
; P9BE-AIX32-NEXT: li 3, 4
-; P9BE-AIX32-NEXT: vadduwm 4, 4, 4
+; P9BE-AIX32-NEXT: vextsb2w 4, 4
; P9BE-AIX32-NEXT: lxsiwzx 3, 5, 3
; P9BE-AIX32-NEXT: xxperm 2, 0, 1
; P9BE-AIX32-NEXT: xxperm 3, 0, 1
diff --git a/llvm/test/CodeGen/PowerPC/vec_add_sub_doubleword.ll b/llvm/test/CodeGen/PowerPC/vec_add_sub_doubleword.ll
index 3f7e0b694ce4d..210aee13486c3 100644
--- a/llvm/test/CodeGen/PowerPC/vec_add_sub_doubleword.ll
+++ b/llvm/test/CodeGen/PowerPC/vec_add_sub_doubleword.ll
@@ -16,9 +16,8 @@ define <2 x i64> @test_add(<2 x i64> %x, <2 x i64> %y) nounwind {
define <2 x i64> @increment_by_one(<2 x i64> %x) nounwind {
; VSX-LABEL: increment_by_one:
; VSX: # %bb.0:
-; VSX-NEXT: addis 3, 2, .LCPI1_0 at toc@ha
-; VSX-NEXT: addi 3, 3, .LCPI1_0 at toc@l
-; VSX-NEXT: lxvd2x 35, 0, 3
+; VSX-NEXT: vspltisw 3, 1
+; VSX-NEXT: vupklsw 3, 3
; VSX-NEXT: vaddudm 2, 2, 3
; VSX-NEXT: blr
;
diff --git a/llvm/test/CodeGen/PowerPC/vector-extend-sign.ll b/llvm/test/CodeGen/PowerPC/vector-extend-sign.ll
index 540a00fa84c5d..ef3988e20e1f0 100644
--- a/llvm/test/CodeGen/PowerPC/vector-extend-sign.ll
+++ b/llvm/test/CodeGen/PowerPC/vector-extend-sign.ll
@@ -144,9 +144,8 @@ entry:
define <2 x i64> @test_none(<2 x i64> %m) {
; CHECK-P9-LABEL: test_none:
; CHECK-P9: # %bb.0: # %entry
-; CHECK-P9-NEXT: addis 3, 2, .LCPI5_0 at toc@ha
-; CHECK-P9-NEXT: addi 3, 3, .LCPI5_0 at toc@l
-; CHECK-P9-NEXT: lxv 35, 0(3)
+; CHECK-P9-NEXT: xxspltib 35, 16
+; CHECK-P9-NEXT: vextsb2d 3, 3
; CHECK-P9-NEXT: vsld 2, 2, 3
; CHECK-P9-NEXT: vsrad 2, 2, 3
; CHECK-P9-NEXT: blr
diff --git a/llvm/test/CodeGen/PowerPC/vector-popcnt-128-ult-ugt.ll b/llvm/test/CodeGen/PowerPC/vector-popcnt-128-ult-ugt.ll
index 04351346745b3..43cbc62e0bb1c 100644
--- a/llvm/test/CodeGen/PowerPC/vector-popcnt-128-ult-ugt.ll
+++ b/llvm/test/CodeGen/PowerPC/vector-popcnt-128-ult-ugt.ll
@@ -7838,9 +7838,9 @@ define <4 x i32> @ult_16_v4i32(<4 x i32> %0) {
;
; PWR9-LABEL: ult_16_v4i32:
; PWR9: # %bb.0:
-; PWR9-NEXT: vspltisw 3, 8
+; PWR9-NEXT: xxspltib 35, 16
; PWR9-NEXT: vpopcntw 2, 2
-; PWR9-NEXT: vadduwm 3, 3, 3
+; PWR9-NEXT: vextsb2w 3, 3
; PWR9-NEXT: vcmpgtuw 2, 3, 2
; PWR9-NEXT: blr
%2 = tail call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %0)
@@ -7974,9 +7974,9 @@ define <4 x i32> @ugt_16_v4i32(<4 x i32> %0) {
;
; PWR9-LABEL: ugt_16_v4i32:
; PWR9: # %bb.0:
-; PWR9-NEXT: vspltisw 3, 8
+; PWR9-NEXT: xxspltib 35, 16
; PWR9-NEXT: vpopcntw 2, 2
-; PWR9-NEXT: vadduwm 3, 3, 3
+; PWR9-NEXT: vextsb2w 3, 3
; PWR9-NEXT: vcmpgtuw 2, 2, 3
; PWR9-NEXT: blr
%2 = tail call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %0)
@@ -8108,10 +8108,9 @@ define <4 x i32> @ult_17_v4i32(<4 x i32> %0) {
;
; PWR9-LABEL: ult_17_v4i32:
; PWR9: # %bb.0:
-; PWR9-NEXT: vspltisw 3, -16
-; PWR9-NEXT: vspltisw 4, 1
+; PWR9-NEXT: xxspltib 35, 17
; PWR9-NEXT: vpopcntw 2, 2
-; PWR9-NEXT: vsubuwm 3, 4, 3
+; PWR9-NEXT: vextsb2w 3, 3
; PWR9-NEXT: vcmpgtuw 2, 3, 2
; PWR9-NEXT: blr
%2 = tail call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %0)
@@ -8243,10 +8242,9 @@ define <4 x i32> @ugt_17_v4i32(<4 x i32> %0) {
;
; PWR9-LABEL: ugt_17_v4i32:
; PWR9: # %bb.0:
-; PWR9-NEXT: vspltisw 3, -16
-; PWR9-NEXT: vspltisw 4, 1
+; PWR9-NEXT: xxspltib 35, 17
; PWR9-NEXT: vpopcntw 2, 2
-; PWR9-NEXT: vsubuwm 3, 4, 3
+; PWR9-NEXT: vextsb2w 3, 3
; PWR9-NEXT: vcmpgtuw 2, 2, 3
; PWR9-NEXT: blr
%2 = tail call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %0)
@@ -8380,9 +8378,9 @@ define <4 x i32> @ult_18_v4i32(<4 x i32> %0) {
;
; PWR9-LABEL: ult_18_v4i32:
; PWR9: # %bb.0:
-; PWR9-NEXT: vspltisw 3, 9
+; PWR9-NEXT: xxspltib 35, 18
; PWR9-NEXT: vpopcntw 2, 2
-; PWR9-NEXT: vadduwm 3, 3, 3
+; PWR9-NEXT: vextsb2w 3, 3
; PWR9-NEXT: vcmpgtuw 2, 3, 2
; PWR9-NEXT: blr
%2 = tail call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %0)
@@ -8516,9 +8514,9 @@ define <4 x i32> @ugt_18_v4i32(<4 x i32> %0) {
;
; PWR9-LABEL: ugt_18_v4i32:
; PWR9: # %bb.0:
-; PWR9-NEXT: vspltisw 3, 9
+; PWR9-NEXT: xxspltib 35, 18
; PWR9-NEXT: vpopcntw 2, 2
-; PWR9-NEXT: vadduwm 3, 3, 3
+; PWR9-NEXT: vextsb2w 3, 3
; PWR9-NEXT: vcmpgtuw 2, 2, 3
; PWR9-NEXT: blr
%2 = tail call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %0)
@@ -8653,10 +8651,9 @@ define <4 x i32> @ult_19_v4i32(<4 x i32> %0) {
;
; PWR9-LABEL: ult_19_v4i32:
; PWR9: # %bb.0:
-; PWR9-NEXT: vspltisw 3, -16
-; PWR9-NEXT: vspltisw 4, 3
+; PWR9-NEXT: xxspltib 35, 19
; PWR9-NEXT: vpopcntw 2, 2
-; PWR9-NEXT: vsubuwm 3, 4, 3
+; PWR9-NEXT: vextsb2w 3, 3
; PWR9-NEXT: vcmpgtuw 2, 3, 2
; PWR9-NEXT: blr
%2 = tail call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %0)
@@ -8791,10 +8788,9 @@ define <4 x i32> @ugt_19_v4i32(<4 x i32> %0) {
;
; PWR9-LABEL: ugt_19_v4i32:
; PWR9: # %bb.0:
-; PWR9-NEXT: vspltisw 3, -16
-; PWR9-NEXT: vspltisw 4, 3
+; PWR9-NEXT: xxspltib 35, 19
; PWR9-NEXT: vpopcntw 2, 2
-; PWR9-NEXT: vsubuwm 3, 4, 3
+; PWR9-NEXT: vextsb2w 3, 3
; PWR9-NEXT: vcmpgtuw 2, 2, 3
; PWR9-NEXT: blr
%2 = tail call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %0)
@@ -8928,9 +8924,9 @@ define <4 x i32> @ult_20_v4i32(<4 x i32> %0) {
;
; PWR9-LABEL: ult_20_v4i32:
; PWR9: # %bb.0:
-; PWR9-NEXT: vspltisw 3, 10
+; PWR9-NEXT: xxspltib 35, 20
; PWR9-NEXT: vpopcntw 2, 2
-; PWR9-NEXT: vadduwm 3, 3, 3
+; PWR9-NEXT: vextsb2w 3, 3
; PWR9-NEXT: vcmpgtuw 2, 3, 2
; PWR9-NEXT: blr
%2 = tail call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %0)
@@ -9064,9 +9060,9 @@ define <4 x i32> @ugt_20_v4i32(<4 x i32> %0) {
;
; PWR9-LABEL: ugt_20_v4i32:
; PWR9: # %bb.0:
-; PWR9-NEXT: vspltisw 3, 10
+; PWR9-NEXT: xxspltib 35, 20
; PWR9-NEXT: vpopcntw 2, 2
-; PWR9-NEXT: vadduwm 3, 3, 3
+; PWR9-NEXT: vextsb2w 3, 3
; PWR9-NEXT: vcmpgtuw 2, 2, 3
; PWR9-NEXT: blr
%2 = tail call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %0)
@@ -9201,10 +9197,9 @@ define <4 x i32> @ult_21_v4i32(<4 x i32> %0) {
;
; PWR9-LABEL: ult_21_v4i32:
; PWR9: # %bb.0:
-; PWR9-NEXT: vspltisw 3, -16
-; PWR9-NEXT: vspltisw 4, 5
+; PWR9-NEXT: xxspltib 35, 21
; PWR9-NEXT: vpopcntw 2, 2
-; PWR9-NEXT: vsubuwm 3, 4, 3
+; PWR9-NEXT: vextsb2w 3, 3
; PWR9-NEXT: vcmpgtuw 2, 3, 2
; PWR9-NEXT: blr
%2 = tail call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %0)
@@ -9339,10 +9334,9 @@ define <4 x i32> @ugt_21_v4i32(<4 x i32> %0) {
;
; PWR9-LABEL: ugt_21_v4i32:
; PWR9: # %bb.0:
-; PWR9-NEXT: vspltisw 3, -16
-; PWR9-NEXT: vspltisw 4, 5
+; PWR9-NEXT: xxspltib 35, 21
; PWR9-NEXT: vpopcntw 2, 2
-; PWR9-NEXT: vsubuwm 3, 4, 3
+; PWR9-NEXT: vextsb2w 3, 3
; PWR9-NEXT: vcmpgtuw 2, 2, 3
; PWR9-NEXT: blr
%2 = tail call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %0)
@@ -9476,9 +9470,9 @@ define <4 x i32> @ult_22_v4i32(<4 x i32> %0) {
;
; PWR9-LABEL: ult_22_v4i32:
; PWR9: # %bb.0:
-; PWR9-NEXT: vspltisw 3, 11
+; PWR9-NEXT: xxspltib 35, 22
; PWR9-NEXT: vpopcntw 2, 2
-; PWR9-NEXT: vadduwm 3, 3, 3
+; PWR9-NEXT: vextsb2w 3, 3
; PWR9-NEXT: vcmpgtuw 2, 3, 2
; PWR9-NEXT: blr
%2 = tail call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %0)
@@ -9612,9 +9606,9 @@ define <4 x i32> @ugt_22_v4i32(<4 x i32> %0) {
;
; PWR9-LABEL: ugt_22_v4i32:
; PWR9: # %bb.0:
-; PWR9-NEXT: vspltisw 3, 11
+; PWR9-NEXT: xxspltib 35, 22
; PWR9-NEXT: vpopcntw 2, 2
-; PWR9-NEXT: vadduwm 3, 3, 3
+; PWR9-NEXT: vextsb2w 3, 3
; PWR9-NEXT: vcmpgtuw 2, 2, 3
; PWR9-NEXT: blr
%2 = tail call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %0)
@@ -9749,10 +9743,9 @@ define <4 x i32> @ult_23_v4i32(<4 x i32> %0) {
;
; PWR9-LABEL: ult_23_v4i32:
; PWR9: # %bb.0:
-; PWR9-NEXT: vspltisw 3, -16
-; PWR9-NEXT: vspltisw 4, 7
+; PWR9-NEXT: xxspltib 35, 23
; PWR9-NEXT: vpopcntw 2, 2
-; PWR9-NEXT: vsubuwm 3, 4, 3
+; PWR9-NEXT: vextsb2w 3, 3
; PWR9-NEXT: vcmpgtuw 2, 3, 2
; PWR9-NEXT: blr
%2 = tail call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %0)
@@ -9887,10 +9880,9 @@ define <4 x i32> @ugt_23_v4i32(<4 x i32> %0) {
;
; PWR9-LABEL: ugt_23_v4i32:
; PWR9: # %bb.0:
-; PWR9-NEXT: vspltisw 3, -16
-; PWR9-NEXT: vspltisw 4, 7
+; PWR9-NEXT: xxspltib 35, 23
; PWR9-NEXT: vpopcntw 2, 2
-; PWR9-NEXT: vsubuwm 3, 4, 3
+; PWR9-NEXT: vextsb2w 3, 3
; PWR9-NEXT: vcmpgtuw 2, 2, 3
; PWR9-NEXT: blr
%2 = tail call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %0)
@@ -10018,9 +10010,9 @@ define <4 x i32> @ult_24_v4i32(<4 x i32> %0) {
;
; PWR9-LABEL: ult_24_v4i32:
; PWR9: # %bb.0:
-; PWR9-NEXT: vspltisw 3, 12
+; PWR9-NEXT: xxspltib 35, 24
; PWR9-NEXT: vpopcntw 2, 2
-; PWR9-NEXT: vadduwm 3, 3, 3
+; PWR9-NEXT: vextsb2w 3, 3
; PWR9-NEXT: vcmpgtuw 2, 3, 2
; PWR9-NEXT: blr
%2 = tail call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %0)
@@ -10148,9 +10140,9 @@ define <4 x i32> @ugt_24_v4i32(<4 x i32> %0) {
;
; PWR9-LABEL: ugt_24_v4i32:
; PWR9: # %bb.0:
-; PWR9-NEXT: vspltisw 3, 12
+; PWR9-NEXT: xxspltib 35, 24
; PWR9-NEXT: vpopcntw 2, 2
-; PWR9-NEXT: vadduwm 3, 3, 3
+; PWR9-NEXT: vextsb2w 3, 3
; PWR9-NEXT: vcmpgtuw 2, 2, 3
; PWR9-NEXT: blr
%2 = tail call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %0)
@@ -10285,10 +10277,9 @@ define <4 x i32> @ult_25_v4i32(<4 x i32> %0) {
;
; PWR9-LABEL: ult_25_v4i32:
; PWR9: # %bb.0:
-; PWR9-NEXT: vspltisw 3, -16
-; PWR9-NEXT: vspltisw 4, 9
+; PWR9-NEXT: xxspltib 35, 25
; PWR9-NEXT: vpopcntw 2, 2
-; PWR9-NEXT: vsubuwm 3, 4, 3
+; PWR9-NEXT: vextsb2w 3, 3
; PWR9-NEXT: vcmpgtuw 2, 3, 2
; PWR9-NEXT: blr
%2 = tail call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %0)
@@ -10423,10 +10414,9 @@ define <4 x i32> @ugt_25_v4i32(<4 x i32> %0) {
;
; PWR9-LABEL: ugt_25_v4i32:
; PWR9: # %bb.0:
-; PWR9-NEXT: vspltisw 3, -16
-; PWR9-NEXT: vspltisw 4, 9
+; PWR9-NEXT: xxspltib 35, 25
; PWR9-NEXT: vpopcntw 2, 2
-; PWR9-NEXT: vsubuwm 3, 4, 3
+; PWR9-NEXT: vextsb2w 3, 3
; PWR9-NEXT: vcmpgtuw 2, 2, 3
; PWR9-NEXT: blr
%2 = tail call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %0)
@@ -10560,9 +10550,9 @@ define <4 x i32> @ult_26_v4i32(<4 x i32> %0) {
;
; PWR9-LABEL: ult_26_v4i32:
; PWR9: # %bb.0:
-; PWR9-NEXT: vspltisw 3, 13
+; PWR9-NEXT: xxspltib 35, 26
; PWR9-NEXT: vpopcntw 2, 2
-; PWR9-NEXT: vadduwm 3, 3, 3
+; PWR9-NEXT: vextsb2w 3, 3
; PWR9-NEXT: vcmpgtuw 2, 3, 2
; PWR9-NEXT: blr
%2 = tail call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %0)
@@ -10696,9 +10686,9 @@ define <4 x i32> @ugt_26_v4i32(<4 x i32> %0) {
;
; PWR9-LABEL: ugt_26_v4i32:
; PWR9: # %bb.0:
-; PWR9-NEXT: vspltisw 3, 13
+; PWR9-NEXT: xxspltib 35, 26
; PWR9-NEXT: vpopcntw 2, 2
-; PWR9-NEXT: vadduwm 3, 3, 3
+; PWR9-NEXT: vextsb2w 3, 3
; PWR9-NEXT: vcmpgtuw 2, 2, 3
; PWR9-NEXT: blr
%2 = tail call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %0)
@@ -10833,10 +10823,9 @@ define <4 x i32> @ult_27_v4i32(<4 x i32> %0) {
;
; PWR9-LABEL: ult_27_v4i32:
; PWR9: # %bb.0:
-; PWR9-NEXT: vspltisw 3, -16
-; PWR9-NEXT: vspltisw 4, 11
+; PWR9-NEXT: xxspltib 35, 27
; PWR9-NEXT: vpopcntw 2, 2
-; PWR9-NEXT: vsubuwm 3, 4, 3
+; PWR9-NEXT: vextsb2w 3, 3
; PWR9-NEXT: vcmpgtuw 2, 3, 2
; PWR9-NEXT: blr
%2 = tail call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %0)
@@ -10971,10 +10960,9 @@ define <4 x i32> @ugt_27_v4i32(<4 x i32> %0) {
;
; PWR9-LABEL: ugt_27_v4i32:
; PWR9: # %bb.0:
-; PWR9-NEXT: vspltisw 3, -16
-; PWR9-NEXT: vspltisw 4, 11
+; PWR9-NEXT: xxspltib 35, 27
; PWR9-NEXT: vpopcntw 2, 2
-; PWR9-NEXT: vsubuwm 3, 4, 3
+; PWR9-NEXT: vextsb2w 3, 3
; PWR9-NEXT: vcmpgtuw 2, 2, 3
; PWR9-NEXT: blr
%2 = tail call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %0)
@@ -11108,9 +11096,9 @@ define <4 x i32> @ult_28_v4i32(<4 x i32> %0) {
;
; PWR9-LABEL: ult_28_v4i32:
; PWR9: # %bb.0:
-; PWR9-NEXT: vspltisw 3, 14
+; PWR9-NEXT: xxspltib 35, 28
; PWR9-NEXT: vpopcntw 2, 2
-; PWR9-NEXT: vadduwm 3, 3, 3
+; PWR9-NEXT: vextsb2w 3, 3
; PWR9-NEXT: vcmpgtuw 2, 3, 2
; PWR9-NEXT: blr
%2 = tail call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %0)
@@ -11244,9 +11232,9 @@ define <4 x i32> @ugt_28_v4i32(<4 x i32> %0) {
;
; PWR9-LABEL: ugt_28_v4i32:
; PWR9: # %bb.0:
-; PWR9-NEXT: vspltisw 3, 14
+; PWR9-NEXT: xxspltib 35, 28
; PWR9-NEXT: vpopcntw 2, 2
-; PWR9-NEXT: vadduwm 3, 3, 3
+; PWR9-NEXT: vextsb2w 3, 3
; PWR9-NEXT: vcmpgtuw 2, 2, 3
; PWR9-NEXT: blr
%2 = tail call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %0)
@@ -11381,10 +11369,9 @@ define <4 x i32> @ult_29_v4i32(<4 x i32> %0) {
;
; PWR9-LABEL: ult_29_v4i32:
; PWR9: # %bb.0:
-; PWR9-NEXT: vspltisw 3, -16
-; PWR9-NEXT: vspltisw 4, 13
+; PWR9-NEXT: xxspltib 35, 29
; PWR9-NEXT: vpopcntw 2, 2
-; PWR9-NEXT: vsubuwm 3, 4, 3
+; PWR9-NEXT: vextsb2w 3, 3
; PWR9-NEXT: vcmpgtuw 2, 3, 2
; PWR9-NEXT: blr
%2 = tail call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %0)
@@ -11519,10 +11506,9 @@ define <4 x i32> @ugt_29_v4i32(<4 x i32> %0) {
;
; PWR9-LABEL: ugt_29_v4i32:
; PWR9: # %bb.0:
-; PWR9-NEXT: vspltisw 3, -16
-; PWR9-NEXT: vspltisw 4, 13
+; PWR9-NEXT: xxspltib 35, 29
; PWR9-NEXT: vpopcntw 2, 2
-; PWR9-NEXT: vsubuwm 3, 4, 3
+; PWR9-NEXT: vextsb2w 3, 3
; PWR9-NEXT: vcmpgtuw 2, 2, 3
; PWR9-NEXT: blr
%2 = tail call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %0)
@@ -11656,9 +11642,9 @@ define <4 x i32> @ult_30_v4i32(<4 x i32> %0) {
;
; PWR9-LABEL: ult_30_v4i32:
; PWR9: # %bb.0:
-; PWR9-NEXT: vspltisw 3, 15
+; PWR9-NEXT: xxspltib 35, 30
; PWR9-NEXT: vpopcntw 2, 2
-; PWR9-NEXT: vadduwm 3, 3, 3
+; PWR9-NEXT: vextsb2w 3, 3
; PWR9-NEXT: vcmpgtuw 2, 3, 2
; PWR9-NEXT: blr
%2 = tail call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %0)
@@ -11792,9 +11778,9 @@ define <4 x i32> @ugt_30_v4i32(<4 x i32> %0) {
;
; PWR9-LABEL: ugt_30_v4i32:
; PWR9: # %bb.0:
-; PWR9-NEXT: vspltisw 3, 15
+; PWR9-NEXT: xxspltib 35, 30
; PWR9-NEXT: vpopcntw 2, 2
-; PWR9-NEXT: vadduwm 3, 3, 3
+; PWR9-NEXT: vextsb2w 3, 3
; PWR9-NEXT: vcmpgtuw 2, 2, 3
; PWR9-NEXT: blr
%2 = tail call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %0)
@@ -11929,10 +11915,9 @@ define <4 x i32> @ult_31_v4i32(<4 x i32> %0) {
;
; PWR9-LABEL: ult_31_v4i32:
; PWR9: # %bb.0:
-; PWR9-NEXT: vspltisw 3, -16
-; PWR9-NEXT: vspltisw 4, 15
+; PWR9-NEXT: xxspltib 35, 31
; PWR9-NEXT: vpopcntw 2, 2
-; PWR9-NEXT: vsubuwm 3, 4, 3
+; PWR9-NEXT: vextsb2w 3, 3
; PWR9-NEXT: vcmpgtuw 2, 3, 2
; PWR9-NEXT: blr
%2 = tail call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %0)
@@ -11991,19 +11976,17 @@ define <2 x i64> @ugt_1_v2i64(<2 x i64> %0) {
;
; PWR8-LABEL: ugt_1_v2i64:
; PWR8: # %bb.0:
-; PWR8-NEXT: addis 3, 2, .LCPI100_0 at toc@ha
+; PWR8-NEXT: vspltisw 3, 1
; PWR8-NEXT: vpopcntd 2, 2
-; PWR8-NEXT: addi 3, 3, .LCPI100_0 at toc@l
-; PWR8-NEXT: lxvd2x 35, 0, 3
+; PWR8-NEXT: vupklsw 3, 3
; PWR8-NEXT: vcmpgtud 2, 2, 3
; PWR8-NEXT: blr
;
; PWR9-LABEL: ugt_1_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI100_0 at toc@ha
+; PWR9-NEXT: vspltisw 3, 1
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI100_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vupklsw 3, 3
; PWR9-NEXT: vcmpgtud 2, 2, 3
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -12061,19 +12044,17 @@ define <2 x i64> @ult_2_v2i64(<2 x i64> %0) {
;
; PWR8-LABEL: ult_2_v2i64:
; PWR8: # %bb.0:
-; PWR8-NEXT: addis 3, 2, .LCPI101_0 at toc@ha
+; PWR8-NEXT: vspltisw 3, 2
; PWR8-NEXT: vpopcntd 2, 2
-; PWR8-NEXT: addi 3, 3, .LCPI101_0 at toc@l
-; PWR8-NEXT: lxvd2x 35, 0, 3
+; PWR8-NEXT: vupklsw 3, 3
; PWR8-NEXT: vcmpgtud 2, 3, 2
; PWR8-NEXT: blr
;
; PWR9-LABEL: ult_2_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI101_0 at toc@ha
+; PWR9-NEXT: vspltisw 3, 2
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI101_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vupklsw 3, 3
; PWR9-NEXT: vcmpgtud 2, 3, 2
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -12195,19 +12176,17 @@ define <2 x i64> @ugt_2_v2i64(<2 x i64> %0) {
;
; PWR8-LABEL: ugt_2_v2i64:
; PWR8: # %bb.0:
-; PWR8-NEXT: addis 3, 2, .LCPI102_0 at toc@ha
+; PWR8-NEXT: vspltisw 3, 2
; PWR8-NEXT: vpopcntd 2, 2
-; PWR8-NEXT: addi 3, 3, .LCPI102_0 at toc@l
-; PWR8-NEXT: lxvd2x 35, 0, 3
+; PWR8-NEXT: vupklsw 3, 3
; PWR8-NEXT: vcmpgtud 2, 2, 3
; PWR8-NEXT: blr
;
; PWR9-LABEL: ugt_2_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI102_0 at toc@ha
+; PWR9-NEXT: vspltisw 3, 2
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI102_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vupklsw 3, 3
; PWR9-NEXT: vcmpgtud 2, 2, 3
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -12329,19 +12308,17 @@ define <2 x i64> @ult_3_v2i64(<2 x i64> %0) {
;
; PWR8-LABEL: ult_3_v2i64:
; PWR8: # %bb.0:
-; PWR8-NEXT: addis 3, 2, .LCPI103_0 at toc@ha
+; PWR8-NEXT: vspltisw 3, 3
; PWR8-NEXT: vpopcntd 2, 2
-; PWR8-NEXT: addi 3, 3, .LCPI103_0 at toc@l
-; PWR8-NEXT: lxvd2x 35, 0, 3
+; PWR8-NEXT: vupklsw 3, 3
; PWR8-NEXT: vcmpgtud 2, 3, 2
; PWR8-NEXT: blr
;
; PWR9-LABEL: ult_3_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI103_0 at toc@ha
+; PWR9-NEXT: vspltisw 3, 3
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI103_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vupklsw 3, 3
; PWR9-NEXT: vcmpgtud 2, 3, 2
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -12463,19 +12440,17 @@ define <2 x i64> @ugt_3_v2i64(<2 x i64> %0) {
;
; PWR8-LABEL: ugt_3_v2i64:
; PWR8: # %bb.0:
-; PWR8-NEXT: addis 3, 2, .LCPI104_0 at toc@ha
+; PWR8-NEXT: vspltisw 3, 3
; PWR8-NEXT: vpopcntd 2, 2
-; PWR8-NEXT: addi 3, 3, .LCPI104_0 at toc@l
-; PWR8-NEXT: lxvd2x 35, 0, 3
+; PWR8-NEXT: vupklsw 3, 3
; PWR8-NEXT: vcmpgtud 2, 2, 3
; PWR8-NEXT: blr
;
; PWR9-LABEL: ugt_3_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI104_0 at toc@ha
+; PWR9-NEXT: vspltisw 3, 3
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI104_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vupklsw 3, 3
; PWR9-NEXT: vcmpgtud 2, 2, 3
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -12597,19 +12572,17 @@ define <2 x i64> @ult_4_v2i64(<2 x i64> %0) {
;
; PWR8-LABEL: ult_4_v2i64:
; PWR8: # %bb.0:
-; PWR8-NEXT: addis 3, 2, .LCPI105_0 at toc@ha
+; PWR8-NEXT: vspltisw 3, 4
; PWR8-NEXT: vpopcntd 2, 2
-; PWR8-NEXT: addi 3, 3, .LCPI105_0 at toc@l
-; PWR8-NEXT: lxvd2x 35, 0, 3
+; PWR8-NEXT: vupklsw 3, 3
; PWR8-NEXT: vcmpgtud 2, 3, 2
; PWR8-NEXT: blr
;
; PWR9-LABEL: ult_4_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI105_0 at toc@ha
+; PWR9-NEXT: vspltisw 3, 4
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI105_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vupklsw 3, 3
; PWR9-NEXT: vcmpgtud 2, 3, 2
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -12731,19 +12704,17 @@ define <2 x i64> @ugt_4_v2i64(<2 x i64> %0) {
;
; PWR8-LABEL: ugt_4_v2i64:
; PWR8: # %bb.0:
-; PWR8-NEXT: addis 3, 2, .LCPI106_0 at toc@ha
+; PWR8-NEXT: vspltisw 3, 4
; PWR8-NEXT: vpopcntd 2, 2
-; PWR8-NEXT: addi 3, 3, .LCPI106_0 at toc@l
-; PWR8-NEXT: lxvd2x 35, 0, 3
+; PWR8-NEXT: vupklsw 3, 3
; PWR8-NEXT: vcmpgtud 2, 2, 3
; PWR8-NEXT: blr
;
; PWR9-LABEL: ugt_4_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI106_0 at toc@ha
+; PWR9-NEXT: vspltisw 3, 4
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI106_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vupklsw 3, 3
; PWR9-NEXT: vcmpgtud 2, 2, 3
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -12865,19 +12836,17 @@ define <2 x i64> @ult_5_v2i64(<2 x i64> %0) {
;
; PWR8-LABEL: ult_5_v2i64:
; PWR8: # %bb.0:
-; PWR8-NEXT: addis 3, 2, .LCPI107_0 at toc@ha
+; PWR8-NEXT: vspltisw 3, 5
; PWR8-NEXT: vpopcntd 2, 2
-; PWR8-NEXT: addi 3, 3, .LCPI107_0 at toc@l
-; PWR8-NEXT: lxvd2x 35, 0, 3
+; PWR8-NEXT: vupklsw 3, 3
; PWR8-NEXT: vcmpgtud 2, 3, 2
; PWR8-NEXT: blr
;
; PWR9-LABEL: ult_5_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI107_0 at toc@ha
+; PWR9-NEXT: vspltisw 3, 5
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI107_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vupklsw 3, 3
; PWR9-NEXT: vcmpgtud 2, 3, 2
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -12999,19 +12968,17 @@ define <2 x i64> @ugt_5_v2i64(<2 x i64> %0) {
;
; PWR8-LABEL: ugt_5_v2i64:
; PWR8: # %bb.0:
-; PWR8-NEXT: addis 3, 2, .LCPI108_0 at toc@ha
+; PWR8-NEXT: vspltisw 3, 5
; PWR8-NEXT: vpopcntd 2, 2
-; PWR8-NEXT: addi 3, 3, .LCPI108_0 at toc@l
-; PWR8-NEXT: lxvd2x 35, 0, 3
+; PWR8-NEXT: vupklsw 3, 3
; PWR8-NEXT: vcmpgtud 2, 2, 3
; PWR8-NEXT: blr
;
; PWR9-LABEL: ugt_5_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI108_0 at toc@ha
+; PWR9-NEXT: vspltisw 3, 5
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI108_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vupklsw 3, 3
; PWR9-NEXT: vcmpgtud 2, 2, 3
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -13133,19 +13100,17 @@ define <2 x i64> @ult_6_v2i64(<2 x i64> %0) {
;
; PWR8-LABEL: ult_6_v2i64:
; PWR8: # %bb.0:
-; PWR8-NEXT: addis 3, 2, .LCPI109_0 at toc@ha
+; PWR8-NEXT: vspltisw 3, 6
; PWR8-NEXT: vpopcntd 2, 2
-; PWR8-NEXT: addi 3, 3, .LCPI109_0 at toc@l
-; PWR8-NEXT: lxvd2x 35, 0, 3
+; PWR8-NEXT: vupklsw 3, 3
; PWR8-NEXT: vcmpgtud 2, 3, 2
; PWR8-NEXT: blr
;
; PWR9-LABEL: ult_6_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI109_0 at toc@ha
+; PWR9-NEXT: vspltisw 3, 6
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI109_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vupklsw 3, 3
; PWR9-NEXT: vcmpgtud 2, 3, 2
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -13267,19 +13232,17 @@ define <2 x i64> @ugt_6_v2i64(<2 x i64> %0) {
;
; PWR8-LABEL: ugt_6_v2i64:
; PWR8: # %bb.0:
-; PWR8-NEXT: addis 3, 2, .LCPI110_0 at toc@ha
+; PWR8-NEXT: vspltisw 3, 6
; PWR8-NEXT: vpopcntd 2, 2
-; PWR8-NEXT: addi 3, 3, .LCPI110_0 at toc@l
-; PWR8-NEXT: lxvd2x 35, 0, 3
+; PWR8-NEXT: vupklsw 3, 3
; PWR8-NEXT: vcmpgtud 2, 2, 3
; PWR8-NEXT: blr
;
; PWR9-LABEL: ugt_6_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI110_0 at toc@ha
+; PWR9-NEXT: vspltisw 3, 6
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI110_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vupklsw 3, 3
; PWR9-NEXT: vcmpgtud 2, 2, 3
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -13401,19 +13364,17 @@ define <2 x i64> @ult_7_v2i64(<2 x i64> %0) {
;
; PWR8-LABEL: ult_7_v2i64:
; PWR8: # %bb.0:
-; PWR8-NEXT: addis 3, 2, .LCPI111_0 at toc@ha
+; PWR8-NEXT: vspltisw 3, 7
; PWR8-NEXT: vpopcntd 2, 2
-; PWR8-NEXT: addi 3, 3, .LCPI111_0 at toc@l
-; PWR8-NEXT: lxvd2x 35, 0, 3
+; PWR8-NEXT: vupklsw 3, 3
; PWR8-NEXT: vcmpgtud 2, 3, 2
; PWR8-NEXT: blr
;
; PWR9-LABEL: ult_7_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI111_0 at toc@ha
+; PWR9-NEXT: vspltisw 3, 7
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI111_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vupklsw 3, 3
; PWR9-NEXT: vcmpgtud 2, 3, 2
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -13535,19 +13496,17 @@ define <2 x i64> @ugt_7_v2i64(<2 x i64> %0) {
;
; PWR8-LABEL: ugt_7_v2i64:
; PWR8: # %bb.0:
-; PWR8-NEXT: addis 3, 2, .LCPI112_0 at toc@ha
+; PWR8-NEXT: vspltisw 3, 7
; PWR8-NEXT: vpopcntd 2, 2
-; PWR8-NEXT: addi 3, 3, .LCPI112_0 at toc@l
-; PWR8-NEXT: lxvd2x 35, 0, 3
+; PWR8-NEXT: vupklsw 3, 3
; PWR8-NEXT: vcmpgtud 2, 2, 3
; PWR8-NEXT: blr
;
; PWR9-LABEL: ugt_7_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI112_0 at toc@ha
+; PWR9-NEXT: vspltisw 3, 7
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI112_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vupklsw 3, 3
; PWR9-NEXT: vcmpgtud 2, 2, 3
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -13669,19 +13628,17 @@ define <2 x i64> @ult_8_v2i64(<2 x i64> %0) {
;
; PWR8-LABEL: ult_8_v2i64:
; PWR8: # %bb.0:
-; PWR8-NEXT: addis 3, 2, .LCPI113_0 at toc@ha
+; PWR8-NEXT: vspltisw 3, 8
; PWR8-NEXT: vpopcntd 2, 2
-; PWR8-NEXT: addi 3, 3, .LCPI113_0 at toc@l
-; PWR8-NEXT: lxvd2x 35, 0, 3
+; PWR8-NEXT: vupklsw 3, 3
; PWR8-NEXT: vcmpgtud 2, 3, 2
; PWR8-NEXT: blr
;
; PWR9-LABEL: ult_8_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI113_0 at toc@ha
+; PWR9-NEXT: vspltisw 3, 8
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI113_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vupklsw 3, 3
; PWR9-NEXT: vcmpgtud 2, 3, 2
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -13803,19 +13760,17 @@ define <2 x i64> @ugt_8_v2i64(<2 x i64> %0) {
;
; PWR8-LABEL: ugt_8_v2i64:
; PWR8: # %bb.0:
-; PWR8-NEXT: addis 3, 2, .LCPI114_0 at toc@ha
+; PWR8-NEXT: vspltisw 3, 8
; PWR8-NEXT: vpopcntd 2, 2
-; PWR8-NEXT: addi 3, 3, .LCPI114_0 at toc@l
-; PWR8-NEXT: lxvd2x 35, 0, 3
+; PWR8-NEXT: vupklsw 3, 3
; PWR8-NEXT: vcmpgtud 2, 2, 3
; PWR8-NEXT: blr
;
; PWR9-LABEL: ugt_8_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI114_0 at toc@ha
+; PWR9-NEXT: vspltisw 3, 8
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI114_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vupklsw 3, 3
; PWR9-NEXT: vcmpgtud 2, 2, 3
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -13937,19 +13892,17 @@ define <2 x i64> @ult_9_v2i64(<2 x i64> %0) {
;
; PWR8-LABEL: ult_9_v2i64:
; PWR8: # %bb.0:
-; PWR8-NEXT: addis 3, 2, .LCPI115_0 at toc@ha
+; PWR8-NEXT: vspltisw 3, 9
; PWR8-NEXT: vpopcntd 2, 2
-; PWR8-NEXT: addi 3, 3, .LCPI115_0 at toc@l
-; PWR8-NEXT: lxvd2x 35, 0, 3
+; PWR8-NEXT: vupklsw 3, 3
; PWR8-NEXT: vcmpgtud 2, 3, 2
; PWR8-NEXT: blr
;
; PWR9-LABEL: ult_9_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI115_0 at toc@ha
+; PWR9-NEXT: vspltisw 3, 9
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI115_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vupklsw 3, 3
; PWR9-NEXT: vcmpgtud 2, 3, 2
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -14071,19 +14024,17 @@ define <2 x i64> @ugt_9_v2i64(<2 x i64> %0) {
;
; PWR8-LABEL: ugt_9_v2i64:
; PWR8: # %bb.0:
-; PWR8-NEXT: addis 3, 2, .LCPI116_0 at toc@ha
+; PWR8-NEXT: vspltisw 3, 9
; PWR8-NEXT: vpopcntd 2, 2
-; PWR8-NEXT: addi 3, 3, .LCPI116_0 at toc@l
-; PWR8-NEXT: lxvd2x 35, 0, 3
+; PWR8-NEXT: vupklsw 3, 3
; PWR8-NEXT: vcmpgtud 2, 2, 3
; PWR8-NEXT: blr
;
; PWR9-LABEL: ugt_9_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI116_0 at toc@ha
+; PWR9-NEXT: vspltisw 3, 9
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI116_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vupklsw 3, 3
; PWR9-NEXT: vcmpgtud 2, 2, 3
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -14205,19 +14156,17 @@ define <2 x i64> @ult_10_v2i64(<2 x i64> %0) {
;
; PWR8-LABEL: ult_10_v2i64:
; PWR8: # %bb.0:
-; PWR8-NEXT: addis 3, 2, .LCPI117_0 at toc@ha
+; PWR8-NEXT: vspltisw 3, 10
; PWR8-NEXT: vpopcntd 2, 2
-; PWR8-NEXT: addi 3, 3, .LCPI117_0 at toc@l
-; PWR8-NEXT: lxvd2x 35, 0, 3
+; PWR8-NEXT: vupklsw 3, 3
; PWR8-NEXT: vcmpgtud 2, 3, 2
; PWR8-NEXT: blr
;
; PWR9-LABEL: ult_10_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI117_0 at toc@ha
+; PWR9-NEXT: vspltisw 3, 10
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI117_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vupklsw 3, 3
; PWR9-NEXT: vcmpgtud 2, 3, 2
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -14339,19 +14288,17 @@ define <2 x i64> @ugt_10_v2i64(<2 x i64> %0) {
;
; PWR8-LABEL: ugt_10_v2i64:
; PWR8: # %bb.0:
-; PWR8-NEXT: addis 3, 2, .LCPI118_0 at toc@ha
+; PWR8-NEXT: vspltisw 3, 10
; PWR8-NEXT: vpopcntd 2, 2
-; PWR8-NEXT: addi 3, 3, .LCPI118_0 at toc@l
-; PWR8-NEXT: lxvd2x 35, 0, 3
+; PWR8-NEXT: vupklsw 3, 3
; PWR8-NEXT: vcmpgtud 2, 2, 3
; PWR8-NEXT: blr
;
; PWR9-LABEL: ugt_10_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI118_0 at toc@ha
+; PWR9-NEXT: vspltisw 3, 10
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI118_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vupklsw 3, 3
; PWR9-NEXT: vcmpgtud 2, 2, 3
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -14473,19 +14420,17 @@ define <2 x i64> @ult_11_v2i64(<2 x i64> %0) {
;
; PWR8-LABEL: ult_11_v2i64:
; PWR8: # %bb.0:
-; PWR8-NEXT: addis 3, 2, .LCPI119_0 at toc@ha
+; PWR8-NEXT: vspltisw 3, 11
; PWR8-NEXT: vpopcntd 2, 2
-; PWR8-NEXT: addi 3, 3, .LCPI119_0 at toc@l
-; PWR8-NEXT: lxvd2x 35, 0, 3
+; PWR8-NEXT: vupklsw 3, 3
; PWR8-NEXT: vcmpgtud 2, 3, 2
; PWR8-NEXT: blr
;
; PWR9-LABEL: ult_11_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI119_0 at toc@ha
+; PWR9-NEXT: vspltisw 3, 11
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI119_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vupklsw 3, 3
; PWR9-NEXT: vcmpgtud 2, 3, 2
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -14607,19 +14552,17 @@ define <2 x i64> @ugt_11_v2i64(<2 x i64> %0) {
;
; PWR8-LABEL: ugt_11_v2i64:
; PWR8: # %bb.0:
-; PWR8-NEXT: addis 3, 2, .LCPI120_0 at toc@ha
+; PWR8-NEXT: vspltisw 3, 11
; PWR8-NEXT: vpopcntd 2, 2
-; PWR8-NEXT: addi 3, 3, .LCPI120_0 at toc@l
-; PWR8-NEXT: lxvd2x 35, 0, 3
+; PWR8-NEXT: vupklsw 3, 3
; PWR8-NEXT: vcmpgtud 2, 2, 3
; PWR8-NEXT: blr
;
; PWR9-LABEL: ugt_11_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI120_0 at toc@ha
+; PWR9-NEXT: vspltisw 3, 11
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI120_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vupklsw 3, 3
; PWR9-NEXT: vcmpgtud 2, 2, 3
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -14741,19 +14684,17 @@ define <2 x i64> @ult_12_v2i64(<2 x i64> %0) {
;
; PWR8-LABEL: ult_12_v2i64:
; PWR8: # %bb.0:
-; PWR8-NEXT: addis 3, 2, .LCPI121_0 at toc@ha
+; PWR8-NEXT: vspltisw 3, 12
; PWR8-NEXT: vpopcntd 2, 2
-; PWR8-NEXT: addi 3, 3, .LCPI121_0 at toc@l
-; PWR8-NEXT: lxvd2x 35, 0, 3
+; PWR8-NEXT: vupklsw 3, 3
; PWR8-NEXT: vcmpgtud 2, 3, 2
; PWR8-NEXT: blr
;
; PWR9-LABEL: ult_12_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI121_0 at toc@ha
+; PWR9-NEXT: vspltisw 3, 12
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI121_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vupklsw 3, 3
; PWR9-NEXT: vcmpgtud 2, 3, 2
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -14875,19 +14816,17 @@ define <2 x i64> @ugt_12_v2i64(<2 x i64> %0) {
;
; PWR8-LABEL: ugt_12_v2i64:
; PWR8: # %bb.0:
-; PWR8-NEXT: addis 3, 2, .LCPI122_0 at toc@ha
+; PWR8-NEXT: vspltisw 3, 12
; PWR8-NEXT: vpopcntd 2, 2
-; PWR8-NEXT: addi 3, 3, .LCPI122_0 at toc@l
-; PWR8-NEXT: lxvd2x 35, 0, 3
+; PWR8-NEXT: vupklsw 3, 3
; PWR8-NEXT: vcmpgtud 2, 2, 3
; PWR8-NEXT: blr
;
; PWR9-LABEL: ugt_12_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI122_0 at toc@ha
+; PWR9-NEXT: vspltisw 3, 12
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI122_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vupklsw 3, 3
; PWR9-NEXT: vcmpgtud 2, 2, 3
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -15009,19 +14948,17 @@ define <2 x i64> @ult_13_v2i64(<2 x i64> %0) {
;
; PWR8-LABEL: ult_13_v2i64:
; PWR8: # %bb.0:
-; PWR8-NEXT: addis 3, 2, .LCPI123_0 at toc@ha
+; PWR8-NEXT: vspltisw 3, 13
; PWR8-NEXT: vpopcntd 2, 2
-; PWR8-NEXT: addi 3, 3, .LCPI123_0 at toc@l
-; PWR8-NEXT: lxvd2x 35, 0, 3
+; PWR8-NEXT: vupklsw 3, 3
; PWR8-NEXT: vcmpgtud 2, 3, 2
; PWR8-NEXT: blr
;
; PWR9-LABEL: ult_13_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI123_0 at toc@ha
+; PWR9-NEXT: vspltisw 3, 13
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI123_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vupklsw 3, 3
; PWR9-NEXT: vcmpgtud 2, 3, 2
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -15143,19 +15080,17 @@ define <2 x i64> @ugt_13_v2i64(<2 x i64> %0) {
;
; PWR8-LABEL: ugt_13_v2i64:
; PWR8: # %bb.0:
-; PWR8-NEXT: addis 3, 2, .LCPI124_0 at toc@ha
+; PWR8-NEXT: vspltisw 3, 13
; PWR8-NEXT: vpopcntd 2, 2
-; PWR8-NEXT: addi 3, 3, .LCPI124_0 at toc@l
-; PWR8-NEXT: lxvd2x 35, 0, 3
+; PWR8-NEXT: vupklsw 3, 3
; PWR8-NEXT: vcmpgtud 2, 2, 3
; PWR8-NEXT: blr
;
; PWR9-LABEL: ugt_13_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI124_0 at toc@ha
+; PWR9-NEXT: vspltisw 3, 13
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI124_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vupklsw 3, 3
; PWR9-NEXT: vcmpgtud 2, 2, 3
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -15277,19 +15212,17 @@ define <2 x i64> @ult_14_v2i64(<2 x i64> %0) {
;
; PWR8-LABEL: ult_14_v2i64:
; PWR8: # %bb.0:
-; PWR8-NEXT: addis 3, 2, .LCPI125_0 at toc@ha
+; PWR8-NEXT: vspltisw 3, 14
; PWR8-NEXT: vpopcntd 2, 2
-; PWR8-NEXT: addi 3, 3, .LCPI125_0 at toc@l
-; PWR8-NEXT: lxvd2x 35, 0, 3
+; PWR8-NEXT: vupklsw 3, 3
; PWR8-NEXT: vcmpgtud 2, 3, 2
; PWR8-NEXT: blr
;
; PWR9-LABEL: ult_14_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI125_0 at toc@ha
+; PWR9-NEXT: vspltisw 3, 14
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI125_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vupklsw 3, 3
; PWR9-NEXT: vcmpgtud 2, 3, 2
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -15411,19 +15344,17 @@ define <2 x i64> @ugt_14_v2i64(<2 x i64> %0) {
;
; PWR8-LABEL: ugt_14_v2i64:
; PWR8: # %bb.0:
-; PWR8-NEXT: addis 3, 2, .LCPI126_0 at toc@ha
+; PWR8-NEXT: vspltisw 3, 14
; PWR8-NEXT: vpopcntd 2, 2
-; PWR8-NEXT: addi 3, 3, .LCPI126_0 at toc@l
-; PWR8-NEXT: lxvd2x 35, 0, 3
+; PWR8-NEXT: vupklsw 3, 3
; PWR8-NEXT: vcmpgtud 2, 2, 3
; PWR8-NEXT: blr
;
; PWR9-LABEL: ugt_14_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI126_0 at toc@ha
+; PWR9-NEXT: vspltisw 3, 14
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI126_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vupklsw 3, 3
; PWR9-NEXT: vcmpgtud 2, 2, 3
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -15545,19 +15476,17 @@ define <2 x i64> @ult_15_v2i64(<2 x i64> %0) {
;
; PWR8-LABEL: ult_15_v2i64:
; PWR8: # %bb.0:
-; PWR8-NEXT: addis 3, 2, .LCPI127_0 at toc@ha
+; PWR8-NEXT: vspltisw 3, 15
; PWR8-NEXT: vpopcntd 2, 2
-; PWR8-NEXT: addi 3, 3, .LCPI127_0 at toc@l
-; PWR8-NEXT: lxvd2x 35, 0, 3
+; PWR8-NEXT: vupklsw 3, 3
; PWR8-NEXT: vcmpgtud 2, 3, 2
; PWR8-NEXT: blr
;
; PWR9-LABEL: ult_15_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI127_0 at toc@ha
+; PWR9-NEXT: vspltisw 3, 15
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI127_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vupklsw 3, 3
; PWR9-NEXT: vcmpgtud 2, 3, 2
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -15679,19 +15608,17 @@ define <2 x i64> @ugt_15_v2i64(<2 x i64> %0) {
;
; PWR8-LABEL: ugt_15_v2i64:
; PWR8: # %bb.0:
-; PWR8-NEXT: addis 3, 2, .LCPI128_0 at toc@ha
+; PWR8-NEXT: vspltisw 3, 15
; PWR8-NEXT: vpopcntd 2, 2
-; PWR8-NEXT: addi 3, 3, .LCPI128_0 at toc@l
-; PWR8-NEXT: lxvd2x 35, 0, 3
+; PWR8-NEXT: vupklsw 3, 3
; PWR8-NEXT: vcmpgtud 2, 2, 3
; PWR8-NEXT: blr
;
; PWR9-LABEL: ugt_15_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI128_0 at toc@ha
+; PWR9-NEXT: vspltisw 3, 15
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI128_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vupklsw 3, 3
; PWR9-NEXT: vcmpgtud 2, 2, 3
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -15822,10 +15749,9 @@ define <2 x i64> @ult_16_v2i64(<2 x i64> %0) {
;
; PWR9-LABEL: ult_16_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI129_0 at toc@ha
+; PWR9-NEXT: xxspltib 35, 16
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI129_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vextsb2d 3, 3
; PWR9-NEXT: vcmpgtud 2, 3, 2
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -15956,10 +15882,9 @@ define <2 x i64> @ugt_16_v2i64(<2 x i64> %0) {
;
; PWR9-LABEL: ugt_16_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI130_0 at toc@ha
+; PWR9-NEXT: xxspltib 35, 16
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI130_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vextsb2d 3, 3
; PWR9-NEXT: vcmpgtud 2, 2, 3
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -16090,10 +16015,9 @@ define <2 x i64> @ult_17_v2i64(<2 x i64> %0) {
;
; PWR9-LABEL: ult_17_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI131_0 at toc@ha
+; PWR9-NEXT: xxspltib 35, 17
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI131_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vextsb2d 3, 3
; PWR9-NEXT: vcmpgtud 2, 3, 2
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -16224,10 +16148,9 @@ define <2 x i64> @ugt_17_v2i64(<2 x i64> %0) {
;
; PWR9-LABEL: ugt_17_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI132_0 at toc@ha
+; PWR9-NEXT: xxspltib 35, 17
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI132_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vextsb2d 3, 3
; PWR9-NEXT: vcmpgtud 2, 2, 3
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -16358,10 +16281,9 @@ define <2 x i64> @ult_18_v2i64(<2 x i64> %0) {
;
; PWR9-LABEL: ult_18_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI133_0 at toc@ha
+; PWR9-NEXT: xxspltib 35, 18
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI133_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vextsb2d 3, 3
; PWR9-NEXT: vcmpgtud 2, 3, 2
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -16492,10 +16414,9 @@ define <2 x i64> @ugt_18_v2i64(<2 x i64> %0) {
;
; PWR9-LABEL: ugt_18_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI134_0 at toc@ha
+; PWR9-NEXT: xxspltib 35, 18
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI134_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vextsb2d 3, 3
; PWR9-NEXT: vcmpgtud 2, 2, 3
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -16626,10 +16547,9 @@ define <2 x i64> @ult_19_v2i64(<2 x i64> %0) {
;
; PWR9-LABEL: ult_19_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI135_0 at toc@ha
+; PWR9-NEXT: xxspltib 35, 19
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI135_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vextsb2d 3, 3
; PWR9-NEXT: vcmpgtud 2, 3, 2
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -16760,10 +16680,9 @@ define <2 x i64> @ugt_19_v2i64(<2 x i64> %0) {
;
; PWR9-LABEL: ugt_19_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI136_0 at toc@ha
+; PWR9-NEXT: xxspltib 35, 19
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI136_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vextsb2d 3, 3
; PWR9-NEXT: vcmpgtud 2, 2, 3
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -16894,10 +16813,9 @@ define <2 x i64> @ult_20_v2i64(<2 x i64> %0) {
;
; PWR9-LABEL: ult_20_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI137_0 at toc@ha
+; PWR9-NEXT: xxspltib 35, 20
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI137_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vextsb2d 3, 3
; PWR9-NEXT: vcmpgtud 2, 3, 2
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -17028,10 +16946,9 @@ define <2 x i64> @ugt_20_v2i64(<2 x i64> %0) {
;
; PWR9-LABEL: ugt_20_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI138_0 at toc@ha
+; PWR9-NEXT: xxspltib 35, 20
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI138_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vextsb2d 3, 3
; PWR9-NEXT: vcmpgtud 2, 2, 3
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -17162,10 +17079,9 @@ define <2 x i64> @ult_21_v2i64(<2 x i64> %0) {
;
; PWR9-LABEL: ult_21_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI139_0 at toc@ha
+; PWR9-NEXT: xxspltib 35, 21
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI139_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vextsb2d 3, 3
; PWR9-NEXT: vcmpgtud 2, 3, 2
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -17296,10 +17212,9 @@ define <2 x i64> @ugt_21_v2i64(<2 x i64> %0) {
;
; PWR9-LABEL: ugt_21_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI140_0 at toc@ha
+; PWR9-NEXT: xxspltib 35, 21
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI140_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vextsb2d 3, 3
; PWR9-NEXT: vcmpgtud 2, 2, 3
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -17430,10 +17345,9 @@ define <2 x i64> @ult_22_v2i64(<2 x i64> %0) {
;
; PWR9-LABEL: ult_22_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI141_0 at toc@ha
+; PWR9-NEXT: xxspltib 35, 22
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI141_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vextsb2d 3, 3
; PWR9-NEXT: vcmpgtud 2, 3, 2
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -17564,10 +17478,9 @@ define <2 x i64> @ugt_22_v2i64(<2 x i64> %0) {
;
; PWR9-LABEL: ugt_22_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI142_0 at toc@ha
+; PWR9-NEXT: xxspltib 35, 22
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI142_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vextsb2d 3, 3
; PWR9-NEXT: vcmpgtud 2, 2, 3
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -17698,10 +17611,9 @@ define <2 x i64> @ult_23_v2i64(<2 x i64> %0) {
;
; PWR9-LABEL: ult_23_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI143_0 at toc@ha
+; PWR9-NEXT: xxspltib 35, 23
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI143_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vextsb2d 3, 3
; PWR9-NEXT: vcmpgtud 2, 3, 2
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -17832,10 +17744,9 @@ define <2 x i64> @ugt_23_v2i64(<2 x i64> %0) {
;
; PWR9-LABEL: ugt_23_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI144_0 at toc@ha
+; PWR9-NEXT: xxspltib 35, 23
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI144_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vextsb2d 3, 3
; PWR9-NEXT: vcmpgtud 2, 2, 3
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -17966,10 +17877,9 @@ define <2 x i64> @ult_24_v2i64(<2 x i64> %0) {
;
; PWR9-LABEL: ult_24_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI145_0 at toc@ha
+; PWR9-NEXT: xxspltib 35, 24
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI145_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vextsb2d 3, 3
; PWR9-NEXT: vcmpgtud 2, 3, 2
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -18100,10 +18010,9 @@ define <2 x i64> @ugt_24_v2i64(<2 x i64> %0) {
;
; PWR9-LABEL: ugt_24_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI146_0 at toc@ha
+; PWR9-NEXT: xxspltib 35, 24
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI146_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vextsb2d 3, 3
; PWR9-NEXT: vcmpgtud 2, 2, 3
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -18234,10 +18143,9 @@ define <2 x i64> @ult_25_v2i64(<2 x i64> %0) {
;
; PWR9-LABEL: ult_25_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI147_0 at toc@ha
+; PWR9-NEXT: xxspltib 35, 25
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI147_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vextsb2d 3, 3
; PWR9-NEXT: vcmpgtud 2, 3, 2
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -18368,10 +18276,9 @@ define <2 x i64> @ugt_25_v2i64(<2 x i64> %0) {
;
; PWR9-LABEL: ugt_25_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI148_0 at toc@ha
+; PWR9-NEXT: xxspltib 35, 25
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI148_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vextsb2d 3, 3
; PWR9-NEXT: vcmpgtud 2, 2, 3
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -18502,10 +18409,9 @@ define <2 x i64> @ult_26_v2i64(<2 x i64> %0) {
;
; PWR9-LABEL: ult_26_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI149_0 at toc@ha
+; PWR9-NEXT: xxspltib 35, 26
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI149_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vextsb2d 3, 3
; PWR9-NEXT: vcmpgtud 2, 3, 2
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -18636,10 +18542,9 @@ define <2 x i64> @ugt_26_v2i64(<2 x i64> %0) {
;
; PWR9-LABEL: ugt_26_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI150_0 at toc@ha
+; PWR9-NEXT: xxspltib 35, 26
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI150_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vextsb2d 3, 3
; PWR9-NEXT: vcmpgtud 2, 2, 3
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -18770,10 +18675,9 @@ define <2 x i64> @ult_27_v2i64(<2 x i64> %0) {
;
; PWR9-LABEL: ult_27_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI151_0 at toc@ha
+; PWR9-NEXT: xxspltib 35, 27
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI151_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vextsb2d 3, 3
; PWR9-NEXT: vcmpgtud 2, 3, 2
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -18904,10 +18808,9 @@ define <2 x i64> @ugt_27_v2i64(<2 x i64> %0) {
;
; PWR9-LABEL: ugt_27_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI152_0 at toc@ha
+; PWR9-NEXT: xxspltib 35, 27
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI152_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vextsb2d 3, 3
; PWR9-NEXT: vcmpgtud 2, 2, 3
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -19038,10 +18941,9 @@ define <2 x i64> @ult_28_v2i64(<2 x i64> %0) {
;
; PWR9-LABEL: ult_28_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI153_0 at toc@ha
+; PWR9-NEXT: xxspltib 35, 28
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI153_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vextsb2d 3, 3
; PWR9-NEXT: vcmpgtud 2, 3, 2
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -19172,10 +19074,9 @@ define <2 x i64> @ugt_28_v2i64(<2 x i64> %0) {
;
; PWR9-LABEL: ugt_28_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI154_0 at toc@ha
+; PWR9-NEXT: xxspltib 35, 28
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI154_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vextsb2d 3, 3
; PWR9-NEXT: vcmpgtud 2, 2, 3
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -19306,10 +19207,9 @@ define <2 x i64> @ult_29_v2i64(<2 x i64> %0) {
;
; PWR9-LABEL: ult_29_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI155_0@toc@ha
+; PWR9-NEXT: xxspltib 35, 29
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI155_0@toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vextsb2d 3, 3
; PWR9-NEXT: vcmpgtud 2, 3, 2
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -19440,10 +19340,9 @@ define <2 x i64> @ugt_29_v2i64(<2 x i64> %0) {
;
; PWR9-LABEL: ugt_29_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI156_0@toc@ha
+; PWR9-NEXT: xxspltib 35, 29
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI156_0@toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vextsb2d 3, 3
; PWR9-NEXT: vcmpgtud 2, 2, 3
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -19574,10 +19473,9 @@ define <2 x i64> @ult_30_v2i64(<2 x i64> %0) {
;
; PWR9-LABEL: ult_30_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI157_0@toc@ha
+; PWR9-NEXT: xxspltib 35, 30
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI157_0@toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vextsb2d 3, 3
; PWR9-NEXT: vcmpgtud 2, 3, 2
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -19708,10 +19606,9 @@ define <2 x i64> @ugt_30_v2i64(<2 x i64> %0) {
;
; PWR9-LABEL: ugt_30_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI158_0@toc@ha
+; PWR9-NEXT: xxspltib 35, 30
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI158_0@toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vextsb2d 3, 3
; PWR9-NEXT: vcmpgtud 2, 2, 3
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -19842,10 +19739,9 @@ define <2 x i64> @ult_31_v2i64(<2 x i64> %0) {
;
; PWR9-LABEL: ult_31_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI159_0 at toc@ha
+; PWR9-NEXT: xxspltib 35, 31
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI159_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vextsb2d 3, 3
; PWR9-NEXT: vcmpgtud 2, 3, 2
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -19976,10 +19872,9 @@ define <2 x i64> @ugt_31_v2i64(<2 x i64> %0) {
;
; PWR9-LABEL: ugt_31_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI160_0 at toc@ha
+; PWR9-NEXT: xxspltib 35, 31
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI160_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vextsb2d 3, 3
; PWR9-NEXT: vcmpgtud 2, 2, 3
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -20110,10 +20005,9 @@ define <2 x i64> @ult_32_v2i64(<2 x i64> %0) {
;
; PWR9-LABEL: ult_32_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI161_0 at toc@ha
+; PWR9-NEXT: xxspltib 35, 32
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI161_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vextsb2d 3, 3
; PWR9-NEXT: vcmpgtud 2, 3, 2
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -20244,10 +20138,9 @@ define <2 x i64> @ugt_32_v2i64(<2 x i64> %0) {
;
; PWR9-LABEL: ugt_32_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI162_0 at toc@ha
+; PWR9-NEXT: xxspltib 35, 32
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI162_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vextsb2d 3, 3
; PWR9-NEXT: vcmpgtud 2, 2, 3
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -20378,10 +20271,9 @@ define <2 x i64> @ult_33_v2i64(<2 x i64> %0) {
;
; PWR9-LABEL: ult_33_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI163_0 at toc@ha
+; PWR9-NEXT: xxspltib 35, 33
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI163_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vextsb2d 3, 3
; PWR9-NEXT: vcmpgtud 2, 3, 2
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -20512,10 +20404,9 @@ define <2 x i64> @ugt_33_v2i64(<2 x i64> %0) {
;
; PWR9-LABEL: ugt_33_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI164_0 at toc@ha
+; PWR9-NEXT: xxspltib 35, 33
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI164_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vextsb2d 3, 3
; PWR9-NEXT: vcmpgtud 2, 2, 3
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -20646,10 +20537,9 @@ define <2 x i64> @ult_34_v2i64(<2 x i64> %0) {
;
; PWR9-LABEL: ult_34_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI165_0 at toc@ha
+; PWR9-NEXT: xxspltib 35, 34
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI165_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vextsb2d 3, 3
; PWR9-NEXT: vcmpgtud 2, 3, 2
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -20780,10 +20670,9 @@ define <2 x i64> @ugt_34_v2i64(<2 x i64> %0) {
;
; PWR9-LABEL: ugt_34_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI166_0 at toc@ha
+; PWR9-NEXT: xxspltib 35, 34
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI166_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vextsb2d 3, 3
; PWR9-NEXT: vcmpgtud 2, 2, 3
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -20914,10 +20803,9 @@ define <2 x i64> @ult_35_v2i64(<2 x i64> %0) {
;
; PWR9-LABEL: ult_35_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI167_0 at toc@ha
+; PWR9-NEXT: xxspltib 35, 35
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI167_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vextsb2d 3, 3
; PWR9-NEXT: vcmpgtud 2, 3, 2
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -21048,10 +20936,9 @@ define <2 x i64> @ugt_35_v2i64(<2 x i64> %0) {
;
; PWR9-LABEL: ugt_35_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI168_0 at toc@ha
+; PWR9-NEXT: xxspltib 35, 35
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI168_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vextsb2d 3, 3
; PWR9-NEXT: vcmpgtud 2, 2, 3
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -21182,10 +21069,9 @@ define <2 x i64> @ult_36_v2i64(<2 x i64> %0) {
;
; PWR9-LABEL: ult_36_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI169_0 at toc@ha
+; PWR9-NEXT: xxspltib 35, 36
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI169_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vextsb2d 3, 3
; PWR9-NEXT: vcmpgtud 2, 3, 2
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -21316,10 +21202,9 @@ define <2 x i64> @ugt_36_v2i64(<2 x i64> %0) {
;
; PWR9-LABEL: ugt_36_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI170_0 at toc@ha
+; PWR9-NEXT: xxspltib 35, 36
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI170_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vextsb2d 3, 3
; PWR9-NEXT: vcmpgtud 2, 2, 3
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -21450,10 +21335,9 @@ define <2 x i64> @ult_37_v2i64(<2 x i64> %0) {
;
; PWR9-LABEL: ult_37_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI171_0 at toc@ha
+; PWR9-NEXT: xxspltib 35, 37
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI171_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vextsb2d 3, 3
; PWR9-NEXT: vcmpgtud 2, 3, 2
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -21584,10 +21468,9 @@ define <2 x i64> @ugt_37_v2i64(<2 x i64> %0) {
;
; PWR9-LABEL: ugt_37_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI172_0 at toc@ha
+; PWR9-NEXT: xxspltib 35, 37
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI172_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vextsb2d 3, 3
; PWR9-NEXT: vcmpgtud 2, 2, 3
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -21718,10 +21601,9 @@ define <2 x i64> @ult_38_v2i64(<2 x i64> %0) {
;
; PWR9-LABEL: ult_38_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI173_0 at toc@ha
+; PWR9-NEXT: xxspltib 35, 38
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI173_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vextsb2d 3, 3
; PWR9-NEXT: vcmpgtud 2, 3, 2
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -21852,10 +21734,9 @@ define <2 x i64> @ugt_38_v2i64(<2 x i64> %0) {
;
; PWR9-LABEL: ugt_38_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI174_0 at toc@ha
+; PWR9-NEXT: xxspltib 35, 38
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI174_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vextsb2d 3, 3
; PWR9-NEXT: vcmpgtud 2, 2, 3
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -21986,10 +21867,9 @@ define <2 x i64> @ult_39_v2i64(<2 x i64> %0) {
;
; PWR9-LABEL: ult_39_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI175_0 at toc@ha
+; PWR9-NEXT: xxspltib 35, 39
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI175_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vextsb2d 3, 3
; PWR9-NEXT: vcmpgtud 2, 3, 2
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -22120,10 +22000,9 @@ define <2 x i64> @ugt_39_v2i64(<2 x i64> %0) {
;
; PWR9-LABEL: ugt_39_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI176_0 at toc@ha
+; PWR9-NEXT: xxspltib 35, 39
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI176_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vextsb2d 3, 3
; PWR9-NEXT: vcmpgtud 2, 2, 3
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -22254,10 +22133,9 @@ define <2 x i64> @ult_40_v2i64(<2 x i64> %0) {
;
; PWR9-LABEL: ult_40_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI177_0 at toc@ha
+; PWR9-NEXT: xxspltib 35, 40
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI177_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vextsb2d 3, 3
; PWR9-NEXT: vcmpgtud 2, 3, 2
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -22388,10 +22266,9 @@ define <2 x i64> @ugt_40_v2i64(<2 x i64> %0) {
;
; PWR9-LABEL: ugt_40_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI178_0 at toc@ha
+; PWR9-NEXT: xxspltib 35, 40
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI178_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vextsb2d 3, 3
; PWR9-NEXT: vcmpgtud 2, 2, 3
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -22522,10 +22399,9 @@ define <2 x i64> @ult_41_v2i64(<2 x i64> %0) {
;
; PWR9-LABEL: ult_41_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI179_0 at toc@ha
+; PWR9-NEXT: xxspltib 35, 41
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI179_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vextsb2d 3, 3
; PWR9-NEXT: vcmpgtud 2, 3, 2
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -22656,10 +22532,9 @@ define <2 x i64> @ugt_41_v2i64(<2 x i64> %0) {
;
; PWR9-LABEL: ugt_41_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI180_0 at toc@ha
+; PWR9-NEXT: xxspltib 35, 41
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI180_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vextsb2d 3, 3
; PWR9-NEXT: vcmpgtud 2, 2, 3
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -22790,10 +22665,9 @@ define <2 x i64> @ult_42_v2i64(<2 x i64> %0) {
;
; PWR9-LABEL: ult_42_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI181_0 at toc@ha
+; PWR9-NEXT: xxspltib 35, 42
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI181_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vextsb2d 3, 3
; PWR9-NEXT: vcmpgtud 2, 3, 2
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -22924,10 +22798,9 @@ define <2 x i64> @ugt_42_v2i64(<2 x i64> %0) {
;
; PWR9-LABEL: ugt_42_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI182_0 at toc@ha
+; PWR9-NEXT: xxspltib 35, 42
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI182_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vextsb2d 3, 3
; PWR9-NEXT: vcmpgtud 2, 2, 3
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -23058,10 +22931,9 @@ define <2 x i64> @ult_43_v2i64(<2 x i64> %0) {
;
; PWR9-LABEL: ult_43_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI183_0 at toc@ha
+; PWR9-NEXT: xxspltib 35, 43
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI183_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vextsb2d 3, 3
; PWR9-NEXT: vcmpgtud 2, 3, 2
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -23192,10 +23064,9 @@ define <2 x i64> @ugt_43_v2i64(<2 x i64> %0) {
;
; PWR9-LABEL: ugt_43_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI184_0 at toc@ha
+; PWR9-NEXT: xxspltib 35, 43
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI184_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vextsb2d 3, 3
; PWR9-NEXT: vcmpgtud 2, 2, 3
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -23326,10 +23197,9 @@ define <2 x i64> @ult_44_v2i64(<2 x i64> %0) {
;
; PWR9-LABEL: ult_44_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI185_0 at toc@ha
+; PWR9-NEXT: xxspltib 35, 44
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI185_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vextsb2d 3, 3
; PWR9-NEXT: vcmpgtud 2, 3, 2
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -23460,10 +23330,9 @@ define <2 x i64> @ugt_44_v2i64(<2 x i64> %0) {
;
; PWR9-LABEL: ugt_44_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI186_0 at toc@ha
+; PWR9-NEXT: xxspltib 35, 44
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI186_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vextsb2d 3, 3
; PWR9-NEXT: vcmpgtud 2, 2, 3
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -23594,10 +23463,9 @@ define <2 x i64> @ult_45_v2i64(<2 x i64> %0) {
;
; PWR9-LABEL: ult_45_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI187_0 at toc@ha
+; PWR9-NEXT: xxspltib 35, 45
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI187_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vextsb2d 3, 3
; PWR9-NEXT: vcmpgtud 2, 3, 2
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -23728,10 +23596,9 @@ define <2 x i64> @ugt_45_v2i64(<2 x i64> %0) {
;
; PWR9-LABEL: ugt_45_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI188_0 at toc@ha
+; PWR9-NEXT: xxspltib 35, 45
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI188_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vextsb2d 3, 3
; PWR9-NEXT: vcmpgtud 2, 2, 3
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -23862,10 +23729,9 @@ define <2 x i64> @ult_46_v2i64(<2 x i64> %0) {
;
; PWR9-LABEL: ult_46_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI189_0 at toc@ha
+; PWR9-NEXT: xxspltib 35, 46
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI189_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vextsb2d 3, 3
; PWR9-NEXT: vcmpgtud 2, 3, 2
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -23996,10 +23862,9 @@ define <2 x i64> @ugt_46_v2i64(<2 x i64> %0) {
;
; PWR9-LABEL: ugt_46_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI190_0 at toc@ha
+; PWR9-NEXT: xxspltib 35, 46
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI190_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vextsb2d 3, 3
; PWR9-NEXT: vcmpgtud 2, 2, 3
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -24130,10 +23995,9 @@ define <2 x i64> @ult_47_v2i64(<2 x i64> %0) {
;
; PWR9-LABEL: ult_47_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI191_0 at toc@ha
+; PWR9-NEXT: xxspltib 35, 47
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI191_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vextsb2d 3, 3
; PWR9-NEXT: vcmpgtud 2, 3, 2
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -24264,10 +24128,9 @@ define <2 x i64> @ugt_47_v2i64(<2 x i64> %0) {
;
; PWR9-LABEL: ugt_47_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI192_0 at toc@ha
+; PWR9-NEXT: xxspltib 35, 47
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI192_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vextsb2d 3, 3
; PWR9-NEXT: vcmpgtud 2, 2, 3
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -24398,10 +24261,9 @@ define <2 x i64> @ult_48_v2i64(<2 x i64> %0) {
;
; PWR9-LABEL: ult_48_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI193_0 at toc@ha
+; PWR9-NEXT: xxspltib 35, 48
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI193_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vextsb2d 3, 3
; PWR9-NEXT: vcmpgtud 2, 3, 2
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -24532,10 +24394,9 @@ define <2 x i64> @ugt_48_v2i64(<2 x i64> %0) {
;
; PWR9-LABEL: ugt_48_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI194_0 at toc@ha
+; PWR9-NEXT: xxspltib 35, 48
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI194_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vextsb2d 3, 3
; PWR9-NEXT: vcmpgtud 2, 2, 3
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -24666,10 +24527,9 @@ define <2 x i64> @ult_49_v2i64(<2 x i64> %0) {
;
; PWR9-LABEL: ult_49_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI195_0 at toc@ha
+; PWR9-NEXT: xxspltib 35, 49
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI195_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vextsb2d 3, 3
; PWR9-NEXT: vcmpgtud 2, 3, 2
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -24800,10 +24660,9 @@ define <2 x i64> @ugt_49_v2i64(<2 x i64> %0) {
;
; PWR9-LABEL: ugt_49_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI196_0 at toc@ha
+; PWR9-NEXT: xxspltib 35, 49
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI196_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vextsb2d 3, 3
; PWR9-NEXT: vcmpgtud 2, 2, 3
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -24934,10 +24793,9 @@ define <2 x i64> @ult_50_v2i64(<2 x i64> %0) {
;
; PWR9-LABEL: ult_50_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI197_0 at toc@ha
+; PWR9-NEXT: xxspltib 35, 50
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI197_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vextsb2d 3, 3
; PWR9-NEXT: vcmpgtud 2, 3, 2
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -25068,10 +24926,9 @@ define <2 x i64> @ugt_50_v2i64(<2 x i64> %0) {
;
; PWR9-LABEL: ugt_50_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI198_0 at toc@ha
+; PWR9-NEXT: xxspltib 35, 50
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI198_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vextsb2d 3, 3
; PWR9-NEXT: vcmpgtud 2, 2, 3
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -25202,10 +25059,9 @@ define <2 x i64> @ult_51_v2i64(<2 x i64> %0) {
;
; PWR9-LABEL: ult_51_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI199_0 at toc@ha
+; PWR9-NEXT: xxspltib 35, 51
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI199_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vextsb2d 3, 3
; PWR9-NEXT: vcmpgtud 2, 3, 2
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -25336,10 +25192,9 @@ define <2 x i64> @ugt_51_v2i64(<2 x i64> %0) {
;
; PWR9-LABEL: ugt_51_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI200_0 at toc@ha
+; PWR9-NEXT: xxspltib 35, 51
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI200_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vextsb2d 3, 3
; PWR9-NEXT: vcmpgtud 2, 2, 3
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -25470,10 +25325,9 @@ define <2 x i64> @ult_52_v2i64(<2 x i64> %0) {
;
; PWR9-LABEL: ult_52_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI201_0 at toc@ha
+; PWR9-NEXT: xxspltib 35, 52
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI201_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vextsb2d 3, 3
; PWR9-NEXT: vcmpgtud 2, 3, 2
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -25604,10 +25458,9 @@ define <2 x i64> @ugt_52_v2i64(<2 x i64> %0) {
;
; PWR9-LABEL: ugt_52_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI202_0 at toc@ha
+; PWR9-NEXT: xxspltib 35, 52
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI202_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vextsb2d 3, 3
; PWR9-NEXT: vcmpgtud 2, 2, 3
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -25738,10 +25591,9 @@ define <2 x i64> @ult_53_v2i64(<2 x i64> %0) {
;
; PWR9-LABEL: ult_53_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI203_0 at toc@ha
+; PWR9-NEXT: xxspltib 35, 53
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI203_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vextsb2d 3, 3
; PWR9-NEXT: vcmpgtud 2, 3, 2
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -25872,10 +25724,9 @@ define <2 x i64> @ugt_53_v2i64(<2 x i64> %0) {
;
; PWR9-LABEL: ugt_53_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI204_0 at toc@ha
+; PWR9-NEXT: xxspltib 35, 53
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI204_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vextsb2d 3, 3
; PWR9-NEXT: vcmpgtud 2, 2, 3
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -26006,10 +25857,9 @@ define <2 x i64> @ult_54_v2i64(<2 x i64> %0) {
;
; PWR9-LABEL: ult_54_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI205_0 at toc@ha
+; PWR9-NEXT: xxspltib 35, 54
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI205_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vextsb2d 3, 3
; PWR9-NEXT: vcmpgtud 2, 3, 2
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -26140,10 +25990,9 @@ define <2 x i64> @ugt_54_v2i64(<2 x i64> %0) {
;
; PWR9-LABEL: ugt_54_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI206_0 at toc@ha
+; PWR9-NEXT: xxspltib 35, 54
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI206_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vextsb2d 3, 3
; PWR9-NEXT: vcmpgtud 2, 2, 3
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -26274,10 +26123,9 @@ define <2 x i64> @ult_55_v2i64(<2 x i64> %0) {
;
; PWR9-LABEL: ult_55_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI207_0 at toc@ha
+; PWR9-NEXT: xxspltib 35, 55
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI207_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vextsb2d 3, 3
; PWR9-NEXT: vcmpgtud 2, 3, 2
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -26408,10 +26256,9 @@ define <2 x i64> @ugt_55_v2i64(<2 x i64> %0) {
;
; PWR9-LABEL: ugt_55_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI208_0 at toc@ha
+; PWR9-NEXT: xxspltib 35, 55
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI208_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vextsb2d 3, 3
; PWR9-NEXT: vcmpgtud 2, 2, 3
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -26542,10 +26389,9 @@ define <2 x i64> @ult_56_v2i64(<2 x i64> %0) {
;
; PWR9-LABEL: ult_56_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI209_0 at toc@ha
+; PWR9-NEXT: xxspltib 35, 56
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI209_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vextsb2d 3, 3
; PWR9-NEXT: vcmpgtud 2, 3, 2
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -26676,10 +26522,9 @@ define <2 x i64> @ugt_56_v2i64(<2 x i64> %0) {
;
; PWR9-LABEL: ugt_56_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI210_0 at toc@ha
+; PWR9-NEXT: xxspltib 35, 56
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI210_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vextsb2d 3, 3
; PWR9-NEXT: vcmpgtud 2, 2, 3
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -26810,10 +26655,9 @@ define <2 x i64> @ult_57_v2i64(<2 x i64> %0) {
;
; PWR9-LABEL: ult_57_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI211_0 at toc@ha
+; PWR9-NEXT: xxspltib 35, 57
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI211_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vextsb2d 3, 3
; PWR9-NEXT: vcmpgtud 2, 3, 2
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -26944,10 +26788,9 @@ define <2 x i64> @ugt_57_v2i64(<2 x i64> %0) {
;
; PWR9-LABEL: ugt_57_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI212_0 at toc@ha
+; PWR9-NEXT: xxspltib 35, 57
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI212_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vextsb2d 3, 3
; PWR9-NEXT: vcmpgtud 2, 2, 3
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -27078,10 +26921,9 @@ define <2 x i64> @ult_58_v2i64(<2 x i64> %0) {
;
; PWR9-LABEL: ult_58_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI213_0 at toc@ha
+; PWR9-NEXT: xxspltib 35, 58
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI213_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vextsb2d 3, 3
; PWR9-NEXT: vcmpgtud 2, 3, 2
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -27212,10 +27054,9 @@ define <2 x i64> @ugt_58_v2i64(<2 x i64> %0) {
;
; PWR9-LABEL: ugt_58_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI214_0 at toc@ha
+; PWR9-NEXT: xxspltib 35, 58
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI214_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vextsb2d 3, 3
; PWR9-NEXT: vcmpgtud 2, 2, 3
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -27346,10 +27187,9 @@ define <2 x i64> @ult_59_v2i64(<2 x i64> %0) {
;
; PWR9-LABEL: ult_59_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI215_0 at toc@ha
+; PWR9-NEXT: xxspltib 35, 59
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI215_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vextsb2d 3, 3
; PWR9-NEXT: vcmpgtud 2, 3, 2
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -27480,10 +27320,9 @@ define <2 x i64> @ugt_59_v2i64(<2 x i64> %0) {
;
; PWR9-LABEL: ugt_59_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI216_0 at toc@ha
+; PWR9-NEXT: xxspltib 35, 59
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI216_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vextsb2d 3, 3
; PWR9-NEXT: vcmpgtud 2, 2, 3
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -27614,10 +27453,9 @@ define <2 x i64> @ult_60_v2i64(<2 x i64> %0) {
;
; PWR9-LABEL: ult_60_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI217_0 at toc@ha
+; PWR9-NEXT: xxspltib 35, 60
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI217_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vextsb2d 3, 3
; PWR9-NEXT: vcmpgtud 2, 3, 2
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -27748,10 +27586,9 @@ define <2 x i64> @ugt_60_v2i64(<2 x i64> %0) {
;
; PWR9-LABEL: ugt_60_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI218_0 at toc@ha
+; PWR9-NEXT: xxspltib 35, 60
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI218_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vextsb2d 3, 3
; PWR9-NEXT: vcmpgtud 2, 2, 3
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -27882,10 +27719,9 @@ define <2 x i64> @ult_61_v2i64(<2 x i64> %0) {
;
; PWR9-LABEL: ult_61_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI219_0 at toc@ha
+; PWR9-NEXT: xxspltib 35, 61
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI219_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vextsb2d 3, 3
; PWR9-NEXT: vcmpgtud 2, 3, 2
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -28016,10 +27852,9 @@ define <2 x i64> @ugt_61_v2i64(<2 x i64> %0) {
;
; PWR9-LABEL: ugt_61_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI220_0 at toc@ha
+; PWR9-NEXT: xxspltib 35, 61
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI220_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vextsb2d 3, 3
; PWR9-NEXT: vcmpgtud 2, 2, 3
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -28150,10 +27985,9 @@ define <2 x i64> @ult_62_v2i64(<2 x i64> %0) {
;
; PWR9-LABEL: ult_62_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI221_0 at toc@ha
+; PWR9-NEXT: xxspltib 35, 62
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI221_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vextsb2d 3, 3
; PWR9-NEXT: vcmpgtud 2, 3, 2
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -28284,10 +28118,9 @@ define <2 x i64> @ugt_62_v2i64(<2 x i64> %0) {
;
; PWR9-LABEL: ugt_62_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI222_0 at toc@ha
+; PWR9-NEXT: xxspltib 35, 62
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI222_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vextsb2d 3, 3
; PWR9-NEXT: vcmpgtud 2, 2, 3
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
@@ -28418,10 +28251,9 @@ define <2 x i64> @ult_63_v2i64(<2 x i64> %0) {
;
; PWR9-LABEL: ult_63_v2i64:
; PWR9: # %bb.0:
-; PWR9-NEXT: addis 3, 2, .LCPI223_0 at toc@ha
+; PWR9-NEXT: xxspltib 35, 63
; PWR9-NEXT: vpopcntd 2, 2
-; PWR9-NEXT: addi 3, 3, .LCPI223_0 at toc@l
-; PWR9-NEXT: lxv 35, 0(3)
+; PWR9-NEXT: vextsb2d 3, 3
; PWR9-NEXT: vcmpgtud 2, 3, 2
; PWR9-NEXT: blr
%2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
More information about the llvm-commits
mailing list