[llvm] [PowerPC] fold i128 equality/inequality compares of two loads into a vectorized compare using vcmpequb.p when Altivec is available (PR #158657)
zhijian lin via llvm-commits
llvm-commits at lists.llvm.org
Wed Nov 5 13:24:59 PST 2025
https://github.com/diggerlin updated https://github.com/llvm/llvm-project/pull/158657
>From b045ba04bcbe7afaf36f820a46b37e11b1a7f865 Mon Sep 17 00:00:00 2001
From: zhijian <zhijian at ca.ibm.com>
Date: Thu, 14 Aug 2025 20:18:18 +0000
Subject: [PATCH 01/14] implement memcmp with known fix length size.
---
llvm/lib/Target/PowerPC/PPCISelLowering.cpp | 60 +++++++++++++++++++
.../Target/PowerPC/PPCTargetTransformInfo.cpp | 2 +-
2 files changed, 61 insertions(+), 1 deletion(-)
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index 20fc849ea4aa5..1c5be64e186c0 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -15613,6 +15613,66 @@ SDValue PPCTargetLowering::combineSetCC(SDNode *N,
SDValue Add = DAG.getNode(ISD::ADD, DL, OpVT, LHS, RHS.getOperand(1));
return DAG.getSetCC(DL, VT, Add, DAG.getConstant(0, DL, OpVT), CC);
}
+
+ if (LHS.getOpcode() == ISD::LOAD && RHS.getOpcode() == ISD::LOAD &&
+ LHS.hasOneUse() && RHS.hasOneUse() && LHS.getValueType() == MVT::i128 &&
+ RHS.getValueType() == MVT::i128) {
+ SDLoc DL(N);
+ SelectionDAG &DAG = DCI.DAG;
+ auto *LA = dyn_cast<LoadSDNode>(LHS);
+ auto *LB = dyn_cast<LoadSDNode>(RHS);
+ if (!LA || !LB)
+ return SDValue();
+
+ // If either memory operation (LA or LB) is volatile, do not perform any
+ // optimization or transformation. Volatile operations must be preserved
+ // as written to ensure correct program behavior, so we return an empty
+ // SDValue to indicate no action.
+ if (LA->isVolatile() || LB->isVolatile())
+ return SDValue();
+
+ // Only combine loads if both use the unindexed addressing mode.
+ // PowerPC AltiVec/VMX does not support vector loads or stores with
+ // pre/post-increment addressing. Indexed modes may imply implicit pointer
+ // updates, which are not compatible with AltiVec vector instructions.
+ if (LA->getAddressingMode() != ISD::UNINDEXED ||
+ LB->getAddressingMode() != ISD::UNINDEXED)
+ return SDValue();
+
+ // Only combine loads if both are non-extending loads (ISD::NON_EXTLOAD).
+ // Extending loads (such as ISD::ZEXTLOAD or ISD::SEXTLOAD) perform zero
+ // or sign extension, which may change the loaded value's semantics and
+ // are not compatible with vector loads.
+ if (LA->getExtensionType() != ISD::NON_EXTLOAD ||
+ LB->getExtensionType() != ISD::NON_EXTLOAD)
+ return SDValue();
+ // Build new v16i8 loads using the SAME chain/base/MMO (no extra memory
+ // op).
+ SDValue LHSVec = DAG.getLoad(MVT::v16i8, DL, LA->getChain(),
+ LA->getBasePtr(), LA->getMemOperand());
+ SDValue RHSVec = DAG.getLoad(MVT::v16i8, DL, LB->getChain(),
+ LB->getBasePtr(), LB->getMemOperand());
+
+    // Replace old loads' results (value and chain) so the old nodes die.
+ // DAG.DeleteNode(LHS.getNode());
+ // DAG.DeleteNode(RHS.getNode());
+
+ // SDValue LHSVec = DAG.getBitcast(MVT::v16i8, LHS);
+ // SDValue RHSVec = DAG.getBitcast(MVT::v16i8, RHS);
+ SDValue IntrID =
+ DAG.getTargetConstant(Intrinsic::ppc_altivec_vcmpequb_p, DL,
+ Subtarget.isPPC64() ? MVT::i64 : MVT::i32);
+ SDValue CRSel =
+ DAG.getConstant(2, DL, MVT::i32); // which CR6 predicate field
+ SDValue Ops[] = {IntrID, CRSel, LHSVec, RHSVec};
+ SDValue PredResult =
+ DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, MVT::i32, Ops);
+ // ppc_altivec_vcmpequb_p returns 1 when two vectors are the same,
+ // so we need to invert the CC opcode.
+ return DAG.getSetCC(DL, N->getValueType(0), PredResult,
+ DAG.getConstant(0, DL, MVT::i32),
+ CC == ISD::SETNE ? ISD::SETEQ : ISD::SETNE);
+ }
}
return DAGCombineTruncBoolExt(N, DCI);
diff --git a/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.cpp b/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.cpp
index b04e8874f58ad..20514d4890c45 100644
--- a/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.cpp
+++ b/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.cpp
@@ -439,7 +439,7 @@ bool PPCTTIImpl::enableAggressiveInterleaving(bool LoopHasReductions) const {
PPCTTIImpl::TTI::MemCmpExpansionOptions
PPCTTIImpl::enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const {
TTI::MemCmpExpansionOptions Options;
- Options.LoadSizes = {8, 4, 2, 1};
+ Options.LoadSizes = {16, 8, 4, 2, 1};
Options.MaxNumLoads = TLI->getMaxExpandSizeMemcmp(OptSize);
return Options;
}
>From f6155328dc24395045c446e4da7cdee82aadbc25 Mon Sep 17 00:00:00 2001
From: zhijian <zhijian at ca.ibm.com>
Date: Tue, 9 Sep 2025 15:35:04 +0000
Subject: [PATCH 02/14] delete dead code
---
llvm/lib/Target/PowerPC/PPCISelLowering.cpp | 9 ++-------
1 file changed, 2 insertions(+), 7 deletions(-)
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index 1c5be64e186c0..db2aff4e6e71e 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -15646,19 +15646,13 @@ SDValue PPCTargetLowering::combineSetCC(SDNode *N,
if (LA->getExtensionType() != ISD::NON_EXTLOAD ||
LB->getExtensionType() != ISD::NON_EXTLOAD)
return SDValue();
- // Build new v16i8 loads using the SAME chain/base/MMO (no extra memory
+ // Build new v16i8 loads using the same chain/base/MMO (no extra memory
// op).
SDValue LHSVec = DAG.getLoad(MVT::v16i8, DL, LA->getChain(),
LA->getBasePtr(), LA->getMemOperand());
SDValue RHSVec = DAG.getLoad(MVT::v16i8, DL, LB->getChain(),
LB->getBasePtr(), LB->getMemOperand());
-  // Replace old loads' results (value and chain) so the old nodes die.
- // DAG.DeleteNode(LHS.getNode());
- // DAG.DeleteNode(RHS.getNode());
-
- // SDValue LHSVec = DAG.getBitcast(MVT::v16i8, LHS);
- // SDValue RHSVec = DAG.getBitcast(MVT::v16i8, RHS);
SDValue IntrID =
DAG.getTargetConstant(Intrinsic::ppc_altivec_vcmpequb_p, DL,
Subtarget.isPPC64() ? MVT::i64 : MVT::i32);
@@ -15667,6 +15661,7 @@ SDValue PPCTargetLowering::combineSetCC(SDNode *N,
SDValue Ops[] = {IntrID, CRSel, LHSVec, RHSVec};
SDValue PredResult =
DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, MVT::i32, Ops);
+
// ppc_altivec_vcmpequb_p returns 1 when two vectors are the same,
// so we need to invert the CC opcode.
return DAG.getSetCC(DL, N->getValueType(0), PredResult,
>From f11a9a6d8aa655bab7ff8c3f8dd788124d199a3c Mon Sep 17 00:00:00 2001
From: zhijian <zhijian at ca.ibm.com>
Date: Tue, 9 Sep 2025 15:32:03 +0000
Subject: [PATCH 03/14] modify test case based on the new functionality
---
.../memCmpUsedInZeroEqualityComparison.ll | 45 ++++++---------
llvm/test/CodeGen/PowerPC/memcmpIR.ll | 55 +++++--------------
2 files changed, 30 insertions(+), 70 deletions(-)
diff --git a/llvm/test/CodeGen/PowerPC/memCmpUsedInZeroEqualityComparison.ll b/llvm/test/CodeGen/PowerPC/memCmpUsedInZeroEqualityComparison.ll
index 1da40d46aa773..7c4cf7265ff6a 100644
--- a/llvm/test/CodeGen/PowerPC/memCmpUsedInZeroEqualityComparison.ll
+++ b/llvm/test/CodeGen/PowerPC/memCmpUsedInZeroEqualityComparison.ll
@@ -35,18 +35,13 @@ define signext i32 @zeroEqualityTest02(ptr %x, ptr %y) {
define signext i32 @zeroEqualityTest01(ptr %x, ptr %y) {
; CHECK-LABEL: zeroEqualityTest01:
; CHECK: # %bb.0:
-; CHECK-NEXT: ld 5, 0(3)
-; CHECK-NEXT: ld 6, 0(4)
-; CHECK-NEXT: cmpld 5, 6
-; CHECK-NEXT: bne 0, .LBB1_2
-; CHECK-NEXT: # %bb.1: # %loadbb1
-; CHECK-NEXT: ld 5, 8(3)
-; CHECK-NEXT: ld 4, 8(4)
-; CHECK-NEXT: li 3, 0
-; CHECK-NEXT: cmpld 5, 4
-; CHECK-NEXT: beqlr 0
-; CHECK-NEXT: .LBB1_2: # %res_block
-; CHECK-NEXT: li 3, 1
+; CHECK-NEXT: lxvd2x 34, 0, 4
+; CHECK-NEXT: lxvd2x 35, 0, 3
+; CHECK-NEXT: vcmpequb. 2, 3, 2
+; CHECK-NEXT: mfocrf 3, 2
+; CHECK-NEXT: rlwinm 3, 3, 25, 31, 31
+; CHECK-NEXT: cntlzw 3, 3
+; CHECK-NEXT: srwi 3, 3, 5
; CHECK-NEXT: blr
%call = tail call signext i32 @memcmp(ptr %x, ptr %y, i64 16)
%not.tobool = icmp ne i32 %call, 0
@@ -85,7 +80,7 @@ define signext i32 @zeroEqualityTest03(ptr %x, ptr %y) {
; Validate with > 0
define signext i32 @zeroEqualityTest04() {
; CHECK-LABEL: zeroEqualityTest04:
-; CHECK: # %bb.0: # %loadbb
+; CHECK: # %bb.0:
; CHECK-NEXT: li 3, 0
; CHECK-NEXT: blr
%call = tail call signext i32 @memcmp(ptr @zeroEqualityTest02.buffer1, ptr @zeroEqualityTest02.buffer2, i64 16)
@@ -97,7 +92,7 @@ define signext i32 @zeroEqualityTest04() {
; Validate with < 0
define signext i32 @zeroEqualityTest05() {
; CHECK-LABEL: zeroEqualityTest05:
-; CHECK: # %bb.0: # %loadbb
+; CHECK: # %bb.0:
; CHECK-NEXT: li 3, 0
; CHECK-NEXT: blr
%call = tail call signext i32 @memcmp(ptr @zeroEqualityTest03.buffer1, ptr @zeroEqualityTest03.buffer2, i64 16)
@@ -109,7 +104,7 @@ define signext i32 @zeroEqualityTest05() {
; Validate with memcmp()?
define signext i32 @equalityFoldTwoConstants() {
; CHECK-LABEL: equalityFoldTwoConstants:
-; CHECK: # %bb.0: # %loadbb
+; CHECK: # %bb.0:
; CHECK-NEXT: li 3, 1
; CHECK-NEXT: blr
%call = tail call signext i32 @memcmp(ptr @zeroEqualityTest04.buffer1, ptr @zeroEqualityTest04.buffer2, i64 16)
@@ -122,23 +117,17 @@ define signext i32 @equalityFoldOneConstant(ptr %X) {
; CHECK-LABEL: equalityFoldOneConstant:
; CHECK: # %bb.0:
; CHECK-NEXT: li 5, 1
-; CHECK-NEXT: ld 4, 0(3)
+; CHECK-NEXT: ld 4, 8(3)
+; CHECK-NEXT: ld 3, 0(3)
; CHECK-NEXT: rldic 5, 5, 32, 31
-; CHECK-NEXT: cmpld 4, 5
-; CHECK-NEXT: bne 0, .LBB6_2
-; CHECK-NEXT: # %bb.1: # %loadbb1
+; CHECK-NEXT: xor 3, 3, 5
; CHECK-NEXT: lis 5, -32768
-; CHECK-NEXT: ld 4, 8(3)
-; CHECK-NEXT: li 3, 0
; CHECK-NEXT: ori 5, 5, 1
; CHECK-NEXT: rldic 5, 5, 1, 30
-; CHECK-NEXT: cmpld 4, 5
-; CHECK-NEXT: beq 0, .LBB6_3
-; CHECK-NEXT: .LBB6_2: # %res_block
-; CHECK-NEXT: li 3, 1
-; CHECK-NEXT: .LBB6_3: # %endblock
-; CHECK-NEXT: cntlzw 3, 3
-; CHECK-NEXT: srwi 3, 3, 5
+; CHECK-NEXT: xor 4, 4, 5
+; CHECK-NEXT: or 3, 3, 4
+; CHECK-NEXT: cntlzd 3, 3
+; CHECK-NEXT: rldicl 3, 3, 58, 63
; CHECK-NEXT: blr
%call = tail call signext i32 @memcmp(ptr @zeroEqualityTest04.buffer1, ptr %X, i64 16)
%not.tobool = icmp eq i32 %call, 0
diff --git a/llvm/test/CodeGen/PowerPC/memcmpIR.ll b/llvm/test/CodeGen/PowerPC/memcmpIR.ll
index b57d2b5116b77..995ecb64d4bdd 100644
--- a/llvm/test/CodeGen/PowerPC/memcmpIR.ll
+++ b/llvm/test/CodeGen/PowerPC/memcmpIR.ll
@@ -4,48 +4,19 @@
define signext i32 @test1(ptr nocapture readonly %buffer1, ptr nocapture readonly %buffer2) {
entry:
; CHECK-LABEL: @test1(
- ; CHECK-LABEL: res_block:{{.*}}
- ; CHECK: [[ICMP2:%[0-9]+]] = icmp ult i64
- ; CHECK-NEXT: [[SELECT:%[0-9]+]] = select i1 [[ICMP2]], i32 -1, i32 1
- ; CHECK-NEXT: br label %endblock
-
- ; CHECK-LABEL: loadbb:{{.*}}
- ; CHECK: [[LOAD1:%[0-9]+]] = load i64, ptr
- ; CHECK-NEXT: [[LOAD2:%[0-9]+]] = load i64, ptr
- ; CHECK-NEXT: [[BSWAP1:%[0-9]+]] = call i64 @llvm.bswap.i64(i64 [[LOAD1]])
- ; CHECK-NEXT: [[BSWAP2:%[0-9]+]] = call i64 @llvm.bswap.i64(i64 [[LOAD2]])
- ; CHECK-NEXT: [[ICMP:%[0-9]+]] = icmp eq i64 [[BSWAP1]], [[BSWAP2]]
- ; CHECK-NEXT: br i1 [[ICMP]], label %loadbb1, label %res_block
-
- ; CHECK-LABEL: loadbb1:{{.*}}
- ; CHECK-NEXT: [[GEP1:%[0-9]+]] = getelementptr i8, ptr {{.*}}, i64 8
- ; CHECK-NEXT: [[GEP2:%[0-9]+]] = getelementptr i8, ptr {{.*}}, i64 8
- ; CHECK-NEXT: [[LOAD1:%[0-9]+]] = load i64, ptr [[GEP1]]
- ; CHECK-NEXT: [[LOAD2:%[0-9]+]] = load i64, ptr [[GEP2]]
- ; CHECK-NEXT: [[BSWAP1:%[0-9]+]] = call i64 @llvm.bswap.i64(i64 [[LOAD1]])
- ; CHECK-NEXT: [[BSWAP2:%[0-9]+]] = call i64 @llvm.bswap.i64(i64 [[LOAD2]])
- ; CHECK-NEXT: [[ICMP:%[0-9]+]] = icmp eq i64 [[BSWAP1]], [[BSWAP2]]
- ; CHECK-NEXT: br i1 [[ICMP]], label %endblock, label %res_block
-
+ ; CHECK: [[LOAD0:%[0-9]+]] = load i128, ptr %buffer1, align 1
+ ; CHECK-NEXT: [[LOAD1:%[0-9]+]] = load i128, ptr %buffer2, align 1
+ ; CHECK-NEXT: [[CALL1:%[0-9]+]] = call i128 @llvm.bswap.i128(i128 [[LOAD0]])
+ ; CHECK-NEXT: [[CALL2:%[0-9]+]] = call i128 @llvm.bswap.i128(i128 [[LOAD1]])
+ ; CHECK-NEXT: [[CALL3:%[0-9]+]] = call i32 @llvm.ucmp.i32.i128(i128 [[CALL1]], i128 [[CALL2]])
+ ; CHECK-NEXT: ret i32 [[CALL3]]
+
+
; CHECK-BE-LABEL: @test1(
- ; CHECK-BE-LABEL: res_block:{{.*}}
- ; CHECK-BE: [[ICMP2:%[0-9]+]] = icmp ult i64
- ; CHECK-BE-NEXT: [[SELECT:%[0-9]+]] = select i1 [[ICMP2]], i32 -1, i32 1
- ; CHECK-BE-NEXT: br label %endblock
-
- ; CHECK-BE-LABEL: loadbb:{{.*}}
- ; CHECK-BE: [[LOAD1:%[0-9]+]] = load i64, ptr
- ; CHECK-BE-NEXT: [[LOAD2:%[0-9]+]] = load i64, ptr
- ; CHECK-BE-NEXT: [[ICMP:%[0-9]+]] = icmp eq i64 [[LOAD1]], [[LOAD2]]
- ; CHECK-BE-NEXT: br i1 [[ICMP]], label %loadbb1, label %res_block
-
- ; CHECK-BE-LABEL: loadbb1:{{.*}}
- ; CHECK-BE-NEXT: [[GEP1:%[0-9]+]] = getelementptr i8, ptr {{.*}}, i64 8
- ; CHECK-BE-NEXT: [[GEP2:%[0-9]+]] = getelementptr i8, ptr {{.*}}, i64 8
- ; CHECK-BE-NEXT: [[LOAD1:%[0-9]+]] = load i64, ptr [[GEP1]]
- ; CHECK-BE-NEXT: [[LOAD2:%[0-9]+]] = load i64, ptr [[GEP2]]
- ; CHECK-BE-NEXT: [[ICMP:%[0-9]+]] = icmp eq i64 [[LOAD1]], [[LOAD2]]
- ; CHECK-BE-NEXT: br i1 [[ICMP]], label %endblock, label %res_block
+ ; CHECK-BE: [[LOAD0:%[0-9]+]] = load i128, ptr %buffer1, align 1
+ ; CHECK-BE-NEXT: [[LOAD1:%[0-9]+]] = load i128, ptr %buffer2, align 1
+ ; CHECK-BE-NEXT: [[CALL0:%[0-9]+]] = call i32 @llvm.ucmp.i32.i128(i128 [[LOAD0]], i128 [[LOAD1]])
+ ; CHECK-BE-NEXT: ret i32 [[CALL0]]
%call = tail call signext i32 @memcmp(ptr %buffer1, ptr %buffer2, i64 16)
ret i32 %call
@@ -156,7 +127,7 @@ entry:
define signext i32 @test4(ptr nocapture readonly %buffer1, ptr nocapture readonly %buffer2) {
entry:
- %call = tail call signext i32 @memcmp(ptr %buffer1, ptr %buffer2, i64 65)
+ %call = tail call signext i32 @memcmp(ptr %buffer1, ptr %buffer2, i64 165)
ret i32 %call
}
>From 56f1aa2c5ff46347d3bd2b98b07d388ebc1f7bcd Mon Sep 17 00:00:00 2001
From: zhijian <zhijian at ca.ibm.com>
Date: Mon, 15 Sep 2025 15:22:00 +0000
Subject: [PATCH 04/14] add check Subtarget hasVSX check
---
llvm/lib/Target/PowerPC/PPCISelLowering.cpp | 104 ++++++++--------
llvm/test/CodeGen/PowerPC/memcmp32_fixsize.ll | 112 ++++--------------
llvm/test/CodeGen/PowerPC/memcmp64_fixsize.ll | 78 +++---------
3 files changed, 91 insertions(+), 203 deletions(-)
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index db2aff4e6e71e..1fb3c7c6c505c 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -15613,60 +15613,62 @@ SDValue PPCTargetLowering::combineSetCC(SDNode *N,
SDValue Add = DAG.getNode(ISD::ADD, DL, OpVT, LHS, RHS.getOperand(1));
return DAG.getSetCC(DL, VT, Add, DAG.getConstant(0, DL, OpVT), CC);
}
+ if (Subtarget.hasVSX()) {
+ if (LHS.getOpcode() == ISD::LOAD && RHS.getOpcode() == ISD::LOAD &&
+ LHS.hasOneUse() && RHS.hasOneUse() &&
+ LHS.getValueType() == MVT::i128 && RHS.getValueType() == MVT::i128) {
+ SDLoc DL(N);
+ SelectionDAG &DAG = DCI.DAG;
+ auto *LA = dyn_cast<LoadSDNode>(LHS);
+ auto *LB = dyn_cast<LoadSDNode>(RHS);
+ if (!LA || !LB)
+ return SDValue();
- if (LHS.getOpcode() == ISD::LOAD && RHS.getOpcode() == ISD::LOAD &&
- LHS.hasOneUse() && RHS.hasOneUse() && LHS.getValueType() == MVT::i128 &&
- RHS.getValueType() == MVT::i128) {
- SDLoc DL(N);
- SelectionDAG &DAG = DCI.DAG;
- auto *LA = dyn_cast<LoadSDNode>(LHS);
- auto *LB = dyn_cast<LoadSDNode>(RHS);
- if (!LA || !LB)
- return SDValue();
-
- // If either memory operation (LA or LB) is volatile, do not perform any
- // optimization or transformation. Volatile operations must be preserved
- // as written to ensure correct program behavior, so we return an empty
- // SDValue to indicate no action.
- if (LA->isVolatile() || LB->isVolatile())
- return SDValue();
+ // If either memory operation (LA or LB) is volatile, do not perform any
+ // optimization or transformation. Volatile operations must be preserved
+ // as written to ensure correct program behavior, so we return an empty
+ // SDValue to indicate no action.
+ if (LA->isVolatile() || LB->isVolatile())
+ return SDValue();
- // Only combine loads if both use the unindexed addressing mode.
- // PowerPC AltiVec/VMX does not support vector loads or stores with
- // pre/post-increment addressing. Indexed modes may imply implicit pointer
- // updates, which are not compatible with AltiVec vector instructions.
- if (LA->getAddressingMode() != ISD::UNINDEXED ||
- LB->getAddressingMode() != ISD::UNINDEXED)
- return SDValue();
+ // Only combine loads if both use the unindexed addressing mode.
+ // PowerPC AltiVec/VMX does not support vector loads or stores with
+ // pre/post-increment addressing. Indexed modes may imply implicit
+ // pointer updates, which are not compatible with AltiVec vector
+ // instructions.
+ if (LA->getAddressingMode() != ISD::UNINDEXED ||
+ LB->getAddressingMode() != ISD::UNINDEXED)
+ return SDValue();
- // Only combine loads if both are non-extending loads (ISD::NON_EXTLOAD).
- // Extending loads (such as ISD::ZEXTLOAD or ISD::SEXTLOAD) perform zero
- // or sign extension, which may change the loaded value's semantics and
- // are not compatible with vector loads.
- if (LA->getExtensionType() != ISD::NON_EXTLOAD ||
- LB->getExtensionType() != ISD::NON_EXTLOAD)
- return SDValue();
- // Build new v16i8 loads using the same chain/base/MMO (no extra memory
- // op).
- SDValue LHSVec = DAG.getLoad(MVT::v16i8, DL, LA->getChain(),
- LA->getBasePtr(), LA->getMemOperand());
- SDValue RHSVec = DAG.getLoad(MVT::v16i8, DL, LB->getChain(),
- LB->getBasePtr(), LB->getMemOperand());
-
- SDValue IntrID =
- DAG.getTargetConstant(Intrinsic::ppc_altivec_vcmpequb_p, DL,
- Subtarget.isPPC64() ? MVT::i64 : MVT::i32);
- SDValue CRSel =
- DAG.getConstant(2, DL, MVT::i32); // which CR6 predicate field
- SDValue Ops[] = {IntrID, CRSel, LHSVec, RHSVec};
- SDValue PredResult =
- DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, MVT::i32, Ops);
-
- // ppc_altivec_vcmpequb_p returns 1 when two vectors are the same,
- // so we need to invert the CC opcode.
- return DAG.getSetCC(DL, N->getValueType(0), PredResult,
- DAG.getConstant(0, DL, MVT::i32),
- CC == ISD::SETNE ? ISD::SETEQ : ISD::SETNE);
+ // Only combine loads if both are non-extending loads
+ // (ISD::NON_EXTLOAD). Extending loads (such as ISD::ZEXTLOAD or
+ // ISD::SEXTLOAD) perform zero or sign extension, which may change the
+ // loaded value's semantics and are not compatible with vector loads.
+ if (LA->getExtensionType() != ISD::NON_EXTLOAD ||
+ LB->getExtensionType() != ISD::NON_EXTLOAD)
+ return SDValue();
+ // Build new v16i8 loads using the same chain/base/MMO (no extra memory
+ // op).
+ SDValue LHSVec = DAG.getLoad(MVT::v16i8, DL, LA->getChain(),
+ LA->getBasePtr(), LA->getMemOperand());
+ SDValue RHSVec = DAG.getLoad(MVT::v16i8, DL, LB->getChain(),
+ LB->getBasePtr(), LB->getMemOperand());
+
+ SDValue IntrID =
+ DAG.getTargetConstant(Intrinsic::ppc_altivec_vcmpequb_p, DL,
+ Subtarget.isPPC64() ? MVT::i64 : MVT::i32);
+ SDValue CRSel =
+ DAG.getConstant(2, DL, MVT::i32); // which CR6 predicate field
+ SDValue Ops[] = {IntrID, CRSel, LHSVec, RHSVec};
+ SDValue PredResult =
+ DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, MVT::i32, Ops);
+
+ // ppc_altivec_vcmpequb_p returns 1 when two vectors are the same,
+ // so we need to invert the CC opcode.
+ return DAG.getSetCC(DL, N->getValueType(0), PredResult,
+ DAG.getConstant(0, DL, MVT::i32),
+ CC == ISD::SETNE ? ISD::SETEQ : ISD::SETNE);
+ }
}
}
diff --git a/llvm/test/CodeGen/PowerPC/memcmp32_fixsize.ll b/llvm/test/CodeGen/PowerPC/memcmp32_fixsize.ll
index f5483ad2a7c3f..7dfaac1a8ae37 100644
--- a/llvm/test/CodeGen/PowerPC/memcmp32_fixsize.ll
+++ b/llvm/test/CodeGen/PowerPC/memcmp32_fixsize.ll
@@ -14,110 +14,38 @@
define dso_local signext range(i32 0, 2) i32 @cmpeq16(ptr noundef readonly captures(none) %a, ptr noundef readonly captures(none) %b) {
; CHECK-AIX32-P8-LABEL: cmpeq16:
; CHECK-AIX32-P8: # %bb.0: # %entry
-; CHECK-AIX32-P8-NEXT: lwz r5, 4(r3)
-; CHECK-AIX32-P8-NEXT: lwz r6, 0(r3)
-; CHECK-AIX32-P8-NEXT: lwz r7, 4(r4)
-; CHECK-AIX32-P8-NEXT: lwz r8, 0(r4)
-; CHECK-AIX32-P8-NEXT: xor r6, r6, r8
-; CHECK-AIX32-P8-NEXT: xor r5, r5, r7
-; CHECK-AIX32-P8-NEXT: or. r5, r5, r6
-; CHECK-AIX32-P8-NEXT: bne cr0, L..BB0_2
-; CHECK-AIX32-P8-NEXT: # %bb.1: # %loadbb1
-; CHECK-AIX32-P8-NEXT: lwz r5, 12(r3)
-; CHECK-AIX32-P8-NEXT: lwz r3, 8(r3)
-; CHECK-AIX32-P8-NEXT: lwz r6, 12(r4)
-; CHECK-AIX32-P8-NEXT: lwz r4, 8(r4)
-; CHECK-AIX32-P8-NEXT: xor r3, r3, r4
-; CHECK-AIX32-P8-NEXT: xor r4, r5, r6
-; CHECK-AIX32-P8-NEXT: or. r3, r4, r3
-; CHECK-AIX32-P8-NEXT: li r3, 0
-; CHECK-AIX32-P8-NEXT: beq cr0, L..BB0_3
-; CHECK-AIX32-P8-NEXT: L..BB0_2: # %res_block
-; CHECK-AIX32-P8-NEXT: li r3, 1
-; CHECK-AIX32-P8-NEXT: L..BB0_3: # %endblock
-; CHECK-AIX32-P8-NEXT: cntlzw r3, r3
-; CHECK-AIX32-P8-NEXT: rlwinm r3, r3, 27, 31, 31
+; CHECK-AIX32-P8-NEXT: lxvw4x vs34, 0, r4
+; CHECK-AIX32-P8-NEXT: lxvw4x vs35, 0, r3
+; CHECK-AIX32-P8-NEXT: vcmpequb. v2, v3, v2
+; CHECK-AIX32-P8-NEXT: mfocrf r3, 2
+; CHECK-AIX32-P8-NEXT: rlwinm r3, r3, 25, 31, 31
; CHECK-AIX32-P8-NEXT: blr
;
; CHECK-AIX32-P10-LABEL: cmpeq16:
; CHECK-AIX32-P10: # %bb.0: # %entry
-; CHECK-AIX32-P10-NEXT: lwz r5, 4(r3)
-; CHECK-AIX32-P10-NEXT: lwz r6, 0(r3)
-; CHECK-AIX32-P10-NEXT: lwz r7, 4(r4)
-; CHECK-AIX32-P10-NEXT: xor r5, r5, r7
-; CHECK-AIX32-P10-NEXT: lwz r8, 0(r4)
-; CHECK-AIX32-P10-NEXT: xor r6, r6, r8
-; CHECK-AIX32-P10-NEXT: or. r5, r5, r6
-; CHECK-AIX32-P10-NEXT: bne cr0, L..BB0_2
-; CHECK-AIX32-P10-NEXT: # %bb.1: # %loadbb1
-; CHECK-AIX32-P10-NEXT: lwz r5, 12(r3)
-; CHECK-AIX32-P10-NEXT: lwz r3, 8(r3)
-; CHECK-AIX32-P10-NEXT: lwz r6, 12(r4)
-; CHECK-AIX32-P10-NEXT: lwz r4, 8(r4)
-; CHECK-AIX32-P10-NEXT: xor r3, r3, r4
-; CHECK-AIX32-P10-NEXT: xor r4, r5, r6
-; CHECK-AIX32-P10-NEXT: or. r3, r4, r3
-; CHECK-AIX32-P10-NEXT: li r3, 0
-; CHECK-AIX32-P10-NEXT: beq cr0, L..BB0_3
-; CHECK-AIX32-P10-NEXT: L..BB0_2: # %res_block
-; CHECK-AIX32-P10-NEXT: li r3, 1
-; CHECK-AIX32-P10-NEXT: L..BB0_3: # %endblock
-; CHECK-AIX32-P10-NEXT: cntlzw r3, r3
-; CHECK-AIX32-P10-NEXT: rlwinm r3, r3, 27, 31, 31
+; CHECK-AIX32-P10-NEXT: lxv vs34, 0(r4)
+; CHECK-AIX32-P10-NEXT: lxv vs35, 0(r3)
+; CHECK-AIX32-P10-NEXT: vcmpequb. v2, v3, v2
+; CHECK-AIX32-P10-NEXT: setbc r3, 4*cr6+lt
; CHECK-AIX32-P10-NEXT: blr
;
; CHECK-LINUX32-P8-LABEL: cmpeq16:
; CHECK-LINUX32-P8: # %bb.0: # %entry
-; CHECK-LINUX32-P8-NEXT: lwz r5, 0(r3)
-; CHECK-LINUX32-P8-NEXT: lwz r6, 4(r3)
-; CHECK-LINUX32-P8-NEXT: lwz r7, 0(r4)
-; CHECK-LINUX32-P8-NEXT: lwz r8, 4(r4)
-; CHECK-LINUX32-P8-NEXT: xor r6, r6, r8
-; CHECK-LINUX32-P8-NEXT: xor r5, r5, r7
-; CHECK-LINUX32-P8-NEXT: or. r5, r5, r6
-; CHECK-LINUX32-P8-NEXT: bne cr0, .LBB0_2
-; CHECK-LINUX32-P8-NEXT: # %bb.1: # %loadbb1
-; CHECK-LINUX32-P8-NEXT: lwz r5, 8(r3)
-; CHECK-LINUX32-P8-NEXT: lwz r3, 12(r3)
-; CHECK-LINUX32-P8-NEXT: lwz r6, 8(r4)
-; CHECK-LINUX32-P8-NEXT: lwz r4, 12(r4)
-; CHECK-LINUX32-P8-NEXT: xor r3, r3, r4
-; CHECK-LINUX32-P8-NEXT: xor r4, r5, r6
-; CHECK-LINUX32-P8-NEXT: or. r3, r4, r3
-; CHECK-LINUX32-P8-NEXT: li r3, 0
-; CHECK-LINUX32-P8-NEXT: beq cr0, .LBB0_3
-; CHECK-LINUX32-P8-NEXT: .LBB0_2: # %res_block
-; CHECK-LINUX32-P8-NEXT: li r3, 1
-; CHECK-LINUX32-P8-NEXT: .LBB0_3: # %endblock
-; CHECK-LINUX32-P8-NEXT: cntlzw r3, r3
-; CHECK-LINUX32-P8-NEXT: rlwinm r3, r3, 27, 31, 31
+; CHECK-LINUX32-P8-NEXT: lxvd2x vs0, 0, r4
+; CHECK-LINUX32-P8-NEXT: xxswapd vs34, vs0
+; CHECK-LINUX32-P8-NEXT: lxvd2x vs0, 0, r3
+; CHECK-LINUX32-P8-NEXT: xxswapd vs35, vs0
+; CHECK-LINUX32-P8-NEXT: vcmpequb. v2, v3, v2
+; CHECK-LINUX32-P8-NEXT: mfocrf r3, 2
+; CHECK-LINUX32-P8-NEXT: rlwinm r3, r3, 25, 31, 31
; CHECK-LINUX32-P8-NEXT: blr
;
; CHECK-LINUX32-P10-LABEL: cmpeq16:
; CHECK-LINUX32-P10: # %bb.0: # %entry
-; CHECK-LINUX32-P10-NEXT: lwz r5, 0(r3)
-; CHECK-LINUX32-P10-NEXT: lwz r6, 4(r3)
-; CHECK-LINUX32-P10-NEXT: lwz r7, 0(r4)
-; CHECK-LINUX32-P10-NEXT: xor r5, r5, r7
-; CHECK-LINUX32-P10-NEXT: lwz r8, 4(r4)
-; CHECK-LINUX32-P10-NEXT: xor r6, r6, r8
-; CHECK-LINUX32-P10-NEXT: or. r5, r5, r6
-; CHECK-LINUX32-P10-NEXT: bne cr0, .LBB0_2
-; CHECK-LINUX32-P10-NEXT: # %bb.1: # %loadbb1
-; CHECK-LINUX32-P10-NEXT: lwz r5, 8(r3)
-; CHECK-LINUX32-P10-NEXT: lwz r3, 12(r3)
-; CHECK-LINUX32-P10-NEXT: lwz r6, 8(r4)
-; CHECK-LINUX32-P10-NEXT: lwz r4, 12(r4)
-; CHECK-LINUX32-P10-NEXT: xor r3, r3, r4
-; CHECK-LINUX32-P10-NEXT: xor r4, r5, r6
-; CHECK-LINUX32-P10-NEXT: or. r3, r4, r3
-; CHECK-LINUX32-P10-NEXT: li r3, 0
-; CHECK-LINUX32-P10-NEXT: beq cr0, .LBB0_3
-; CHECK-LINUX32-P10-NEXT: .LBB0_2: # %res_block
-; CHECK-LINUX32-P10-NEXT: li r3, 1
-; CHECK-LINUX32-P10-NEXT: .LBB0_3: # %endblock
-; CHECK-LINUX32-P10-NEXT: cntlzw r3, r3
-; CHECK-LINUX32-P10-NEXT: rlwinm r3, r3, 27, 31, 31
+; CHECK-LINUX32-P10-NEXT: lxv vs34, 0(r4)
+; CHECK-LINUX32-P10-NEXT: lxv vs35, 0(r3)
+; CHECK-LINUX32-P10-NEXT: vcmpequb. v2, v3, v2
+; CHECK-LINUX32-P10-NEXT: setbc r3, 4*cr6+lt
; CHECK-LINUX32-P10-NEXT: blr
entry:
%bcmp = tail call i32 @bcmp(ptr noundef nonnull dereferenceable(16) %a, ptr noundef nonnull dereferenceable(16) %b, i32 16)
diff --git a/llvm/test/CodeGen/PowerPC/memcmp64_fixsize.ll b/llvm/test/CodeGen/PowerPC/memcmp64_fixsize.ll
index 216b7638642d4..bd703b9d35cf7 100644
--- a/llvm/test/CodeGen/PowerPC/memcmp64_fixsize.ll
+++ b/llvm/test/CodeGen/PowerPC/memcmp64_fixsize.ll
@@ -14,78 +14,36 @@
define dso_local signext range(i32 0, 2) i32 @cmpeq16(ptr noundef readonly captures(none) %a, ptr noundef readonly captures(none) %b) {
; CHECK-AIX64-32-P8-LABEL: cmpeq16:
; CHECK-AIX64-32-P8: # %bb.0: # %entry
-; CHECK-AIX64-32-P8-NEXT: ld r5, 0(r3)
-; CHECK-AIX64-32-P8-NEXT: ld r6, 0(r4)
-; CHECK-AIX64-32-P8-NEXT: cmpld r5, r6
-; CHECK-AIX64-32-P8-NEXT: bne cr0, L..BB0_2
-; CHECK-AIX64-32-P8-NEXT: # %bb.1: # %loadbb1
-; CHECK-AIX64-32-P8-NEXT: ld r5, 8(r3)
-; CHECK-AIX64-32-P8-NEXT: ld r4, 8(r4)
-; CHECK-AIX64-32-P8-NEXT: li r3, 0
-; CHECK-AIX64-32-P8-NEXT: cmpld r5, r4
-; CHECK-AIX64-32-P8-NEXT: beq cr0, L..BB0_3
-; CHECK-AIX64-32-P8-NEXT: L..BB0_2: # %res_block
-; CHECK-AIX64-32-P8-NEXT: li r3, 1
-; CHECK-AIX64-32-P8-NEXT: L..BB0_3: # %endblock
-; CHECK-AIX64-32-P8-NEXT: cntlzw r3, r3
-; CHECK-AIX64-32-P8-NEXT: srwi r3, r3, 5
+; CHECK-AIX64-32-P8-NEXT: lxvw4x vs34, 0, r4
+; CHECK-AIX64-32-P8-NEXT: lxvw4x vs35, 0, r3
+; CHECK-AIX64-32-P8-NEXT: vcmpequb. v2, v3, v2
+; CHECK-AIX64-32-P8-NEXT: mfocrf r3, 2
+; CHECK-AIX64-32-P8-NEXT: rlwinm r3, r3, 25, 31, 31
; CHECK-AIX64-32-P8-NEXT: blr
;
; CHECK-AIX64-32-P10-LABEL: cmpeq16:
; CHECK-AIX64-32-P10: # %bb.0: # %entry
-; CHECK-AIX64-32-P10-NEXT: ld r5, 0(r3)
-; CHECK-AIX64-32-P10-NEXT: ld r6, 0(r4)
-; CHECK-AIX64-32-P10-NEXT: cmpld r5, r6
-; CHECK-AIX64-32-P10-NEXT: bne cr0, L..BB0_2
-; CHECK-AIX64-32-P10-NEXT: # %bb.1: # %loadbb1
-; CHECK-AIX64-32-P10-NEXT: ld r5, 8(r3)
-; CHECK-AIX64-32-P10-NEXT: ld r4, 8(r4)
-; CHECK-AIX64-32-P10-NEXT: li r3, 0
-; CHECK-AIX64-32-P10-NEXT: cmpld r5, r4
-; CHECK-AIX64-32-P10-NEXT: beq cr0, L..BB0_3
-; CHECK-AIX64-32-P10-NEXT: L..BB0_2: # %res_block
-; CHECK-AIX64-32-P10-NEXT: li r3, 1
-; CHECK-AIX64-32-P10-NEXT: L..BB0_3: # %endblock
-; CHECK-AIX64-32-P10-NEXT: cntlzw r3, r3
-; CHECK-AIX64-32-P10-NEXT: rlwinm r3, r3, 27, 31, 31
+; CHECK-AIX64-32-P10-NEXT: lxv vs34, 0(r4)
+; CHECK-AIX64-32-P10-NEXT: lxv vs35, 0(r3)
+; CHECK-AIX64-32-P10-NEXT: vcmpequb. v2, v3, v2
+; CHECK-AIX64-32-P10-NEXT: setbc r3, 4*cr6+lt
; CHECK-AIX64-32-P10-NEXT: blr
;
; CHECK-LINUX64-P8-LABEL: cmpeq16:
; CHECK-LINUX64-P8: # %bb.0: # %entry
-; CHECK-LINUX64-P8-NEXT: ld r5, 0(r3)
-; CHECK-LINUX64-P8-NEXT: ld r6, 0(r4)
-; CHECK-LINUX64-P8-NEXT: cmpld r5, r6
-; CHECK-LINUX64-P8-NEXT: bne cr0, .LBB0_2
-; CHECK-LINUX64-P8-NEXT: # %bb.1: # %loadbb1
-; CHECK-LINUX64-P8-NEXT: ld r5, 8(r3)
-; CHECK-LINUX64-P8-NEXT: ld r4, 8(r4)
-; CHECK-LINUX64-P8-NEXT: li r3, 0
-; CHECK-LINUX64-P8-NEXT: cmpld r5, r4
-; CHECK-LINUX64-P8-NEXT: beq cr0, .LBB0_3
-; CHECK-LINUX64-P8-NEXT: .LBB0_2: # %res_block
-; CHECK-LINUX64-P8-NEXT: li r3, 1
-; CHECK-LINUX64-P8-NEXT: .LBB0_3: # %endblock
-; CHECK-LINUX64-P8-NEXT: cntlzw r3, r3
-; CHECK-LINUX64-P8-NEXT: srwi r3, r3, 5
+; CHECK-LINUX64-P8-NEXT: lxvd2x vs34, 0, r4
+; CHECK-LINUX64-P8-NEXT: lxvd2x vs35, 0, r3
+; CHECK-LINUX64-P8-NEXT: vcmpequb. v2, v3, v2
+; CHECK-LINUX64-P8-NEXT: mfocrf r3, 2
+; CHECK-LINUX64-P8-NEXT: rlwinm r3, r3, 25, 31, 31
; CHECK-LINUX64-P8-NEXT: blr
;
; CHECK-LINUX64-P10-LABEL: cmpeq16:
; CHECK-LINUX64-P10: # %bb.0: # %entry
-; CHECK-LINUX64-P10-NEXT: ld r5, 0(r3)
-; CHECK-LINUX64-P10-NEXT: ld r6, 0(r4)
-; CHECK-LINUX64-P10-NEXT: cmpld r5, r6
-; CHECK-LINUX64-P10-NEXT: bne cr0, .LBB0_2
-; CHECK-LINUX64-P10-NEXT: # %bb.1: # %loadbb1
-; CHECK-LINUX64-P10-NEXT: ld r5, 8(r3)
-; CHECK-LINUX64-P10-NEXT: ld r4, 8(r4)
-; CHECK-LINUX64-P10-NEXT: li r3, 0
-; CHECK-LINUX64-P10-NEXT: cmpld r5, r4
-; CHECK-LINUX64-P10-NEXT: beq cr0, .LBB0_3
-; CHECK-LINUX64-P10-NEXT: .LBB0_2: # %res_block
-; CHECK-LINUX64-P10-NEXT: li r3, 1
-; CHECK-LINUX64-P10-NEXT: .LBB0_3: # %endblock
-; CHECK-LINUX64-P10-NEXT: cntlzw r3, r3
-; CHECK-LINUX64-P10-NEXT: rlwinm r3, r3, 27, 31, 31
+; CHECK-LINUX64-P10-NEXT: lxv vs34, 0(r4)
+; CHECK-LINUX64-P10-NEXT: lxv vs35, 0(r3)
+; CHECK-LINUX64-P10-NEXT: vcmpequb. v2, v3, v2
+; CHECK-LINUX64-P10-NEXT: setbc r3, 4*cr6+lt
; CHECK-LINUX64-P10-NEXT: blr
entry:
%bcmp = tail call i32 @bcmp(ptr noundef nonnull dereferenceable(16) %a, ptr noundef nonnull dereferenceable(16) %b, i64 16)
>From f42d13d05e464ce984bde36ddd64d9c35f930e55 Mon Sep 17 00:00:00 2001
From: zhijian <zhijian at ca.ibm.com>
Date: Fri, 19 Sep 2025 15:51:36 +0000
Subject: [PATCH 05/14] address comment
---
llvm/lib/Target/PowerPC/PPCISelLowering.cpp | 36 ++++++++++++++++++---
1 file changed, 31 insertions(+), 5 deletions(-)
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index 1fb3c7c6c505c..41abfd29b60df 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -15613,6 +15613,16 @@ SDValue PPCTargetLowering::combineSetCC(SDNode *N,
SDValue Add = DAG.getNode(ISD::ADD, DL, OpVT, LHS, RHS.getOperand(1));
return DAG.getSetCC(DL, VT, Add, DAG.getConstant(0, DL, OpVT), CC);
}
+
+ // Optimization: Fold i128 equality/inequality compares of two loads into a
+ // vectorized compare using vcmpequb.p when VSX is available.
+ //
+ // Rationale:
+ // A scalar i128 SETCC (eq/ne) normally lowers to multiple scalar ops.
+ // On VSX-capable subtargets, we can instead reinterpret the i128 loads
+ // as v16i8 vectors and use the Altivec/VSX vcmpequb.p instruction to
+ // perform a full 128-bit equality check in a single vector compare.
+
if (Subtarget.hasVSX()) {
if (LHS.getOpcode() == ISD::LOAD && RHS.getOpcode() == ISD::LOAD &&
LHS.hasOneUse() && RHS.hasOneUse() &&
@@ -15647,8 +15657,25 @@ SDValue PPCTargetLowering::combineSetCC(SDNode *N,
if (LA->getExtensionType() != ISD::NON_EXTLOAD ||
LB->getExtensionType() != ISD::NON_EXTLOAD)
return SDValue();
- // Build new v16i8 loads using the same chain/base/MMO (no extra memory
- // op).
+
+ // Following code transforms the DAG
+ // t0: ch,glue = EntryToken
+ // t2: i64,ch = CopyFromReg t0, Register:i64 %0
+ // t3: i128,ch = load<(load (s128) from %ir.a, align 1)> t0, t2,
+ // undef:i64 t4: i64,ch = CopyFromReg t0, Register:i64 %1 t5: i128,ch =
+ // load<(load (s128) from %ir.b, align 1)> t0, t4, undef:i64 t6: i1 =
+ // setcc t3, t5, setne:ch
+ //
+ // ---->
+ //
+ // t0: ch,glue = EntryToken
+ // t2: i64,ch = CopyFromReg t0, Register:i64 %0
+ // t3: v16i8,ch = load<(load (s128) from %ir.a, align 1)> t0, t2,
+ // undef:i64 t4: i64,ch = CopyFromReg t0, Register:i64 %1 t5: v16i8,ch =
+ // load<(load (s128) from %ir.b, align 1)> t0, t4, undef:i64 t6: i32 =
+ // llvm.ppc.altivec.vcmpequb.p TargetConstant:i32<10505>,
+ // Constant:i32<2>, t3, t5 t7: i1 = setcc t6, Constant:i32<0>, seteq:ch
+
SDValue LHSVec = DAG.getLoad(MVT::v16i8, DL, LA->getChain(),
LA->getBasePtr(), LA->getMemOperand());
SDValue RHSVec = DAG.getLoad(MVT::v16i8, DL, LB->getChain(),
@@ -15659,9 +15686,8 @@ SDValue PPCTargetLowering::combineSetCC(SDNode *N,
Subtarget.isPPC64() ? MVT::i64 : MVT::i32);
SDValue CRSel =
DAG.getConstant(2, DL, MVT::i32); // which CR6 predicate field
- SDValue Ops[] = {IntrID, CRSel, LHSVec, RHSVec};
- SDValue PredResult =
- DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, MVT::i32, Ops);
+ SDValue PredResult = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, MVT::i32,
+ IntrID, CRSel, LHSVec, RHSVec);
// ppc_altivec_vcmpequb_p returns 1 when two vectors are the same,
// so we need to invert the CC opcode.
>From 114d929ba39c0f987ac33703e8575967a8c401ea Mon Sep 17 00:00:00 2001
From: zhijian <zhijian at ca.ibm.com>
Date: Wed, 24 Sep 2025 15:13:20 +0000
Subject: [PATCH 06/14] address comment
---
llvm/lib/Target/PowerPC/PPCISelLowering.cpp | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index 41abfd29b60df..f99a36bf17fc4 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -15632,14 +15632,14 @@ SDValue PPCTargetLowering::combineSetCC(SDNode *N,
auto *LA = dyn_cast<LoadSDNode>(LHS);
auto *LB = dyn_cast<LoadSDNode>(RHS);
if (!LA || !LB)
- return SDValue();
+ return DAGCombineTruncBoolExt(N, DCI);
// If either memory operation (LA or LB) is volatile, do not perform any
// optimization or transformation. Volatile operations must be preserved
// as written to ensure correct program behavior, so we return an empty
// SDValue to indicate no action.
if (LA->isVolatile() || LB->isVolatile())
- return SDValue();
+ return DAGCombineTruncBoolExt(N, DCI);
// Only combine loads if both use the unindexed addressing mode.
// PowerPC AltiVec/VMX does not support vector loads or stores with
@@ -15648,7 +15648,7 @@ SDValue PPCTargetLowering::combineSetCC(SDNode *N,
// instructions.
if (LA->getAddressingMode() != ISD::UNINDEXED ||
LB->getAddressingMode() != ISD::UNINDEXED)
- return SDValue();
+ return DAGCombineTruncBoolExt(N, DCI);
// Only combine loads if both are non-extending loads
// (ISD::NON_EXTLOAD). Extending loads (such as ISD::ZEXTLOAD or
@@ -15656,7 +15656,7 @@ SDValue PPCTargetLowering::combineSetCC(SDNode *N,
// loaded value's semantics and are not compatible with vector loads.
if (LA->getExtensionType() != ISD::NON_EXTLOAD ||
LB->getExtensionType() != ISD::NON_EXTLOAD)
- return SDValue();
+ return DAGCombineTruncBoolExt(N, DCI);
// Following code transforms the DAG
// t0: ch,glue = EntryToken
>From 9ea50e9e3c0ba49f81fdff199bc54b3946f539ce Mon Sep 17 00:00:00 2001
From: zhijian <zhijian at ca.ibm.com>
Date: Tue, 14 Oct 2025 16:10:24 +0000
Subject: [PATCH 07/14] address comment
---
llvm/lib/Target/PowerPC/PPCISelLowering.cpp | 168 +++++++++++---------
llvm/test/CodeGen/PowerPC/memcmpIR.ll | 2 +-
2 files changed, 95 insertions(+), 75 deletions(-)
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index f99a36bf17fc4..422d8a4f835bd 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -15587,6 +15587,45 @@ SDValue PPCTargetLowering::DAGCombineExtBoolTrunc(SDNode *N,
ShiftCst);
}
+static bool canConvertToVcmpequb(SDValue &LHS, SDValue RHS) {
+
+ if (LHS.getOpcode() != ISD::LOAD || RHS.getOpcode() != ISD::LOAD ||
+ !LHS.hasOneUse() || !RHS.hasOneUse() || LHS.getValueType() != MVT::i128 ||
+ RHS.getValueType() != MVT::i128)
+ return false;
+
+ auto *LA = dyn_cast<LoadSDNode>(LHS);
+ auto *LB = dyn_cast<LoadSDNode>(RHS);
+ if (!LA || !LB)
+ return false;
+
+ // If either memory operation (LA or LB) is volatile, do not perform any
+ // optimization or transformation. Volatile operations must be preserved
+ // as written to ensure correct program behavior, so we return an empty
+ // SDValue to indicate no action.
+ if (LA->isVolatile() || LB->isVolatile())
+ return false;
+
+ // Only combine loads if both use the unindexed addressing mode.
+ // PowerPC AltiVec/VMX does not support vector loads or stores with
+ // pre/post-increment addressing. Indexed modes may imply implicit
+ // pointer updates, which are not compatible with AltiVec vector
+ // instructions.
+ if (LA->getAddressingMode() != ISD::UNINDEXED ||
+ LB->getAddressingMode() != ISD::UNINDEXED)
+ return false;
+
+ // Only combine loads if both are non-extending loads
+ // (ISD::NON_EXTLOAD). Extending loads (such as ISD::ZEXTLOAD or
+ // ISD::SEXTLOAD) perform zero or sign extension, which may change the
+ // loaded value's semantics and are not compatible with vector loads.
+ if (LA->getExtensionType() != ISD::NON_EXTLOAD ||
+ LB->getExtensionType() != ISD::NON_EXTLOAD)
+ return false;
+
+ return true;
+}
+
SDValue PPCTargetLowering::combineSetCC(SDNode *N,
DAGCombinerInfo &DCI) const {
assert(N->getOpcode() == ISD::SETCC &&
@@ -15615,86 +15654,67 @@ SDValue PPCTargetLowering::combineSetCC(SDNode *N,
}
// Optimization: Fold i128 equality/inequality compares of two loads into a
- // vectorized compare using vcmpequb.p when VSX is available.
+ // vectorized compare using vcmpequb.p when Altivec is available.
//
// Rationale:
// A scalar i128 SETCC (eq/ne) normally lowers to multiple scalar ops.
// On VSX-capable subtargets, we can instead reinterpret the i128 loads
- // as v16i8 vectors and use the Altivec/VSX vcmpequb.p instruction to
+  // as v16i8 vectors and use the AltiVec vcmpequb.p instruction to
// perform a full 128-bit equality check in a single vector compare.
+ //
+ // Example Result:
+ // This transformation replaces memcmp(a, b, 16) with two vector loads
+ // and one vector compare instruction.
- if (Subtarget.hasVSX()) {
- if (LHS.getOpcode() == ISD::LOAD && RHS.getOpcode() == ISD::LOAD &&
- LHS.hasOneUse() && RHS.hasOneUse() &&
- LHS.getValueType() == MVT::i128 && RHS.getValueType() == MVT::i128) {
- SDLoc DL(N);
- SelectionDAG &DAG = DCI.DAG;
- auto *LA = dyn_cast<LoadSDNode>(LHS);
- auto *LB = dyn_cast<LoadSDNode>(RHS);
- if (!LA || !LB)
- return DAGCombineTruncBoolExt(N, DCI);
-
- // If either memory operation (LA or LB) is volatile, do not perform any
- // optimization or transformation. Volatile operations must be preserved
- // as written to ensure correct program behavior, so we return an empty
- // SDValue to indicate no action.
- if (LA->isVolatile() || LB->isVolatile())
- return DAGCombineTruncBoolExt(N, DCI);
-
- // Only combine loads if both use the unindexed addressing mode.
- // PowerPC AltiVec/VMX does not support vector loads or stores with
- // pre/post-increment addressing. Indexed modes may imply implicit
- // pointer updates, which are not compatible with AltiVec vector
- // instructions.
- if (LA->getAddressingMode() != ISD::UNINDEXED ||
- LB->getAddressingMode() != ISD::UNINDEXED)
- return DAGCombineTruncBoolExt(N, DCI);
-
- // Only combine loads if both are non-extending loads
- // (ISD::NON_EXTLOAD). Extending loads (such as ISD::ZEXTLOAD or
- // ISD::SEXTLOAD) perform zero or sign extension, which may change the
- // loaded value's semantics and are not compatible with vector loads.
- if (LA->getExtensionType() != ISD::NON_EXTLOAD ||
- LB->getExtensionType() != ISD::NON_EXTLOAD)
- return DAGCombineTruncBoolExt(N, DCI);
-
- // Following code transforms the DAG
- // t0: ch,glue = EntryToken
- // t2: i64,ch = CopyFromReg t0, Register:i64 %0
- // t3: i128,ch = load<(load (s128) from %ir.a, align 1)> t0, t2,
- // undef:i64 t4: i64,ch = CopyFromReg t0, Register:i64 %1 t5: i128,ch =
- // load<(load (s128) from %ir.b, align 1)> t0, t4, undef:i64 t6: i1 =
- // setcc t3, t5, setne:ch
- //
- // ---->
- //
- // t0: ch,glue = EntryToken
- // t2: i64,ch = CopyFromReg t0, Register:i64 %0
- // t3: v16i8,ch = load<(load (s128) from %ir.a, align 1)> t0, t2,
- // undef:i64 t4: i64,ch = CopyFromReg t0, Register:i64 %1 t5: v16i8,ch =
- // load<(load (s128) from %ir.b, align 1)> t0, t4, undef:i64 t6: i32 =
- // llvm.ppc.altivec.vcmpequb.p TargetConstant:i32<10505>,
- // Constant:i32<2>, t3, t5 t7: i1 = setcc t6, Constant:i32<0>, seteq:ch
-
- SDValue LHSVec = DAG.getLoad(MVT::v16i8, DL, LA->getChain(),
- LA->getBasePtr(), LA->getMemOperand());
- SDValue RHSVec = DAG.getLoad(MVT::v16i8, DL, LB->getChain(),
- LB->getBasePtr(), LB->getMemOperand());
-
- SDValue IntrID =
- DAG.getTargetConstant(Intrinsic::ppc_altivec_vcmpequb_p, DL,
- Subtarget.isPPC64() ? MVT::i64 : MVT::i32);
- SDValue CRSel =
- DAG.getConstant(2, DL, MVT::i32); // which CR6 predicate field
- SDValue PredResult = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, MVT::i32,
- IntrID, CRSel, LHSVec, RHSVec);
-
- // ppc_altivec_vcmpequb_p returns 1 when two vectors are the same,
- // so we need to invert the CC opcode.
- return DAG.getSetCC(DL, N->getValueType(0), PredResult,
- DAG.getConstant(0, DL, MVT::i32),
- CC == ISD::SETNE ? ISD::SETEQ : ISD::SETNE);
- }
+ if (Subtarget.hasAltivec() && canConvertToVcmpequb(LHS, RHS)) {
+ SDLoc DL(N);
+ SelectionDAG &DAG = DCI.DAG;
+ auto *LA = dyn_cast<LoadSDNode>(LHS);
+ auto *LB = dyn_cast<LoadSDNode>(RHS);
+
+ assert((LA && LB) && "LA and LB must be LoadSDNode");
+
+ // Following code transforms the DAG
+ // t0: ch,glue = EntryToken
+ // t2: i64,ch = CopyFromReg t0, Register:i64 %0
+ // t3: i128,ch = load<(load (s128) from %ir.a, align 1)> t0, t2,
+ // undef:i64
+ // t4: i64,ch = CopyFromReg t0, Register:i64 %1
+ // t5: i128,ch =
+ // load<(load (s128) from %ir.b, align 1)> t0, t4, undef:i64 t6: i1 =
+ // setcc t3, t5, setne:ch
+ //
+ // ---->
+ //
+ // t0: ch,glue = EntryToken
+ // t2: i64,ch = CopyFromReg t0, Register:i64 %0
+ // t3: v16i8,ch = load<(load (s128) from %ir.a, align 1)> t0, t2,
+ // undef:i64
+ // t4: i64,ch = CopyFromReg t0, Register:i64 %1
+ // t5: v16i8,ch =
+ // load<(load (s128) from %ir.b, align 1)> t0, t4, undef:i64
+ // t6: i32 =
+ // llvm.ppc.altivec.vcmpequb.p TargetConstant:i32<10505>,
+ // Constant:i32<2>, t3, t5
+ // t7: i1 = setcc t6, Constant:i32<0>, seteq:ch
+
+ SDValue LHSVec = DAG.getLoad(MVT::v16i8, DL, LA->getChain(),
+ LA->getBasePtr(), LA->getMemOperand());
+ SDValue RHSVec = DAG.getLoad(MVT::v16i8, DL, LB->getChain(),
+ LB->getBasePtr(), LB->getMemOperand());
+
+ SDValue IntrID =
+ DAG.getConstant(Intrinsic::ppc_altivec_vcmpequb_p, DL, MVT::i32);
+ SDValue CRSel =
+ DAG.getConstant(2, DL, MVT::i32); // which CR6 predicate field
+ SDValue PredResult = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, MVT::i32,
+ IntrID, CRSel, LHSVec, RHSVec);
+
+ // ppc_altivec_vcmpequb_p returns 1 when two vectors are the same,
+ // so we need to invert the CC opcode.
+ return DAG.getSetCC(DL, N->getValueType(0), PredResult,
+ DAG.getConstant(0, DL, MVT::i32),
+ CC == ISD::SETNE ? ISD::SETEQ : ISD::SETNE);
}
}
diff --git a/llvm/test/CodeGen/PowerPC/memcmpIR.ll b/llvm/test/CodeGen/PowerPC/memcmpIR.ll
index 995ecb64d4bdd..974b8bda34864 100644
--- a/llvm/test/CodeGen/PowerPC/memcmpIR.ll
+++ b/llvm/test/CodeGen/PowerPC/memcmpIR.ll
@@ -127,7 +127,7 @@ entry:
define signext i32 @test4(ptr nocapture readonly %buffer1, ptr nocapture readonly %buffer2) {
entry:
- %call = tail call signext i32 @memcmp(ptr %buffer1, ptr %buffer2, i64 165)
+ %call = tail call signext i32 @memcmp(ptr %buffer1, ptr %buffer2, i64 129)
ret i32 %call
}
>From f132007e149dabc4154f3b6d8aa18d9b6fb6e04b Mon Sep 17 00:00:00 2001
From: zhijian <zhijian at ca.ibm.com>
Date: Tue, 14 Oct 2025 14:22:44 -0400
Subject: [PATCH 08/14] git clang format
---
llvm/lib/Target/PowerPC/PPCISelLowering.cpp | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index 422d8a4f835bd..a802aec57fdcc 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -15697,7 +15697,7 @@ SDValue PPCTargetLowering::combineSetCC(SDNode *N,
// llvm.ppc.altivec.vcmpequb.p TargetConstant:i32<10505>,
// Constant:i32<2>, t3, t5
// t7: i1 = setcc t6, Constant:i32<0>, seteq:ch
-
+
SDValue LHSVec = DAG.getLoad(MVT::v16i8, DL, LA->getChain(),
LA->getBasePtr(), LA->getMemOperand());
SDValue RHSVec = DAG.getLoad(MVT::v16i8, DL, LB->getChain(),
>From b9849872fc3743c1e9a77ada5d6dfda99661fbb5 Mon Sep 17 00:00:00 2001
From: zhijian <zhijian at ca.ibm.com>
Date: Fri, 31 Oct 2025 18:52:11 +0000
Subject: [PATCH 09/14] address comment
---
llvm/lib/Target/PowerPC/PPCISelLowering.cpp | 102 ++++++++++----------
1 file changed, 52 insertions(+), 50 deletions(-)
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index a802aec57fdcc..e4453f7462e15 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -15626,6 +15626,56 @@ static bool canConvertToVcmpequb(SDValue &LHS, SDValue RHS) {
return true;
}
+SDValue convertTwoLoadsAndCmpToVCMPEQUB(SelectionDAG &DAG, SDNode *N,
+ const SDLoc &DL, SDValue &LHS,
+ SDValue RHS) {
+ ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
+  assert((CC == ISD::SETNE || CC == ISD::SETEQ) &&
+         "CC must be ISD::SETNE or ISD::SETEQ");
+ auto *LA = dyn_cast<LoadSDNode>(LHS);
+ auto *LB = dyn_cast<LoadSDNode>(RHS);
+
+ // Following code transforms the DAG
+ // t0: ch,glue = EntryToken
+ // t2: i64,ch = CopyFromReg t0, Register:i64 %0
+ // t3: i128,ch = load<(load (s128) from %ir.a, align 1)> t0, t2,
+ // undef:i64
+ // t4: i64,ch = CopyFromReg t0, Register:i64 %1
+ // t5: i128,ch =
+ // load<(load (s128) from %ir.b, align 1)> t0, t4, undef:i64 t6: i1 =
+ // setcc t3, t5, setne:ch
+ //
+ // ---->
+ //
+ // t0: ch,glue = EntryToken
+ // t2: i64,ch = CopyFromReg t0, Register:i64 %0
+ // t3: v16i8,ch = load<(load (s128) from %ir.a, align 1)> t0, t2,
+ // undef:i64
+ // t4: i64,ch = CopyFromReg t0, Register:i64 %1
+ // t5: v16i8,ch =
+ // load<(load (s128) from %ir.b, align 1)> t0, t4, undef:i64
+ // t6: i32 =
+ // llvm.ppc.altivec.vcmpequb.p TargetConstant:i32<10505>,
+ // Constant:i32<2>, t3, t5
+ // t7: i1 = setcc t6, Constant:i32<0>, seteq:ch
+
+ SDValue LHSVec = DAG.getLoad(MVT::v16i8, DL, LA->getChain(), LA->getBasePtr(),
+ LA->getMemOperand());
+ SDValue RHSVec = DAG.getLoad(MVT::v16i8, DL, LB->getChain(), LB->getBasePtr(),
+ LB->getMemOperand());
+
+ SDValue IntrID =
+ DAG.getConstant(Intrinsic::ppc_altivec_vcmpequb_p, DL, MVT::i32);
+ SDValue CRSel = DAG.getConstant(2, DL, MVT::i32); // which CR6 predicate field
+ SDValue PredResult = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, MVT::i32,
+ IntrID, CRSel, LHSVec, RHSVec);
+ // ppc_altivec_vcmpequb_p returns 1 when two vectors are the same,
+ // so we need to invert the CC opcode.
+ return DAG.getSetCC(DL, N->getValueType(0), PredResult,
+ DAG.getConstant(0, DL, MVT::i32),
+ CC == ISD::SETNE ? ISD::SETEQ : ISD::SETNE);
+}
+
SDValue PPCTargetLowering::combineSetCC(SDNode *N,
DAGCombinerInfo &DCI) const {
assert(N->getOpcode() == ISD::SETCC &&
@@ -15666,56 +15716,8 @@ SDValue PPCTargetLowering::combineSetCC(SDNode *N,
// This transformation replaces memcmp(a, b, 16) with two vector loads
// and one vector compare instruction.
- if (Subtarget.hasAltivec() && canConvertToVcmpequb(LHS, RHS)) {
- SDLoc DL(N);
- SelectionDAG &DAG = DCI.DAG;
- auto *LA = dyn_cast<LoadSDNode>(LHS);
- auto *LB = dyn_cast<LoadSDNode>(RHS);
-
- assert((LA && LB) && "LA and LB must be LoadSDNode");
-
- // Following code transforms the DAG
- // t0: ch,glue = EntryToken
- // t2: i64,ch = CopyFromReg t0, Register:i64 %0
- // t3: i128,ch = load<(load (s128) from %ir.a, align 1)> t0, t2,
- // undef:i64
- // t4: i64,ch = CopyFromReg t0, Register:i64 %1
- // t5: i128,ch =
- // load<(load (s128) from %ir.b, align 1)> t0, t4, undef:i64 t6: i1 =
- // setcc t3, t5, setne:ch
- //
- // ---->
- //
- // t0: ch,glue = EntryToken
- // t2: i64,ch = CopyFromReg t0, Register:i64 %0
- // t3: v16i8,ch = load<(load (s128) from %ir.a, align 1)> t0, t2,
- // undef:i64
- // t4: i64,ch = CopyFromReg t0, Register:i64 %1
- // t5: v16i8,ch =
- // load<(load (s128) from %ir.b, align 1)> t0, t4, undef:i64
- // t6: i32 =
- // llvm.ppc.altivec.vcmpequb.p TargetConstant:i32<10505>,
- // Constant:i32<2>, t3, t5
- // t7: i1 = setcc t6, Constant:i32<0>, seteq:ch
-
- SDValue LHSVec = DAG.getLoad(MVT::v16i8, DL, LA->getChain(),
- LA->getBasePtr(), LA->getMemOperand());
- SDValue RHSVec = DAG.getLoad(MVT::v16i8, DL, LB->getChain(),
- LB->getBasePtr(), LB->getMemOperand());
-
- SDValue IntrID =
- DAG.getConstant(Intrinsic::ppc_altivec_vcmpequb_p, DL, MVT::i32);
- SDValue CRSel =
- DAG.getConstant(2, DL, MVT::i32); // which CR6 predicate field
- SDValue PredResult = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, MVT::i32,
- IntrID, CRSel, LHSVec, RHSVec);
-
- // ppc_altivec_vcmpequb_p returns 1 when two vectors are the same,
- // so we need to invert the CC opcode.
- return DAG.getSetCC(DL, N->getValueType(0), PredResult,
- DAG.getConstant(0, DL, MVT::i32),
- CC == ISD::SETNE ? ISD::SETEQ : ISD::SETNE);
- }
+ if (Subtarget.hasAltivec() && canConvertToVcmpequb(LHS, RHS))
+ return convertTwoLoadsAndCmpToVCMPEQUB(DCI.DAG, N, SDLoc(N), LHS, RHS);
}
return DAGCombineTruncBoolExt(N, DCI);
>From 6e5ebb11371ee4386d893de139eabd678b367e35 Mon Sep 17 00:00:00 2001
From: zhijian <zhijian at ca.ibm.com>
Date: Fri, 31 Oct 2025 19:07:12 +0000
Subject: [PATCH 10/14] reduce the paramters
---
llvm/lib/Target/PowerPC/PPCISelLowering.cpp | 12 +++++++-----
1 file changed, 7 insertions(+), 5 deletions(-)
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index e4453f7462e15..26593f4fc3342 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -15627,13 +15627,15 @@ static bool canConvertToVcmpequb(SDValue &LHS, SDValue RHS) {
}
SDValue convertTwoLoadsAndCmpToVCMPEQUB(SelectionDAG &DAG, SDNode *N,
- const SDLoc &DL, SDValue &LHS,
- SDValue RHS) {
+ const SDLoc &DL) {
+
+ assert(N->getOpcode() == ISD::SETCC && "Should be called with a SETCC node");
+
ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
   assert((CC == ISD::SETNE || CC == ISD::SETEQ) &&
          "CC must be ISD::SETNE or ISD::SETEQ");
- auto *LA = dyn_cast<LoadSDNode>(LHS);
- auto *LB = dyn_cast<LoadSDNode>(RHS);
+ auto *LA = dyn_cast<LoadSDNode>(N->getOperand(0));
+ auto *LB = dyn_cast<LoadSDNode>(N->getOperand(1));
// Following code transforms the DAG
// t0: ch,glue = EntryToken
@@ -15717,7 +15719,7 @@ SDValue PPCTargetLowering::combineSetCC(SDNode *N,
// and one vector compare instruction.
if (Subtarget.hasAltivec() && canConvertToVcmpequb(LHS, RHS))
- return convertTwoLoadsAndCmpToVCMPEQUB(DCI.DAG, N, SDLoc(N), LHS, RHS);
+ return convertTwoLoadsAndCmpToVCMPEQUB(DCI.DAG, N, SDLoc(N));
}
return DAGCombineTruncBoolExt(N, DCI);
>From 3567f745a1132db327def55565781dda6b964e73 Mon Sep 17 00:00:00 2001
From: zhijian <zhijian at ca.ibm.com>
Date: Tue, 4 Nov 2025 19:00:53 +0000
Subject: [PATCH 11/14] address comment
---
llvm/lib/Target/PowerPC/PPCTargetTransformInfo.cpp | 6 +++++-
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.cpp b/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.cpp
index 20514d4890c45..b8b0b4f08a712 100644
--- a/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.cpp
+++ b/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.cpp
@@ -439,7 +439,11 @@ bool PPCTTIImpl::enableAggressiveInterleaving(bool LoopHasReductions) const {
PPCTTIImpl::TTI::MemCmpExpansionOptions
PPCTTIImpl::enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const {
TTI::MemCmpExpansionOptions Options;
- Options.LoadSizes = {16, 8, 4, 2, 1};
+ if (getST()->hasAltivec())
+ Options.LoadSizes = {16, 8, 4, 2, 1};
+ else
+ Options.LoadSizes = {8, 4, 2, 1};
+
Options.MaxNumLoads = TLI->getMaxExpandSizeMemcmp(OptSize);
return Options;
}
>From 1da0a5472f0614136bc7b9e44bc612a308279500 Mon Sep 17 00:00:00 2001
From: zhijian <zhijian at ca.ibm.com>
Date: Wed, 5 Nov 2025 15:37:50 +0000
Subject: [PATCH 12/14] add to support i128 constant and load i128 compare
---
llvm/lib/Target/PowerPC/PPCISelLowering.cpp | 85 +++++++++++--------
.../memCmpUsedInZeroEqualityComparison.ll | 19 ++---
2 files changed, 56 insertions(+), 48 deletions(-)
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index 26593f4fc3342..4bea9f8e68864 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -15587,43 +15587,51 @@ SDValue PPCTargetLowering::DAGCombineExtBoolTrunc(SDNode *N,
ShiftCst);
}
+// Check whether an i128 load can be converted to a v16i8 load for vcmpequb.
static bool canConvertToVcmpequb(SDValue &LHS, SDValue RHS) {
- if (LHS.getOpcode() != ISD::LOAD || RHS.getOpcode() != ISD::LOAD ||
- !LHS.hasOneUse() || !RHS.hasOneUse() || LHS.getValueType() != MVT::i128 ||
- RHS.getValueType() != MVT::i128)
- return false;
+ auto isValidForConvert = [](SDValue &Op) {
+ if (Op.getOpcode() == ISD::Constant)
+ return true;
- auto *LA = dyn_cast<LoadSDNode>(LHS);
- auto *LB = dyn_cast<LoadSDNode>(RHS);
- if (!LA || !LB)
- return false;
+ if (Op.getOpcode() != ISD::LOAD)
+ return false;
+ if (!Op.hasOneUse())
+ return false;
+ if (Op.getValueType() != MVT::i128)
+ return false;
- // If either memory operation (LA or LB) is volatile, do not perform any
- // optimization or transformation. Volatile operations must be preserved
- // as written to ensure correct program behavior, so we return an empty
- // SDValue to indicate no action.
- if (LA->isVolatile() || LB->isVolatile())
- return false;
+ auto *LoadOp = dyn_cast<LoadSDNode>(Op);
+ if (!LoadOp)
+ return false;
- // Only combine loads if both use the unindexed addressing mode.
- // PowerPC AltiVec/VMX does not support vector loads or stores with
- // pre/post-increment addressing. Indexed modes may imply implicit
- // pointer updates, which are not compatible with AltiVec vector
- // instructions.
- if (LA->getAddressingMode() != ISD::UNINDEXED ||
- LB->getAddressingMode() != ISD::UNINDEXED)
- return false;
+ // If memory operation is volatile, do not perform any
+ // optimization or transformation. Volatile operations must be preserved
+  // as written to ensure correct program behavior, so we return false
+  // to indicate no action.
- // Only combine loads if both are non-extending loads
- // (ISD::NON_EXTLOAD). Extending loads (such as ISD::ZEXTLOAD or
- // ISD::SEXTLOAD) perform zero or sign extension, which may change the
- // loaded value's semantics and are not compatible with vector loads.
- if (LA->getExtensionType() != ISD::NON_EXTLOAD ||
- LB->getExtensionType() != ISD::NON_EXTLOAD)
- return false;
+ if (LoadOp->isVolatile())
+ return false;
- return true;
+ // Only combine loads if both use the unindexed addressing mode.
+ // PowerPC AltiVec/VMX does not support vector loads or stores with
+ // pre/post-increment addressing. Indexed modes may imply implicit
+ // pointer updates, which are not compatible with AltiVec vector
+ // instructions.
+ if (LoadOp->getAddressingMode() != ISD::UNINDEXED)
+ return false;
+
+ // Only combine loads if both are non-extending loads
+ // (ISD::NON_EXTLOAD). Extending loads (such as ISD::ZEXTLOAD or
+ // ISD::SEXTLOAD) perform zero or sign extension, which may change the
+ // loaded value's semantics and are not compatible with vector loads.
+ if (LoadOp->getExtensionType() != ISD::NON_EXTLOAD)
+ return false;
+
+ return true;
+ };
+
+ return (isValidForConvert(LHS) && isValidForConvert(RHS));
}
SDValue convertTwoLoadsAndCmpToVCMPEQUB(SelectionDAG &DAG, SDNode *N,
@@ -15634,8 +15642,15 @@ SDValue convertTwoLoadsAndCmpToVCMPEQUB(SelectionDAG &DAG, SDNode *N,
ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
   assert((CC == ISD::SETNE || CC == ISD::SETEQ) &&
          "CC must be ISD::SETNE or ISD::SETEQ");
- auto *LA = dyn_cast<LoadSDNode>(N->getOperand(0));
- auto *LB = dyn_cast<LoadSDNode>(N->getOperand(1));
+
+ auto getV16i8Load = [&](const SDValue &Op) {
+ if (Op.getOpcode() == ISD::Constant)
+ return DAG.getBitcast(MVT::v16i8, Op);
+ assert(Op.getOpcode() == ISD::LOAD && "Must be LoadSDNode here.");
+ auto *LoadNode = dyn_cast<LoadSDNode>(Op);
+ return DAG.getLoad(MVT::v16i8, DL, LoadNode->getChain(),
+ LoadNode->getBasePtr(), LoadNode->getMemOperand());
+ };
// Following code transforms the DAG
// t0: ch,glue = EntryToken
@@ -15661,10 +15676,8 @@ SDValue convertTwoLoadsAndCmpToVCMPEQUB(SelectionDAG &DAG, SDNode *N,
// Constant:i32<2>, t3, t5
// t7: i1 = setcc t6, Constant:i32<0>, seteq:ch
- SDValue LHSVec = DAG.getLoad(MVT::v16i8, DL, LA->getChain(), LA->getBasePtr(),
- LA->getMemOperand());
- SDValue RHSVec = DAG.getLoad(MVT::v16i8, DL, LB->getChain(), LB->getBasePtr(),
- LB->getMemOperand());
+ SDValue LHSVec = getV16i8Load(N->getOperand(0));
+ SDValue RHSVec = getV16i8Load(N->getOperand(1));
SDValue IntrID =
DAG.getConstant(Intrinsic::ppc_altivec_vcmpequb_p, DL, MVT::i32);
diff --git a/llvm/test/CodeGen/PowerPC/memCmpUsedInZeroEqualityComparison.ll b/llvm/test/CodeGen/PowerPC/memCmpUsedInZeroEqualityComparison.ll
index 7c4cf7265ff6a..bf86695818689 100644
--- a/llvm/test/CodeGen/PowerPC/memCmpUsedInZeroEqualityComparison.ll
+++ b/llvm/test/CodeGen/PowerPC/memCmpUsedInZeroEqualityComparison.ll
@@ -116,18 +116,13 @@ define signext i32 @equalityFoldTwoConstants() {
define signext i32 @equalityFoldOneConstant(ptr %X) {
; CHECK-LABEL: equalityFoldOneConstant:
; CHECK: # %bb.0:
-; CHECK-NEXT: li 5, 1
-; CHECK-NEXT: ld 4, 8(3)
-; CHECK-NEXT: ld 3, 0(3)
-; CHECK-NEXT: rldic 5, 5, 32, 31
-; CHECK-NEXT: xor 3, 3, 5
-; CHECK-NEXT: lis 5, -32768
-; CHECK-NEXT: ori 5, 5, 1
-; CHECK-NEXT: rldic 5, 5, 1, 30
-; CHECK-NEXT: xor 4, 4, 5
-; CHECK-NEXT: or 3, 3, 4
-; CHECK-NEXT: cntlzd 3, 3
-; CHECK-NEXT: rldicl 3, 3, 58, 63
+; CHECK-NEXT: lxvd2x 34, 0, 3
+; CHECK-NEXT: addis 3, 2, .LCPI6_0 at toc@ha
+; CHECK-NEXT: addi 3, 3, .LCPI6_0 at toc@l
+; CHECK-NEXT: lxvd2x 35, 0, 3
+; CHECK-NEXT: vcmpequb. 2, 2, 3
+; CHECK-NEXT: mfocrf 3, 2
+; CHECK-NEXT: rlwinm 3, 3, 25, 31, 31
; CHECK-NEXT: blr
%call = tail call signext i32 @memcmp(ptr @zeroEqualityTest04.buffer1, ptr %X, i64 16)
%not.tobool = icmp eq i32 %call, 0
>From 1ed8dfe3b0ea04284e88e495b8e5f7a1e4d14fc2 Mon Sep 17 00:00:00 2001
From: zhijian <zhijian at ca.ibm.com>
Date: Wed, 5 Nov 2025 16:03:17 +0000
Subject: [PATCH 13/14] add more comment
---
llvm/lib/Target/PowerPC/PPCISelLowering.cpp | 14 ++++++++++++++
1 file changed, 14 insertions(+)
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index 4bea9f8e68864..1130232b50c65 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -15596,8 +15596,10 @@ static bool canConvertToVcmpequb(SDValue &LHS, SDValue RHS) {
if (Op.getOpcode() != ISD::LOAD)
return false;
+
if (!Op.hasOneUse())
return false;
+
if (Op.getValueType() != MVT::i128)
return false;
@@ -15646,7 +15648,9 @@ SDValue convertTwoLoadsAndCmpToVCMPEQUB(SelectionDAG &DAG, SDNode *N,
auto getV16i8Load = [&](const SDValue &Op) {
if (Op.getOpcode() == ISD::Constant)
return DAG.getBitcast(MVT::v16i8, Op);
+
assert(Op.getOpcode() == ISD::LOAD && "Must be LoadSDNode here.");
+
auto *LoadNode = dyn_cast<LoadSDNode>(Op);
return DAG.getLoad(MVT::v16i8, DL, LoadNode->getChain(),
LoadNode->getBasePtr(), LoadNode->getMemOperand());
@@ -15676,6 +15680,16 @@ SDValue convertTwoLoadsAndCmpToVCMPEQUB(SelectionDAG &DAG, SDNode *N,
// Constant:i32<2>, t3, t5
// t7: i1 = setcc t6, Constant:i32<0>, seteq:ch
+ // Or transforms the DAG
+ // t5: i128,ch = load<(load (s128) from %ir.X, align 1)> t0, t2, undef:i64
+ // t8: i1 = setcc Constant:i128<237684487579686500932345921536>, t5, setne:ch
+ //
+ // --->
+ //
+ // t5: v16i8,ch = load<(load (s128) from %ir.X, align 1)> t0, t2, undef:i64
+ // t6: v16i8 = bitcast Constant:i128<237684487579686500932345921536>
+ // t7: i32 = llvm.ppc.altivec.vcmpequb.p Constant:i32<10962>, Constant:i32<2>, t5, t26
+
SDValue LHSVec = getV16i8Load(N->getOperand(0));
SDValue RHSVec = getV16i8Load(N->getOperand(1));
>From f561b34d4b07bd1ba93ca4b2b5aee92585f1e351 Mon Sep 17 00:00:00 2001
From: zhijian <zhijian at ca.ibm.com>
Date: Wed, 5 Nov 2025 21:32:23 +0000
Subject: [PATCH 14/14] clang format
---
llvm/lib/Target/PowerPC/PPCISelLowering.cpp | 14 ++++++++------
1 file changed, 8 insertions(+), 6 deletions(-)
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index 1130232b50c65..d1fe25d85faae 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -15681,14 +15681,16 @@ SDValue convertTwoLoadsAndCmpToVCMPEQUB(SelectionDAG &DAG, SDNode *N,
// t7: i1 = setcc t6, Constant:i32<0>, seteq:ch
// Or transforms the DAG
- // t5: i128,ch = load<(load (s128) from %ir.X, align 1)> t0, t2, undef:i64
- // t8: i1 = setcc Constant:i128<237684487579686500932345921536>, t5, setne:ch
+ // t5: i128,ch = load<(load (s128) from %ir.X, align 1)> t0, t2, undef:i64
+ // t8: i1 =
+ // setcc Constant:i128<237684487579686500932345921536>, t5, setne:ch
//
- // --->
+ // --->
//
- // t5: v16i8,ch = load<(load (s128) from %ir.X, align 1)> t0, t2, undef:i64
- // t6: v16i8 = bitcast Constant:i128<237684487579686500932345921536>
- // t7: i32 = llvm.ppc.altivec.vcmpequb.p Constant:i32<10962>, Constant:i32<2>, t5, t26
+ // t5: v16i8,ch = load<(load (s128) from %ir.X, align 1)> t0, t2, undef:i64
+ // t6: v16i8 = bitcast Constant:i128<237684487579686500932345921536>
+ // t7: i32 =
+  //  llvm.ppc.altivec.vcmpequb.p Constant:i32<10962>, Constant:i32<2>, t5, t6
SDValue LHSVec = getV16i8Load(N->getOperand(0));
SDValue RHSVec = getV16i8Load(N->getOperand(1));
More information about the llvm-commits
mailing list