[llvm] [PowerPC] Exploit xxeval instruction for ternary patterns - part 1 (PR #141733)
Tony Varghese via llvm-commits
llvm-commits at lists.llvm.org
Mon Jul 21 06:54:30 PDT 2025
https://github.com/tonykuttai updated https://github.com/llvm/llvm-project/pull/141733
>From f6658cb22f37d3ab5a7f092b0262fe8347b8cd82 Mon Sep 17 00:00:00 2001
From: Tony Varghese <tony.varghese at ibm.com>
Date: Thu, 29 May 2025 16:33:21 +0000
Subject: [PATCH 1/3] [PowerPC][XXEVAL] Exploit the xxeval instruction for the
 ternary(A, X, and(B,C)), ternary(A, X, B), ternary(A, X, C), and
 ternary(A, X, xor(B,C)) forms.
---
llvm/lib/Target/PowerPC/PPCInstrP10.td | 310 ++++++++++++++++--
.../CodeGen/PowerPC/xxeval-vselect-x-and.ll | 82 ++---
.../CodeGen/PowerPC/xxeval-vselect-x-b.ll | 50 +--
.../CodeGen/PowerPC/xxeval-vselect-x-c.ll | 50 +--
.../CodeGen/PowerPC/xxeval-vselect-x-xor.ll | 74 ++---
5 files changed, 354 insertions(+), 212 deletions(-)
diff --git a/llvm/lib/Target/PowerPC/PPCInstrP10.td b/llvm/lib/Target/PowerPC/PPCInstrP10.td
index d295f35fb1dd0..aa31478589cea 100644
--- a/llvm/lib/Target/PowerPC/PPCInstrP10.td
+++ b/llvm/lib/Target/PowerPC/PPCInstrP10.td
@@ -2159,8 +2159,254 @@ let AddedComplexity = 400, Predicates = [IsISA3_1, HasVSX] in {
(COPY_TO_REGCLASS $VRB, VSRC), 2)))>;
}
-class XXEvalPattern <dag pattern, bits<8> imm> :
- Pat<(v4i32 pattern), (XXEVAL $vA, $vB, $vC, imm)> {}
+ // Defines a pattern for the XXEVAL instruction with a specific value type,
+ // pattern, and immediate.
+class XXEvalPattern <ValueType vt, dag pattern, bits<8> imm> :
+ Pat<(vt pattern), (XXEVAL $vA, $vB, $vC, imm)> {}
+
+ // Helper class to generate binary operation DAGs for various vector types.
+ // For v4i32, emits (op B C).
+ // For other types, bitcasts operands to v4i32, applies the op, then bitcasts back.
+class BinaryOpDag<ValueType vt, SDPatternOperator op > {
+ // The DAG for the binary operation.
+ dag OpDag = !if( !eq(vt, v4i32),
+ (op vt:$vB, vt:$vC),
+ (vt (bitconvert (op (v4i32 (bitconvert vt:$vB)), (v4i32 (bitconvert vt:$vC)))))
+ );
+ // The DAG for the binary operation with a NOT applied to the result.
+ dag VnotOpDag = !if( !eq(vt, v4i32),
+ (vnot (op vt:$vB, vt:$vC)),
+ (vt (bitconvert (vnot (op (v4i32 (bitconvert vt:$vB)), (v4i32 (bitconvert vt:$vC))))))
+ );
+}
+
+ // Helper class to generate unary NOT patterns for vector types.
+ // For v4i32, emits (vnot B) or (vnot C).
+ // For other types, bitcasts operand to v4i32, applies vnot, then bitcasts back.
+class XXEvalUnaryNotPattern<ValueType vt> {
+ dag vnotB = !if( !eq(vt, v4i32),
+ (vnot vt:$vB),
+ (vt (bitconvert (vnot (v4i32 (bitconvert vt:$vB)))))
+ );
+ dag vnotC = !if( !eq(vt, v4i32),
+ (vnot vt:$vC),
+ (vt (bitconvert (vnot (v4i32 (bitconvert vt:$vC)))))
+ );
+}
+
+ // Wrapper class for binary patterns with optional NOT on the result.
+ // If 'not' is 0, emits the binary op; if 1, emits vnot of the binary op.
+class XXEvalBinaryPattern<ValueType vt, SDPatternOperator op, bit not = 0> {
+ dag opPat = !if( !eq(not, 0),
+ BinaryOpDag<vt, op>.OpDag,
+ BinaryOpDag<vt, op>.VnotOpDag
+ );
+}
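
As an illustration only (not an additional line in the patch), substituting the !if by hand shows what the helper yields for a type other than v4i32:

  // For example, XXEvalBinaryPattern<v8i16, and>.opPat resolves to
  //   (v8i16 (bitconvert (and (v4i32 (bitconvert v8i16:$vB)),
  //                           (v4i32 (bitconvert v8i16:$vC)))))
  // while XXEvalBinaryPattern<v8i16, and, 1>.opPat wraps the same op in vnot
  // before the outer bitconvert.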
+
+multiclass XXEvalVSelectWithXAnd<ValueType vt, bits<8> baseImm> {
+ // Multiclass for ternary patterns of the form vselect(A, X, and(B, C)).
+ // vselect(A, xor(B,C), and(B,C)) => imm = baseImm = 22
+ def : XXEvalPattern<vt,
+ (vselect vt:$vA, XXEvalBinaryPattern<vt, xor>.opPat, XXEvalBinaryPattern<vt, and>.opPat),
+ baseImm>;
+ // vselect(A, nor(B,C), and(B,C)) => imm = baseImm + 2 = 24
+ def : XXEvalPattern<vt,
+ (vselect vt:$vA, XXEvalBinaryPattern<vt, or, 1>.opPat, XXEvalBinaryPattern<vt, and>.opPat),
+ !add(baseImm, 2)>;
+ // vselect(A, eqv(B,C), and(B,C)) => imm = baseImm + 3 = 25
+ def : XXEvalPattern<vt,
+ (vselect vt:$vA, XXEvalBinaryPattern<vt, xor, 1>.opPat, XXEvalBinaryPattern<vt, and>.opPat),
+ !add(baseImm, 3)>;
+ // vselect(A, not(C), and(B,C)) => imm = baseImm + 4 = 26
+ def : XXEvalPattern<vt,
+ (vselect vt:$vA, XXEvalUnaryNotPattern<vt>.vnotC, XXEvalBinaryPattern<vt, and>.opPat),
+ !add(baseImm, 4)>;
+ // vselect(A, not(B), and(B,C)) => imm = baseImm + 6 = 28
+ def : XXEvalPattern<vt,
+ (vselect vt:$vA, XXEvalUnaryNotPattern<vt>.vnotB, XXEvalBinaryPattern<vt, and>.opPat),
+ !add(baseImm, 6)>;
+}
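
To make the mapping concrete, this is roughly the anonymous pattern the first def above produces for vt = v4i32 (shown only for illustration; it is not an extra def in the patch):

  // def : Pat<(v4i32 (vselect v4i32:$vA,
  //                           (xor v4i32:$vB, v4i32:$vC),
  //                           (and v4i32:$vB, v4i32:$vC))),
  //           (XXEVAL $vA, $vB, $vC, 22)>;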
+
+multiclass XXEvalVSelectWithXB<ValueType vt, bits<8> baseImm>{
+ // Multiclass for ternary patterns of the form vselect(A, X, B).
+ // vselect(A, and(B,C), B) => imm = baseImm = 49
+ def : XXEvalPattern<vt,
+ (vselect vt:$vA, XXEvalBinaryPattern<vt, and>.opPat, vt:$vB),
+ baseImm>;
+ // vselect(A, nor(B,C), B) => imm = baseImm + 7 = 56
+ def : XXEvalPattern<vt,
+ (vselect vt:$vA, XXEvalBinaryPattern<vt, or, 1>.opPat, vt:$vB),
+ !add(baseImm, 7)>;
+ // vselect(A, eqv(B,C), B) => imm = baseImm + 8 = 57
+ def : XXEvalPattern<vt,
+ (vselect vt:$vA, XXEvalBinaryPattern<vt, xor, 1>.opPat, vt:$vB),
+ !add(baseImm, 8)>;
+ // vselect(A, nand(B,C), B) => imm = baseImm + 13 = 62
+ def : XXEvalPattern<vt,
+ (vselect vt:$vA, XXEvalBinaryPattern<vt, and, 1>.opPat, vt:$vB),
+ !add(baseImm, 13)>;
+}
+
+multiclass XXEvalVSelectWithXC<ValueType vt, bits<8> baseImm>{
+ // Multiclass for ternary patterns of the form vselect(A, X, C).
+ // vselect(A, and(B,C), C) => imm = baseImm = 81
+ def : XXEvalPattern<vt,
+ (vselect vt:$vA, XXEvalBinaryPattern<vt, and>.opPat, vt:$vC),
+ baseImm>;
+ // vselect(A, nor(B,C), C) => imm = baseImm + 7 = 88
+ def : XXEvalPattern<vt,
+ (vselect vt:$vA, XXEvalBinaryPattern<vt, or, 1>.opPat, vt:$vC),
+ !add(baseImm, 7)>;
+ // vselect(A, eqv(B,C), C) => imm = baseImm + 8 = 89
+ def : XXEvalPattern<vt,
+ (vselect vt:$vA, XXEvalBinaryPattern<vt, xor, 1>.opPat, vt:$vC),
+ !add(baseImm, 8)>;
+ // vselect(A, nand(B,C), C) => imm = baseImm + 13 = 94
+ def : XXEvalPattern<vt,
+ (vselect vt:$vA, XXEvalBinaryPattern<vt, and, 1>.opPat, vt:$vC),
+ !add(baseImm, 13)>;
+}
+
+multiclass XXEvalVSelectWithXXor<ValueType vt, bits<8> baseImm>{
+ // Multiclass for ternary patterns of the form vselect(A, X, xor(B,C)).
+ // vselect(A, and(B,C), xor(B,C)) => imm = baseImm = 97
+ def : XXEvalPattern<vt,
+ (vselect vt:$vA, XXEvalBinaryPattern<vt, and>.opPat, XXEvalBinaryPattern<vt, xor>.opPat),
+ baseImm>;
+ // vselect(A, B, xor(B,C)) => imm = baseImm + 2 = 99
+ def : XXEvalPattern<vt,
+ (vselect vt:$vA, vt:$vB, XXEvalBinaryPattern<vt, xor>.opPat),
+ !add(baseImm, 2)>;
+ // vselect(A, C, xor(B,C)) => imm = baseImm + 4 = 101
+ def : XXEvalPattern<vt,
+ (vselect vt:$vA, vt:$vC, XXEvalBinaryPattern<vt, xor>.opPat),
+ !add(baseImm, 4)>;
+ // vselect(A, or(B,C), xor(B,C)) => imm = baseImm + 6 = 103
+ def : XXEvalPattern<vt,
+ (vselect vt:$vA, XXEvalBinaryPattern<vt, or>.opPat, XXEvalBinaryPattern<vt, xor>.opPat),
+ !add(baseImm, 6)>;
+ // vselect(A, nor(B,C), xor(B,C)) => imm = baseImm + 7 = 104
+ def : XXEvalPattern<vt,
+ (vselect vt:$vA, XXEvalBinaryPattern<vt, or, 1>.opPat, XXEvalBinaryPattern<vt, xor>.opPat),
+ !add(baseImm, 7)>;
+}
+
+ // Pattern class that uses COPY_TO_REGCLASS to move the operands into VSRC and
+ // the XXEVAL result back into VRRC, for types that XXEVAL does not match directly.
+class XXEvalBitcastPattern<ValueType vt, dag pattern, bits<8> imm> :
+ Pat<(vt pattern),
+ (COPY_TO_REGCLASS
+ (XXEVAL
+ (COPY_TO_REGCLASS vt:$vA, VSRC),
+ (COPY_TO_REGCLASS vt:$vB, VSRC),
+ (COPY_TO_REGCLASS vt:$vC, VSRC),
+ imm),
+ VRRC)>;
+
+multiclass XXEvalVSelectWithXAndCast<ValueType vt, bits<8> baseImm> {
+ // Multiclass for ternary patterns of the form vselect(A, X, and(B, C)) for types
+ // not directly supported by XXEVAL (v8i16, v16i8), using COPY_TO_REGCLASS.
+ // vselect(A, xor(B,C), and(B,C)) => imm = baseImm = 22
+ def : XXEvalBitcastPattern<vt,
+ (vselect vt:$vA, XXEvalBinaryPattern<vt, xor>.opPat, XXEvalBinaryPattern<vt, and>.opPat),
+ baseImm>;
+ // vselect(A, nor(B,C), and(B,C)) => imm = baseImm + 2 = 24
+ def : XXEvalBitcastPattern<vt,
+ (vselect vt:$vA, XXEvalBinaryPattern<vt, or, 1>.opPat, XXEvalBinaryPattern<vt, and>.opPat),
+ !add(baseImm, 2)>;
+ // vselect(A, eqv(B,C), and(B,C)) => imm = baseImm + 3 = 25
+ def : XXEvalBitcastPattern<vt,
+ (vselect vt:$vA, XXEvalBinaryPattern<vt, xor, 1>.opPat, XXEvalBinaryPattern<vt, and>.opPat),
+ !add(baseImm, 3)>;
+ // vselect(A, not(C), and(B,C)) => imm = baseImm + 4 = 26
+ def : XXEvalBitcastPattern<vt,
+ (vselect vt:$vA, XXEvalUnaryNotPattern<vt>.vnotC, XXEvalBinaryPattern<vt, and>.opPat),
+ !add(baseImm, 4)>;
+ // vselect(A, not(B), and(B,C)) => imm = baseImm + 6 = 28
+ def : XXEvalBitcastPattern<vt,
+ (vselect vt:$vA, XXEvalUnaryNotPattern<vt>.vnotB, XXEvalBinaryPattern<vt, and>.opPat),
+ !add(baseImm, 6)>;
+}
+
+multiclass XXEvalVSelectWithXBCast<ValueType vt, bits<8> baseImm>{
+ // vselect(A, and(B,C), B) => imm = baseImm = 49
+ def : XXEvalBitcastPattern<vt,
+ (vselect vt:$vA, XXEvalBinaryPattern<vt, and>.opPat, vt:$vB),
+ baseImm>;
+ // vselect(A, nor(B,C), B) => imm = baseImm + 7 = 56
+ def : XXEvalBitcastPattern<vt,
+ (vselect vt:$vA, XXEvalBinaryPattern<vt, or, 1>.opPat, vt:$vB),
+ !add(baseImm, 7)>;
+ // vselect(A, eqv(B,C), B) => imm = baseImm + 8 = 57
+ def : XXEvalBitcastPattern<vt,
+ (vselect vt:$vA, XXEvalBinaryPattern<vt, xor, 1>.opPat, vt:$vB),
+ !add(baseImm, 8)>;
+ // vselect(A, nand(B,C), B) => imm = baseImm + 13 = 62
+ def : XXEvalBitcastPattern<vt,
+ (vselect vt:$vA, XXEvalBinaryPattern<vt, and, 1>.opPat, vt:$vB),
+ !add(baseImm, 13)>;
+}
+
+multiclass XXEvalVSelectWithXCCast<ValueType vt, bits<8> baseImm>{
+ // vselect(A, and(B,C), C) => imm = baseImm = 81
+ def : XXEvalBitcastPattern<vt,
+ (vselect vt:$vA, XXEvalBinaryPattern<vt, and>.opPat, vt:$vC),
+ baseImm>;
+ // vselect(A, nor(B,C), C) => imm = baseImm + 7 = 88
+ def : XXEvalBitcastPattern<vt,
+ (vselect vt:$vA, XXEvalBinaryPattern<vt, or, 1>.opPat, vt:$vC),
+ !add(baseImm, 7)>;
+ // vselect(A, eqv(B,C), C) => imm = baseImm + 8 = 89
+ def : XXEvalBitcastPattern<vt,
+ (vselect vt:$vA, XXEvalBinaryPattern<vt, xor, 1>.opPat, vt:$vC),
+ !add(baseImm, 8)>;
+ // vselect(A, nand(B,C), C) => imm = baseImm + 13 = 94
+ def : XXEvalBitcastPattern<vt,
+ (vselect vt:$vA, XXEvalBinaryPattern<vt, and, 1>.opPat, vt:$vC),
+ !add(baseImm, 13)>;
+}
+
+multiclass XXEvalVSelectWithXXorCast<ValueType vt, bits<8> baseImm>{
+ // vselect(A, and(B,C), xor(B,C)) => imm = baseImm = 97
+ def : XXEvalBitcastPattern<vt,
+ (vselect vt:$vA, XXEvalBinaryPattern<vt, and>.opPat, XXEvalBinaryPattern<vt, xor>.opPat),
+ baseImm>;
+ // vselect(A, B, xor(B,C)) => imm = baseImm + 2 = 99
+ def : XXEvalBitcastPattern<vt,
+ (vselect vt:$vA, vt:$vB, XXEvalBinaryPattern<vt, xor>.opPat),
+ !add(baseImm, 2)>;
+ // vselect(A, C, xor(B,C)) => imm = baseImm + 4 = 101
+ def : XXEvalBitcastPattern<vt,
+ (vselect vt:$vA, vt:$vC, XXEvalBinaryPattern<vt, xor>.opPat),
+ !add(baseImm, 4)>;
+ // vselect(A, or(B,C), xor(B,C)) => imm = baseImm + 6 = 103
+ def : XXEvalBitcastPattern<vt,
+ (vselect vt:$vA, XXEvalBinaryPattern<vt, or>.opPat, XXEvalBinaryPattern<vt, xor>.opPat),
+ !add(baseImm, 6)>;
+ // vselect(A, nor(B,C), xor(B,C)) => imm = baseImm + 7 = 104
+ def : XXEvalBitcastPattern<vt,
+ (vselect vt:$vA, XXEvalBinaryPattern<vt, or, 1>.opPat, XXEvalBinaryPattern<vt, xor>.opPat),
+ !add(baseImm, 7)>;
+}
+
+// Instantiate XXEval patterns for all vector types
+let Predicates = [HasP10Vector] in {
+ let AddedComplexity = 500 in {
+ // For types directly supported by XXEVAL (v4i32, v2i64)
+ foreach type = [v4i32, v2i64] in {
+ defm : XXEvalVSelectWithXAnd<type, 22>;
+ defm : XXEvalVSelectWithXB<type, 49>;
+ defm : XXEvalVSelectWithXC<type, 81>;
+ defm : XXEvalVSelectWithXXor<type, 97>;
+ }
+
+ // For types that need COPY_TO_REGCLASS (v8i16, v16i8)
+ foreach type = [v8i16, v16i8] in {
+ defm : XXEvalVSelectWithXAndCast<type, 22>;
+ defm : XXEvalVSelectWithXBCast<type, 49>;
+ defm : XXEvalVSelectWithXCCast<type, 81>;
+ defm : XXEvalVSelectWithXXorCast<type, 97>;
+ }
+ }
+}
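
The immediates used above (22, 49, 81, 97 and the per-pattern offsets) follow from reading the XXEVAL immediate as an 8-entry truth table. Below is a minimal sketch, outside the patch, that reproduces the base values; it assumes the table is indexed by (A<<2)|(B<<1)|C with index 0 stored in the most significant bit of the immediate, which is consistent with every immediate in this change:

  def xxeval_imm(f):
      # Build the 8-bit XXEVAL immediate for a bitwise ternary function f(a, b, c).
      imm = 0
      for a in (0, 1):
          for b in (0, 1):
              for c in (0, 1):
                  idx = (a << 2) | (b << 1) | c   # row of the truth table
                  if f(a, b, c):
                      imm |= 1 << (7 - idx)       # index 0 lives in the MSB
      return imm

  # Base immediates used by the multiclasses above.
  assert xxeval_imm(lambda a, b, c: (b ^ c) if a else (b & c)) == 22   # vselect(A, xor(B,C), and(B,C))
  assert xxeval_imm(lambda a, b, c: (b & c) if a else b) == 49         # vselect(A, and(B,C), B)
  assert xxeval_imm(lambda a, b, c: (b & c) if a else c) == 81         # vselect(A, and(B,C), C)
  assert xxeval_imm(lambda a, b, c: (b & c) if a else (b ^ c)) == 97   # vselect(A, and(B,C), xor(B,C))

Read this way, the high nibble encodes the fixed false operand of each multiclass (and(B,C), B, C or xor(B,C)) and only the low nibble, which encodes X, changes with each "+N" offset.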
let Predicates = [PrefixInstrs, HasP10Vector] in {
let AddedComplexity = 400 in {
@@ -2192,83 +2438,83 @@ let Predicates = [PrefixInstrs, HasP10Vector] in {
// Anonymous patterns for XXEVAL
// AND
// and(A, B, C)
- def : XXEvalPattern<(and v4i32:$vA, (and v4i32:$vB, v4i32:$vC)), 1>;
+ def : XXEvalPattern<v4i32, (and v4i32:$vA, (and v4i32:$vB, v4i32:$vC)), 1>;
// and(A, xor(B, C))
- def : XXEvalPattern<(and v4i32:$vA, (xor v4i32:$vB, v4i32:$vC)), 6>;
+ def : XXEvalPattern<v4i32, (and v4i32:$vA, (xor v4i32:$vB, v4i32:$vC)), 6>;
// and(A, or(B, C))
- def : XXEvalPattern<(and v4i32:$vA, (or v4i32:$vB, v4i32:$vC)), 7>;
+ def : XXEvalPattern<v4i32, (and v4i32:$vA, (or v4i32:$vB, v4i32:$vC)), 7>;
// and(A, nor(B, C))
- def : XXEvalPattern<(and v4i32:$vA, (vnot (or v4i32:$vB, v4i32:$vC))), 8>;
+ def : XXEvalPattern<v4i32, (and v4i32:$vA, (vnot (or v4i32:$vB, v4i32:$vC))), 8>;
// and(A, eqv(B, C))
- def : XXEvalPattern<(and v4i32:$vA, (vnot (xor v4i32:$vB, v4i32:$vC))), 9>;
+ def : XXEvalPattern<v4i32, (and v4i32:$vA, (vnot (xor v4i32:$vB, v4i32:$vC))), 9>;
// and(A, nand(B, C))
- def : XXEvalPattern<(and v4i32:$vA, (vnot (and v4i32:$vB, v4i32:$vC))), 14>;
+ def : XXEvalPattern<v4i32, (and v4i32:$vA, (vnot (and v4i32:$vB, v4i32:$vC))), 14>;
// NAND
// nand(A, B, C)
- def : XXEvalPattern<(vnot (and v4i32:$vA, (and v4i32:$vB, v4i32:$vC))),
+ def : XXEvalPattern<v4i32, (vnot (and v4i32:$vA, (and v4i32:$vB, v4i32:$vC))),
!sub(255, 1)>;
// nand(A, xor(B, C))
- def : XXEvalPattern<(vnot (and v4i32:$vA, (xor v4i32:$vB, v4i32:$vC))),
+ def : XXEvalPattern<v4i32, (vnot (and v4i32:$vA, (xor v4i32:$vB, v4i32:$vC))),
!sub(255, 6)>;
// nand(A, or(B, C))
- def : XXEvalPattern<(vnot (and v4i32:$vA, (or v4i32:$vB, v4i32:$vC))),
+ def : XXEvalPattern<v4i32, (vnot (and v4i32:$vA, (or v4i32:$vB, v4i32:$vC))),
!sub(255, 7)>;
// nand(A, nor(B, C))
- def : XXEvalPattern<(or (vnot v4i32:$vA), (or v4i32:$vB, v4i32:$vC)),
+ def : XXEvalPattern<v4i32, (or (vnot v4i32:$vA), (or v4i32:$vB, v4i32:$vC)),
!sub(255, 8)>;
// nand(A, eqv(B, C))
- def : XXEvalPattern<(or (vnot v4i32:$vA), (xor v4i32:$vB, v4i32:$vC)),
+ def : XXEvalPattern<v4i32, (or (vnot v4i32:$vA), (xor v4i32:$vB, v4i32:$vC)),
!sub(255, 9)>;
// nand(A, nand(B, C))
- def : XXEvalPattern<(or (vnot v4i32:$vA), (and v4i32:$vB, v4i32:$vC)),
+ def : XXEvalPattern<v4i32, (or (vnot v4i32:$vA), (and v4i32:$vB, v4i32:$vC)),
!sub(255, 14)>;
// EQV
// (eqv A, B, C)
- def : XXEvalPattern<(or (and v4i32:$vA, (and v4i32:$vB, v4i32:$vC)),
+ def : XXEvalPattern<v4i32, (or (and v4i32:$vA, (and v4i32:$vB, v4i32:$vC)),
(vnot (or v4i32:$vA, (or v4i32:$vB, v4i32:$vC)))),
150>;
// (eqv A, (and B, C))
- def : XXEvalPattern<(vnot (xor v4i32:$vA, (and v4i32:$vB, v4i32:$vC))), 225>;
+ def : XXEvalPattern<v4i32, (vnot (xor v4i32:$vA, (and v4i32:$vB, v4i32:$vC))), 225>;
// (eqv A, (or B, C))
- def : XXEvalPattern<(vnot (xor v4i32:$vA, (or v4i32:$vB, v4i32:$vC))), 135>;
+ def : XXEvalPattern<v4i32, (vnot (xor v4i32:$vA, (or v4i32:$vB, v4i32:$vC))), 135>;
// NOR
// (nor A, B, C)
- def : XXEvalPattern<(vnot (or v4i32:$vA, (or v4i32:$vB, v4i32:$vC))), 128>;
+ def : XXEvalPattern<v4i32, (vnot (or v4i32:$vA, (or v4i32:$vB, v4i32:$vC))), 128>;
// (nor A, (and B, C))
- def : XXEvalPattern<(vnot (or v4i32:$vA, (and v4i32:$vB, v4i32:$vC))), 224>;
+ def : XXEvalPattern<v4i32, (vnot (or v4i32:$vA, (and v4i32:$vB, v4i32:$vC))), 224>;
// (nor A, (eqv B, C))
- def : XXEvalPattern<(and (vnot v4i32:$vA), (xor v4i32:$vB, v4i32:$vC)), 96>;
+ def : XXEvalPattern<v4i32, (and (vnot v4i32:$vA), (xor v4i32:$vB, v4i32:$vC)), 96>;
// (nor A, (nand B, C))
- def : XXEvalPattern<(and (vnot v4i32:$vA), (and v4i32:$vB, v4i32:$vC)), 16>;
+ def : XXEvalPattern<v4i32, (and (vnot v4i32:$vA), (and v4i32:$vB, v4i32:$vC)), 16>;
// (nor A, (nor B, C))
- def : XXEvalPattern<(and (vnot v4i32:$vA), (or v4i32:$vB, v4i32:$vC)), 112>;
+ def : XXEvalPattern<v4i32, (and (vnot v4i32:$vA), (or v4i32:$vB, v4i32:$vC)), 112>;
// (nor A, (xor B, C))
- def : XXEvalPattern<(vnot (or v4i32:$vA, (xor v4i32:$vB, v4i32:$vC))), 144>;
+ def : XXEvalPattern<v4i32, (vnot (or v4i32:$vA, (xor v4i32:$vB, v4i32:$vC))), 144>;
// OR
// (or A, B, C)
- def : XXEvalPattern<(or v4i32:$vA, (or v4i32:$vB, v4i32:$vC)), 127>;
+ def : XXEvalPattern<v4i32, (or v4i32:$vA, (or v4i32:$vB, v4i32:$vC)), 127>;
// (or A, (and B, C))
- def : XXEvalPattern<(or v4i32:$vA, (and v4i32:$vB, v4i32:$vC)), 31>;
+ def : XXEvalPattern<v4i32, (or v4i32:$vA, (and v4i32:$vB, v4i32:$vC)), 31>;
// (or A, (eqv B, C))
- def : XXEvalPattern<(or v4i32:$vA, (vnot (xor v4i32:$vB, v4i32:$vC))), 159>;
+ def : XXEvalPattern<v4i32, (or v4i32:$vA, (vnot (xor v4i32:$vB, v4i32:$vC))), 159>;
// (or A, (nand B, C))
- def : XXEvalPattern<(or v4i32:$vA, (vnot (and v4i32:$vB, v4i32:$vC))), 239>;
+ def : XXEvalPattern<v4i32, (or v4i32:$vA, (vnot (and v4i32:$vB, v4i32:$vC))), 239>;
// (or A, (nor B, C))
- def : XXEvalPattern<(or v4i32:$vA, (vnot (or v4i32:$vB, v4i32:$vC))), 143>;
+ def : XXEvalPattern<v4i32, (or v4i32:$vA, (vnot (or v4i32:$vB, v4i32:$vC))), 143>;
// (or A, (xor B, C))
- def : XXEvalPattern<(or v4i32:$vA, (xor v4i32:$vB, v4i32:$vC)), 111>;
+ def : XXEvalPattern<v4i32, (or v4i32:$vA, (xor v4i32:$vB, v4i32:$vC)), 111>;
// XOR
// (xor A, B, C)
- def : XXEvalPattern<(xor v4i32:$vA, (xor v4i32:$vB, v4i32:$vC)), 105>;
+ def : XXEvalPattern<v4i32, (xor v4i32:$vA, (xor v4i32:$vB, v4i32:$vC)), 105>;
// (xor A, (and B, C))
- def : XXEvalPattern<(xor v4i32:$vA, (and v4i32:$vB, v4i32:$vC)), 30>;
+ def : XXEvalPattern<v4i32, (xor v4i32:$vA, (and v4i32:$vB, v4i32:$vC)), 30>;
// (xor A, (or B, C))
- def : XXEvalPattern<(xor v4i32:$vA, (or v4i32:$vB, v4i32:$vC)), 120>;
+ def : XXEvalPattern<v4i32, (xor v4i32:$vA, (or v4i32:$vB, v4i32:$vC)), 120>;
// Anonymous patterns to select prefixed VSX loads and stores.
// Load / Store f128
diff --git a/llvm/test/CodeGen/PowerPC/xxeval-vselect-x-and.ll b/llvm/test/CodeGen/PowerPC/xxeval-vselect-x-and.ll
index 57d4c48a1aaa2..b41220b01373a 100644
--- a/llvm/test/CodeGen/PowerPC/xxeval-vselect-x-and.ll
+++ b/llvm/test/CodeGen/PowerPC/xxeval-vselect-x-and.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; Test file to verify the emission of Vector Selection instructions when ternary operators are used.
+; Test file to verify the emission of Vector Evaluate instructions when ternary operators are used.
; RUN: llc -verify-machineinstrs -mcpu=pwr10 -mtriple=powerpc64le-unknown-unknown \
; RUN: -ppc-asm-full-reg-names --ppc-vsr-nums-as-vr < %s | FileCheck %s
@@ -15,11 +15,9 @@ define <4 x i32> @ternary_A_xor_BC_and_BC_4x32(<4 x i1> %A, <4 x i32> %B, <4 x i
; CHECK-LABEL: ternary_A_xor_BC_and_BC_4x32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxleqv v5, v5, v5
-; CHECK-NEXT: xxlxor vs0, v3, v4
-; CHECK-NEXT: xxland vs1, v3, v4
; CHECK-NEXT: vslw v2, v2, v5
; CHECK-NEXT: vsraw v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 22
; CHECK-NEXT: blr
entry:
%xor = xor <4 x i32> %B, %C
@@ -33,12 +31,10 @@ define <2 x i64> @ternary_A_xor_BC_and_BC_2x64(<2 x i1> %A, <2 x i64> %B, <2 x i
; CHECK-LABEL: ternary_A_xor_BC_and_BC_2x64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxlxor v5, v5, v5
-; CHECK-NEXT: xxlxor vs0, v3, v4
-; CHECK-NEXT: xxland vs1, v3, v4
; CHECK-NEXT: xxsplti32dx v5, 1, 63
; CHECK-NEXT: vsld v2, v2, v5
; CHECK-NEXT: vsrad v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 22
; CHECK-NEXT: blr
entry:
%xor = xor <2 x i64> %B, %C
@@ -52,11 +48,9 @@ define <16 x i8> @ternary_A_xor_BC_and_BC_16x8(<16 x i1> %A, <16 x i8> %B, <16 x
; CHECK-LABEL: ternary_A_xor_BC_and_BC_16x8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltib v5, 7
-; CHECK-NEXT: xxlxor vs0, v3, v4
-; CHECK-NEXT: xxland vs1, v3, v4
; CHECK-NEXT: vslb v2, v2, v5
; CHECK-NEXT: vsrab v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 22
; CHECK-NEXT: blr
entry:
%xor = xor <16 x i8> %B, %C
@@ -70,11 +64,9 @@ define <8 x i16> @ternary_A_xor_BC_and_BC_8x16(<8 x i1> %A, <8 x i16> %B, <8 x i
; CHECK-LABEL: ternary_A_xor_BC_and_BC_8x16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltiw v5, 983055
-; CHECK-NEXT: xxlxor vs0, v3, v4
-; CHECK-NEXT: xxland vs1, v3, v4
; CHECK-NEXT: vslh v2, v2, v5
; CHECK-NEXT: vsrah v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 22
; CHECK-NEXT: blr
entry:
%xor = xor <8 x i16> %B, %C
@@ -88,11 +80,9 @@ define <4 x i32> @ternary_A_nor_BC_and_BC_4x32(<4 x i1> %A, <4 x i32> %B, <4 x i
; CHECK-LABEL: ternary_A_nor_BC_and_BC_4x32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxleqv v5, v5, v5
-; CHECK-NEXT: xxlnor vs0, v3, v4
-; CHECK-NEXT: xxland vs1, v3, v4
; CHECK-NEXT: vslw v2, v2, v5
; CHECK-NEXT: vsraw v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 24
; CHECK-NEXT: blr
entry:
%or = or <4 x i32> %B, %C
@@ -107,12 +97,10 @@ define <2 x i64> @ternary_A_nor_BC_and_BC_2x64(<2 x i1> %A, <2 x i64> %B, <2 x i
; CHECK-LABEL: ternary_A_nor_BC_and_BC_2x64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxlxor v5, v5, v5
-; CHECK-NEXT: xxlnor vs0, v3, v4
-; CHECK-NEXT: xxland vs1, v3, v4
; CHECK-NEXT: xxsplti32dx v5, 1, 63
; CHECK-NEXT: vsld v2, v2, v5
; CHECK-NEXT: vsrad v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 24
; CHECK-NEXT: blr
entry:
%or = or <2 x i64> %B, %C
@@ -127,11 +115,9 @@ define <16 x i8> @ternary_A_nor_BC_and_BC_16x8(<16 x i1> %A, <16 x i8> %B, <16 x
; CHECK-LABEL: ternary_A_nor_BC_and_BC_16x8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltib v5, 7
-; CHECK-NEXT: xxlnor vs0, v3, v4
-; CHECK-NEXT: xxland vs1, v3, v4
; CHECK-NEXT: vslb v2, v2, v5
; CHECK-NEXT: vsrab v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 24
; CHECK-NEXT: blr
entry:
%or = or <16 x i8> %B, %C
@@ -146,11 +132,9 @@ define <8 x i16> @ternary_A_nor_BC_and_BC_8x16(<8 x i1> %A, <8 x i16> %B, <8 x i
; CHECK-LABEL: ternary_A_nor_BC_and_BC_8x16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltiw v5, 983055
-; CHECK-NEXT: xxlnor vs0, v3, v4
-; CHECK-NEXT: xxland vs1, v3, v4
; CHECK-NEXT: vslh v2, v2, v5
; CHECK-NEXT: vsrah v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 24
; CHECK-NEXT: blr
entry:
%or = or <8 x i16> %B, %C
@@ -165,11 +149,9 @@ define <4 x i32> @ternary_A_eqv_BC_and_BC_4x32(<4 x i1> %A, <4 x i32> %B, <4 x i
; CHECK-LABEL: ternary_A_eqv_BC_and_BC_4x32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxleqv v5, v5, v5
-; CHECK-NEXT: xxleqv vs0, v3, v4
-; CHECK-NEXT: xxland vs1, v3, v4
; CHECK-NEXT: vslw v2, v2, v5
; CHECK-NEXT: vsraw v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 25
; CHECK-NEXT: blr
entry:
%xor = xor <4 x i32> %B, %C
@@ -184,12 +166,10 @@ define <2 x i64> @ternary_A_eqv_BC_and_BC_2x64(<2 x i1> %A, <2 x i64> %B, <2 x i
; CHECK-LABEL: ternary_A_eqv_BC_and_BC_2x64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxlxor v5, v5, v5
-; CHECK-NEXT: xxleqv vs0, v3, v4
-; CHECK-NEXT: xxland vs1, v3, v4
; CHECK-NEXT: xxsplti32dx v5, 1, 63
; CHECK-NEXT: vsld v2, v2, v5
; CHECK-NEXT: vsrad v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 25
; CHECK-NEXT: blr
entry:
%xor = xor <2 x i64> %B, %C
@@ -204,11 +184,9 @@ define <16 x i8> @ternary_A_eqv_BC_and_BC_16x8(<16 x i1> %A, <16 x i8> %B, <16 x
; CHECK-LABEL: ternary_A_eqv_BC_and_BC_16x8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltib v5, 7
-; CHECK-NEXT: xxleqv vs0, v3, v4
-; CHECK-NEXT: xxland vs1, v3, v4
; CHECK-NEXT: vslb v2, v2, v5
; CHECK-NEXT: vsrab v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 25
; CHECK-NEXT: blr
entry:
%xor = xor <16 x i8> %B, %C
@@ -223,11 +201,9 @@ define <8 x i16> @ternary_A_eqv_BC_and_BC_8x16(<8 x i1> %A, <8 x i16> %B, <8 x i
; CHECK-LABEL: ternary_A_eqv_BC_and_BC_8x16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltiw v5, 983055
-; CHECK-NEXT: xxleqv vs0, v3, v4
-; CHECK-NEXT: xxland vs1, v3, v4
; CHECK-NEXT: vslh v2, v2, v5
; CHECK-NEXT: vsrah v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 25
; CHECK-NEXT: blr
entry:
%xor = xor <8 x i16> %B, %C
@@ -242,11 +218,9 @@ define <4 x i32> @ternary_A_not_C_and_BC_4x32(<4 x i1> %A, <4 x i32> %B, <4 x i3
; CHECK-LABEL: ternary_A_not_C_and_BC_4x32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxleqv v5, v5, v5
-; CHECK-NEXT: xxlnor vs0, v4, v4
-; CHECK-NEXT: xxland vs1, v3, v4
; CHECK-NEXT: vslw v2, v2, v5
; CHECK-NEXT: vsraw v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 26
; CHECK-NEXT: blr
entry:
%not = xor <4 x i32> %C, <i32 -1, i32 -1, i32 -1, i32 -1> ; Vector not operation
@@ -260,12 +234,10 @@ define <2 x i64> @ternary_A_not_C_and_BC_2x64(<2 x i1> %A, <2 x i64> %B, <2 x i6
; CHECK-LABEL: ternary_A_not_C_and_BC_2x64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxlxor v5, v5, v5
-; CHECK-NEXT: xxlnor vs0, v4, v4
-; CHECK-NEXT: xxland vs1, v3, v4
; CHECK-NEXT: xxsplti32dx v5, 1, 63
; CHECK-NEXT: vsld v2, v2, v5
; CHECK-NEXT: vsrad v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 26
; CHECK-NEXT: blr
entry:
%not = xor <2 x i64> %C, <i64 -1, i64 -1> ; Vector not operation
@@ -279,11 +251,9 @@ define <16 x i8> @ternary_A_not_C_and_BC_16x8(<16 x i1> %A, <16 x i8> %B, <16 x
; CHECK-LABEL: ternary_A_not_C_and_BC_16x8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltib v5, 7
-; CHECK-NEXT: xxlnor vs0, v4, v4
-; CHECK-NEXT: xxland vs1, v3, v4
; CHECK-NEXT: vslb v2, v2, v5
; CHECK-NEXT: vsrab v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 26
; CHECK-NEXT: blr
entry:
%not = xor <16 x i8> %C, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1> ; Vector not operation
@@ -297,11 +267,9 @@ define <8 x i16> @ternary_A_not_C_and_BC_8x16(<8 x i1> %A, <8 x i16> %B, <8 x i1
; CHECK-LABEL: ternary_A_not_C_and_BC_8x16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltiw v5, 983055
-; CHECK-NEXT: xxlnor vs0, v4, v4
-; CHECK-NEXT: xxland vs1, v3, v4
; CHECK-NEXT: vslh v2, v2, v5
; CHECK-NEXT: vsrah v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 26
; CHECK-NEXT: blr
entry:
%not = xor <8 x i16> %C, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1> ; Vector not operation
@@ -315,11 +283,9 @@ define <4 x i32> @ternary_A_not_B_and_BC_4x32(<4 x i1> %A, <4 x i32> %B, <4 x i3
; CHECK-LABEL: ternary_A_not_B_and_BC_4x32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxleqv v5, v5, v5
-; CHECK-NEXT: xxlnor vs0, v3, v3
-; CHECK-NEXT: xxland vs1, v3, v4
; CHECK-NEXT: vslw v2, v2, v5
; CHECK-NEXT: vsraw v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 28
; CHECK-NEXT: blr
entry:
%not = xor <4 x i32> %B, <i32 -1, i32 -1, i32 -1, i32 -1> ; Vector not operation
@@ -333,12 +299,10 @@ define <2 x i64> @ternary_A_not_B_and_BC_2x64(<2 x i1> %A, <2 x i64> %B, <2 x i6
; CHECK-LABEL: ternary_A_not_B_and_BC_2x64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxlxor v5, v5, v5
-; CHECK-NEXT: xxlnor vs0, v3, v3
-; CHECK-NEXT: xxland vs1, v3, v4
; CHECK-NEXT: xxsplti32dx v5, 1, 63
; CHECK-NEXT: vsld v2, v2, v5
; CHECK-NEXT: vsrad v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 28
; CHECK-NEXT: blr
entry:
%not = xor <2 x i64> %B, <i64 -1, i64 -1> ; Vector not operation
@@ -352,11 +316,9 @@ define <16 x i8> @ternary_A_not_B_and_BC_16x8(<16 x i1> %A, <16 x i8> %B, <16 x
; CHECK-LABEL: ternary_A_not_B_and_BC_16x8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltib v5, 7
-; CHECK-NEXT: xxlnor vs0, v3, v3
-; CHECK-NEXT: xxland vs1, v3, v4
; CHECK-NEXT: vslb v2, v2, v5
; CHECK-NEXT: vsrab v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 28
; CHECK-NEXT: blr
entry:
%not = xor <16 x i8> %B, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1> ; Vector not operation
@@ -370,11 +332,9 @@ define <8 x i16> @ternary_A_not_B_and_BC_8x16(<8 x i1> %A, <8 x i16> %B, <8 x i1
; CHECK-LABEL: ternary_A_not_B_and_BC_8x16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltiw v5, 983055
-; CHECK-NEXT: xxlnor vs0, v3, v3
-; CHECK-NEXT: xxland vs1, v3, v4
; CHECK-NEXT: vslh v2, v2, v5
; CHECK-NEXT: vsrah v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 28
; CHECK-NEXT: blr
entry:
%not = xor <8 x i16> %B, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1> ; Vector not operation
diff --git a/llvm/test/CodeGen/PowerPC/xxeval-vselect-x-b.ll b/llvm/test/CodeGen/PowerPC/xxeval-vselect-x-b.ll
index c366fd5f0a8c2..8fd2453266706 100644
--- a/llvm/test/CodeGen/PowerPC/xxeval-vselect-x-b.ll
+++ b/llvm/test/CodeGen/PowerPC/xxeval-vselect-x-b.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; Test file to verify the emission of Vector Selection instructions when ternary operators are used.
+; Test file to verify the emission of Vector Evaluate instructions when ternary operators are used.
; RUN: llc -verify-machineinstrs -mcpu=pwr10 -mtriple=powerpc64le-unknown-unknown \
; RUN: -ppc-asm-full-reg-names --ppc-vsr-nums-as-vr < %s | FileCheck %s
@@ -15,10 +15,9 @@ define <4 x i32> @ternary_A_and_BC_B_4x32(<4 x i1> %A, <4 x i32> %B, <4 x i32> %
; CHECK-LABEL: ternary_A_and_BC_B_4x32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxleqv v5, v5, v5
-; CHECK-NEXT: xxland vs0, v3, v4
; CHECK-NEXT: vslw v2, v2, v5
; CHECK-NEXT: vsraw v2, v2, v5
-; CHECK-NEXT: xxsel v2, v3, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 49
; CHECK-NEXT: blr
entry:
%and = and <4 x i32> %B, %C
@@ -31,11 +30,10 @@ define <2 x i64> @ternary_A_and_BC_B_2x64(<2 x i1> %A, <2 x i64> %B, <2 x i64> %
; CHECK-LABEL: ternary_A_and_BC_B_2x64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxlxor v5, v5, v5
-; CHECK-NEXT: xxland vs0, v3, v4
; CHECK-NEXT: xxsplti32dx v5, 1, 63
; CHECK-NEXT: vsld v2, v2, v5
; CHECK-NEXT: vsrad v2, v2, v5
-; CHECK-NEXT: xxsel v2, v3, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 49
; CHECK-NEXT: blr
entry:
%and = and <2 x i64> %B, %C
@@ -48,10 +46,9 @@ define <16 x i8> @ternary_A_and_BC_B_16x8(<16 x i1> %A, <16 x i8> %B, <16 x i8>
; CHECK-LABEL: ternary_A_and_BC_B_16x8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltib v5, 7
-; CHECK-NEXT: xxland vs0, v3, v4
; CHECK-NEXT: vslb v2, v2, v5
; CHECK-NEXT: vsrab v2, v2, v5
-; CHECK-NEXT: xxsel v2, v3, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 49
; CHECK-NEXT: blr
entry:
%and = and <16 x i8> %B, %C
@@ -64,10 +61,9 @@ define <8 x i16> @ternary_A_and_BC_B_8x16(<8 x i1> %A, <8 x i16> %B, <8 x i16> %
; CHECK-LABEL: ternary_A_and_BC_B_8x16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltiw v5, 983055
-; CHECK-NEXT: xxland vs0, v3, v4
; CHECK-NEXT: vslh v2, v2, v5
; CHECK-NEXT: vsrah v2, v2, v5
-; CHECK-NEXT: xxsel v2, v3, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 49
; CHECK-NEXT: blr
entry:
%and = and <8 x i16> %B, %C
@@ -80,10 +76,9 @@ define <4 x i32> @ternary_A_nor_BC_B_4x32(<4 x i1> %A, <4 x i32> %B, <4 x i32> %
; CHECK-LABEL: ternary_A_nor_BC_B_4x32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxleqv v5, v5, v5
-; CHECK-NEXT: xxlnor vs0, v3, v4
; CHECK-NEXT: vslw v2, v2, v5
; CHECK-NEXT: vsraw v2, v2, v5
-; CHECK-NEXT: xxsel v2, v3, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 56
; CHECK-NEXT: blr
entry:
%or = or <4 x i32> %B, %C
@@ -97,11 +92,10 @@ define <2 x i64> @ternary_A_nor_BC_B_2x64(<2 x i1> %A, <2 x i64> %B, <2 x i64> %
; CHECK-LABEL: ternary_A_nor_BC_B_2x64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxlxor v5, v5, v5
-; CHECK-NEXT: xxlnor vs0, v3, v4
; CHECK-NEXT: xxsplti32dx v5, 1, 63
; CHECK-NEXT: vsld v2, v2, v5
; CHECK-NEXT: vsrad v2, v2, v5
-; CHECK-NEXT: xxsel v2, v3, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 56
; CHECK-NEXT: blr
entry:
%or = or <2 x i64> %B, %C
@@ -115,10 +109,9 @@ define <16 x i8> @ternary_A_nor_BC_B_16x8(<16 x i1> %A, <16 x i8> %B, <16 x i8>
; CHECK-LABEL: ternary_A_nor_BC_B_16x8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltib v5, 7
-; CHECK-NEXT: xxlnor vs0, v3, v4
; CHECK-NEXT: vslb v2, v2, v5
; CHECK-NEXT: vsrab v2, v2, v5
-; CHECK-NEXT: xxsel v2, v3, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 56
; CHECK-NEXT: blr
entry:
%or = or <16 x i8> %B, %C
@@ -132,10 +125,9 @@ define <8 x i16> @ternary_A_nor_BC_B_8x16(<8 x i1> %A, <8 x i16> %B, <8 x i16> %
; CHECK-LABEL: ternary_A_nor_BC_B_8x16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltiw v5, 983055
-; CHECK-NEXT: xxlnor vs0, v3, v4
; CHECK-NEXT: vslh v2, v2, v5
; CHECK-NEXT: vsrah v2, v2, v5
-; CHECK-NEXT: xxsel v2, v3, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 56
; CHECK-NEXT: blr
entry:
%or = or <8 x i16> %B, %C
@@ -149,10 +141,9 @@ define <4 x i32> @ternary_A_eqv_BC_B_4x32(<4 x i1> %A, <4 x i32> %B, <4 x i32> %
; CHECK-LABEL: ternary_A_eqv_BC_B_4x32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxleqv v5, v5, v5
-; CHECK-NEXT: xxleqv vs0, v3, v4
; CHECK-NEXT: vslw v2, v2, v5
; CHECK-NEXT: vsraw v2, v2, v5
-; CHECK-NEXT: xxsel v2, v3, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 57
; CHECK-NEXT: blr
entry:
%xor = xor <4 x i32> %B, %C
@@ -166,11 +157,10 @@ define <2 x i64> @ternary_A_eqv_BC_B_2x64(<2 x i1> %A, <2 x i64> %B, <2 x i64> %
; CHECK-LABEL: ternary_A_eqv_BC_B_2x64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxlxor v5, v5, v5
-; CHECK-NEXT: xxleqv vs0, v3, v4
; CHECK-NEXT: xxsplti32dx v5, 1, 63
; CHECK-NEXT: vsld v2, v2, v5
; CHECK-NEXT: vsrad v2, v2, v5
-; CHECK-NEXT: xxsel v2, v3, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 57
; CHECK-NEXT: blr
entry:
%xor = xor <2 x i64> %B, %C
@@ -184,10 +174,9 @@ define <16 x i8> @ternary_A_eqv_BC_B_16x8(<16 x i1> %A, <16 x i8> %B, <16 x i8>
; CHECK-LABEL: ternary_A_eqv_BC_B_16x8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltib v5, 7
-; CHECK-NEXT: xxleqv vs0, v3, v4
; CHECK-NEXT: vslb v2, v2, v5
; CHECK-NEXT: vsrab v2, v2, v5
-; CHECK-NEXT: xxsel v2, v3, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 57
; CHECK-NEXT: blr
entry:
%xor = xor <16 x i8> %B, %C
@@ -201,10 +190,9 @@ define <8 x i16> @ternary_A_eqv_BC_B_8x16(<8 x i1> %A, <8 x i16> %B, <8 x i16> %
; CHECK-LABEL: ternary_A_eqv_BC_B_8x16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltiw v5, 983055
-; CHECK-NEXT: xxleqv vs0, v3, v4
; CHECK-NEXT: vslh v2, v2, v5
; CHECK-NEXT: vsrah v2, v2, v5
-; CHECK-NEXT: xxsel v2, v3, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 57
; CHECK-NEXT: blr
entry:
%xor = xor <8 x i16> %B, %C
@@ -218,10 +206,9 @@ define <4 x i32> @ternary_A_nand_BC_B_4x32(<4 x i1> %A, <4 x i32> %B, <4 x i32>
; CHECK-LABEL: ternary_A_nand_BC_B_4x32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxleqv v5, v5, v5
-; CHECK-NEXT: xxlnand vs0, v3, v4
; CHECK-NEXT: vslw v2, v2, v5
; CHECK-NEXT: vsraw v2, v2, v5
-; CHECK-NEXT: xxsel v2, v3, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 62
; CHECK-NEXT: blr
entry:
%and = and <4 x i32> %B, %C
@@ -235,11 +222,10 @@ define <2 x i64> @ternary_A_nand_BC_B_2x64(<2 x i1> %A, <2 x i64> %B, <2 x i64>
; CHECK-LABEL: ternary_A_nand_BC_B_2x64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxlxor v5, v5, v5
-; CHECK-NEXT: xxlnand vs0, v3, v4
; CHECK-NEXT: xxsplti32dx v5, 1, 63
; CHECK-NEXT: vsld v2, v2, v5
; CHECK-NEXT: vsrad v2, v2, v5
-; CHECK-NEXT: xxsel v2, v3, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 62
; CHECK-NEXT: blr
entry:
%and = and <2 x i64> %B, %C
@@ -253,10 +239,9 @@ define <16 x i8> @ternary_A_nand_BC_B_16x8(<16 x i1> %A, <16 x i8> %B, <16 x i8>
; CHECK-LABEL: ternary_A_nand_BC_B_16x8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltib v5, 7
-; CHECK-NEXT: xxlnand vs0, v3, v4
; CHECK-NEXT: vslb v2, v2, v5
; CHECK-NEXT: vsrab v2, v2, v5
-; CHECK-NEXT: xxsel v2, v3, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 62
; CHECK-NEXT: blr
entry:
%and = and <16 x i8> %B, %C
@@ -270,10 +255,9 @@ define <8 x i16> @ternary_A_nand_BC_B_8x16(<8 x i1> %A, <8 x i16> %B, <8 x i16>
; CHECK-LABEL: ternary_A_nand_BC_B_8x16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltiw v5, 983055
-; CHECK-NEXT: xxlnand vs0, v3, v4
; CHECK-NEXT: vslh v2, v2, v5
; CHECK-NEXT: vsrah v2, v2, v5
-; CHECK-NEXT: xxsel v2, v3, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 62
; CHECK-NEXT: blr
entry:
%and = and <8 x i16> %B, %C
diff --git a/llvm/test/CodeGen/PowerPC/xxeval-vselect-x-c.ll b/llvm/test/CodeGen/PowerPC/xxeval-vselect-x-c.ll
index f70f1d093f069..c25288df78af6 100644
--- a/llvm/test/CodeGen/PowerPC/xxeval-vselect-x-c.ll
+++ b/llvm/test/CodeGen/PowerPC/xxeval-vselect-x-c.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; Test file to verify the emission of Vector Selection instructions when ternary operators are used.
+; Test file to verify the emission of Vector Evaluate instructions when ternary operators are used.
; RUN: llc -verify-machineinstrs -mcpu=pwr10 -mtriple=powerpc64le-unknown-unknown \
; RUN: -ppc-asm-full-reg-names --ppc-vsr-nums-as-vr < %s | FileCheck %s
@@ -15,10 +15,9 @@ define <4 x i32> @ternary_A_and_BC_C_4x32(<4 x i1> %A, <4 x i32> %B, <4 x i32> %
; CHECK-LABEL: ternary_A_and_BC_C_4x32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxleqv v5, v5, v5
-; CHECK-NEXT: xxland vs0, v3, v4
; CHECK-NEXT: vslw v2, v2, v5
; CHECK-NEXT: vsraw v2, v2, v5
-; CHECK-NEXT: xxsel v2, v4, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 81
; CHECK-NEXT: blr
entry:
%and = and <4 x i32> %B, %C
@@ -31,11 +30,10 @@ define <2 x i64> @ternary_A_and_BC_C_2x64(<2 x i1> %A, <2 x i64> %B, <2 x i64> %
; CHECK-LABEL: ternary_A_and_BC_C_2x64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxlxor v5, v5, v5
-; CHECK-NEXT: xxland vs0, v3, v4
; CHECK-NEXT: xxsplti32dx v5, 1, 63
; CHECK-NEXT: vsld v2, v2, v5
; CHECK-NEXT: vsrad v2, v2, v5
-; CHECK-NEXT: xxsel v2, v4, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 81
; CHECK-NEXT: blr
entry:
%and = and <2 x i64> %B, %C
@@ -48,10 +46,9 @@ define <16 x i8> @ternary_A_and_BC_C_16x8(<16 x i1> %A, <16 x i8> %B, <16 x i8>
; CHECK-LABEL: ternary_A_and_BC_C_16x8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltib v5, 7
-; CHECK-NEXT: xxland vs0, v3, v4
; CHECK-NEXT: vslb v2, v2, v5
; CHECK-NEXT: vsrab v2, v2, v5
-; CHECK-NEXT: xxsel v2, v4, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 81
; CHECK-NEXT: blr
entry:
%and = and <16 x i8> %B, %C
@@ -64,10 +61,9 @@ define <8 x i16> @ternary_A_and_BC_C_8x16(<8 x i1> %A, <8 x i16> %B, <8 x i16> %
; CHECK-LABEL: ternary_A_and_BC_C_8x16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltiw v5, 983055
-; CHECK-NEXT: xxland vs0, v3, v4
; CHECK-NEXT: vslh v2, v2, v5
; CHECK-NEXT: vsrah v2, v2, v5
-; CHECK-NEXT: xxsel v2, v4, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 81
; CHECK-NEXT: blr
entry:
%and = and <8 x i16> %B, %C
@@ -80,10 +76,9 @@ define <4 x i32> @ternary_A_nor_BC_C_4x32(<4 x i1> %A, <4 x i32> %B, <4 x i32> %
; CHECK-LABEL: ternary_A_nor_BC_C_4x32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxleqv v5, v5, v5
-; CHECK-NEXT: xxlnor vs0, v3, v4
; CHECK-NEXT: vslw v2, v2, v5
; CHECK-NEXT: vsraw v2, v2, v5
-; CHECK-NEXT: xxsel v2, v4, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 88
; CHECK-NEXT: blr
entry:
%or = or <4 x i32> %B, %C
@@ -97,11 +92,10 @@ define <2 x i64> @ternary_A_nor_BC_C_2x64(<2 x i1> %A, <2 x i64> %B, <2 x i64> %
; CHECK-LABEL: ternary_A_nor_BC_C_2x64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxlxor v5, v5, v5
-; CHECK-NEXT: xxlnor vs0, v3, v4
; CHECK-NEXT: xxsplti32dx v5, 1, 63
; CHECK-NEXT: vsld v2, v2, v5
; CHECK-NEXT: vsrad v2, v2, v5
-; CHECK-NEXT: xxsel v2, v4, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 88
; CHECK-NEXT: blr
entry:
%or = or <2 x i64> %B, %C
@@ -115,10 +109,9 @@ define <16 x i8> @ternary_A_nor_BC_C_16x8(<16 x i1> %A, <16 x i8> %B, <16 x i8>
; CHECK-LABEL: ternary_A_nor_BC_C_16x8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltib v5, 7
-; CHECK-NEXT: xxlnor vs0, v3, v4
; CHECK-NEXT: vslb v2, v2, v5
; CHECK-NEXT: vsrab v2, v2, v5
-; CHECK-NEXT: xxsel v2, v4, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 88
; CHECK-NEXT: blr
entry:
%or = or <16 x i8> %B, %C
@@ -132,10 +125,9 @@ define <8 x i16> @ternary_A_nor_BC_C_8x16(<8 x i1> %A, <8 x i16> %B, <8 x i16> %
; CHECK-LABEL: ternary_A_nor_BC_C_8x16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltiw v5, 983055
-; CHECK-NEXT: xxlnor vs0, v3, v4
; CHECK-NEXT: vslh v2, v2, v5
; CHECK-NEXT: vsrah v2, v2, v5
-; CHECK-NEXT: xxsel v2, v4, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 88
; CHECK-NEXT: blr
entry:
%or = or <8 x i16> %B, %C
@@ -149,10 +141,9 @@ define <4 x i32> @ternary_A_eqv_BC_C_4x32(<4 x i1> %A, <4 x i32> %B, <4 x i32> %
; CHECK-LABEL: ternary_A_eqv_BC_C_4x32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxleqv v5, v5, v5
-; CHECK-NEXT: xxleqv vs0, v3, v4
; CHECK-NEXT: vslw v2, v2, v5
; CHECK-NEXT: vsraw v2, v2, v5
-; CHECK-NEXT: xxsel v2, v4, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 89
; CHECK-NEXT: blr
entry:
%xor = xor <4 x i32> %B, %C
@@ -166,11 +157,10 @@ define <2 x i64> @ternary_A_eqv_BC_C_2x64(<2 x i1> %A, <2 x i64> %B, <2 x i64> %
; CHECK-LABEL: ternary_A_eqv_BC_C_2x64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxlxor v5, v5, v5
-; CHECK-NEXT: xxleqv vs0, v3, v4
; CHECK-NEXT: xxsplti32dx v5, 1, 63
; CHECK-NEXT: vsld v2, v2, v5
; CHECK-NEXT: vsrad v2, v2, v5
-; CHECK-NEXT: xxsel v2, v4, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 89
; CHECK-NEXT: blr
entry:
%xor = xor <2 x i64> %B, %C
@@ -184,10 +174,9 @@ define <16 x i8> @ternary_A_eqv_BC_C_16x8(<16 x i1> %A, <16 x i8> %B, <16 x i8>
; CHECK-LABEL: ternary_A_eqv_BC_C_16x8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltib v5, 7
-; CHECK-NEXT: xxleqv vs0, v3, v4
; CHECK-NEXT: vslb v2, v2, v5
; CHECK-NEXT: vsrab v2, v2, v5
-; CHECK-NEXT: xxsel v2, v4, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 89
; CHECK-NEXT: blr
entry:
%xor = xor <16 x i8> %B, %C
@@ -201,10 +190,9 @@ define <8 x i16> @ternary_A_eqv_BC_C_8x16(<8 x i1> %A, <8 x i16> %B, <8 x i16> %
; CHECK-LABEL: ternary_A_eqv_BC_C_8x16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltiw v5, 983055
-; CHECK-NEXT: xxleqv vs0, v3, v4
; CHECK-NEXT: vslh v2, v2, v5
; CHECK-NEXT: vsrah v2, v2, v5
-; CHECK-NEXT: xxsel v2, v4, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 89
; CHECK-NEXT: blr
entry:
%xor = xor <8 x i16> %B, %C
@@ -218,10 +206,9 @@ define <4 x i32> @ternary_A_nand_BC_C_4x32(<4 x i1> %A, <4 x i32> %B, <4 x i32>
; CHECK-LABEL: ternary_A_nand_BC_C_4x32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxleqv v5, v5, v5
-; CHECK-NEXT: xxlnand vs0, v3, v4
; CHECK-NEXT: vslw v2, v2, v5
; CHECK-NEXT: vsraw v2, v2, v5
-; CHECK-NEXT: xxsel v2, v4, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 94
; CHECK-NEXT: blr
entry:
%and = and <4 x i32> %B, %C
@@ -235,11 +222,10 @@ define <2 x i64> @ternary_A_nand_BC_C_2x64(<2 x i1> %A, <2 x i64> %B, <2 x i64>
; CHECK-LABEL: ternary_A_nand_BC_C_2x64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxlxor v5, v5, v5
-; CHECK-NEXT: xxlnand vs0, v3, v4
; CHECK-NEXT: xxsplti32dx v5, 1, 63
; CHECK-NEXT: vsld v2, v2, v5
; CHECK-NEXT: vsrad v2, v2, v5
-; CHECK-NEXT: xxsel v2, v4, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 94
; CHECK-NEXT: blr
entry:
%and = and <2 x i64> %B, %C
@@ -253,10 +239,9 @@ define <16 x i8> @ternary_A_nand_BC_C_16x8(<16 x i1> %A, <16 x i8> %B, <16 x i8>
; CHECK-LABEL: ternary_A_nand_BC_C_16x8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltib v5, 7
-; CHECK-NEXT: xxlnand vs0, v3, v4
; CHECK-NEXT: vslb v2, v2, v5
; CHECK-NEXT: vsrab v2, v2, v5
-; CHECK-NEXT: xxsel v2, v4, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 94
; CHECK-NEXT: blr
entry:
%and = and <16 x i8> %B, %C
@@ -270,10 +255,9 @@ define <8 x i16> @ternary_A_nand_BC_C_8x16(<8 x i1> %A, <8 x i16> %B, <8 x i16>
; CHECK-LABEL: ternary_A_nand_BC_C_8x16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltiw v5, 983055
-; CHECK-NEXT: xxlnand vs0, v3, v4
; CHECK-NEXT: vslh v2, v2, v5
; CHECK-NEXT: vsrah v2, v2, v5
-; CHECK-NEXT: xxsel v2, v4, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 94
; CHECK-NEXT: blr
entry:
%and = and <8 x i16> %B, %C
diff --git a/llvm/test/CodeGen/PowerPC/xxeval-vselect-x-xor.ll b/llvm/test/CodeGen/PowerPC/xxeval-vselect-x-xor.ll
index a3fdc905cb52c..0fc296cc5a4e2 100644
--- a/llvm/test/CodeGen/PowerPC/xxeval-vselect-x-xor.ll
+++ b/llvm/test/CodeGen/PowerPC/xxeval-vselect-x-xor.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; Test file to verify the emission of Vector Selection instructions when ternary operators are used.
+; Test file to verify the emission of Vector Evaluate instructions when ternary operators are used.
; RUN: llc -verify-machineinstrs -mcpu=pwr10 -mtriple=powerpc64le-unknown-unknown \
; RUN: -ppc-asm-full-reg-names --ppc-vsr-nums-as-vr < %s | FileCheck %s
@@ -15,11 +15,9 @@ define <4 x i32> @ternary_A_and_BC_xor_BC_4x32(<4 x i1> %A, <4 x i32> %B, <4 x i
; CHECK-LABEL: ternary_A_and_BC_xor_BC_4x32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxleqv v5, v5, v5
-; CHECK-NEXT: xxland vs0, v3, v4
-; CHECK-NEXT: xxlxor vs1, v3, v4
; CHECK-NEXT: vslw v2, v2, v5
; CHECK-NEXT: vsraw v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 97
; CHECK-NEXT: blr
entry:
%and = and <4 x i32> %B, %C
@@ -33,12 +31,10 @@ define <2 x i64> @ternary_A_and_BC_xor_BC_2x64(<2 x i1> %A, <2 x i64> %B, <2 x i
; CHECK-LABEL: ternary_A_and_BC_xor_BC_2x64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxlxor v5, v5, v5
-; CHECK-NEXT: xxland vs0, v3, v4
-; CHECK-NEXT: xxlxor vs1, v3, v4
; CHECK-NEXT: xxsplti32dx v5, 1, 63
; CHECK-NEXT: vsld v2, v2, v5
; CHECK-NEXT: vsrad v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 97
; CHECK-NEXT: blr
entry:
%and = and <2 x i64> %B, %C
@@ -52,11 +48,9 @@ define <16 x i8> @ternary_A_and_BC_xor_BC_16x8(<16 x i1> %A, <16 x i8> %B, <16 x
; CHECK-LABEL: ternary_A_and_BC_xor_BC_16x8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltib v5, 7
-; CHECK-NEXT: xxland vs0, v3, v4
-; CHECK-NEXT: xxlxor vs1, v3, v4
; CHECK-NEXT: vslb v2, v2, v5
; CHECK-NEXT: vsrab v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 97
; CHECK-NEXT: blr
entry:
%and = and <16 x i8> %B, %C
@@ -70,11 +64,9 @@ define <8 x i16> @ternary_A_and_BC_xor_BC_8x16(<8 x i1> %A, <8 x i16> %B, <8 x i
; CHECK-LABEL: ternary_A_and_BC_xor_BC_8x16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltiw v5, 983055
-; CHECK-NEXT: xxland vs0, v3, v4
-; CHECK-NEXT: xxlxor vs1, v3, v4
; CHECK-NEXT: vslh v2, v2, v5
; CHECK-NEXT: vsrah v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 97
; CHECK-NEXT: blr
entry:
%and = and <8 x i16> %B, %C
@@ -88,10 +80,9 @@ define <4 x i32> @ternary_A_B_xor_BC_4x32(<4 x i1> %A, <4 x i32> %B, <4 x i32> %
; CHECK-LABEL: ternary_A_B_xor_BC_4x32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxleqv v5, v5, v5
-; CHECK-NEXT: xxlxor vs0, v3, v4
; CHECK-NEXT: vslw v2, v2, v5
; CHECK-NEXT: vsraw v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs0, v3, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 99
; CHECK-NEXT: blr
entry:
%xor = xor <4 x i32> %B, %C
@@ -104,11 +95,10 @@ define <2 x i64> @ternary_A_B_xor_BC_2x64(<2 x i1> %A, <2 x i64> %B, <2 x i64> %
; CHECK-LABEL: ternary_A_B_xor_BC_2x64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxlxor v5, v5, v5
-; CHECK-NEXT: xxlxor vs0, v3, v4
; CHECK-NEXT: xxsplti32dx v5, 1, 63
; CHECK-NEXT: vsld v2, v2, v5
; CHECK-NEXT: vsrad v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs0, v3, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 99
; CHECK-NEXT: blr
entry:
%xor = xor <2 x i64> %B, %C
@@ -121,10 +111,9 @@ define <16 x i8> @ternary_A_B_xor_BC_16x8(<16 x i1> %A, <16 x i8> %B, <16 x i8>
; CHECK-LABEL: ternary_A_B_xor_BC_16x8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltib v5, 7
-; CHECK-NEXT: xxlxor vs0, v3, v4
; CHECK-NEXT: vslb v2, v2, v5
; CHECK-NEXT: vsrab v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs0, v3, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 99
; CHECK-NEXT: blr
entry:
%xor = xor <16 x i8> %B, %C
@@ -137,10 +126,9 @@ define <8 x i16> @ternary_A_B_xor_BC_8x16(<8 x i1> %A, <8 x i16> %B, <8 x i16> %
; CHECK-LABEL: ternary_A_B_xor_BC_8x16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltiw v5, 983055
-; CHECK-NEXT: xxlxor vs0, v3, v4
; CHECK-NEXT: vslh v2, v2, v5
; CHECK-NEXT: vsrah v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs0, v3, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 99
; CHECK-NEXT: blr
entry:
%xor = xor <8 x i16> %B, %C
@@ -153,10 +141,9 @@ define <4 x i32> @ternary_A_C_xor_BC_4x32(<4 x i1> %A, <4 x i32> %B, <4 x i32> %
; CHECK-LABEL: ternary_A_C_xor_BC_4x32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxleqv v5, v5, v5
-; CHECK-NEXT: xxlxor vs0, v3, v4
; CHECK-NEXT: vslw v2, v2, v5
; CHECK-NEXT: vsraw v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs0, v4, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 101
; CHECK-NEXT: blr
entry:
%xor = xor <4 x i32> %B, %C
@@ -169,11 +156,10 @@ define <2 x i64> @ternary_A_C_xor_BC_2x64(<2 x i1> %A, <2 x i64> %B, <2 x i64> %
; CHECK-LABEL: ternary_A_C_xor_BC_2x64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxlxor v5, v5, v5
-; CHECK-NEXT: xxlxor vs0, v3, v4
; CHECK-NEXT: xxsplti32dx v5, 1, 63
; CHECK-NEXT: vsld v2, v2, v5
; CHECK-NEXT: vsrad v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs0, v4, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 101
; CHECK-NEXT: blr
entry:
%xor = xor <2 x i64> %B, %C
@@ -186,10 +172,9 @@ define <16 x i8> @ternary_A_C_xor_BC_16x8(<16 x i1> %A, <16 x i8> %B, <16 x i8>
; CHECK-LABEL: ternary_A_C_xor_BC_16x8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltib v5, 7
-; CHECK-NEXT: xxlxor vs0, v3, v4
; CHECK-NEXT: vslb v2, v2, v5
; CHECK-NEXT: vsrab v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs0, v4, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 101
; CHECK-NEXT: blr
entry:
%xor = xor <16 x i8> %B, %C
@@ -202,10 +187,9 @@ define <8 x i16> @ternary_A_C_xor_BC_8x16(<8 x i1> %A, <8 x i16> %B, <8 x i16> %
; CHECK-LABEL: ternary_A_C_xor_BC_8x16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltiw v5, 983055
-; CHECK-NEXT: xxlxor vs0, v3, v4
; CHECK-NEXT: vslh v2, v2, v5
; CHECK-NEXT: vsrah v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs0, v4, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 101
; CHECK-NEXT: blr
entry:
%xor = xor <8 x i16> %B, %C
@@ -218,11 +202,9 @@ define <4 x i32> @ternary_A_or_BC_xor_BC_4x32(<4 x i1> %A, <4 x i32> %B, <4 x i3
; CHECK-LABEL: ternary_A_or_BC_xor_BC_4x32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxleqv v5, v5, v5
-; CHECK-NEXT: xxlor vs0, v3, v4
-; CHECK-NEXT: xxlxor vs1, v3, v4
; CHECK-NEXT: vslw v2, v2, v5
; CHECK-NEXT: vsraw v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 103
; CHECK-NEXT: blr
entry:
%or = or <4 x i32> %B, %C
@@ -236,12 +218,10 @@ define <2 x i64> @ternary_A_or_BC_xor_BC_2x64(<2 x i1> %A, <2 x i64> %B, <2 x i6
; CHECK-LABEL: ternary_A_or_BC_xor_BC_2x64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxlxor v5, v5, v5
-; CHECK-NEXT: xxlor vs0, v3, v4
-; CHECK-NEXT: xxlxor vs1, v3, v4
; CHECK-NEXT: xxsplti32dx v5, 1, 63
; CHECK-NEXT: vsld v2, v2, v5
; CHECK-NEXT: vsrad v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 103
; CHECK-NEXT: blr
entry:
%or = or <2 x i64> %B, %C
@@ -255,11 +235,9 @@ define <16 x i8> @ternary_A_or_BC_xor_BC_16x8(<16 x i1> %A, <16 x i8> %B, <16 x
; CHECK-LABEL: ternary_A_or_BC_xor_BC_16x8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltib v5, 7
-; CHECK-NEXT: xxlor vs0, v3, v4
-; CHECK-NEXT: xxlxor vs1, v3, v4
; CHECK-NEXT: vslb v2, v2, v5
; CHECK-NEXT: vsrab v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 103
; CHECK-NEXT: blr
entry:
%or = or <16 x i8> %B, %C
@@ -273,11 +251,9 @@ define <8 x i16> @ternary_A_or_BC_xor_BC_8x16(<8 x i1> %A, <8 x i16> %B, <8 x i1
; CHECK-LABEL: ternary_A_or_BC_xor_BC_8x16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltiw v5, 983055
-; CHECK-NEXT: xxlor vs0, v3, v4
-; CHECK-NEXT: xxlxor vs1, v3, v4
; CHECK-NEXT: vslh v2, v2, v5
; CHECK-NEXT: vsrah v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 103
; CHECK-NEXT: blr
entry:
%or = or <8 x i16> %B, %C
@@ -291,11 +267,9 @@ define <4 x i32> @ternary_A_nor_BC_xor_BC_4x32(<4 x i1> %A, <4 x i32> %B, <4 x i
; CHECK-LABEL: ternary_A_nor_BC_xor_BC_4x32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxleqv v5, v5, v5
-; CHECK-NEXT: xxlnor vs0, v3, v4
-; CHECK-NEXT: xxlxor vs1, v3, v4
; CHECK-NEXT: vslw v2, v2, v5
; CHECK-NEXT: vsraw v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 104
; CHECK-NEXT: blr
entry:
%or = or <4 x i32> %B, %C
@@ -310,12 +284,10 @@ define <2 x i64> @ternary_A_nor_BC_xor_BC_2x64(<2 x i1> %A, <2 x i64> %B, <2 x i
; CHECK-LABEL: ternary_A_nor_BC_xor_BC_2x64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxlxor v5, v5, v5
-; CHECK-NEXT: xxlnor vs0, v3, v4
-; CHECK-NEXT: xxlxor vs1, v3, v4
; CHECK-NEXT: xxsplti32dx v5, 1, 63
; CHECK-NEXT: vsld v2, v2, v5
; CHECK-NEXT: vsrad v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 104
; CHECK-NEXT: blr
entry:
%or = or <2 x i64> %B, %C
@@ -330,11 +302,9 @@ define <16 x i8> @ternary_A_nor_BC_xor_BC_16x8(<16 x i1> %A, <16 x i8> %B, <16 x
; CHECK-LABEL: ternary_A_nor_BC_xor_BC_16x8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltib v5, 7
-; CHECK-NEXT: xxlnor vs0, v3, v4
-; CHECK-NEXT: xxlxor vs1, v3, v4
; CHECK-NEXT: vslb v2, v2, v5
; CHECK-NEXT: vsrab v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 104
; CHECK-NEXT: blr
entry:
%or = or <16 x i8> %B, %C
@@ -349,11 +319,9 @@ define <8 x i16> @ternary_A_nor_BC_xor_BC_8x16(<8 x i1> %A, <8 x i16> %B, <8 x i
; CHECK-LABEL: ternary_A_nor_BC_xor_BC_8x16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltiw v5, 983055
-; CHECK-NEXT: xxlnor vs0, v3, v4
-; CHECK-NEXT: xxlxor vs1, v3, v4
; CHECK-NEXT: vslh v2, v2, v5
; CHECK-NEXT: vsrah v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 104
; CHECK-NEXT: blr
entry:
%or = or <8 x i16> %B, %C
>From 7147072a2d76e817d7e814dd49e8db064db7ddb8 Mon Sep 17 00:00:00 2001
From: Tony Varghese <tony.varghese at ibm.com>
Date: Sat, 12 Jul 2025 04:36:40 +0000
Subject: [PATCH 2/3] Updated llvm/lib/Target/PowerPC/PPCInstrP10.td to inline
the BinaryOpDag class
---
llvm/lib/Target/PowerPC/PPCInstrP10.td | 31 +++++++++-----------------
1 file changed, 10 insertions(+), 21 deletions(-)
diff --git a/llvm/lib/Target/PowerPC/PPCInstrP10.td b/llvm/lib/Target/PowerPC/PPCInstrP10.td
index aa31478589cea..19ef59cf50faa 100644
--- a/llvm/lib/Target/PowerPC/PPCInstrP10.td
+++ b/llvm/lib/Target/PowerPC/PPCInstrP10.td
@@ -2164,22 +2164,6 @@ let AddedComplexity = 400, Predicates = [IsISA3_1, HasVSX] in {
class XXEvalPattern <ValueType vt, dag pattern, bits<8> imm> :
Pat<(vt pattern), (XXEVAL $vA, $vB, $vC, imm)> {}
- // Helper class to generate binary operation DAGs for various vector types.
- // For v4i32, emits (op B C).
- // For other types, bitcasts operands to v4i32, applies the op, then bitcasts back.
-class BinaryOpDag<ValueType vt, SDPatternOperator op > {
- // The DAG for the binary operation.
- dag OpDag = !if( !eq(vt, v4i32),
- (op vt:$vB, vt:$vC),
- (vt (bitconvert (op (v4i32 (bitconvert vt:$vB)), (v4i32 (bitconvert vt:$vC)))))
- );
- // The DAG for the binary operation with a NOT applied to the result.
- dag VnotOpDag = !if( !eq(vt, v4i32),
- (vnot (op vt:$vB, vt:$vC)),
- (vt (bitconvert (vnot (op (v4i32 (bitconvert vt:$vB)), (v4i32 (bitconvert vt:$vC))))))
- );
-}
-
// Helper class to generate unary NOT patterns for vector types.
// For v4i32, emits (vnot B) or (vnot C).
// For other types, bitcasts operand to v4i32, applies vnot, then bitcasts back.
@@ -2197,10 +2181,15 @@ class XXEvalUnaryNotPattern<ValueType vt> {
// Wrapper class for binary patterns with optional NOT on the result.
// If 'not' is 0, emits the binary op; if 1, emits vnot of the binary op.
class XXEvalBinaryPattern<ValueType vt, SDPatternOperator op, bit not = 0> {
- dag opPat = !if( !eq(not, 0),
- BinaryOpDag<vt, op>.OpDag,
- BinaryOpDag<vt, op>.VnotOpDag
- );
+ dag opPat = !if(!eq(not, 0),
+ // DAG for the binary operation.
+ !if(!eq(vt, v4i32),
+ (op vt:$vB, vt:$vC),
+ (vt (bitconvert (op (v4i32 (bitconvert vt:$vB)), (v4i32 (bitconvert vt:$vC)))))),
+ // DAG for the binary operation with a NOT applied to the result.
+ !if(!eq(vt, v4i32),
+ (vnot (op vt:$vB, vt:$vC)),
+ (vt (bitconvert (vnot (op (v4i32 (bitconvert vt:$vB)), (v4i32 (bitconvert vt:$vC))))))));
}
multiclass XXEvalVSelectWithXAnd<ValueType vt, bits<8> baseImm> {
@@ -2389,7 +2378,7 @@ multiclass XXEvalVSelectWithXXorCast<ValueType vt, bits<8> baseImm>{
// Instantiate XXEval patterns for all vector types
let Predicates = [HasP10Vector] in {
- let AddedComplexity = 500 in {
+ let AddedComplexity = 400 in {
// For types directly supported by XXEVAL (v4i32, v2i64)
foreach type = [v4i32, v2i64] in {
defm : XXEvalVSelectWithXAnd<type, 22>;
>From ffbbf9dde3a214e6b8a951f25e25343e2580e854 Mon Sep 17 00:00:00 2001
From: Tony Varghese <tony.varghese at ibm.com>
Date: Mon, 21 Jul 2025 13:53:52 +0000
Subject: [PATCH 3/3] Support only the vselect(A, X, and(B,C)) operations
---
llvm/lib/Target/PowerPC/PPCInstrP10.td | 403 ++++++++----------
.../CodeGen/PowerPC/xxeval-vselect-x-b.ll | 50 ++-
.../CodeGen/PowerPC/xxeval-vselect-x-c.ll | 50 ++-
.../CodeGen/PowerPC/xxeval-vselect-x-xor.ll | 74 +++-
4 files changed, 290 insertions(+), 287 deletions(-)
diff --git a/llvm/lib/Target/PowerPC/PPCInstrP10.td b/llvm/lib/Target/PowerPC/PPCInstrP10.td
index 19ef59cf50faa..4e314649ff192 100644
--- a/llvm/lib/Target/PowerPC/PPCInstrP10.td
+++ b/llvm/lib/Target/PowerPC/PPCInstrP10.td
@@ -2159,242 +2159,170 @@ let AddedComplexity = 400, Predicates = [IsISA3_1, HasVSX] in {
(COPY_TO_REGCLASS $VRB, VSRC), 2)))>;
}
- // Defines a pattern for the XXEVAL instruction with a specific value type,
- // pattern, and immediate.
-class XXEvalPattern <ValueType vt, dag pattern, bits<8> imm> :
- Pat<(vt pattern), (XXEVAL $vA, $vB, $vC, imm)> {}
-
- // Helper class to generate unary NOT patterns for vector types.
- // For v4i32, emits (vnot B) or (vnot C).
- // For other types, bitcasts operand to v4i32, applies vnot, then bitcasts back.
-class XXEvalUnaryNotPattern<ValueType vt> {
- dag vnotB = !if( !eq(vt, v4i32),
- (vnot vt:$vB),
- (vt (bitconvert (vnot (v4i32 (bitconvert vt:$vB)))))
- );
- dag vnotC = !if( !eq(vt, v4i32),
- (vnot vt:$vC),
- (vt (bitconvert (vnot (v4i32 (bitconvert vt:$vC)))))
- );
-}
-
- // Wrapper class for binary patterns with optional NOT on the result.
- // If 'not' is 0, emits the binary op; if 1, emits vnot of the binary op.
-class XXEvalBinaryPattern<ValueType vt, SDPatternOperator op, bit not = 0> {
- dag opPat = !if(!eq(not, 0),
- // DAG for the binary operation.
- !if(!eq(vt, v4i32),
- (op vt:$vB, vt:$vC),
- (vt (bitconvert (op (v4i32 (bitconvert vt:$vB)), (v4i32 (bitconvert vt:$vC)))))),
- // DAG for the binary operation with a NOT applied to the result.
- !if(!eq(vt, v4i32),
- (vnot (op vt:$vB, vt:$vC)),
- (vt (bitconvert (vnot (op (v4i32 (bitconvert vt:$vB)), (v4i32 (bitconvert vt:$vC))))))));
-}
-
-multiclass XXEvalVSelectWithXAnd<ValueType vt, bits<8> baseImm> {
- // Multiclass for ternary patterns of the form vselect(A, X, and(B, C)).
- // vselect(A, xor(B,C), and(B,C)) => imm = baseImm = 22
- def : XXEvalPattern<vt,
- (vselect vt:$vA, XXEvalBinaryPattern<vt, xor>.opPat, XXEvalBinaryPattern<vt, and>.opPat),
- baseImm>;
- // vselect(A, nor(B,C), and(B,C)) => imm = baseImm + 2 = 24
- def : XXEvalPattern<vt,
- (vselect vt:$vA, XXEvalBinaryPattern<vt, or, 1>.opPat, XXEvalBinaryPattern<vt, and>.opPat),
- !add(baseImm, 2)>;
- // vselect(A, eqv(B,C), and(B,C)) => imm = baseImm + 3 = 25
- def : XXEvalPattern<vt,
- (vselect vt:$vA, XXEvalBinaryPattern<vt, xor, 1>.opPat, XXEvalBinaryPattern<vt, and>.opPat),
- !add(baseImm, 3)>;
- // vselect(A, not(C), and(B,C)) => imm = baseImm + 4 = 26
- def : XXEvalPattern<vt,
- (vselect vt:$vA, XXEvalUnaryNotPattern<vt>.vnotC, XXEvalBinaryPattern<vt, and>.opPat),
- !add(baseImm, 4)>;
- // vselect(A, not(B), and(B,C)) => imm = baseImm + 6 = 28
- def : XXEvalPattern<vt,
- (vselect vt:$vA, XXEvalUnaryNotPattern<vt>.vnotB, XXEvalBinaryPattern<vt, and>.opPat),
- !add(baseImm, 6)>;
-}
-
-multiclass XXEvalVSelectWithXB<ValueType vt, bits<8> baseImm>{
- // Multiclass for ternary patterns of the form vselect(A, X, B).
- // vselect(A, and(B,C), B) => imm = baseImm = 49
- def : XXEvalPattern<vt,
- (vselect vt:$vA, XXEvalBinaryPattern<vt, and>.opPat, vt:$vB),
- baseImm>;
- // vselect(A, nor(B,C), B) => imm = baseImm + 7 = 56
- def : XXEvalPattern<vt,
- (vselect vt:$vA, XXEvalBinaryPattern<vt, or, 1>.opPat, vt:$vB),
- !add(baseImm, 7)>;
- // vselect(A, eqv(B,C), B) => imm = baseImm + 8 = 57
- def : XXEvalPattern<vt,
- (vselect vt:$vA, XXEvalBinaryPattern<vt, xor, 1>.opPat, vt:$vB),
- !add(baseImm, 8)>;
- // vselect(A, nand(B,C), B) => imm = baseImm + 13 = 62
- def : XXEvalPattern<vt,
- (vselect vt:$vA, XXEvalBinaryPattern<vt, and, 1>.opPat, vt:$vB),
- !add(baseImm, 13)>;
-}
-
-multiclass XXEvalVSelectWithXC<ValueType vt, bits<8> baseImm>{
- // Multiclass for ternary patterns of the form vselect(A, X, C).
- // vselect(A, and(B,C), C) => imm = baseImm = 81
- def : XXEvalPattern<vt,
- (vselect vt:$vA, XXEvalBinaryPattern<vt, and>.opPat, vt:$vC),
- baseImm>;
- // vselect(A, nor(B,C), C) => imm = baseImm + 7 = 88
- def : XXEvalPattern<vt,
- (vselect vt:$vA, XXEvalBinaryPattern<vt, or, 1>.opPat, vt:$vC),
- !add(baseImm, 7)>;
- // vselect(A, eqv(B,C), C) => imm = baseImm + 8 = 89
- def : XXEvalPattern<vt,
- (vselect vt:$vA, XXEvalBinaryPattern<vt, xor, 1>.opPat, vt:$vC),
- !add(baseImm, 8)>;
- // vselect(A, nand(B,C), C) => imm = baseImm + 13 = 94
- def : XXEvalPattern<vt,
- (vselect vt:$vA, XXEvalBinaryPattern<vt, and, 1>.opPat, vt:$vC),
- !add(baseImm, 13)>;
-}
-
-multiclass XXEvalVSelectWithXXor<ValueType vt, bits<8> baseImm>{
- // Multiclass for ternary patterns of the form vselect(A, X, xor(B,C)).
- // vselect(A, and(B,C), xor(B,C)) => imm = baseImm = 97
- def : XXEvalPattern<vt,
- (vselect vt:$vA, XXEvalBinaryPattern<vt, and>.opPat, XXEvalBinaryPattern<vt, xor>.opPat),
- baseImm>;
- // vselect(A, B, xor(B,C)) => imm = baseImm + 2 = 99
- def : XXEvalPattern<vt,
- (vselect vt:$vA, vt:$vB, XXEvalBinaryPattern<vt, xor>.opPat),
- !add(baseImm, 2)>;
- // vselect(A, C, xor(B,C)) => imm = baseImm + 4 = 101
- def : XXEvalPattern<vt,
- (vselect vt:$vA, vt:$vC, XXEvalBinaryPattern<vt, xor>.opPat),
- !add(baseImm, 4)>;
- // vselect(A, or(B,C), xor(B,C)) => imm = baseImm + 6 = 103
- def : XXEvalPattern<vt,
- (vselect vt:$vA, XXEvalBinaryPattern<vt, or>.opPat, XXEvalBinaryPattern<vt, xor>.opPat),
- !add(baseImm, 6)>;
- // vselect(A, nor(B,C), xor(B,C)) => imm = baseImm + 7 = 104
- def : XXEvalPattern<vt,
- (vselect vt:$vA, XXEvalBinaryPattern<vt, or, 1>.opPat, XXEvalBinaryPattern<vt, xor>.opPat),
- !add(baseImm, 7)>;
-}
-
- // Pattern class using COPY_TO_REGCLASS for type casting
-class XXEvalBitcastPattern<ValueType vt, dag pattern, bits<8> imm> :
- Pat<(vt pattern),
+ // =============================================================================
+ // XXEVAL Instruction Pattern Definitions
+ // =============================================================================
+ //
+ // The XXEVAL instruction performs a ternary bitwise logic operation on three
+ // vector operands, where the boolean function applied is selected by an
+ // 8-bit immediate value.
+ // XXEVAL has the form: xxeval XT,XA,XB,XC,IMM
+ // For example, the function A ? xor(B,C) : and(B,C) is performed when the IMM
+ // value is 22 (see the encoding sketch below).
+ //
+ // REGISTER CLASS CONSTRAINTS:
+ // - XXEVAL natively supports: VSRC register class [v4i32, v4f32, v2f64, v2i64]
+ // - Other vector types [v16i8, v8i16] require COPY_TO_REGCLASS to/from VRRC
+ //
+ // PATTERN STRATEGY:
+ // - XXEvalPattern: Direct patterns for VSRC-supported types
+ // - XXEvalVRRC: Patterns with register class conversion for VRRC types
+ // =============================================================================
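+
+ // Immediate encoding sketch (assuming IMM is read as an 8-bit truth table
+ // indexed by the bit triple (A,B,C), most-significant bit first):
+ //   A ? xor(B,C) : and(B,C) evaluates over (A,B,C) = 000,001,...,111 to
+ //   0,0,0,1,0,1,1,0, i.e. 0b00010110 = 22, which is the IMM used below.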
+
+ // Defines a pattern for the XXEVAL instruction with native VSRC register class support.
+ // Used for types that XXEVAL directly supports without register class conversion.
+class XXEvalPattern<ValueType Vt, dag Pattern, bits<8> Imm> :
+ Pat<(Vt Pattern), (XXEVAL $vA, $vB, $vC, Imm)> {}
+
+ // Defines a pattern for the XXEVAL instruction requiring VRRC→VSRC register class conversion.
+ // Used for vector types not natively supported by XXEVAL (v16i8, v8i16).
+ // Wraps inputs/outputs with COPY_TO_REGCLASS to handle register class mismatch.
+class XXEvalVRRC<ValueType Vt, dag Pattern, bits<8> Imm> :
+ Pat<(Vt Pattern),
(COPY_TO_REGCLASS
(XXEVAL
- (COPY_TO_REGCLASS vt:$vA, VSRC),
- (COPY_TO_REGCLASS vt:$vB, VSRC),
- (COPY_TO_REGCLASS vt:$vC, VSRC),
- imm),
+ (COPY_TO_REGCLASS Vt:$vA, VSRC),
+ (COPY_TO_REGCLASS Vt:$vB, VSRC),
+ (COPY_TO_REGCLASS Vt:$vC, VSRC),
+ Imm),
VRRC)>;
-multiclass XXEvalVSelectWithXAndCast<ValueType vt, bits<8> baseImm> {
- // Multiclass for ternary patterns using COPY_TO_REGCLASS for unsupported types
- // vselect(A, xor(B,C), and(B,C)) => imm = baseImm = 22
- def : XXEvalBitcastPattern<vt,
- (vselect vt:$vA, XXEvalBinaryPattern<vt, xor>.opPat, XXEvalBinaryPattern<vt, and>.opPat),
- baseImm>;
- // vselect(A, nor(B,C), and(B,C)) => imm = baseImm + 2 = 24
- def : XXEvalBitcastPattern<vt,
- (vselect vt:$vA, XXEvalBinaryPattern<vt, or, 1>.opPat, XXEvalBinaryPattern<vt, and>.opPat),
- !add(baseImm, 2)>;
- // vselect(A, eqv(B,C), and(B,C)) => imm = baseImm + 3 = 25
- def : XXEvalBitcastPattern<vt,
- (vselect vt:$vA, XXEvalBinaryPattern<vt, xor, 1>.opPat, XXEvalBinaryPattern<vt, and>.opPat),
- !add(baseImm, 3)>;
- // vselect(A, not(C), and(B,C)) => imm = baseImm + 4 = 26
- def : XXEvalBitcastPattern<vt,
- (vselect vt:$vA, XXEvalUnaryNotPattern<vt>.vnotC, XXEvalBinaryPattern<vt, and>.opPat),
- !add(baseImm, 4)>;
- // vselect(A, not(B), and(B,C)) => imm = baseImm + 6 = 28
- def : XXEvalBitcastPattern<vt,
- (vselect vt:$vA, XXEvalUnaryNotPattern<vt>.vnotB, XXEvalBinaryPattern<vt, and>.opPat),
- !add(baseImm, 6)>;
-}
-
-multiclass XXEvalVSelectWithXBCast<ValueType vt, bits<8> baseImm>{
- // vselect(A, and(B,C), B) => imm = baseImm = 49
- def : XXEvalBitcastPattern<vt,
- (vselect vt:$vA, XXEvalBinaryPattern<vt, and>.opPat, vt:$vB),
- baseImm>;
- // vselect(A, nor(B,C), B) => imm = baseImm + 7 = 56
- def : XXEvalBitcastPattern<vt,
- (vselect vt:$vA, XXEvalBinaryPattern<vt, or, 1>.opPat, vt:$vB),
- !add(baseImm, 7)>;
- // vselect(A, eqv(B,C), B) => imm = baseImm + 8 = 57
- def : XXEvalBitcastPattern<vt,
- (vselect vt:$vA, XXEvalBinaryPattern<vt, xor, 1>.opPat, vt:$vB),
- !add(baseImm, 8)>;
- // vselect(A, nand(B,C), B) => imm = baseImm + 13 = 62
- def : XXEvalBitcastPattern<vt,
- (vselect vt:$vA, XXEvalBinaryPattern<vt, and, 1>.opPat, vt:$vB),
- !add(baseImm, 13)>;
-}
-
-multiclass XXEvalVSelectWithXCCast<ValueType vt, bits<8> baseImm>{
- // vselect(A, and(B,C), C) => imm = baseImm = 81
- def : XXEvalBitcastPattern<vt,
- (vselect vt:$vA, XXEvalBinaryPattern<vt, and>.opPat, vt:$vC),
- baseImm>;
- // vselect(A, nor(B,C), C) => imm = baseImm + 7 = 88
- def : XXEvalBitcastPattern<vt,
- (vselect vt:$vA, XXEvalBinaryPattern<vt, or, 1>.opPat, vt:$vC),
- !add(baseImm, 7)>;
- // vselect(A, eqv(B,C), C) => imm = baseImm + 8 = 89
- def : XXEvalBitcastPattern<vt,
- (vselect vt:$vA, XXEvalBinaryPattern<vt, xor, 1>.opPat, vt:$vC),
- !add(baseImm, 8)>;
- // vselect(A, nand(B,C), C) => imm = baseImm + 13 = 94
- def : XXEvalBitcastPattern<vt,
- (vselect vt:$vA, XXEvalBinaryPattern<vt, and, 1>.opPat, vt:$vC),
- !add(baseImm, 13)>;
-}
-
-multiclass XXEvalVSelectWithXXorCast<ValueType vt, bits<8> baseImm>{
- // vselect(A, and(B,C), xor(B,C)) => imm = baseImm = 97
- def : XXEvalBitcastPattern<vt,
- (vselect vt:$vA, XXEvalBinaryPattern<vt, and>.opPat, XXEvalBinaryPattern<vt, xor>.opPat),
- baseImm>;
- // vselect(A, B, xor(B,C)) => imm = baseImm + 2 = 99
- def : XXEvalBitcastPattern<vt,
- (vselect vt:$vA, vt:$vB, XXEvalBinaryPattern<vt, xor>.opPat),
- !add(baseImm, 2)>;
- // vselect(A, C, xor(B,C)) => imm = baseImm + 4 = 101
- def : XXEvalBitcastPattern<vt,
- (vselect vt:$vA, vt:$vC, XXEvalBinaryPattern<vt, xor>.opPat),
- !add(baseImm, 4)>;
- // vselect(A, or(B,C), xor(B,C)) => imm = baseImm + 6 = 103
- def : XXEvalBitcastPattern<vt,
- (vselect vt:$vA, XXEvalBinaryPattern<vt, or>.opPat, XXEvalBinaryPattern<vt, xor>.opPat),
- !add(baseImm, 6)>;
- // vselect(A, nor(B,C), xor(B,C)) => imm = baseImm + 7 = 104
- def : XXEvalBitcastPattern<vt,
- (vselect vt:$vA, XXEvalBinaryPattern<vt, or, 1>.opPat, XXEvalBinaryPattern<vt, xor>.opPat),
- !add(baseImm, 7)>;
-}
-
-// Instantiate XXEval patterns for all vector types
-let Predicates = [HasP10Vector] in {
- let AddedComplexity = 400 in {
- // For types directly supported by XXEVAL (v4i32, v2i64)
- foreach type = [v4i32, v2i64] in {
- defm : XXEvalVSelectWithXAnd<type, 22>;
- defm : XXEvalVSelectWithXB<type, 49>;
- defm : XXEvalVSelectWithXC<type, 81>;
- defm : XXEvalVSelectWithXXor<type, 97>;
- }
-
- // For types that need COPY_TO_REGCLASS (v8i16, v16i8)
- foreach type = [v8i16, v16i8] in {
- defm : XXEvalVSelectWithXAndCast<type, 22>;
- defm : XXEvalVSelectWithXBCast<type, 49>;
- defm : XXEvalVSelectWithXCCast<type, 81>;
- defm : XXEvalVSelectWithXXorCast<type, 97>;
- }
- }
+ // =============================================================================
+ // Helper Classes for Type-Aware Operation Generation
+ // =============================================================================
+ //
+ // These helpers abstract the complexity of handling both XXEVAL-native types
+ // (v4i32, v2i64) and non-native types (v8i16, v16i8) that require bitcasting.
+ //
+ // BITCASTING STRATEGY:
+ // - For v4i32: Use operation directly (no bitcast needed)
+ // - For other types: bitcast → v4i32 → operation → bitcast back to original type
+ // =============================================================================
+
+ // Generates bitcast-aware unary NOT operations for any vector type.
+ // Handles the type conversion complexity transparently.
+ //
+ // USAGE: XXEvalNot<v8i16>.B generates vnot for $vB operand of type v8i16
+ // XXEvalNot<v8i16>.C generates vnot for $vC operand of type v8i16
+class XXEvalNot<ValueType Vt> {
+ // NOT operation on $vB operand, with type-appropriate bitcasting
+ dag B = !if(!eq(Vt, v4i32),
+ (vnot Vt:$vB), // Direct: v4i32 native
+ (Vt (bitconvert (vnot (v4i32 (bitconvert Vt:$vB)))))); // Bitcast: other types
+
+ // NOT operation on $vC operand, with type-appropriate bitcasting
+ dag C = !if(!eq(Vt, v4i32),
+ (vnot Vt:$vC), // Direct: v4i32 native
+ (Vt (bitconvert (vnot (v4i32 (bitconvert Vt:$vC)))))); // Bitcast: other types
+}
+
+ // Generates bitcast-aware binary operations (and, or, xor) for any vector type.
+ // Supports optional logical inversion of the result (for NOR, EQV operations).
+ //
+ // PARAMETERS:
+ // Vt: Vector type (v4i32, v8i16, v16i8, v2i64)
+ // Op: Binary operation (and, or, xor)
+ // Negate: 0=direct operation, 1=NOT(operation) for NOR/EQV patterns
+ //
+ // USAGE: XXEvalBinOp<v8i16, xor>.pattern // XOR of two v8i16 operands
+ // XXEvalBinOp<v8i16, or, 1>.pattern // NOR of two v8i16 operands (or + not)
+class XXEvalBinOp<ValueType Vt, SDPatternOperator Op, bit Negate = 0> {
+ dag pattern = !if(!eq(Negate, 0),
+ // Direct binary operation (and, or, xor)
+ !if(!eq(Vt, v4i32),
+ (Op Vt:$vB, Vt:$vC), // Direct: v4i32 native
+ (Vt (bitconvert (Op (v4i32 (bitconvert Vt:$vB)), // Bitcast: other types
+ (v4i32 (bitconvert Vt:$vC)))))),
+ // Inverted binary operation (nor, eqv)
+ !if(!eq(Vt, v4i32),
+ (vnot (Op Vt:$vB, Vt:$vC)), // Direct: v4i32 native
+ (Vt (bitconvert (vnot (Op (v4i32 (bitconvert Vt:$vB)), // Bitcast: other types
+ (v4i32 (bitconvert Vt:$vC))))))));
+}
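+
+ // For instance, XXEvalBinOp<v8i16, or, 1>.pattern (a NOR of the B and C
+ // operands for a non-native type) expands to:
+ //   (v8i16 (bitconvert (vnot (or (v4i32 (bitconvert v8i16:$vB)),
+ //                                (v4i32 (bitconvert v8i16:$vC))))))
+ // whereas XXEvalBinOp<v4i32, or, 1>.pattern is simply
+ //   (vnot (or v4i32:$vB, v4i32:$vC)).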
+
+ // =============================================================================
+ // XXEVAL Ternary Pattern Multiclasses
+ // =============================================================================
+ //
+ // These multiclasses generate patterns for XXEVAL instructions that implement
+ // complex ternary boolean functions of the form: vselect(A, f(B,C), g(B,C))
+ //
+ // The specific immediate values correspond to PowerPC XXEVAL instruction
+ // encodings for various boolean functions.
+ // =============================================================================
+
+ // Generates XXEVAL patterns for types with native VSRC register class support.
+ // Implements: vselect(A, <various functions of B,C>, and(B,C))
+ //
+ // SUPPORTED TYPES: v4i32, v2i64, v4f32, v2f64 (VSRC register class)
+ // IMMEDIATE ENCODING: BaseImm + offset determines the boolean function
+multiclass XXEvalXAnd<ValueType Vt, bits<8> BaseImm> {
+ // vselect(A, xor(B,C), and(B,C)) => Imm Value 22
+ def : XXEvalPattern<Vt,
+ (vselect Vt:$vA, XXEvalBinOp<Vt, xor>.pattern, XXEvalBinOp<Vt, and>.pattern),
+ BaseImm>;
+
+ // vselect(A, nor(B,C), and(B,C)) => Imm Value 24
+ def : XXEvalPattern<Vt,
+ (vselect Vt:$vA, XXEvalBinOp<Vt, or, 1>.pattern, XXEvalBinOp<Vt, and>.pattern),
+ !add(BaseImm, 2)>;
+
+ // vselect(A, eqv(B,C), and(B,C)) => Imm Value 25
+ // EQV = NOT(XOR) = equivalence operation
+ def : XXEvalPattern<Vt,
+ (vselect Vt:$vA, XXEvalBinOp<Vt, xor, 1>.pattern, XXEvalBinOp<Vt, and>.pattern),
+ !add(BaseImm, 3)>;
+
+ // vselect(A, not(C), and(B,C)) => Imm Value 26
+ def : XXEvalPattern<Vt,
+ (vselect Vt:$vA, XXEvalNot<Vt>.C, XXEvalBinOp<Vt, and>.pattern),
+ !add(BaseImm, 4)>;
+
+ // vselect(A, not(B), and(B,C)) => Imm Value 28
+ def : XXEvalPattern<Vt,
+ (vselect Vt:$vA, XXEvalNot<Vt>.B, XXEvalBinOp<Vt, and>.pattern),
+ !add(BaseImm, 6)>;
+}
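+
+ // Offset sketch (under the same truth-table reading of IMM as above):
+ // vselect(A, not(B), and(B,C)) evaluates over (A,B,C) = 000,001,...,111 to
+ // 0,0,0,1,1,1,0,0, i.e. 0b00011100 = 28 = BaseImm + 6, matching the
+ // !add(BaseImm, 6) offset used for that pattern.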
+
+ // Generates XXEVAL patterns for types requiring VRRC register class conversion.
+ // Identical boolean functions to XXEvalXAnd, but with register class handling.
+ //
+ // SUPPORTED TYPES: v8i16, v16i8 (VRRC register class)
+ // REGISTER CONVERSION: VRRC → VSRC → XXEVAL → VRRC
+ // IMMEDIATE ENCODING: Same as XXEvalXAnd (BaseImm + offset)
+multiclass XXEvalXAndVRRC<ValueType Vt, bits<8> BaseImm> {
+ // vselect(A, xor(B,C), and(B,C)) => Imm Value 22
+ def : XXEvalVRRC<Vt,
+ (vselect Vt:$vA, XXEvalBinOp<Vt, xor>.pattern, XXEvalBinOp<Vt, and>.pattern),
+ BaseImm>;
+
+ // vselect(A, nor(B,C), and(B,C)) => Imm Value 24
+ def : XXEvalVRRC<Vt,
+ (vselect Vt:$vA, XXEvalBinOp<Vt, or, 1>.pattern, XXEvalBinOp<Vt, and>.pattern),
+ !add(BaseImm, 2)>;
+
+ // vselect(A, eqv(B,C), and(B,C)) => Imm Value 25
+ def : XXEvalVRRC<Vt,
+ (vselect Vt:$vA, XXEvalBinOp<Vt, xor, 1>.pattern, XXEvalBinOp<Vt, and>.pattern),
+ !add(BaseImm, 3)>;
+
+ // vselect(A, not(C), and(B,C)) => Imm Value 26
+ def : XXEvalVRRC<Vt,
+ (vselect Vt:$vA, XXEvalNot<Vt>.C, XXEvalBinOp<Vt, and>.pattern),
+ !add(BaseImm, 4)>;
+
+ // vselect(A, not(B), and(B,C)) => Imm Value 28
+ def : XXEvalVRRC<Vt,
+ (vselect Vt:$vA, XXEvalNot<Vt>.B, XXEvalBinOp<Vt, and>.pattern),
+ !add(BaseImm, 6)>;
}
let Predicates = [PrefixInstrs, HasP10Vector] in {
@@ -2505,6 +2433,17 @@ let Predicates = [PrefixInstrs, HasP10Vector] in {
// (xor A, (or B, C))
def : XXEvalPattern<v4i32, (xor v4i32:$vA, (or v4i32:$vB, v4i32:$vC)), 120>;
+ // Add XXEVAL patterns for ternary operations.
+ // For VSRC-native types (direct XXEVAL support)
+ foreach Ty = [v4i32, v2i64] in {
+ defm : XXEvalXAnd<Ty, 22>;
+ }
+
+ // For VRRC types (requiring register class conversion)
+ foreach Ty = [v8i16, v16i8] in {
+ defm : XXEvalXAndVRRC<Ty, 22>;
+ }
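+
+  // With BaseImm = 22, each defm above instantiates the five
+  // vselect(A, X, and(B,C)) patterns with immediates 22, 24, 25, 26 and 28
+  // for the given type.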
+
// Anonymous patterns to select prefixed VSX loads and stores.
// Load / Store f128
def : Pat<(f128 (load PDForm:$src)),
diff --git a/llvm/test/CodeGen/PowerPC/xxeval-vselect-x-b.ll b/llvm/test/CodeGen/PowerPC/xxeval-vselect-x-b.ll
index 8fd2453266706..c366fd5f0a8c2 100644
--- a/llvm/test/CodeGen/PowerPC/xxeval-vselect-x-b.ll
+++ b/llvm/test/CodeGen/PowerPC/xxeval-vselect-x-b.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; Test file to verify the emission of Vector Evaluate instructions when ternary operators are used.
+; Test file to verify the emission of Vector Selection instructions when ternary operators are used.
; RUN: llc -verify-machineinstrs -mcpu=pwr10 -mtriple=powerpc64le-unknown-unknown \
; RUN: -ppc-asm-full-reg-names --ppc-vsr-nums-as-vr < %s | FileCheck %s
@@ -15,9 +15,10 @@ define <4 x i32> @ternary_A_and_BC_B_4x32(<4 x i1> %A, <4 x i32> %B, <4 x i32> %
; CHECK-LABEL: ternary_A_and_BC_B_4x32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxleqv v5, v5, v5
+; CHECK-NEXT: xxland vs0, v3, v4
; CHECK-NEXT: vslw v2, v2, v5
; CHECK-NEXT: vsraw v2, v2, v5
-; CHECK-NEXT: xxeval v2, v2, v3, v4, 49
+; CHECK-NEXT: xxsel v2, v3, vs0, v2
; CHECK-NEXT: blr
entry:
%and = and <4 x i32> %B, %C
@@ -30,10 +31,11 @@ define <2 x i64> @ternary_A_and_BC_B_2x64(<2 x i1> %A, <2 x i64> %B, <2 x i64> %
; CHECK-LABEL: ternary_A_and_BC_B_2x64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxlxor v5, v5, v5
+; CHECK-NEXT: xxland vs0, v3, v4
; CHECK-NEXT: xxsplti32dx v5, 1, 63
; CHECK-NEXT: vsld v2, v2, v5
; CHECK-NEXT: vsrad v2, v2, v5
-; CHECK-NEXT: xxeval v2, v2, v3, v4, 49
+; CHECK-NEXT: xxsel v2, v3, vs0, v2
; CHECK-NEXT: blr
entry:
%and = and <2 x i64> %B, %C
@@ -46,9 +48,10 @@ define <16 x i8> @ternary_A_and_BC_B_16x8(<16 x i1> %A, <16 x i8> %B, <16 x i8>
; CHECK-LABEL: ternary_A_and_BC_B_16x8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltib v5, 7
+; CHECK-NEXT: xxland vs0, v3, v4
; CHECK-NEXT: vslb v2, v2, v5
; CHECK-NEXT: vsrab v2, v2, v5
-; CHECK-NEXT: xxeval v2, v2, v3, v4, 49
+; CHECK-NEXT: xxsel v2, v3, vs0, v2
; CHECK-NEXT: blr
entry:
%and = and <16 x i8> %B, %C
@@ -61,9 +64,10 @@ define <8 x i16> @ternary_A_and_BC_B_8x16(<8 x i1> %A, <8 x i16> %B, <8 x i16> %
; CHECK-LABEL: ternary_A_and_BC_B_8x16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltiw v5, 983055
+; CHECK-NEXT: xxland vs0, v3, v4
; CHECK-NEXT: vslh v2, v2, v5
; CHECK-NEXT: vsrah v2, v2, v5
-; CHECK-NEXT: xxeval v2, v2, v3, v4, 49
+; CHECK-NEXT: xxsel v2, v3, vs0, v2
; CHECK-NEXT: blr
entry:
%and = and <8 x i16> %B, %C
@@ -76,9 +80,10 @@ define <4 x i32> @ternary_A_nor_BC_B_4x32(<4 x i1> %A, <4 x i32> %B, <4 x i32> %
; CHECK-LABEL: ternary_A_nor_BC_B_4x32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxleqv v5, v5, v5
+; CHECK-NEXT: xxlnor vs0, v3, v4
; CHECK-NEXT: vslw v2, v2, v5
; CHECK-NEXT: vsraw v2, v2, v5
-; CHECK-NEXT: xxeval v2, v2, v3, v4, 56
+; CHECK-NEXT: xxsel v2, v3, vs0, v2
; CHECK-NEXT: blr
entry:
%or = or <4 x i32> %B, %C
@@ -92,10 +97,11 @@ define <2 x i64> @ternary_A_nor_BC_B_2x64(<2 x i1> %A, <2 x i64> %B, <2 x i64> %
; CHECK-LABEL: ternary_A_nor_BC_B_2x64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxlxor v5, v5, v5
+; CHECK-NEXT: xxlnor vs0, v3, v4
; CHECK-NEXT: xxsplti32dx v5, 1, 63
; CHECK-NEXT: vsld v2, v2, v5
; CHECK-NEXT: vsrad v2, v2, v5
-; CHECK-NEXT: xxeval v2, v2, v3, v4, 56
+; CHECK-NEXT: xxsel v2, v3, vs0, v2
; CHECK-NEXT: blr
entry:
%or = or <2 x i64> %B, %C
@@ -109,9 +115,10 @@ define <16 x i8> @ternary_A_nor_BC_B_16x8(<16 x i1> %A, <16 x i8> %B, <16 x i8>
; CHECK-LABEL: ternary_A_nor_BC_B_16x8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltib v5, 7
+; CHECK-NEXT: xxlnor vs0, v3, v4
; CHECK-NEXT: vslb v2, v2, v5
; CHECK-NEXT: vsrab v2, v2, v5
-; CHECK-NEXT: xxeval v2, v2, v3, v4, 56
+; CHECK-NEXT: xxsel v2, v3, vs0, v2
; CHECK-NEXT: blr
entry:
%or = or <16 x i8> %B, %C
@@ -125,9 +132,10 @@ define <8 x i16> @ternary_A_nor_BC_B_8x16(<8 x i1> %A, <8 x i16> %B, <8 x i16> %
; CHECK-LABEL: ternary_A_nor_BC_B_8x16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltiw v5, 983055
+; CHECK-NEXT: xxlnor vs0, v3, v4
; CHECK-NEXT: vslh v2, v2, v5
; CHECK-NEXT: vsrah v2, v2, v5
-; CHECK-NEXT: xxeval v2, v2, v3, v4, 56
+; CHECK-NEXT: xxsel v2, v3, vs0, v2
; CHECK-NEXT: blr
entry:
%or = or <8 x i16> %B, %C
@@ -141,9 +149,10 @@ define <4 x i32> @ternary_A_eqv_BC_B_4x32(<4 x i1> %A, <4 x i32> %B, <4 x i32> %
; CHECK-LABEL: ternary_A_eqv_BC_B_4x32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxleqv v5, v5, v5
+; CHECK-NEXT: xxleqv vs0, v3, v4
; CHECK-NEXT: vslw v2, v2, v5
; CHECK-NEXT: vsraw v2, v2, v5
-; CHECK-NEXT: xxeval v2, v2, v3, v4, 57
+; CHECK-NEXT: xxsel v2, v3, vs0, v2
; CHECK-NEXT: blr
entry:
%xor = xor <4 x i32> %B, %C
@@ -157,10 +166,11 @@ define <2 x i64> @ternary_A_eqv_BC_B_2x64(<2 x i1> %A, <2 x i64> %B, <2 x i64> %
; CHECK-LABEL: ternary_A_eqv_BC_B_2x64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxlxor v5, v5, v5
+; CHECK-NEXT: xxleqv vs0, v3, v4
; CHECK-NEXT: xxsplti32dx v5, 1, 63
; CHECK-NEXT: vsld v2, v2, v5
; CHECK-NEXT: vsrad v2, v2, v5
-; CHECK-NEXT: xxeval v2, v2, v3, v4, 57
+; CHECK-NEXT: xxsel v2, v3, vs0, v2
; CHECK-NEXT: blr
entry:
%xor = xor <2 x i64> %B, %C
@@ -174,9 +184,10 @@ define <16 x i8> @ternary_A_eqv_BC_B_16x8(<16 x i1> %A, <16 x i8> %B, <16 x i8>
; CHECK-LABEL: ternary_A_eqv_BC_B_16x8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltib v5, 7
+; CHECK-NEXT: xxleqv vs0, v3, v4
; CHECK-NEXT: vslb v2, v2, v5
; CHECK-NEXT: vsrab v2, v2, v5
-; CHECK-NEXT: xxeval v2, v2, v3, v4, 57
+; CHECK-NEXT: xxsel v2, v3, vs0, v2
; CHECK-NEXT: blr
entry:
%xor = xor <16 x i8> %B, %C
@@ -190,9 +201,10 @@ define <8 x i16> @ternary_A_eqv_BC_B_8x16(<8 x i1> %A, <8 x i16> %B, <8 x i16> %
; CHECK-LABEL: ternary_A_eqv_BC_B_8x16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltiw v5, 983055
+; CHECK-NEXT: xxleqv vs0, v3, v4
; CHECK-NEXT: vslh v2, v2, v5
; CHECK-NEXT: vsrah v2, v2, v5
-; CHECK-NEXT: xxeval v2, v2, v3, v4, 57
+; CHECK-NEXT: xxsel v2, v3, vs0, v2
; CHECK-NEXT: blr
entry:
%xor = xor <8 x i16> %B, %C
@@ -206,9 +218,10 @@ define <4 x i32> @ternary_A_nand_BC_B_4x32(<4 x i1> %A, <4 x i32> %B, <4 x i32>
; CHECK-LABEL: ternary_A_nand_BC_B_4x32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxleqv v5, v5, v5
+; CHECK-NEXT: xxlnand vs0, v3, v4
; CHECK-NEXT: vslw v2, v2, v5
; CHECK-NEXT: vsraw v2, v2, v5
-; CHECK-NEXT: xxeval v2, v2, v3, v4, 62
+; CHECK-NEXT: xxsel v2, v3, vs0, v2
; CHECK-NEXT: blr
entry:
%and = and <4 x i32> %B, %C
@@ -222,10 +235,11 @@ define <2 x i64> @ternary_A_nand_BC_B_2x64(<2 x i1> %A, <2 x i64> %B, <2 x i64>
; CHECK-LABEL: ternary_A_nand_BC_B_2x64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxlxor v5, v5, v5
+; CHECK-NEXT: xxlnand vs0, v3, v4
; CHECK-NEXT: xxsplti32dx v5, 1, 63
; CHECK-NEXT: vsld v2, v2, v5
; CHECK-NEXT: vsrad v2, v2, v5
-; CHECK-NEXT: xxeval v2, v2, v3, v4, 62
+; CHECK-NEXT: xxsel v2, v3, vs0, v2
; CHECK-NEXT: blr
entry:
%and = and <2 x i64> %B, %C
@@ -239,9 +253,10 @@ define <16 x i8> @ternary_A_nand_BC_B_16x8(<16 x i1> %A, <16 x i8> %B, <16 x i8>
; CHECK-LABEL: ternary_A_nand_BC_B_16x8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltib v5, 7
+; CHECK-NEXT: xxlnand vs0, v3, v4
; CHECK-NEXT: vslb v2, v2, v5
; CHECK-NEXT: vsrab v2, v2, v5
-; CHECK-NEXT: xxeval v2, v2, v3, v4, 62
+; CHECK-NEXT: xxsel v2, v3, vs0, v2
; CHECK-NEXT: blr
entry:
%and = and <16 x i8> %B, %C
@@ -255,9 +270,10 @@ define <8 x i16> @ternary_A_nand_BC_B_8x16(<8 x i1> %A, <8 x i16> %B, <8 x i16>
; CHECK-LABEL: ternary_A_nand_BC_B_8x16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltiw v5, 983055
+; CHECK-NEXT: xxlnand vs0, v3, v4
; CHECK-NEXT: vslh v2, v2, v5
; CHECK-NEXT: vsrah v2, v2, v5
-; CHECK-NEXT: xxeval v2, v2, v3, v4, 62
+; CHECK-NEXT: xxsel v2, v3, vs0, v2
; CHECK-NEXT: blr
entry:
%and = and <8 x i16> %B, %C
diff --git a/llvm/test/CodeGen/PowerPC/xxeval-vselect-x-c.ll b/llvm/test/CodeGen/PowerPC/xxeval-vselect-x-c.ll
index c25288df78af6..f70f1d093f069 100644
--- a/llvm/test/CodeGen/PowerPC/xxeval-vselect-x-c.ll
+++ b/llvm/test/CodeGen/PowerPC/xxeval-vselect-x-c.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; Test file to verify the emission of Vector Evaluate instructions when ternary operators are used.
+; Test file to verify the emission of Vector Selection instructions when ternary operators are used.
; RUN: llc -verify-machineinstrs -mcpu=pwr10 -mtriple=powerpc64le-unknown-unknown \
; RUN: -ppc-asm-full-reg-names --ppc-vsr-nums-as-vr < %s | FileCheck %s
@@ -15,9 +15,10 @@ define <4 x i32> @ternary_A_and_BC_C_4x32(<4 x i1> %A, <4 x i32> %B, <4 x i32> %
; CHECK-LABEL: ternary_A_and_BC_C_4x32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxleqv v5, v5, v5
+; CHECK-NEXT: xxland vs0, v3, v4
; CHECK-NEXT: vslw v2, v2, v5
; CHECK-NEXT: vsraw v2, v2, v5
-; CHECK-NEXT: xxeval v2, v2, v3, v4, 81
+; CHECK-NEXT: xxsel v2, v4, vs0, v2
; CHECK-NEXT: blr
entry:
%and = and <4 x i32> %B, %C
@@ -30,10 +31,11 @@ define <2 x i64> @ternary_A_and_BC_C_2x64(<2 x i1> %A, <2 x i64> %B, <2 x i64> %
; CHECK-LABEL: ternary_A_and_BC_C_2x64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxlxor v5, v5, v5
+; CHECK-NEXT: xxland vs0, v3, v4
; CHECK-NEXT: xxsplti32dx v5, 1, 63
; CHECK-NEXT: vsld v2, v2, v5
; CHECK-NEXT: vsrad v2, v2, v5
-; CHECK-NEXT: xxeval v2, v2, v3, v4, 81
+; CHECK-NEXT: xxsel v2, v4, vs0, v2
; CHECK-NEXT: blr
entry:
%and = and <2 x i64> %B, %C
@@ -46,9 +48,10 @@ define <16 x i8> @ternary_A_and_BC_C_16x8(<16 x i1> %A, <16 x i8> %B, <16 x i8>
; CHECK-LABEL: ternary_A_and_BC_C_16x8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltib v5, 7
+; CHECK-NEXT: xxland vs0, v3, v4
; CHECK-NEXT: vslb v2, v2, v5
; CHECK-NEXT: vsrab v2, v2, v5
-; CHECK-NEXT: xxeval v2, v2, v3, v4, 81
+; CHECK-NEXT: xxsel v2, v4, vs0, v2
; CHECK-NEXT: blr
entry:
%and = and <16 x i8> %B, %C
@@ -61,9 +64,10 @@ define <8 x i16> @ternary_A_and_BC_C_8x16(<8 x i1> %A, <8 x i16> %B, <8 x i16> %
; CHECK-LABEL: ternary_A_and_BC_C_8x16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltiw v5, 983055
+; CHECK-NEXT: xxland vs0, v3, v4
; CHECK-NEXT: vslh v2, v2, v5
; CHECK-NEXT: vsrah v2, v2, v5
-; CHECK-NEXT: xxeval v2, v2, v3, v4, 81
+; CHECK-NEXT: xxsel v2, v4, vs0, v2
; CHECK-NEXT: blr
entry:
%and = and <8 x i16> %B, %C
@@ -76,9 +80,10 @@ define <4 x i32> @ternary_A_nor_BC_C_4x32(<4 x i1> %A, <4 x i32> %B, <4 x i32> %
; CHECK-LABEL: ternary_A_nor_BC_C_4x32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxleqv v5, v5, v5
+; CHECK-NEXT: xxlnor vs0, v3, v4
; CHECK-NEXT: vslw v2, v2, v5
; CHECK-NEXT: vsraw v2, v2, v5
-; CHECK-NEXT: xxeval v2, v2, v3, v4, 88
+; CHECK-NEXT: xxsel v2, v4, vs0, v2
; CHECK-NEXT: blr
entry:
%or = or <4 x i32> %B, %C
@@ -92,10 +97,11 @@ define <2 x i64> @ternary_A_nor_BC_C_2x64(<2 x i1> %A, <2 x i64> %B, <2 x i64> %
; CHECK-LABEL: ternary_A_nor_BC_C_2x64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxlxor v5, v5, v5
+; CHECK-NEXT: xxlnor vs0, v3, v4
; CHECK-NEXT: xxsplti32dx v5, 1, 63
; CHECK-NEXT: vsld v2, v2, v5
; CHECK-NEXT: vsrad v2, v2, v5
-; CHECK-NEXT: xxeval v2, v2, v3, v4, 88
+; CHECK-NEXT: xxsel v2, v4, vs0, v2
; CHECK-NEXT: blr
entry:
%or = or <2 x i64> %B, %C
@@ -109,9 +115,10 @@ define <16 x i8> @ternary_A_nor_BC_C_16x8(<16 x i1> %A, <16 x i8> %B, <16 x i8>
; CHECK-LABEL: ternary_A_nor_BC_C_16x8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltib v5, 7
+; CHECK-NEXT: xxlnor vs0, v3, v4
; CHECK-NEXT: vslb v2, v2, v5
; CHECK-NEXT: vsrab v2, v2, v5
-; CHECK-NEXT: xxeval v2, v2, v3, v4, 88
+; CHECK-NEXT: xxsel v2, v4, vs0, v2
; CHECK-NEXT: blr
entry:
%or = or <16 x i8> %B, %C
@@ -125,9 +132,10 @@ define <8 x i16> @ternary_A_nor_BC_C_8x16(<8 x i1> %A, <8 x i16> %B, <8 x i16> %
; CHECK-LABEL: ternary_A_nor_BC_C_8x16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltiw v5, 983055
+; CHECK-NEXT: xxlnor vs0, v3, v4
; CHECK-NEXT: vslh v2, v2, v5
; CHECK-NEXT: vsrah v2, v2, v5
-; CHECK-NEXT: xxeval v2, v2, v3, v4, 88
+; CHECK-NEXT: xxsel v2, v4, vs0, v2
; CHECK-NEXT: blr
entry:
%or = or <8 x i16> %B, %C
@@ -141,9 +149,10 @@ define <4 x i32> @ternary_A_eqv_BC_C_4x32(<4 x i1> %A, <4 x i32> %B, <4 x i32> %
; CHECK-LABEL: ternary_A_eqv_BC_C_4x32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxleqv v5, v5, v5
+; CHECK-NEXT: xxleqv vs0, v3, v4
; CHECK-NEXT: vslw v2, v2, v5
; CHECK-NEXT: vsraw v2, v2, v5
-; CHECK-NEXT: xxeval v2, v2, v3, v4, 89
+; CHECK-NEXT: xxsel v2, v4, vs0, v2
; CHECK-NEXT: blr
entry:
%xor = xor <4 x i32> %B, %C
@@ -157,10 +166,11 @@ define <2 x i64> @ternary_A_eqv_BC_C_2x64(<2 x i1> %A, <2 x i64> %B, <2 x i64> %
; CHECK-LABEL: ternary_A_eqv_BC_C_2x64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxlxor v5, v5, v5
+; CHECK-NEXT: xxleqv vs0, v3, v4
; CHECK-NEXT: xxsplti32dx v5, 1, 63
; CHECK-NEXT: vsld v2, v2, v5
; CHECK-NEXT: vsrad v2, v2, v5
-; CHECK-NEXT: xxeval v2, v2, v3, v4, 89
+; CHECK-NEXT: xxsel v2, v4, vs0, v2
; CHECK-NEXT: blr
entry:
%xor = xor <2 x i64> %B, %C
@@ -174,9 +184,10 @@ define <16 x i8> @ternary_A_eqv_BC_C_16x8(<16 x i1> %A, <16 x i8> %B, <16 x i8>
; CHECK-LABEL: ternary_A_eqv_BC_C_16x8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltib v5, 7
+; CHECK-NEXT: xxleqv vs0, v3, v4
; CHECK-NEXT: vslb v2, v2, v5
; CHECK-NEXT: vsrab v2, v2, v5
-; CHECK-NEXT: xxeval v2, v2, v3, v4, 89
+; CHECK-NEXT: xxsel v2, v4, vs0, v2
; CHECK-NEXT: blr
entry:
%xor = xor <16 x i8> %B, %C
@@ -190,9 +201,10 @@ define <8 x i16> @ternary_A_eqv_BC_C_8x16(<8 x i1> %A, <8 x i16> %B, <8 x i16> %
; CHECK-LABEL: ternary_A_eqv_BC_C_8x16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltiw v5, 983055
+; CHECK-NEXT: xxleqv vs0, v3, v4
; CHECK-NEXT: vslh v2, v2, v5
; CHECK-NEXT: vsrah v2, v2, v5
-; CHECK-NEXT: xxeval v2, v2, v3, v4, 89
+; CHECK-NEXT: xxsel v2, v4, vs0, v2
; CHECK-NEXT: blr
entry:
%xor = xor <8 x i16> %B, %C
@@ -206,9 +218,10 @@ define <4 x i32> @ternary_A_nand_BC_C_4x32(<4 x i1> %A, <4 x i32> %B, <4 x i32>
; CHECK-LABEL: ternary_A_nand_BC_C_4x32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxleqv v5, v5, v5
+; CHECK-NEXT: xxlnand vs0, v3, v4
; CHECK-NEXT: vslw v2, v2, v5
; CHECK-NEXT: vsraw v2, v2, v5
-; CHECK-NEXT: xxeval v2, v2, v3, v4, 94
+; CHECK-NEXT: xxsel v2, v4, vs0, v2
; CHECK-NEXT: blr
entry:
%and = and <4 x i32> %B, %C
@@ -222,10 +235,11 @@ define <2 x i64> @ternary_A_nand_BC_C_2x64(<2 x i1> %A, <2 x i64> %B, <2 x i64>
; CHECK-LABEL: ternary_A_nand_BC_C_2x64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxlxor v5, v5, v5
+; CHECK-NEXT: xxlnand vs0, v3, v4
; CHECK-NEXT: xxsplti32dx v5, 1, 63
; CHECK-NEXT: vsld v2, v2, v5
; CHECK-NEXT: vsrad v2, v2, v5
-; CHECK-NEXT: xxeval v2, v2, v3, v4, 94
+; CHECK-NEXT: xxsel v2, v4, vs0, v2
; CHECK-NEXT: blr
entry:
%and = and <2 x i64> %B, %C
@@ -239,9 +253,10 @@ define <16 x i8> @ternary_A_nand_BC_C_16x8(<16 x i1> %A, <16 x i8> %B, <16 x i8>
; CHECK-LABEL: ternary_A_nand_BC_C_16x8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltib v5, 7
+; CHECK-NEXT: xxlnand vs0, v3, v4
; CHECK-NEXT: vslb v2, v2, v5
; CHECK-NEXT: vsrab v2, v2, v5
-; CHECK-NEXT: xxeval v2, v2, v3, v4, 94
+; CHECK-NEXT: xxsel v2, v4, vs0, v2
; CHECK-NEXT: blr
entry:
%and = and <16 x i8> %B, %C
@@ -255,9 +270,10 @@ define <8 x i16> @ternary_A_nand_BC_C_8x16(<8 x i1> %A, <8 x i16> %B, <8 x i16>
; CHECK-LABEL: ternary_A_nand_BC_C_8x16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltiw v5, 983055
+; CHECK-NEXT: xxlnand vs0, v3, v4
; CHECK-NEXT: vslh v2, v2, v5
; CHECK-NEXT: vsrah v2, v2, v5
-; CHECK-NEXT: xxeval v2, v2, v3, v4, 94
+; CHECK-NEXT: xxsel v2, v4, vs0, v2
; CHECK-NEXT: blr
entry:
%and = and <8 x i16> %B, %C
diff --git a/llvm/test/CodeGen/PowerPC/xxeval-vselect-x-xor.ll b/llvm/test/CodeGen/PowerPC/xxeval-vselect-x-xor.ll
index 0fc296cc5a4e2..a3fdc905cb52c 100644
--- a/llvm/test/CodeGen/PowerPC/xxeval-vselect-x-xor.ll
+++ b/llvm/test/CodeGen/PowerPC/xxeval-vselect-x-xor.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; Test file to verify the emission of Vector Evaluate instructions when ternary operators are used.
+; Test file to verify the emission of Vector Selection instructions when ternary operators are used.
; RUN: llc -verify-machineinstrs -mcpu=pwr10 -mtriple=powerpc64le-unknown-unknown \
; RUN: -ppc-asm-full-reg-names --ppc-vsr-nums-as-vr < %s | FileCheck %s
@@ -15,9 +15,11 @@ define <4 x i32> @ternary_A_and_BC_xor_BC_4x32(<4 x i1> %A, <4 x i32> %B, <4 x i
; CHECK-LABEL: ternary_A_and_BC_xor_BC_4x32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxleqv v5, v5, v5
+; CHECK-NEXT: xxland vs0, v3, v4
+; CHECK-NEXT: xxlxor vs1, v3, v4
; CHECK-NEXT: vslw v2, v2, v5
; CHECK-NEXT: vsraw v2, v2, v5
-; CHECK-NEXT: xxeval v2, v2, v3, v4, 97
+; CHECK-NEXT: xxsel v2, vs1, vs0, v2
; CHECK-NEXT: blr
entry:
%and = and <4 x i32> %B, %C
@@ -31,10 +33,12 @@ define <2 x i64> @ternary_A_and_BC_xor_BC_2x64(<2 x i1> %A, <2 x i64> %B, <2 x i
; CHECK-LABEL: ternary_A_and_BC_xor_BC_2x64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxlxor v5, v5, v5
+; CHECK-NEXT: xxland vs0, v3, v4
+; CHECK-NEXT: xxlxor vs1, v3, v4
; CHECK-NEXT: xxsplti32dx v5, 1, 63
; CHECK-NEXT: vsld v2, v2, v5
; CHECK-NEXT: vsrad v2, v2, v5
-; CHECK-NEXT: xxeval v2, v2, v3, v4, 97
+; CHECK-NEXT: xxsel v2, vs1, vs0, v2
; CHECK-NEXT: blr
entry:
%and = and <2 x i64> %B, %C
@@ -48,9 +52,11 @@ define <16 x i8> @ternary_A_and_BC_xor_BC_16x8(<16 x i1> %A, <16 x i8> %B, <16 x
; CHECK-LABEL: ternary_A_and_BC_xor_BC_16x8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltib v5, 7
+; CHECK-NEXT: xxland vs0, v3, v4
+; CHECK-NEXT: xxlxor vs1, v3, v4
; CHECK-NEXT: vslb v2, v2, v5
; CHECK-NEXT: vsrab v2, v2, v5
-; CHECK-NEXT: xxeval v2, v2, v3, v4, 97
+; CHECK-NEXT: xxsel v2, vs1, vs0, v2
; CHECK-NEXT: blr
entry:
%and = and <16 x i8> %B, %C
@@ -64,9 +70,11 @@ define <8 x i16> @ternary_A_and_BC_xor_BC_8x16(<8 x i1> %A, <8 x i16> %B, <8 x i
; CHECK-LABEL: ternary_A_and_BC_xor_BC_8x16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltiw v5, 983055
+; CHECK-NEXT: xxland vs0, v3, v4
+; CHECK-NEXT: xxlxor vs1, v3, v4
; CHECK-NEXT: vslh v2, v2, v5
; CHECK-NEXT: vsrah v2, v2, v5
-; CHECK-NEXT: xxeval v2, v2, v3, v4, 97
+; CHECK-NEXT: xxsel v2, vs1, vs0, v2
; CHECK-NEXT: blr
entry:
%and = and <8 x i16> %B, %C
@@ -80,9 +88,10 @@ define <4 x i32> @ternary_A_B_xor_BC_4x32(<4 x i1> %A, <4 x i32> %B, <4 x i32> %
; CHECK-LABEL: ternary_A_B_xor_BC_4x32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxleqv v5, v5, v5
+; CHECK-NEXT: xxlxor vs0, v3, v4
; CHECK-NEXT: vslw v2, v2, v5
; CHECK-NEXT: vsraw v2, v2, v5
-; CHECK-NEXT: xxeval v2, v2, v3, v4, 99
+; CHECK-NEXT: xxsel v2, vs0, v3, v2
; CHECK-NEXT: blr
entry:
%xor = xor <4 x i32> %B, %C
@@ -95,10 +104,11 @@ define <2 x i64> @ternary_A_B_xor_BC_2x64(<2 x i1> %A, <2 x i64> %B, <2 x i64> %
; CHECK-LABEL: ternary_A_B_xor_BC_2x64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxlxor v5, v5, v5
+; CHECK-NEXT: xxlxor vs0, v3, v4
; CHECK-NEXT: xxsplti32dx v5, 1, 63
; CHECK-NEXT: vsld v2, v2, v5
; CHECK-NEXT: vsrad v2, v2, v5
-; CHECK-NEXT: xxeval v2, v2, v3, v4, 99
+; CHECK-NEXT: xxsel v2, vs0, v3, v2
; CHECK-NEXT: blr
entry:
%xor = xor <2 x i64> %B, %C
@@ -111,9 +121,10 @@ define <16 x i8> @ternary_A_B_xor_BC_16x8(<16 x i1> %A, <16 x i8> %B, <16 x i8>
; CHECK-LABEL: ternary_A_B_xor_BC_16x8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltib v5, 7
+; CHECK-NEXT: xxlxor vs0, v3, v4
; CHECK-NEXT: vslb v2, v2, v5
; CHECK-NEXT: vsrab v2, v2, v5
-; CHECK-NEXT: xxeval v2, v2, v3, v4, 99
+; CHECK-NEXT: xxsel v2, vs0, v3, v2
; CHECK-NEXT: blr
entry:
%xor = xor <16 x i8> %B, %C
@@ -126,9 +137,10 @@ define <8 x i16> @ternary_A_B_xor_BC_8x16(<8 x i1> %A, <8 x i16> %B, <8 x i16> %
; CHECK-LABEL: ternary_A_B_xor_BC_8x16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltiw v5, 983055
+; CHECK-NEXT: xxlxor vs0, v3, v4
; CHECK-NEXT: vslh v2, v2, v5
; CHECK-NEXT: vsrah v2, v2, v5
-; CHECK-NEXT: xxeval v2, v2, v3, v4, 99
+; CHECK-NEXT: xxsel v2, vs0, v3, v2
; CHECK-NEXT: blr
entry:
%xor = xor <8 x i16> %B, %C
@@ -141,9 +153,10 @@ define <4 x i32> @ternary_A_C_xor_BC_4x32(<4 x i1> %A, <4 x i32> %B, <4 x i32> %
; CHECK-LABEL: ternary_A_C_xor_BC_4x32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxleqv v5, v5, v5
+; CHECK-NEXT: xxlxor vs0, v3, v4
; CHECK-NEXT: vslw v2, v2, v5
; CHECK-NEXT: vsraw v2, v2, v5
-; CHECK-NEXT: xxeval v2, v2, v3, v4, 101
+; CHECK-NEXT: xxsel v2, vs0, v4, v2
; CHECK-NEXT: blr
entry:
%xor = xor <4 x i32> %B, %C
@@ -156,10 +169,11 @@ define <2 x i64> @ternary_A_C_xor_BC_2x64(<2 x i1> %A, <2 x i64> %B, <2 x i64> %
; CHECK-LABEL: ternary_A_C_xor_BC_2x64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxlxor v5, v5, v5
+; CHECK-NEXT: xxlxor vs0, v3, v4
; CHECK-NEXT: xxsplti32dx v5, 1, 63
; CHECK-NEXT: vsld v2, v2, v5
; CHECK-NEXT: vsrad v2, v2, v5
-; CHECK-NEXT: xxeval v2, v2, v3, v4, 101
+; CHECK-NEXT: xxsel v2, vs0, v4, v2
; CHECK-NEXT: blr
entry:
%xor = xor <2 x i64> %B, %C
@@ -172,9 +186,10 @@ define <16 x i8> @ternary_A_C_xor_BC_16x8(<16 x i1> %A, <16 x i8> %B, <16 x i8>
; CHECK-LABEL: ternary_A_C_xor_BC_16x8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltib v5, 7
+; CHECK-NEXT: xxlxor vs0, v3, v4
; CHECK-NEXT: vslb v2, v2, v5
; CHECK-NEXT: vsrab v2, v2, v5
-; CHECK-NEXT: xxeval v2, v2, v3, v4, 101
+; CHECK-NEXT: xxsel v2, vs0, v4, v2
; CHECK-NEXT: blr
entry:
%xor = xor <16 x i8> %B, %C
@@ -187,9 +202,10 @@ define <8 x i16> @ternary_A_C_xor_BC_8x16(<8 x i1> %A, <8 x i16> %B, <8 x i16> %
; CHECK-LABEL: ternary_A_C_xor_BC_8x16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltiw v5, 983055
+; CHECK-NEXT: xxlxor vs0, v3, v4
; CHECK-NEXT: vslh v2, v2, v5
; CHECK-NEXT: vsrah v2, v2, v5
-; CHECK-NEXT: xxeval v2, v2, v3, v4, 101
+; CHECK-NEXT: xxsel v2, vs0, v4, v2
; CHECK-NEXT: blr
entry:
%xor = xor <8 x i16> %B, %C
@@ -202,9 +218,11 @@ define <4 x i32> @ternary_A_or_BC_xor_BC_4x32(<4 x i1> %A, <4 x i32> %B, <4 x i3
; CHECK-LABEL: ternary_A_or_BC_xor_BC_4x32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxleqv v5, v5, v5
+; CHECK-NEXT: xxlor vs0, v3, v4
+; CHECK-NEXT: xxlxor vs1, v3, v4
; CHECK-NEXT: vslw v2, v2, v5
; CHECK-NEXT: vsraw v2, v2, v5
-; CHECK-NEXT: xxeval v2, v2, v3, v4, 103
+; CHECK-NEXT: xxsel v2, vs1, vs0, v2
; CHECK-NEXT: blr
entry:
%or = or <4 x i32> %B, %C
@@ -218,10 +236,12 @@ define <2 x i64> @ternary_A_or_BC_xor_BC_2x64(<2 x i1> %A, <2 x i64> %B, <2 x i6
; CHECK-LABEL: ternary_A_or_BC_xor_BC_2x64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxlxor v5, v5, v5
+; CHECK-NEXT: xxlor vs0, v3, v4
+; CHECK-NEXT: xxlxor vs1, v3, v4
; CHECK-NEXT: xxsplti32dx v5, 1, 63
; CHECK-NEXT: vsld v2, v2, v5
; CHECK-NEXT: vsrad v2, v2, v5
-; CHECK-NEXT: xxeval v2, v2, v3, v4, 103
+; CHECK-NEXT: xxsel v2, vs1, vs0, v2
; CHECK-NEXT: blr
entry:
%or = or <2 x i64> %B, %C
@@ -235,9 +255,11 @@ define <16 x i8> @ternary_A_or_BC_xor_BC_16x8(<16 x i1> %A, <16 x i8> %B, <16 x
; CHECK-LABEL: ternary_A_or_BC_xor_BC_16x8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltib v5, 7
+; CHECK-NEXT: xxlor vs0, v3, v4
+; CHECK-NEXT: xxlxor vs1, v3, v4
; CHECK-NEXT: vslb v2, v2, v5
; CHECK-NEXT: vsrab v2, v2, v5
-; CHECK-NEXT: xxeval v2, v2, v3, v4, 103
+; CHECK-NEXT: xxsel v2, vs1, vs0, v2
; CHECK-NEXT: blr
entry:
%or = or <16 x i8> %B, %C
@@ -251,9 +273,11 @@ define <8 x i16> @ternary_A_or_BC_xor_BC_8x16(<8 x i1> %A, <8 x i16> %B, <8 x i1
; CHECK-LABEL: ternary_A_or_BC_xor_BC_8x16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltiw v5, 983055
+; CHECK-NEXT: xxlor vs0, v3, v4
+; CHECK-NEXT: xxlxor vs1, v3, v4
; CHECK-NEXT: vslh v2, v2, v5
; CHECK-NEXT: vsrah v2, v2, v5
-; CHECK-NEXT: xxeval v2, v2, v3, v4, 103
+; CHECK-NEXT: xxsel v2, vs1, vs0, v2
; CHECK-NEXT: blr
entry:
%or = or <8 x i16> %B, %C
@@ -267,9 +291,11 @@ define <4 x i32> @ternary_A_nor_BC_xor_BC_4x32(<4 x i1> %A, <4 x i32> %B, <4 x i
; CHECK-LABEL: ternary_A_nor_BC_xor_BC_4x32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxleqv v5, v5, v5
+; CHECK-NEXT: xxlnor vs0, v3, v4
+; CHECK-NEXT: xxlxor vs1, v3, v4
; CHECK-NEXT: vslw v2, v2, v5
; CHECK-NEXT: vsraw v2, v2, v5
-; CHECK-NEXT: xxeval v2, v2, v3, v4, 104
+; CHECK-NEXT: xxsel v2, vs1, vs0, v2
; CHECK-NEXT: blr
entry:
%or = or <4 x i32> %B, %C
@@ -284,10 +310,12 @@ define <2 x i64> @ternary_A_nor_BC_xor_BC_2x64(<2 x i1> %A, <2 x i64> %B, <2 x i
; CHECK-LABEL: ternary_A_nor_BC_xor_BC_2x64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxlxor v5, v5, v5
+; CHECK-NEXT: xxlnor vs0, v3, v4
+; CHECK-NEXT: xxlxor vs1, v3, v4
; CHECK-NEXT: xxsplti32dx v5, 1, 63
; CHECK-NEXT: vsld v2, v2, v5
; CHECK-NEXT: vsrad v2, v2, v5
-; CHECK-NEXT: xxeval v2, v2, v3, v4, 104
+; CHECK-NEXT: xxsel v2, vs1, vs0, v2
; CHECK-NEXT: blr
entry:
%or = or <2 x i64> %B, %C
@@ -302,9 +330,11 @@ define <16 x i8> @ternary_A_nor_BC_xor_BC_16x8(<16 x i1> %A, <16 x i8> %B, <16 x
; CHECK-LABEL: ternary_A_nor_BC_xor_BC_16x8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltib v5, 7
+; CHECK-NEXT: xxlnor vs0, v3, v4
+; CHECK-NEXT: xxlxor vs1, v3, v4
; CHECK-NEXT: vslb v2, v2, v5
; CHECK-NEXT: vsrab v2, v2, v5
-; CHECK-NEXT: xxeval v2, v2, v3, v4, 104
+; CHECK-NEXT: xxsel v2, vs1, vs0, v2
; CHECK-NEXT: blr
entry:
%or = or <16 x i8> %B, %C
@@ -319,9 +349,11 @@ define <8 x i16> @ternary_A_nor_BC_xor_BC_8x16(<8 x i1> %A, <8 x i16> %B, <8 x i
; CHECK-LABEL: ternary_A_nor_BC_xor_BC_8x16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltiw v5, 983055
+; CHECK-NEXT: xxlnor vs0, v3, v4
+; CHECK-NEXT: xxlxor vs1, v3, v4
; CHECK-NEXT: vslh v2, v2, v5
; CHECK-NEXT: vsrah v2, v2, v5
-; CHECK-NEXT: xxeval v2, v2, v3, v4, 104
+; CHECK-NEXT: xxsel v2, vs1, vs0, v2
; CHECK-NEXT: blr
entry:
%or = or <8 x i16> %B, %C