[clang] [CIR][X86] Implement lowering for AVX512 ktest builtins (PR #169985)
via cfe-commits
cfe-commits at lists.llvm.org
Sat Nov 29 02:24:30 PST 2025
llvmbot wrote:
@llvm/pr-subscribers-clangir
Author: AIT (GeneraluseAI)
<details>
<summary>Changes</summary>
Part of [#167765](https://github.com/llvm/llvm-project/issues/167765).
This is a follow-up patch continuing the work from [#169774](https://github.com/llvm/llvm-project/pull/169774).
It builds on the previous lowering infrastructure and extends support to the AVX512 ktest builtins.
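For readers coming from the intrinsics headers, here is a minimal usage sketch (illustrative, not part of the patch) of how these builtins are typically reached; `any_common_bits` is a hypothetical helper, and the builtin mappings in the comments follow the case labels in the diff below:

```c
#include <immintrin.h>

// Hypothetical helper (compile with -mavx512bw): the user-level mask
// intrinsics expand to the __builtin_ia32_* calls this patch lowers in CIR.
unsigned char any_common_bits(__mmask32 a, __mmask32 b) {
  __mmask32 both = _kand_mask32(a, b);     // __builtin_ia32_kandsi
  return !_kortestz_mask32_u8(both, both); // __builtin_ia32_kortestzsi
}
```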
---
Patch is 48.58 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/169985.diff
4 Files Affected:
- (modified) clang/lib/CIR/CodeGen/CIRGenBuiltinX86.cpp (+93-4)
- (modified) clang/test/CIR/CodeGenBuiltins/X86/avx512bw-builtins.c (+474)
- (added) clang/test/CIR/CodeGenBuiltins/X86/avx512dq-builtins.c (+275)
- (modified) clang/test/CIR/CodeGenBuiltins/X86/avx512f-builtins.c (+207)
``````````diff
diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinX86.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinX86.cpp
index 0e43345bad6f1..1b6dd54d32646 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuiltinX86.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinX86.cpp
@@ -85,6 +85,36 @@ static mlir::Value getMaskVecValue(CIRGenBuilderTy &builder, mlir::Location loc,
return maskVec;
}
+static mlir::Value emitX86MaskAddLogic(CIRGenBuilderTy &builder,
+ mlir::Location loc,
+ const std::string &intrinsicName,
+ SmallVectorImpl<mlir::Value> &ops) {
+
+ auto intTy = cast<cir::IntType>(ops[0].getType());
+ unsigned numElts = intTy.getWidth();
+ mlir::Value lhsVec = getMaskVecValue(builder, loc, ops[0], numElts);
+ mlir::Value rhsVec = getMaskVecValue(builder, loc, ops[1], numElts);
+ mlir::Type vecTy = lhsVec.getType();
+ mlir::Value resVec = emitIntrinsicCallOp(builder, loc, intrinsicName, vecTy,
+ mlir::ValueRange{lhsVec, rhsVec});
+ return builder.createBitcast(resVec, ops[0].getType());
+}
+
+static mlir::Value emitX86MaskLogic(CIRGenBuilderTy &builder,
+ mlir::Location loc,
+ cir::BinOpKind binOpKind,
+ SmallVectorImpl<mlir::Value> &ops,
+ bool invertLHS = false) {
+ unsigned numElts = cast<cir::IntType>(ops[0].getType()).getWidth();
+ mlir::Value lhs = getMaskVecValue(builder, loc, ops[0], numElts);
+ mlir::Value rhs = getMaskVecValue(builder, loc, ops[1], numElts);
+
+ if (invertLHS)
+ lhs = builder.createNot(lhs);
+ return builder.createBitcast(builder.createBinop(loc, lhs, binOpKind, rhs),
+ ops[0].getType());
+}
+
mlir::Value CIRGenFunction::emitX86BuiltinExpr(unsigned builtinID,
const CallExpr *expr) {
if (builtinID == Builtin::BI__builtin_cpu_is) {
@@ -727,14 +757,40 @@ mlir::Value CIRGenFunction::emitX86BuiltinExpr(unsigned builtinID,
case X86::BI__builtin_ia32_vpcomuw:
case X86::BI__builtin_ia32_vpcomud:
case X86::BI__builtin_ia32_vpcomuq:
+ cgm.errorNYI(expr->getSourceRange(),
+ std::string("unimplemented X86 builtin call: ") +
+ getContext().BuiltinInfo.getName(builtinID));
+ return {};
case X86::BI__builtin_ia32_kortestcqi:
case X86::BI__builtin_ia32_kortestchi:
case X86::BI__builtin_ia32_kortestcsi:
- case X86::BI__builtin_ia32_kortestcdi:
+ case X86::BI__builtin_ia32_kortestcdi: {
+ mlir::Location loc = getLoc(expr->getExprLoc());
+ cir::IntType ty = cast<cir::IntType>(ops[0].getType());
+ cir::IntAttr allOnesAttr =
+ cir::IntAttr::get(ty, APInt::getAllOnes(ty.getWidth()));
+ cir::ConstantOp allOnesOp = builder.getConstant(loc, allOnesAttr);
+ mlir::Value orOp = emitX86MaskLogic(builder, loc, cir::BinOpKind::Or, ops);
+ mlir::Value cmp =
+ cir::CmpOp::create(builder, loc, cir::CmpOpKind::eq, orOp, allOnesOp);
+ return builder.createCast(cir::CastKind::bool_to_int, cmp,
+ cgm.convertType(expr->getType()));
+ }
case X86::BI__builtin_ia32_kortestzqi:
case X86::BI__builtin_ia32_kortestzhi:
case X86::BI__builtin_ia32_kortestzsi:
- case X86::BI__builtin_ia32_kortestzdi:
+ case X86::BI__builtin_ia32_kortestzdi: {
+ mlir::Location loc = getLoc(expr->getExprLoc());
+ cir::IntType ty = cast<cir::IntType>(ops[0].getType());
+ cir::IntAttr allZerosAttr =
+ cir::IntAttr::get(ty, APInt::getZero(ty.getWidth()));
+ cir::ConstantOp allZerosOp = builder.getConstant(loc, allZerosAttr);
+ mlir::Value orOp = emitX86MaskLogic(builder, loc, cir::BinOpKind::Or, ops);
+ mlir::Value cmp =
+ cir::CmpOp::create(builder, loc, cir::CmpOpKind::eq, orOp, allZerosOp);
+ return builder.createCast(cir::CastKind::bool_to_int, cmp,
+ cgm.convertType(expr->getType()));
+ }
case X86::BI__builtin_ia32_ktestcqi:
case X86::BI__builtin_ia32_ktestzqi:
case X86::BI__builtin_ia32_ktestchi:
@@ -744,37 +800,70 @@ mlir::Value CIRGenFunction::emitX86BuiltinExpr(unsigned builtinID,
case X86::BI__builtin_ia32_ktestcdi:
case X86::BI__builtin_ia32_ktestzdi:
case X86::BI__builtin_ia32_kaddqi:
+ return emitX86MaskAddLogic(builder, getLoc(expr->getExprLoc()),
+ "x86.avx512.kadd.b", ops);
case X86::BI__builtin_ia32_kaddhi:
+ return emitX86MaskAddLogic(builder, getLoc(expr->getExprLoc()),
+ "x86.avx512.kadd.w", ops);
case X86::BI__builtin_ia32_kaddsi:
+ return emitX86MaskAddLogic(builder, getLoc(expr->getExprLoc()),
+ "x86.avx512.kadd.d", ops);
case X86::BI__builtin_ia32_kadddi:
+ return emitX86MaskAddLogic(builder, getLoc(expr->getExprLoc()),
+ "x86.avx512.kadd.q", ops);
case X86::BI__builtin_ia32_kandqi:
case X86::BI__builtin_ia32_kandhi:
case X86::BI__builtin_ia32_kandsi:
case X86::BI__builtin_ia32_kanddi:
+ return emitX86MaskLogic(builder, getLoc(expr->getExprLoc()),
+ cir::BinOpKind::And, ops);
case X86::BI__builtin_ia32_kandnqi:
case X86::BI__builtin_ia32_kandnhi:
case X86::BI__builtin_ia32_kandnsi:
case X86::BI__builtin_ia32_kandndi:
+ return emitX86MaskLogic(builder, getLoc(expr->getExprLoc()),
+ cir::BinOpKind::And, ops, true);
case X86::BI__builtin_ia32_korqi:
case X86::BI__builtin_ia32_korhi:
case X86::BI__builtin_ia32_korsi:
case X86::BI__builtin_ia32_kordi:
+ return emitX86MaskLogic(builder, getLoc(expr->getExprLoc()),
+ cir::BinOpKind::Or, ops);
case X86::BI__builtin_ia32_kxnorqi:
case X86::BI__builtin_ia32_kxnorhi:
case X86::BI__builtin_ia32_kxnorsi:
case X86::BI__builtin_ia32_kxnordi:
+ return emitX86MaskLogic(builder, getLoc(expr->getExprLoc()),
+ cir::BinOpKind::Xor, ops, true);
case X86::BI__builtin_ia32_kxorqi:
case X86::BI__builtin_ia32_kxorhi:
case X86::BI__builtin_ia32_kxorsi:
case X86::BI__builtin_ia32_kxordi:
+ return emitX86MaskLogic(builder, getLoc(expr->getExprLoc()),
+ cir::BinOpKind::Xor, ops);
case X86::BI__builtin_ia32_knotqi:
case X86::BI__builtin_ia32_knothi:
case X86::BI__builtin_ia32_knotsi:
- case X86::BI__builtin_ia32_knotdi:
+ case X86::BI__builtin_ia32_knotdi: {
+ cir::IntType intTy = cast<cir::IntType>(ops[0].getType());
+ unsigned numElts = intTy.getWidth();
+ mlir::Value resVec =
+ getMaskVecValue(builder, getLoc(expr->getExprLoc()), ops[0], numElts);
+ return builder.createBitcast(builder.createNot(resVec), ops[0].getType());
+ }
case X86::BI__builtin_ia32_kmovb:
case X86::BI__builtin_ia32_kmovw:
case X86::BI__builtin_ia32_kmovd:
- case X86::BI__builtin_ia32_kmovq:
+ case X86::BI__builtin_ia32_kmovq: {
+ // Bitcast to vXi1 type and then back to integer. This gets the mask
+ // register type into the IR, but might be optimized out depending on
+ // what's around it.
+ cir::IntType intTy = cast<cir::IntType>(ops[0].getType());
+ unsigned numElts = intTy.getWidth();
+ mlir::Value resVec =
+ getMaskVecValue(builder, getLoc(expr->getExprLoc()), ops[0], numElts);
+ return builder.createBitcast(resVec, ops[0].getType());
+ }
case X86::BI__builtin_ia32_kunpckdi:
case X86::BI__builtin_ia32_kunpcksi:
case X86::BI__builtin_ia32_kunpckhi:
diff --git a/clang/test/CIR/CodeGenBuiltins/X86/avx512bw-builtins.c b/clang/test/CIR/CodeGenBuiltins/X86/avx512bw-builtins.c
index 3522e2c7e50bf..9f0ca5874c589 100644
--- a/clang/test/CIR/CodeGenBuiltins/X86/avx512bw-builtins.c
+++ b/clang/test/CIR/CodeGenBuiltins/X86/avx512bw-builtins.c
@@ -115,3 +115,477 @@ __mmask32 test_kshiftri_mask32_out_of_range(__mmask32 A) {
return _kshiftri_mask32(A, 33);
}
+
+
+__mmask32 test_kadd_mask32(__mmask32 A, __mmask32 B) {
+ // CIR-LABEL: _kadd_mask32
+ // CIR: cir.cast bitcast {{.*}} : !u32i -> !cir.vector<32 x !cir.int<u, 1>>
+ // CIR: cir.cast bitcast {{.*}} : !u32i -> !cir.vector<32 x !cir.int<u, 1>>
+ // CIR: cir.call_llvm_intrinsic "x86.avx512.kadd.d"
+ // CIR: cir.cast bitcast {{.*}} : !cir.vector<32 x !cir.int<u, 1>> -> !u32i
+
+ // LLVM-LABEL: _kadd_mask32
+ // LLVM: [[L:%.*]] = bitcast i32 %{{.*}} to <32 x i1>
+ // LLVM: [[R:%.*]] = bitcast i32 %{{.*}} to <32 x i1>
+ // LLVM: [[RES:%.*]] = call <32 x i1> @llvm.x86.avx512.kadd.d(<32 x i1> [[L]], <32 x i1> [[R]])
+ // LLVM: bitcast <32 x i1> [[RES]] to i32
+
+ // OGCG-LABEL: _kadd_mask32
+ // OGCG: bitcast i32 %{{.*}} to <32 x i1>
+ // OGCG: bitcast i32 %{{.*}} to <32 x i1>
+ // OGCG: call <32 x i1> @llvm.x86.avx512.kadd.d
+ // OGCG: bitcast <32 x i1> {{.*}} to i32
+ return _kadd_mask32(A, B);
+}
+
+__mmask64 test_kadd_mask64(__mmask64 A, __mmask64 B) {
+ // CIR-LABEL: _kadd_mask64
+ // CIR: cir.cast bitcast {{.*}} : !u64i -> !cir.vector<64 x !cir.int<u, 1>>
+ // CIR: cir.cast bitcast {{.*}} : !u64i -> !cir.vector<64 x !cir.int<u, 1>>
+ // CIR: cir.call_llvm_intrinsic "x86.avx512.kadd.q"
+ // CIR: cir.cast bitcast {{.*}} : !cir.vector<64 x !cir.int<u, 1>> -> !u64i
+
+ // LLVM-LABEL: _kadd_mask64
+ // LLVM: [[L:%.*]] = bitcast i64 %{{.*}} to <64 x i1>
+ // LLVM: [[R:%.*]] = bitcast i64 %{{.*}} to <64 x i1>
+ // LLVM: [[RES:%.*]] = call <64 x i1> @llvm.x86.avx512.kadd.q(<64 x i1> [[L]], <64 x i1> [[R]])
+ // LLVM: bitcast <64 x i1> [[RES]] to i64
+
+ // OGCG-LABEL: _kadd_mask64
+ // OGCG: bitcast i64 %{{.*}} to <64 x i1>
+ // OGCG: bitcast i64 %{{.*}} to <64 x i1>
+ // OGCG: call <64 x i1> @llvm.x86.avx512.kadd.q
+ // OGCG: bitcast <64 x i1> {{.*}} to i64
+ return _kadd_mask64(A, B);
+}
+
+__mmask32 test_kand_mask32(__mmask32 A, __mmask32 B) {
+ // CIR-LABEL: _kand_mask32
+ // CIR: cir.cast bitcast {{.*}} : !u32i -> !cir.vector<32 x !cir.int<u, 1>>
+ // CIR: cir.cast bitcast {{.*}} : !u32i -> !cir.vector<32 x !cir.int<u, 1>>
+ // CIR: cir.binop(and, {{.*}}, {{.*}}) : !cir.vector<32 x !cir.int<u, 1>>
+ // CIR: cir.cast bitcast {{.*}} : !cir.vector<32 x !cir.int<u, 1>> -> !u32i
+
+ // LLVM-LABEL: _kand_mask32
+ // LLVM: [[L:%.*]] = bitcast i32 %{{.*}} to <32 x i1>
+ // LLVM: [[R:%.*]] = bitcast i32 %{{.*}} to <32 x i1>
+ // LLVM: [[RES:%.*]] = and <32 x i1> [[L]], [[R]]
+ // LLVM: bitcast <32 x i1> [[RES]] to i32
+
+ // OGCG-LABEL: _kand_mask32
+ // OGCG: bitcast i32 %{{.*}} to <32 x i1>
+ // OGCG: bitcast i32 %{{.*}} to <32 x i1>
+ // OGCG: and <32 x i1>
+ // OGCG: bitcast <32 x i1> {{.*}} to i32
+ return _kand_mask32(A, B);
+}
+
+__mmask64 test_kand_mask64(__mmask64 A, __mmask64 B) {
+ // CIR-LABEL: _kand_mask64
+ // CIR: cir.cast bitcast {{.*}} : !u64i -> !cir.vector<64 x !cir.int<u, 1>>
+ // CIR: cir.cast bitcast {{.*}} : !u64i -> !cir.vector<64 x !cir.int<u, 1>>
+ // CIR: cir.binop(and, {{.*}}, {{.*}}) : !cir.vector<64 x !cir.int<u, 1>>
+ // CIR: cir.cast bitcast {{.*}} : !cir.vector<64 x !cir.int<u, 1>> -> !u64i
+
+ // LLVM-LABEL: _kand_mask64
+ // LLVM: [[L:%.*]] = bitcast i64 %{{.*}} to <64 x i1>
+ // LLVM: [[R:%.*]] = bitcast i64 %{{.*}} to <64 x i1>
+ // LLVM: [[RES:%.*]] = and <64 x i1> [[L]], [[R]]
+ // LLVM: bitcast <64 x i1> [[RES]] to i64
+
+ // OGCG-LABEL: _kand_mask64
+ // OGCG: bitcast i64 %{{.*}} to <64 x i1>
+ // OGCG: bitcast i64 %{{.*}} to <64 x i1>
+ // OGCG: and <64 x i1>
+ // OGCG: bitcast <64 x i1> {{.*}} to i64
+ return _kand_mask64(A, B);
+}
+
+__mmask32 test_kandn_mask32(__mmask32 A, __mmask32 B) {
+ // CIR-LABEL: _kandn_mask32
+ // CIR: cir.cast bitcast {{.*}} : !u32i -> !cir.vector<32 x !cir.int<u, 1>>
+ // CIR: cir.cast bitcast {{.*}} : !u32i -> !cir.vector<32 x !cir.int<u, 1>>
+ // CIR: cir.unary(not, {{.*}}) : !cir.vector<32 x !cir.int<u, 1>>
+ // CIR: cir.binop(and, {{.*}}, {{.*}}) : !cir.vector<32 x !cir.int<u, 1>>
+ // CIR: cir.cast bitcast {{.*}} : !cir.vector<32 x !cir.int<u, 1>> -> !u32i
+
+ // LLVM-LABEL: _kandn_mask32
+ // LLVM: [[L:%.*]] = bitcast i32 %{{.*}} to <32 x i1>
+ // LLVM: [[R:%.*]] = bitcast i32 %{{.*}} to <32 x i1>
+ // LLVM: xor <32 x i1> [[L]], splat (i1 true)
+ // LLVM: and <32 x i1>
+ // LLVM: bitcast <32 x i1> {{.*}} to i32
+
+ // OGCG-LABEL: _kandn_mask32
+ // OGCG: bitcast i32 %{{.*}} to <32 x i1>
+ // OGCG: bitcast i32 %{{.*}} to <32 x i1>
+ // OGCG: xor <32 x i1>
+ // OGCG: and <32 x i1>
+ // OGCG: bitcast <32 x i1> {{.*}} to i32
+ return _kandn_mask32(A, B);
+}
+
+__mmask64 test_kandn_mask64(__mmask64 A, __mmask64 B) {
+ // CIR-LABEL: _kandn_mask64
+ // CIR: cir.cast bitcast {{.*}} : !u64i -> !cir.vector<64 x !cir.int<u, 1>>
+ // CIR: cir.cast bitcast {{.*}} : !u64i -> !cir.vector<64 x !cir.int<u, 1>>
+ // CIR: cir.unary(not, {{.*}}) : !cir.vector<64 x !cir.int<u, 1>>
+ // CIR: cir.binop(and, {{.*}}, {{.*}}) : !cir.vector<64 x !cir.int<u, 1>>
+ // CIR: cir.cast bitcast {{.*}} : !cir.vector<64 x !cir.int<u, 1>> -> !u64i
+
+ // LLVM-LABEL: _kandn_mask64
+ // LLVM: [[L:%.*]] = bitcast i64 %{{.*}} to <64 x i1>
+ // LLVM: [[R:%.*]] = bitcast i64 %{{.*}} to <64 x i1>
+ // LLVM: xor <64 x i1> [[L]], splat (i1 true)
+ // LLVM: and <64 x i1>
+ // LLVM: bitcast <64 x i1> {{.*}} to i64
+
+ // OGCG-LABEL: _kandn_mask64
+ // OGCG: bitcast i64 %{{.*}} to <64 x i1>
+ // OGCG: bitcast i64 %{{.*}} to <64 x i1>
+ // OGCG: xor <64 x i1>
+ // OGCG: and <64 x i1>
+ // OGCG: bitcast <64 x i1> {{.*}} to i64
+ return _kandn_mask64(A, B);
+}
+
+__mmask32 test_kor_mask32(__mmask32 A, __mmask32 B) {
+ // CIR-LABEL: _kor_mask32
+ // CIR: cir.cast bitcast {{.*}} : !u32i -> !cir.vector<32 x !cir.int<u, 1>>
+ // CIR: cir.cast bitcast {{.*}} : !u32i -> !cir.vector<32 x !cir.int<u, 1>>
+ // CIR: cir.binop(or, {{.*}}, {{.*}}) : !cir.vector<32 x !cir.int<u, 1>>
+ // CIR: cir.cast bitcast {{.*}} : !cir.vector<32 x !cir.int<u, 1>> -> !u32i
+
+ // LLVM-LABEL: _kor_mask32
+ // LLVM: [[L:%.*]] = bitcast i32 %{{.*}} to <32 x i1>
+ // LLVM: [[R:%.*]] = bitcast i32 %{{.*}} to <32 x i1>
+ // LLVM: or <32 x i1> [[L]], [[R]]
+ // LLVM: bitcast <32 x i1> {{.*}} to i32
+
+ // OGCG-LABEL: _kor_mask32
+ // OGCG: bitcast i32 %{{.*}} to <32 x i1>
+ // OGCG: bitcast i32 %{{.*}} to <32 x i1>
+ // OGCG: or <32 x i1>
+ // OGCG: bitcast <32 x i1> {{.*}} to i32
+ return _kor_mask32(A, B);
+}
+
+__mmask64 test_kor_mask64(__mmask64 A, __mmask64 B) {
+ // CIR-LABEL: _kor_mask64
+ // CIR: cir.cast bitcast {{.*}} : !u64i -> !cir.vector<64 x !cir.int<u, 1>>
+ // CIR: cir.cast bitcast {{.*}} : !u64i -> !cir.vector<64 x !cir.int<u, 1>>
+ // CIR: cir.binop(or, {{.*}}, {{.*}}) : !cir.vector<64 x !cir.int<u, 1>>
+ // CIR: cir.cast bitcast {{.*}} : !cir.vector<64 x !cir.int<u, 1>> -> !u64i
+
+ // LLVM-LABEL: _kor_mask64
+ // LLVM: [[L:%.*]] = bitcast i64 %{{.*}} to <64 x i1>
+ // LLVM: [[R:%.*]] = bitcast i64 %{{.*}} to <64 x i1>
+ // LLVM: or <64 x i1> [[L]], [[R]]
+ // LLVM: bitcast <64 x i1> {{.*}} to i64
+
+ // OGCG-LABEL: _kor_mask64
+ // OGCG: bitcast i64 %{{.*}} to <64 x i1>
+ // OGCG: bitcast i64 %{{.*}} to <64 x i1>
+ // OGCG: or <64 x i1>
+ // OGCG: bitcast <64 x i1> {{.*}} to i64
+ return _kor_mask64(A, B);
+}
+
+__mmask32 test_kxor_mask32(__mmask32 A, __mmask32 B) {
+ // CIR-LABEL: _kxor_mask32
+ // CIR: cir.cast bitcast {{.*}} : !u32i -> !cir.vector<32 x !cir.int<u, 1>>
+ // CIR: cir.cast bitcast {{.*}} : !u32i -> !cir.vector<32 x !cir.int<u, 1>>
+ // CIR: cir.binop(xor, {{.*}}, {{.*}}) : !cir.vector<32 x !cir.int<u, 1>>
+ // CIR: cir.cast bitcast {{.*}} : !cir.vector<32 x !cir.int<u, 1>> -> !u32i
+
+ // LLVM-LABEL: _kxor_mask32
+ // LLVM: [[L:%.*]] = bitcast i32 %{{.*}} to <32 x i1>
+ // LLVM: [[R:%.*]] = bitcast i32 %{{.*}} to <32 x i1>
+ // LLVM: xor <32 x i1> [[L]], [[R]]
+ // LLVM: bitcast <32 x i1> {{.*}} to i32
+
+ // OGCG-LABEL: _kxor_mask32
+ // OGCG: bitcast i32 %{{.*}} to <32 x i1>
+ // OGCG: bitcast i32 %{{.*}} to <32 x i1>
+ // OGCG: xor <32 x i1>
+ // OGCG: bitcast <32 x i1> {{.*}} to i32
+ return _kxor_mask32(A, B);
+}
+
+__mmask64 test_kxor_mask64(__mmask64 A, __mmask64 B) {
+ // CIR-LABEL: _kxor_mask64
+ // CIR: cir.cast bitcast {{.*}} : !u64i -> !cir.vector<64 x !cir.int<u, 1>>
+ // CIR: cir.cast bitcast {{.*}} : !u64i -> !cir.vector<64 x !cir.int<u, 1>>
+ // CIR: cir.binop(xor, {{.*}}, {{.*}}) : !cir.vector<64 x !cir.int<u, 1>>
+ // CIR: cir.cast bitcast {{.*}} : !cir.vector<64 x !cir.int<u, 1>> -> !u64i
+
+ // LLVM-LABEL: _kxor_mask64
+ // LLVM: [[L:%.*]] = bitcast i64 %{{.*}} to <64 x i1>
+ // LLVM: [[R:%.*]] = bitcast i64 %{{.*}} to <64 x i1>
+ // LLVM: xor <64 x i1> [[L]], [[R]]
+ // LLVM: bitcast <64 x i1> {{.*}} to i64
+
+ // OGCG-LABEL: _kxor_mask64
+ // OGCG: bitcast i64 %{{.*}} to <64 x i1>
+ // OGCG: bitcast i64 %{{.*}} to <64 x i1>
+ // OGCG: xor <64 x i1>
+ // OGCG: bitcast <64 x i1> {{.*}} to i64
+ return _kxor_mask64(A, B);
+}
+
+__mmask32 test_kxnor_mask32(__mmask32 A, __mmask32 B) {
+ // CIR-LABEL: _kxnor_mask32
+ // CIR: cir.cast bitcast {{.*}} : !u32i -> !cir.vector<32 x !cir.int<u, 1>>
+ // CIR: cir.cast bitcast {{.*}} : !u32i -> !cir.vector<32 x !cir.int<u, 1>>
+ // CIR: cir.unary(not, {{.*}}) : !cir.vector<32 x !cir.int<u, 1>>
+ // CIR: cir.binop(xor, {{.*}}, {{.*}}) : !cir.vector<32 x !cir.int<u, 1>>
+ // CIR: cir.cast bitcast {{.*}} : !cir.vector<32 x !cir.int<u, 1>> -> !u32i
+
+ // LLVM-LABEL: _kxnor_mask32
+ // LLVM: [[L:%.*]] = bitcast i32 %{{.*}} to <32 x i1>
+ // LLVM: [[R:%.*]] = bitcast i32 %{{.*}} to <32 x i1>
+ // LLVM: [[NOT:%.*]] = xor <32 x i1> [[L]], splat (i1 true)
+ // LLVM: [[RES:%.*]] = xor <32 x i1> [[NOT]], [[R]]
+ // LLVM: bitcast <32 x i1> [[RES]] to i32
+
+ // OGCG-LABEL: _kxnor_mask32
+ // OGCG: bitcast i32 %{{.*}} to <32 x i1>
+ // OGCG: bitcast i32 %{{.*}} to <32 x i1>
+ // OGCG: xor <32 x i1>
+ // OGCG: xor <32 x i1>
+ // OGCG: bitcast <32 x i1> {{.*}} to i32
+
+ return _kxnor_mask32(A, B);
+}
+
+__mmask64 test_kxnor_mask64(__mmask64 A, __mmask64 B) {
+ // CIR-LABEL: _kxnor_mask64
+ // CIR: cir.cast bitcast {{.*}} : !u64i -> !cir.vector<64 x !cir.int<u, 1>>
+ // CIR: cir.cast bitcast {{.*}} : !u64i -> !cir.vector<64 x !cir.int<u, 1>>
+ // CIR: cir.unary(not, {{.*}}) : !cir.vector<64 x !cir.int<u, 1>>
+ // CIR: cir.binop(xor, {{.*}}, {{.*}}) : !cir.vector<64 x !cir.int<u, 1>>
+ // CIR: cir.cast bitcast {{.*}} : !cir.vector<64 x !cir.int<u, 1>> -> !u64i
+
+ // LLVM-LABEL: _kxnor_mask64
+ // LLVM: [[L:%.*]] = bitcast i64 %{{.*}} to <64 x i1>
+ // LLVM: [[R:%.*]] = bitcast i64 %{{.*}} to <64 x i1>
+ // LLVM: [[NOT:%.*]] = xor <64 x i1> [[L]], splat (i1 true)
+ // LLVM: [[RES:%.*]] = xor <64 x i1> [[NOT]], [[R]]
+ // LLVM: bitcast <64 x i1> [[RES]] to i64
+
+ // OGCG-LABEL: _kxnor_mask64
+ // OGCG: bitcast i64 %{{.*}} to <64 x i1>
+ // OGCG: bitcast i64 %{{.*}} to <64 x i1>
+ // OGCG: xor <64 x i1>
+ // OGCG: xor <64 x i1>
+ // OGCG: bitcast <64 x i1> {{.*}} to i64
+
+ return _kxnor_mask64(A, B);
+}
+
+
+__mmask32 test_knot_mask32(__mmask32 A) {
+ // CIR-LABEL: _knot_mask32
+ // CIR: cir.cast bitcast {{.*}} : !u32i -> !cir.vector<32 x !cir.int<u, 1>>
+ // CIR: cir.unary(not, {{.*}}) : !cir.vector<32 x !cir.int<u, 1>>
+ // CIR: cir.cast bitcast {{.*}} : !cir.vector<32 x !cir.int<u, 1>> -> !u32i
+
+ // LLVM-LABEL: _knot_mask32
+ // LLVM: bitcast i32 %{{.*}} to <32 x i1>
+ // LLVM: xor <32 x i1>
+ // LLVM: bitcast <32 x i1> {{.*}} to i32
+
+ // OGCG-LABEL: _knot_mask32
+ // OGCG: bitcast i32 %{{.*}} to <32 x i1>
+ // OGCG: xor <32 x i1>
+ // OGCG: bitcast <32 x i1> {{.*}} to i32
+ return _knot_mask32(A);
+}
+
+__mmask64 test_knot_mask64(__mmask64 A) {
+ // CIR-LABEL: _knot_mask64
+ // CIR: cir.cast bitcast {{.*}} : !u64i -> !cir.vector<64 x !cir.int<u, 1>>
+ // CIR: cir.unary(not, {{.*}}) : !cir.vector<64 x !cir.int<u, 1>>
+ // CIR: cir.cast bitcast {{.*}} : !cir.vector<64 x !cir.int<u, 1>> -> !u64i
+
+ // LLVM-LABEL: _knot_mask64
+ // LLVM: bitcast i64 %{{.*}} to <64 x i1>
+ // LLVM: xor <64 x i1>
+ // LLVM: bitcast <64 x i1> {{.*}} to i64
+
+ // OGCG-LABEL: _knot_mask64
+ // OGCG: bitcast i64 %{{.*}} to <64 x i1>
+ // OGCG: xor <64 x i1>
+ // OGCG: bitcast <64 x i1> {{.*}} to i64
+ return _knot_mask64(A);
+}
+
+// Multiple user-level mask helpers inline to this same kmov...
[truncated]
``````````
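As a reading aid, a scalar sketch (illustrative names, not from the patch) of the reference semantics the kortest lowering above implements: comparing the bitwise OR of the two masks against all-ones (the `c` variant) or all-zeros (the `z` variant):

```c
// Reference semantics for the 32-bit kortest variants; the lowering in
// CIRGenBuiltinX86.cpp performs the same compares on the OR of the two
// vXi1 mask vectors after bitcasting back to an integer.
static inline unsigned char kortestc_ref32(unsigned int a, unsigned int b) {
  return (a | b) == 0xFFFFFFFFu; // all bits set   -> kortestc returns 1
}
static inline unsigned char kortestz_ref32(unsigned int a, unsigned int b) {
  return (a | b) == 0u;          // all bits clear -> kortestz returns 1
}
```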
</details>
https://github.com/llvm/llvm-project/pull/169985
More information about the cfe-commits mailing list