[clang] [CIR] Implement Type promotion for VectorType (PR #158715)
Amr Hesham via cfe-commits
cfe-commits at lists.llvm.org
Fri Sep 19 11:03:26 PDT 2025
https://github.com/AmrDeveloper updated https://github.com/llvm/llvm-project/pull/158715
From a6db44e76410edb509c260ac02a4fa3715c8f365 Mon Sep 17 00:00:00 2001
From: AmrDeveloper <amr96 at programmer.net>
Date: Mon, 15 Sep 2025 20:52:56 +0200
Subject: [PATCH] [CIR] Implement Type promotion for VectorType
---
clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 9 +++--
clang/test/CIR/CodeGen/vector-ext.cpp | 40 ++++++++++++++++++++++
clang/test/CIR/CodeGen/vector.cpp | 40 ++++++++++++++++++++++
3 files changed, 84 insertions(+), 5 deletions(-)
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
index 7c2029387dda5..fcf30b7ae1419 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
@@ -878,10 +878,9 @@ class ScalarExprEmitter : public StmtVisitor<ScalarExprEmitter, mlir::Value> {
}
if (ty.UseExcessPrecision(cgf.getContext())) {
- if (ty->getAs<VectorType>()) {
- assert(!cir::MissingFeatures::vectorType());
- cgf.cgm.errorNYI("getPromotionType: promotion to vector type");
- return QualType();
+ if (auto *vt = ty->getAs<VectorType>()) {
+ unsigned numElements = vt->getNumElements();
+ return ctx.getVectorType(ctx.FloatTy, numElements, vt->getVectorKind());
}
return cgf.getContext().FloatTy;
}
@@ -2344,4 +2343,4 @@ mlir::Value CIRGenFunction::emitScalarPrePostIncDec(const UnaryOperator *e,
bool isPre) {
return ScalarExprEmitter(*this, builder)
.emitScalarPrePostIncDec(e, lv, kind, isPre);
-}
+}
\ No newline at end of file
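
For context, the promotion rule in the hunk above mirrors what classic CodeGen does for excess-precision scalars, extended element-wise to vectors: an _Float16 vector is widened to a float vector of the same length and vector kind, and the result is truncated back after the operation. A minimal standalone sketch of that rule (the free helper name promoteExcessPrecisionType is hypothetical, for illustration only; the in-tree change lives inside ScalarExprEmitter's getPromotionType and uses its local ctx):

#include "clang/AST/ASTContext.h"
#include "clang/AST/Type.h"

using namespace clang;

// Sketch: under excess precision, <N x _Float16> promotes to
// <N x float> with the vector kind preserved; a scalar _Float16
// promotes to plain float.
static QualType promoteExcessPrecisionType(ASTContext &ctx, QualType ty) {
  if (const auto *vt = ty->getAs<VectorType>())
    return ctx.getVectorType(ctx.FloatTy, vt->getNumElements(),
                             vt->getVectorKind());
  return ctx.FloatTy;
}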
diff --git a/clang/test/CIR/CodeGen/vector-ext.cpp b/clang/test/CIR/CodeGen/vector-ext.cpp
index ad94e6737c615..adf0d18acfab1 100644
--- a/clang/test/CIR/CodeGen/vector-ext.cpp
+++ b/clang/test/CIR/CodeGen/vector-ext.cpp
@@ -13,6 +13,7 @@ typedef int vi3 __attribute__((ext_vector_type(3)));
typedef int vi2 __attribute__((ext_vector_type(2)));
typedef float vf4 __attribute__((ext_vector_type(4)));
typedef double vd2 __attribute__((ext_vector_type(2)));
+typedef _Float16 vh4 __attribute__((ext_vector_type(4)));
vi4 vec_a;
// CIR: cir.global external @[[VEC_A:.*]] = #cir.zero : !cir.vector<4 x !s32i>
@@ -1216,3 +1217,42 @@ void foo22() {
// OGCG: %[[VEC_OR:.*]] = or <4 x i1> %[[NE_A_ZERO]], %[[NE_B_ZERO]]
// OGCG: %[[RESULT:.*]] = sext <4 x i1> %[[VEC_OR]] to <4 x i32>
// OGCG: store <4 x i32> %[[RESULT]], ptr %[[C_ADDR]], align 16
+
+void foo24() {
+ vh4 a;
+ vh4 b;
+ vh4 c = a + b;
+}
+
+// CIR: %[[A_ADDR:.*]] = cir.alloca !cir.vector<4 x !cir.f16>, !cir.ptr<!cir.vector<4 x !cir.f16>>, ["a"]
+// CIR: %[[B_ADDR:.*]] = cir.alloca !cir.vector<4 x !cir.f16>, !cir.ptr<!cir.vector<4 x !cir.f16>>, ["b"]
+// CIR: %[[C_ADDR:.*]] = cir.alloca !cir.vector<4 x !cir.f16>, !cir.ptr<!cir.vector<4 x !cir.f16>>, ["c", init]
+// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.vector<4 x !cir.f16>>, !cir.vector<4 x !cir.f16>
+// CIR: %[[TMP_A_F16:.*]] = cir.cast(floating, %[[TMP_A]] : !cir.vector<4 x !cir.f16>), !cir.vector<4 x !cir.float>
+// CIR: %[[TMP_B:.*]] = cir.load{{.*}} %[[B_ADDR]] : !cir.ptr<!cir.vector<4 x !cir.f16>>, !cir.vector<4 x !cir.f16>
+// CIR: %[[TMP_B_F16:.*]] = cir.cast(floating, %[[TMP_B]] : !cir.vector<4 x !cir.f16>), !cir.vector<4 x !cir.float>
+// CIR: %[[RESULT:.*]] = cir.binop(add, %[[TMP_A_F16]], %[[TMP_B_F16]]) : !cir.vector<4 x !cir.float>
+// CIR: %[[RESULT_VF16:.*]] = cir.cast(floating, %[[RESULT]] : !cir.vector<4 x !cir.float>), !cir.vector<4 x !cir.f16>
+// CIR: cir.store{{.*}} %[[RESULT_VF16]], %[[C_ADDR]] : !cir.vector<4 x !cir.f16>, !cir.ptr<!cir.vector<4 x !cir.f16>>
+
+// LLVM: %[[A_ADDR:.*]] = alloca <4 x half>, i64 1, align 8
+// LLVM: %[[B_ADDR:.*]] = alloca <4 x half>, i64 1, align 8
+// LLVM: %[[C_ADDR:.*]] = alloca <4 x half>, i64 1, align 8
+// LLVM: %[[TMP_A:.*]] = load <4 x half>, ptr %[[A_ADDR]], align 8
+// LLVM: %[[TMP_A_F16:.*]] = fpext <4 x half> %[[TMP_A]] to <4 x float>
+// LLVM: %[[TMP_B:.*]] = load <4 x half>, ptr %[[B_ADDR]], align 8
+// LLVM: %[[TMP_B_F16:.*]] = fpext <4 x half> %[[TMP_B]] to <4 x float>
+// LLVM: %[[RESULT:.*]] = fadd <4 x float> %[[TMP_A_F16]], %[[TMP_B_F16]]
+// LLVM: %[[RESULT_VF16:.*]] = fptrunc <4 x float> %[[RESULT]] to <4 x half>
+// LLVM: store <4 x half> %[[RESULT_VF16]], ptr %[[C_ADDR]], align 8
+
+// OGCG: %[[A_ADDR:.*]] = alloca <4 x half>, align 8
+// OGCG: %[[B_ADDR:.*]] = alloca <4 x half>, align 8
+// OGCG: %[[C_ADDR:.*]] = alloca <4 x half>, align 8
+// OGCG: %[[TMP_A:.*]] = load <4 x half>, ptr %[[A_ADDR]], align 8
+// OGCG: %[[TMP_A_F16:.*]] = fpext <4 x half> %[[TMP_A]] to <4 x float>
+// OGCG: %[[TMP_B:.*]] = load <4 x half>, ptr %[[B_ADDR]], align 8
+// OGCG: %[[TMP_B_F16:.*]] = fpext <4 x half> %[[TMP_B]] to <4 x float>
+// OGCG: %[[RESULT:.*]] = fadd <4 x float> %[[TMP_A_F16]], %[[TMP_B_F16]]
+// OGCG: %[[RESULT_VF16:.*]] = fptrunc <4 x float> %[[RESULT]] to <4 x half>
+// OGCG: store <4 x half> %[[RESULT_VF16]], ptr %[[C_ADDR]], align 8
diff --git a/clang/test/CIR/CodeGen/vector.cpp b/clang/test/CIR/CodeGen/vector.cpp
index 72acd8b395623..7e148d4c49f6c 100644
--- a/clang/test/CIR/CodeGen/vector.cpp
+++ b/clang/test/CIR/CodeGen/vector.cpp
@@ -12,6 +12,7 @@ typedef unsigned int uvi4 __attribute__((vector_size(16)));
typedef float vf4 __attribute__((vector_size(16)));
typedef double vd2 __attribute__((vector_size(16)));
typedef long long vll2 __attribute__((vector_size(16)));
+typedef _Float16 vh4 __attribute__((vector_size(8)));
vi4 vec_a;
// CIR: cir.global external @[[VEC_A:.*]] = #cir.zero : !cir.vector<4 x !s32i>
@@ -1258,3 +1259,42 @@ void foo25() {
// OGCG: %[[VEC_OR:.*]] = or <4 x i1> %[[NE_A_ZERO]], %[[NE_B_ZERO]]
// OGCG: %[[RESULT:.*]] = sext <4 x i1> %[[VEC_OR]] to <4 x i32>
// OGCG: store <4 x i32> %[[RESULT]], ptr %[[C_ADDR]], align 16
+
+void foo27() {
+ vh4 a;
+ vh4 b;
+ vh4 c = a + b;
+}
+
+// CIR: %[[A_ADDR:.*]] = cir.alloca !cir.vector<4 x !cir.f16>, !cir.ptr<!cir.vector<4 x !cir.f16>>, ["a"]
+// CIR: %[[B_ADDR:.*]] = cir.alloca !cir.vector<4 x !cir.f16>, !cir.ptr<!cir.vector<4 x !cir.f16>>, ["b"]
+// CIR: %[[C_ADDR:.*]] = cir.alloca !cir.vector<4 x !cir.f16>, !cir.ptr<!cir.vector<4 x !cir.f16>>, ["c", init]
+// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.vector<4 x !cir.f16>>, !cir.vector<4 x !cir.f16>
+// CIR: %[[TMP_A_F16:.*]] = cir.cast(floating, %[[TMP_A]] : !cir.vector<4 x !cir.f16>), !cir.vector<4 x !cir.float>
+// CIR: %[[TMP_B:.*]] = cir.load{{.*}} %[[B_ADDR]] : !cir.ptr<!cir.vector<4 x !cir.f16>>, !cir.vector<4 x !cir.f16>
+// CIR: %[[TMP_B_F16:.*]] = cir.cast(floating, %[[TMP_B]] : !cir.vector<4 x !cir.f16>), !cir.vector<4 x !cir.float>
+// CIR: %[[RESULT:.*]] = cir.binop(add, %[[TMP_A_F16]], %[[TMP_B_F16]]) : !cir.vector<4 x !cir.float>
+// CIR: %[[RESULT_VF16:.*]] = cir.cast(floating, %[[RESULT]] : !cir.vector<4 x !cir.float>), !cir.vector<4 x !cir.f16>
+// CIR: cir.store{{.*}} %[[RESULT_VF16]], %[[C_ADDR]] : !cir.vector<4 x !cir.f16>, !cir.ptr<!cir.vector<4 x !cir.f16>>
+
+// LLVM: %[[A_ADDR:.*]] = alloca <4 x half>, i64 1, align 8
+// LLVM: %[[B_ADDR:.*]] = alloca <4 x half>, i64 1, align 8
+// LLVM: %[[C_ADDR:.*]] = alloca <4 x half>, i64 1, align 8
+// LLVM: %[[TMP_A:.*]] = load <4 x half>, ptr %[[A_ADDR]], align 8
+// LLVM: %[[TMP_A_F16:.*]] = fpext <4 x half> %[[TMP_A]] to <4 x float>
+// LLVM: %[[TMP_B:.*]] = load <4 x half>, ptr %[[B_ADDR]], align 8
+// LLVM: %[[TMP_B_F16:.*]] = fpext <4 x half> %[[TMP_B]] to <4 x float>
+// LLVM: %[[RESULT:.*]] = fadd <4 x float> %[[TMP_A_F16]], %[[TMP_B_F16]]
+// LLVM: %[[RESULT_VF16:.*]] = fptrunc <4 x float> %[[RESULT]] to <4 x half>
+// LLVM: store <4 x half> %[[RESULT_VF16]], ptr %[[C_ADDR]], align 8
+
+// OGCG: %[[A_ADDR:.*]] = alloca <4 x half>, align 8
+// OGCG: %[[B_ADDR:.*]] = alloca <4 x half>, align 8
+// OGCG: %[[C_ADDR:.*]] = alloca <4 x half>, align 8
+// OGCG: %[[TMP_A:.*]] = load <4 x half>, ptr %[[A_ADDR]], align 8
+// OGCG: %[[TMP_A_F16:.*]] = fpext <4 x half> %[[TMP_A]] to <4 x float>
+// OGCG: %[[TMP_B:.*]] = load <4 x half>, ptr %[[B_ADDR]], align 8
+// OGCG: %[[TMP_B_F16:.*]] = fpext <4 x half> %[[TMP_B]] to <4 x float>
+// OGCG: %[[RESULT:.*]] = fadd <4 x float> %[[TMP_A_F16]], %[[TMP_B_F16]]
+// OGCG: %[[RESULT_VF16:.*]] = fptrunc <4 x float> %[[RESULT]] to <4 x half>
+// OGCG: store <4 x half> %[[RESULT_VF16]], ptr %[[C_ADDR]], align 8
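
A quick way to exercise the new path outside the lit tests, as a hedged reproducer: it assumes an x86-64 target, where _Float16 arithmetic uses float excess precision unless AVX512-FP16 is available, and the -fclangir driver flag to route codegen through CIR.

// repro.c -- compile with:
//   clang -fclangir --target=x86_64-unknown-linux-gnu -S -emit-llvm repro.c
typedef _Float16 vh4 __attribute__((vector_size(8)));

vh4 add(vh4 a, vh4 b) {
  // Expect fpext to <4 x float>, fadd, then fptrunc back to <4 x half>.
  return a + b;
}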