[clang] [Clang] Change placeholder from `undef` to `poison` (PR #117064)
via cfe-commits
cfe-commits at lists.llvm.org
Wed Nov 20 13:52:48 PST 2024
llvmbot wrote:
@llvm/pr-subscribers-clang-codegen
Author: Pedro Lobo (pedroclobo)
Changes:
Call `@llvm.vector.insert` with a `poison` subvector when coercing a fixed vector to a scalable vector with the same element type.
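For illustration, a minimal before/after sketch of the IR emitted for this coercion, mirroring the updated test checks below (the concrete vector types are just one example, and the two lines are not meant to form a single valid module):

```llvm
; Before: the fixed-length value was inserted into an undef scalable base vector.
%cast.scalable = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> undef, <4 x i32> %v, i64 0)

; After: the placeholder base vector is poison instead. Its lanes outside the
; inserted subvector are never observed, so poison is a safe placeholder and
; is friendlier to the optimizer than undef.
%cast.scalable = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> poison, <4 x i32> %v, i64 0)
```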
---
Patch is 506.55 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/117064.diff
15 Files Affected:
- (modified) clang/lib/CodeGen/CGCall.cpp (+2-2)
- (modified) clang/test/CodeGen/AArch64/pure-scalable-args.c (+10-10)
- (modified) clang/test/CodeGen/AArch64/sve-acle-__ARM_FEATURE_SVE_VECTOR_OPERATORS.cpp (+1-1)
- (modified) clang/test/CodeGen/AArch64/sve-vls-arith-ops.c (+156-156)
- (modified) clang/test/CodeGen/AArch64/sve-vls-bitwise-ops.c (+36-36)
- (modified) clang/test/CodeGen/AArch64/sve-vls-compare-ops.c (+72-72)
- (modified) clang/test/CodeGen/AArch64/sve-vls-shift-ops.c (+48-48)
- (modified) clang/test/CodeGen/RISCV/attr-rvv-vector-bits-cast.c (+7-7)
- (modified) clang/test/CodeGen/RISCV/attr-rvv-vector-bits-codegen.c (+11-11)
- (modified) clang/test/CodeGen/RISCV/rvv-vls-arith-ops.c (+144-144)
- (modified) clang/test/CodeGen/RISCV/rvv-vls-bitwise-ops.c (+32-32)
- (modified) clang/test/CodeGen/RISCV/rvv-vls-compare-ops.c (+60-60)
- (modified) clang/test/CodeGen/RISCV/rvv-vls-shift-ops.c (+48-48)
- (modified) clang/test/CodeGen/attr-arm-sve-vector-bits-cast.c (+7-7)
- (modified) clang/test/CodeGen/attr-arm-sve-vector-bits-codegen.c (+5-5)
``````````diff
diff --git a/clang/lib/CodeGen/CGCall.cpp b/clang/lib/CodeGen/CGCall.cpp
index 35d495d4dfab82..7dba2b779f5ef5 100644
--- a/clang/lib/CodeGen/CGCall.cpp
+++ b/clang/lib/CodeGen/CGCall.cpp
@@ -1314,10 +1314,10 @@ static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty,
}
if (ScalableDstTy->getElementType() == FixedSrcTy->getElementType()) {
auto *Load = CGF.Builder.CreateLoad(Src);
- auto *UndefVec = llvm::UndefValue::get(ScalableDstTy);
+ auto *PoisonVec = llvm::PoisonValue::get(ScalableDstTy);
auto *Zero = llvm::Constant::getNullValue(CGF.CGM.Int64Ty);
llvm::Value *Result = CGF.Builder.CreateInsertVector(
- ScalableDstTy, UndefVec, Load, Zero, "cast.scalable");
+ ScalableDstTy, PoisonVec, Load, Zero, "cast.scalable");
if (ScalableDstTy != Ty)
Result = CGF.Builder.CreateBitCast(Result, Ty);
return Result;
diff --git a/clang/test/CodeGen/AArch64/pure-scalable-args.c b/clang/test/CodeGen/AArch64/pure-scalable-args.c
index 53d5ce4e8c9d9b..51eda3f93943dc 100644
--- a/clang/test/CodeGen/AArch64/pure-scalable-args.c
+++ b/clang/test/CodeGen/AArch64/pure-scalable-args.c
@@ -70,23 +70,23 @@ void test_argpass_simple(PST *p) {
// CHECK-AAPCS: define dso_local void @test_argpass_simple(ptr nocapture noundef readonly %p)
// CHECK-AAPCS-NEXT: entry:
// CHECK-AAPCS-NEXT: %0 = load <2 x i8>, ptr %p, align 16
-// CHECK-AAPCS-NEXT: %cast.scalable = tail call <vscale x 2 x i8> @llvm.vector.insert.nxv2i8.v2i8(<vscale x 2 x i8> undef, <2 x i8> %0, i64 0)
+// CHECK-AAPCS-NEXT: %cast.scalable = tail call <vscale x 2 x i8> @llvm.vector.insert.nxv2i8.v2i8(<vscale x 2 x i8> poison, <2 x i8> %0, i64 0)
// CHECK-AAPCS-NEXT: %1 = bitcast <vscale x 2 x i8> %cast.scalable to <vscale x 16 x i1>
// CHECK-AAPCS-NEXT: %2 = getelementptr inbounds nuw i8, ptr %p, i64 16
// CHECK-AAPCS-NEXT: %3 = load <2 x double>, ptr %2, align 16
-// CHECK-AAPCS-NEXT: %cast.scalable1 = tail call <vscale x 2 x double> @llvm.vector.insert.nxv2f64.v2f64(<vscale x 2 x double> undef, <2 x double> %3, i64 0)
+// CHECK-AAPCS-NEXT: %cast.scalable1 = tail call <vscale x 2 x double> @llvm.vector.insert.nxv2f64.v2f64(<vscale x 2 x double> poison, <2 x double> %3, i64 0)
// CHECK-AAPCS-NEXT: %4 = getelementptr inbounds nuw i8, ptr %p, i64 32
// CHECK-AAPCS-NEXT: %5 = load <4 x float>, ptr %4, align 16
-// CHECK-AAPCS-NEXT: %cast.scalable2 = tail call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v4f32(<vscale x 4 x float> undef, <4 x float> %5, i64 0)
+// CHECK-AAPCS-NEXT: %cast.scalable2 = tail call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v4f32(<vscale x 4 x float> poison, <4 x float> %5, i64 0)
// CHECK-AAPCS-NEXT: %6 = getelementptr inbounds nuw i8, ptr %p, i64 48
// CHECK-AAPCS-NEXT: %7 = load <4 x float>, ptr %6, align 16
-// CHECK-AAPCS-NEXT: %cast.scalable3 = tail call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v4f32(<vscale x 4 x float> undef, <4 x float> %7, i64 0)
+// CHECK-AAPCS-NEXT: %cast.scalable3 = tail call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v4f32(<vscale x 4 x float> poison, <4 x float> %7, i64 0)
// CHECK-AAPCS-NEXT: %8 = getelementptr inbounds nuw i8, ptr %p, i64 64
// CHECK-AAPCS-NEXT: %9 = load <16 x i8>, ptr %8, align 16
-// CHECK-AAPCS-NEXT: %cast.scalable4 = tail call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8> undef, <16 x i8> %9, i64 0)
+// CHECK-AAPCS-NEXT: %cast.scalable4 = tail call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8> poison, <16 x i8> %9, i64 0)
// CHECK-AAPCS-NEXT: %10 = getelementptr inbounds nuw i8, ptr %p, i64 80
// CHECK-AAPCS-NEXT: %11 = load <2 x i8>, ptr %10, align 16
-// CHECK-AAPCS-NEXT: %cast.scalable5 = tail call <vscale x 2 x i8> @llvm.vector.insert.nxv2i8.v2i8(<vscale x 2 x i8> undef, <2 x i8> %11, i64 0)
+// CHECK-AAPCS-NEXT: %cast.scalable5 = tail call <vscale x 2 x i8> @llvm.vector.insert.nxv2i8.v2i8(<vscale x 2 x i8> poison, <2 x i8> %11, i64 0)
// CHECK-AAPCS-NEXT: %12 = bitcast <vscale x 2 x i8> %cast.scalable5 to <vscale x 16 x i1>
// CHECK-AAPCS-NEXT: tail call void @argpass_simple_callee(<vscale x 16 x i1> %1, <vscale x 2 x double> %cast.scalable1, <vscale x 4 x float> %cast.scalable2, <vscale x 4 x float> %cast.scalable3, <vscale x 16 x i8> %cast.scalable4, <vscale x 16 x i1> %12)
// CHECK-AAPCS-NEXT: ret void
@@ -431,9 +431,9 @@ void test_va_arg(int n, ...) {
// CHECK-AAPCS-NEXT: %v.sroa.43.0.vaarg.addr.sroa_idx = getelementptr inbounds i8, ptr %vaarg.addr, i64 48
// CHECK-AAPCS-NEXT: %v.sroa.43.0.copyload = load <4 x float>, ptr %v.sroa.43.0.vaarg.addr.sroa_idx, align 16
// CHECK-AAPCS-NEXT: call void @llvm.va_end.p0(ptr nonnull %ap)
-// CHECK-AAPCS-NEXT: %cast.scalable = call <vscale x 2 x i8> @llvm.vector.insert.nxv2i8.v2i8(<vscale x 2 x i8> undef, <2 x i8> %v.sroa.0.0.copyload, i64 0)
+// CHECK-AAPCS-NEXT: %cast.scalable = call <vscale x 2 x i8> @llvm.vector.insert.nxv2i8.v2i8(<vscale x 2 x i8> poison, <2 x i8> %v.sroa.0.0.copyload, i64 0)
// CHECK-AAPCS-NEXT: %3 = bitcast <vscale x 2 x i8> %cast.scalable to <vscale x 16 x i1>
-// CHECK-AAPCS-NEXT: %cast.scalable2 = call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v4f32(<vscale x 4 x float> undef, <4 x float> %v.sroa.43.0.copyload, i64 0)
+// CHECK-AAPCS-NEXT: %cast.scalable2 = call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v4f32(<vscale x 4 x float> poison, <4 x float> %v.sroa.43.0.copyload, i64 0)
// CHECK-AAPCS-NEXT: call void @use1(<vscale x 16 x i1> noundef %3, <vscale x 4 x float> noundef %cast.scalable2)
// CHECK-AAPCS-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr nonnull %ap)
// CHECK-AAPCS-NEXT: ret void
@@ -452,9 +452,9 @@ void test_va_arg(int n, ...) {
// CHECK-DARWIN-NEXT: %v.sroa.43.0..sroa_idx = getelementptr inbounds i8, ptr %0, i64 48
// CHECK-DARWIN-NEXT: %v.sroa.43.0.copyload = load <4 x float>, ptr %v.sroa.43.0..sroa_idx, align 16
// CHECK-DARWIN-NEXT: call void @llvm.va_end.p0(ptr nonnull %ap)
-// CHECK-DARWIN-NEXT: %cast.scalable = call <vscale x 2 x i8> @llvm.vector.insert.nxv2i8.v2i8(<vscale x 2 x i8> undef, <2 x i8> %v.sroa.0.0.copyload, i64 0)
+// CHECK-DARWIN-NEXT: %cast.scalable = call <vscale x 2 x i8> @llvm.vector.insert.nxv2i8.v2i8(<vscale x 2 x i8> poison, <2 x i8> %v.sroa.0.0.copyload, i64 0)
// CHECK-DARWIN-NEXT: %1 = bitcast <vscale x 2 x i8> %cast.scalable to <vscale x 16 x i1>
-// CHECK-DARWIN-NEXT: %cast.scalable2 = call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v4f32(<vscale x 4 x float> undef, <4 x float> %v.sroa.43.0.copyload, i64 0)
+// CHECK-DARWIN-NEXT: %cast.scalable2 = call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v4f32(<vscale x 4 x float> poison, <4 x float> %v.sroa.43.0.copyload, i64 0)
// CHECK-DARWIN-NEXT: call void @use1(<vscale x 16 x i1> noundef %1, <vscale x 4 x float> noundef %cast.scalable2)
// CHECK-DARWIN-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr nonnull %ap)
// CHECK-DARWIN-NEXT: ret void
diff --git a/clang/test/CodeGen/AArch64/sve-acle-__ARM_FEATURE_SVE_VECTOR_OPERATORS.cpp b/clang/test/CodeGen/AArch64/sve-acle-__ARM_FEATURE_SVE_VECTOR_OPERATORS.cpp
index 30ea73b63bce16..e82069aab24864 100644
--- a/clang/test/CodeGen/AArch64/sve-acle-__ARM_FEATURE_SVE_VECTOR_OPERATORS.cpp
+++ b/clang/test/CodeGen/AArch64/sve-acle-__ARM_FEATURE_SVE_VECTOR_OPERATORS.cpp
@@ -52,7 +52,7 @@ void test02() {
// CHECK-NEXT: [[X:%.*]] = tail call <[[#div(VBITS, 32)]] x i32> @llvm.vector.extract.v[[#div(VBITS, 32)]]i32.nxv4i32(<vscale x 4 x i32> [[X_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[Y:%.*]] = tail call <[[#div(VBITS, 32)]] x i32> @llvm.vector.extract.v[[#div(VBITS, 32)]]i32.nxv4i32(<vscale x 4 x i32> [[X_COERCE1:%.*]], i64 0)
// CHECK-NEXT: [[ADD:%.*]] = add <[[#div(VBITS, 32)]] x i32> [[Y]], [[X]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v[[#div(VBITS, 32)]]i32(<vscale x 4 x i32> undef, <[[#div(VBITS, 32)]] x i32> [[ADD]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v[[#div(VBITS, 32)]]i32(<vscale x 4 x i32> poison, <[[#div(VBITS, 32)]] x i32> [[ADD]], i64 0)
// CHECK-NEXT: ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
typedef svint32_t vec __attribute__((arm_sve_vector_bits(N)));
auto f(vec x, vec y) { return x + y; } // Returns a vec.
diff --git a/clang/test/CodeGen/AArch64/sve-vls-arith-ops.c b/clang/test/CodeGen/AArch64/sve-vls-arith-ops.c
index a4648e9d59dcbf..571043bb29e201 100644
--- a/clang/test/CodeGen/AArch64/sve-vls-arith-ops.c
+++ b/clang/test/CodeGen/AArch64/sve-vls-arith-ops.c
@@ -32,7 +32,7 @@ typedef svbool_t fixed_bool_t __attribute__((arm_sve_vector_bits(N)));
// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[ADD:%.*]] = add <64 x i8> [[A]], [[B]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[ADD]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> poison, <64 x i8> [[ADD]], i64 0)
// CHECK-NEXT: ret <vscale x 16 x i8> [[CASTSCALABLESVE]]
//
fixed_int8_t add_i8(fixed_int8_t a, fixed_int8_t b) {
@@ -44,7 +44,7 @@ fixed_int8_t add_i8(fixed_int8_t a, fixed_int8_t b) {
// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[ADD:%.*]] = add <32 x i16> [[A]], [[B]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[ADD]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> poison, <32 x i16> [[ADD]], i64 0)
// CHECK-NEXT: ret <vscale x 8 x i16> [[CASTSCALABLESVE]]
//
fixed_int16_t add_i16(fixed_int16_t a, fixed_int16_t b) {
@@ -56,7 +56,7 @@ fixed_int16_t add_i16(fixed_int16_t a, fixed_int16_t b) {
// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[ADD:%.*]] = add <16 x i32> [[A]], [[B]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[ADD]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> poison, <16 x i32> [[ADD]], i64 0)
// CHECK-NEXT: ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
//
fixed_int32_t add_i32(fixed_int32_t a, fixed_int32_t b) {
@@ -68,7 +68,7 @@ fixed_int32_t add_i32(fixed_int32_t a, fixed_int32_t b) {
// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[ADD:%.*]] = add <8 x i64> [[A]], [[B]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[ADD]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> poison, <8 x i64> [[ADD]], i64 0)
// CHECK-NEXT: ret <vscale x 2 x i64> [[CASTSCALABLESVE]]
//
fixed_int64_t add_i64(fixed_int64_t a, fixed_int64_t b) {
@@ -80,7 +80,7 @@ fixed_int64_t add_i64(fixed_int64_t a, fixed_int64_t b) {
// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[ADD:%.*]] = add <64 x i8> [[A]], [[B]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[ADD]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> poison, <64 x i8> [[ADD]], i64 0)
// CHECK-NEXT: ret <vscale x 16 x i8> [[CASTSCALABLESVE]]
//
fixed_uint8_t add_u8(fixed_uint8_t a, fixed_uint8_t b) {
@@ -92,7 +92,7 @@ fixed_uint8_t add_u8(fixed_uint8_t a, fixed_uint8_t b) {
// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[ADD:%.*]] = add <32 x i16> [[A]], [[B]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[ADD]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> poison, <32 x i16> [[ADD]], i64 0)
// CHECK-NEXT: ret <vscale x 8 x i16> [[CASTSCALABLESVE]]
//
fixed_uint16_t add_u16(fixed_uint16_t a, fixed_uint16_t b) {
@@ -104,7 +104,7 @@ fixed_uint16_t add_u16(fixed_uint16_t a, fixed_uint16_t b) {
// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[ADD:%.*]] = add <16 x i32> [[A]], [[B]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[ADD]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> poison, <16 x i32> [[ADD]], i64 0)
// CHECK-NEXT: ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
//
fixed_uint32_t add_u32(fixed_uint32_t a, fixed_uint32_t b) {
@@ -116,7 +116,7 @@ fixed_uint32_t add_u32(fixed_uint32_t a, fixed_uint32_t b) {
// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[ADD:%.*]] = add <8 x i64> [[A]], [[B]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[ADD]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> poison, <8 x i64> [[ADD]], i64 0)
// CHECK-NEXT: ret <vscale x 2 x i64> [[CASTSCALABLESVE]]
//
fixed_uint64_t add_u64(fixed_uint64_t a, fixed_uint64_t b) {
@@ -131,7 +131,7 @@ fixed_uint64_t add_u64(fixed_uint64_t a, fixed_uint64_t b) {
// CHECK-NEXT: [[CONV2:%.*]] = fpext <32 x half> [[B]] to <32 x float>
// CHECK-NEXT: [[ADD:%.*]] = fadd <32 x float> [[CONV]], [[CONV2]]
// CHECK-NEXT: [[CONV3:%.*]] = fptrunc <32 x float> [[ADD]] to <32 x half>
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x half> @llvm.vector.insert.nxv8f16.v32f16(<vscale x 8 x half> undef, <32 x half> [[CONV3]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x half> @llvm.vector.insert.nxv8f16.v32f16(<vscale x 8 x half> poison, <32 x half> [[CONV3]], i64 0)
// CHECK-NEXT: ret <vscale x 8 x half> [[CASTSCALABLESVE]]
//
fixed_float16_t add_f16(fixed_float16_t a, fixed_float16_t b) {
@@ -143,7 +143,7 @@ fixed_float16_t add_f16(fixed_float16_t a, fixed_float16_t b) {
// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.vector.extract.v16f32.nxv4f32(<vscale x 4 x float> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <16 x float> @llvm.vector.extract.v16f32.nxv4f32(<vscale x 4 x float> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[ADD:%.*]] = fadd <16 x float> [[A]], [[B]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v16f32(<vscale x 4 x float> undef, <16 x float> [[ADD]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v16f32(<vscale x 4 x float> poison, <16 x float> [[ADD]], i64 0)
// CHECK-NEXT: ret <vscale x 4 x float> [[CASTSCALABLESVE]]
//
fixed_float32_t add_f32(fixed_float32_t a, fixed_float32_t b) {
@@ -155,7 +155,7 @@ fixed_float32_t add_f32(fixed_float32_t a, fixed_float32_t b) {
// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.vector.extract.v8f64.nxv2f64(<vscale x 2 x double> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <8 x double> @llvm.vector.extract.v8f64.nxv2f64(<vscale x 2 x double> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[ADD:%.*]] = fadd <8 x double> [[A]], [[B]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x double> @llvm.vector.insert.nxv2f64.v8f64(<vscale x 2 x double> undef, <8 x double> [[ADD]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x double> @llvm.vector.insert.nxv2f64.v8f64(<vscale x 2 x double> poison, <8 x double> [[ADD]], i64 0)
// CHECK-NEXT: ret <vscale x 2 x double> [[CASTSCALABLESVE]]
//
fixed_float64_t add_f64(fixed_float64_t a, fixed_float64_t b) {
@@ -167,7 +167,7 @@ fixed_float64_t add_f64(fixed_float64_t a, fixed_float64_t b) {
// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[ADD:%.*]] = add <64 x i8> [[A]], [[B]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[ADD]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> poison, <64 x i8> [[ADD]], i64 0)
// CHECK-NEXT: ret <vscale x 16 x i8> [[CASTSCALABLESVE]]
//
fixed_int8_t add_inplace_i8(fixed_int8_t a, fixed_int8_t b) {
@@ -179,7 +179,7 @@ fixed_int8_t add_inplace_i8(fixed_int8_t a, fixed_int8_t b) {
// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[ADD:%.*]] = add <32 x i16> [[A]], [[B]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[ADD]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> poison, <32 x i16> [[ADD]], i64 0)
// CHECK-NEXT: ret <vscale x 8 x i16> [[CASTSCALABLESVE]]
//
fixed_int16_t add_inplace_i16(fixed_int16_t a, fixed_int16_t b) {
@@ -191,7 +191,7 @@ fixed_int16_t add_inplace_i16(fixed_int16_t a, fixed_int16_t b) {
// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[ADD:%.*]] = add <16 x i32> [[A]], [[B]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[ADD]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> poison, <16 x i32> [[ADD]], i64 0)
// CHECK-NEXT: ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
//
fixed_int32_t add_inplace_i32(fixed_int32_t a, fixed_int32_t b) {
@@ -203,7 +203,7 @@ fixed_int32_t add_inplace_i32(fixed_int32_t a, fixed_int32_t b) {
// CHECK-NEXT: [[A:%.*]] = c...
[truncated]
``````````
https://github.com/llvm/llvm-project/pull/117064