[llvm-branch-commits] [clang] [HLSL] Overloads for `lerp` with a scalar weight (PR #137877)
Justin Bogner via llvm-branch-commits
llvm-branch-commits@lists.llvm.org
Tue Apr 29 16:52:11 PDT 2025
https://github.com/bogner updated https://github.com/llvm/llvm-project/pull/137877
From 20a1723d5f4f9132e44ba7d76e11f94ca367e269 Mon Sep 17 00:00:00 2001
From: Justin Bogner <mail@justinbogner.com>
Date: Tue, 29 Apr 2025 11:59:37 -0700
Subject: [PATCH 1/2] [HLSL] Overloads for `lerp` with a scalar weight
This adds overloads of the `lerp` function that accept a scalar for the weight
parameter, splatting it to the appropriate vector type.
Fixes #137827
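
For illustration, a minimal usage sketch (hypothetical shader code, not part of this
patch): with this change the weight can be passed as a single scalar, which is splatted
across the vector lanes, so the two calls below are equivalent.

    float3 blend(float3 a, float3 b, float t) {
      // New overload: the scalar weight `t` is splatted to float3 internally.
      return lerp(a, b, t);
      // Equivalent to the existing all-vector form:
      // return lerp(a, b, float3(t, t, t));
    }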
---
.../lib/Headers/hlsl/hlsl_compat_overloads.h | 6 ++++
clang/lib/Sema/SemaHLSL.cpp | 3 +-
.../CodeGenHLSL/builtins/lerp-overloads.hlsl | 30 +++++++++++++++++--
clang/test/SemaHLSL/BuiltIns/lerp-errors.hlsl | 22 +++++++-------
4 files changed, 47 insertions(+), 14 deletions(-)
diff --git a/clang/lib/Headers/hlsl/hlsl_compat_overloads.h b/clang/lib/Headers/hlsl/hlsl_compat_overloads.h
index 47ae34adfe541..4874206d349c0 100644
--- a/clang/lib/Headers/hlsl/hlsl_compat_overloads.h
+++ b/clang/lib/Headers/hlsl/hlsl_compat_overloads.h
@@ -277,6 +277,12 @@ constexpr bool4 isinf(double4 V) { return isinf((float4)V); }
// lerp builtins overloads
//===----------------------------------------------------------------------===//
+template <typename T, uint N>
+constexpr __detail::enable_if_t<(N > 1 && N <= 4), vector<T, N>>
+lerp(vector<T, N> x, vector<T, N> y, T s) {
+ return lerp(x, y, (vector<T, N>)s);
+}
+
_DXC_COMPAT_TERNARY_DOUBLE_OVERLOADS(lerp)
_DXC_COMPAT_TERNARY_INTEGER_OVERLOADS(lerp)
diff --git a/clang/lib/Sema/SemaHLSL.cpp b/clang/lib/Sema/SemaHLSL.cpp
index 38322e6ba063b..0df27d9495109 100644
--- a/clang/lib/Sema/SemaHLSL.cpp
+++ b/clang/lib/Sema/SemaHLSL.cpp
@@ -2555,7 +2555,8 @@ bool SemaHLSL::CheckBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
case Builtin::BI__builtin_hlsl_lerp: {
if (SemaRef.checkArgCount(TheCall, 3))
return true;
- if (CheckVectorElementCallArgs(&SemaRef, TheCall))
+ if (CheckAnyScalarOrVector(&SemaRef, TheCall, 0) ||
+ CheckAllArgsHaveSameType(&SemaRef, TheCall))
return true;
if (SemaRef.BuiltinElementwiseTernaryMath(TheCall))
return true;
diff --git a/clang/test/CodeGenHLSL/builtins/lerp-overloads.hlsl b/clang/test/CodeGenHLSL/builtins/lerp-overloads.hlsl
index 9c613ce2db27d..aac19f70fe263 100644
--- a/clang/test/CodeGenHLSL/builtins/lerp-overloads.hlsl
+++ b/clang/test/CodeGenHLSL/builtins/lerp-overloads.hlsl
@@ -1,5 +1,7 @@
-// RUN: %clang_cc1 -std=hlsl202x -finclude-default-header -x hlsl -triple dxil-pc-shadermodel6.3-library %s -emit-llvm -O1 -o - | FileCheck %s --check-prefixes=CHECK -DFNATTRS="noundef nofpclass(nan inf)" -DTARGET=dx
-// RUN: %clang_cc1 -std=hlsl202x -finclude-default-header -x hlsl -triple spirv-unknown-vulkan-compute %s -emit-llvm -O1 -o - | FileCheck %s --check-prefixes=CHECK -DFNATTRS="spir_func noundef nofpclass(nan inf)" -DTARGET=spv
+// RUN: %clang_cc1 -std=hlsl202x -finclude-default-header -x hlsl -triple dxil-pc-shadermodel6.3-library %s -fnative-half-type -emit-llvm -O1 -o - | FileCheck %s --check-prefixes=CHECK,NATIVE_HALF -DFNATTRS="noundef nofpclass(nan inf)" -DTARGET=dx
+// RUN: %clang_cc1 -std=hlsl202x -finclude-default-header -x hlsl -triple dxil-pc-shadermodel6.3-library %s -emit-llvm -O1 -o - | FileCheck %s --check-prefixes=CHECK,NO_HALF -DFNATTRS="noundef nofpclass(nan inf)" -DTARGET=dx
+// RUN: %clang_cc1 -std=hlsl202x -finclude-default-header -x hlsl -triple spirv-unknown-vulkan-compute %s -fnative-half-type -emit-llvm -O1 -o - | FileCheck %s --check-prefixes=CHECK,NATIVE_HALF -DFNATTRS="spir_func noundef nofpclass(nan inf)" -DTARGET=spv
+// RUN: %clang_cc1 -std=hlsl202x -finclude-default-header -x hlsl -triple spirv-unknown-vulkan-compute %s -emit-llvm -O1 -o - | FileCheck %s --check-prefixes=CHECK,NO_HALF -DFNATTRS="spir_func noundef nofpclass(nan inf)" -DTARGET=spv
// CHECK: define [[FNATTRS]] float @_Z16test_lerp_doubled(
// CHECK-NEXT: [[ENTRY:.*:]]
@@ -158,3 +160,27 @@ float3 test_lerp_uint64_t3(uint64_t3 p0) { return lerp(p0, p0, p0); }
// CHECK-NEXT: ret <4 x float> [[LERP]]
//
float4 test_lerp_uint64_t4(uint64_t4 p0) { return lerp(p0, p0, p0); }
+
+// NATIVE_HALF: define [[FNATTRS]] <3 x half> @_Z21test_lerp_half_scalarDv3_DhS_Dh{{.*}}(
+// NO_HALF: define [[FNATTRS]] <3 x float> @_Z21test_lerp_half_scalarDv3_DhS_Dh(
+// CHECK-NEXT: [[ENTRY:.*:]]
+// NATIVE_HALF-NEXT: [[SPLATINSERT:%.*]] = insertelement <3 x half> poison, half %{{.*}}, i64 0
+// NATIVE_HALF-NEXT: [[SPLAT:%.*]] = shufflevector <3 x half> [[SPLATINSERT]], <3 x half> poison, <3 x i32> zeroinitializer
+// NATIVE_HALF-NEXT: [[LERP:%.*]] = tail call {{.*}} <3 x half> @llvm.[[TARGET]].lerp.v3f16(<3 x half> {{.*}}, <3 x half> {{.*}}, <3 x half> [[SPLAT]])
+// NATIVE_HALF-NEXT: ret <3 x half> [[LERP]]
+// NO_HALF-NEXT: [[SPLATINSERT:%.*]] = insertelement <3 x float> poison, float %{{.*}}, i64 0
+// NO_HALF-NEXT: [[SPLAT:%.*]] = shufflevector <3 x float> [[SPLATINSERT]], <3 x float> poison, <3 x i32> zeroinitializer
+// NO_HALF-NEXT: [[LERP:%.*]] = tail call {{.*}} <3 x float> @llvm.[[TARGET]].lerp.v3f32(<3 x float> {{.*}}, <3 x float> {{.*}}, <3 x float> [[SPLAT]])
+// NO_HALF-NEXT: ret <3 x float> [[LERP]]
+half3 test_lerp_half_scalar(half3 x, half3 y, half s) { return lerp(x, y, s); }
+
+// CHECK: define [[FNATTRS]] <3 x float> @_Z22test_lerp_float_scalarDv3_fS_f(
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[SPLATINSERT:%.*]] = insertelement <3 x float> poison, float %{{.*}}, i64 0
+// CHECK-NEXT: [[SPLAT:%.*]] = shufflevector <3 x float> [[SPLATINSERT]], <3 x float> poison, <3 x i32> zeroinitializer
+// CHECK-NEXT: [[LERP:%.*]] = tail call {{.*}} <3 x float> @llvm.[[TARGET]].lerp.v3f32(<3 x float> {{.*}}, <3 x float> {{.*}}, <3 x float> [[SPLAT]])
+// CHECK-NEXT: ret <3 x float> [[LERP]]
+//
+float3 test_lerp_float_scalar(float3 x, float3 y, float s) {
+ return lerp(x, y, s);
+}
diff --git a/clang/test/SemaHLSL/BuiltIns/lerp-errors.hlsl b/clang/test/SemaHLSL/BuiltIns/lerp-errors.hlsl
index 398d3c7f938c1..b4734a985f31c 100644
--- a/clang/test/SemaHLSL/BuiltIns/lerp-errors.hlsl
+++ b/clang/test/SemaHLSL/BuiltIns/lerp-errors.hlsl
@@ -62,42 +62,42 @@ float2 test_lerp_element_type_mismatch(half2 p0, float2 p1) {
float2 test_builtin_lerp_float2_splat(float p0, float2 p1) {
return __builtin_hlsl_lerp(p0, p1, p1);
- // expected-error@-1 {{all arguments to '__builtin_hlsl_lerp' must be vectors}}
+ // expected-error@-1 {{all arguments to '__builtin_hlsl_lerp' must have the same type}}
}
float2 test_builtin_lerp_float2_splat2(double p0, double2 p1) {
return __builtin_hlsl_lerp(p1, p0, p1);
- // expected-error@-1 {{all arguments to '__builtin_hlsl_lerp' must be vectors}}
+ // expected-error@-1 {{all arguments to '__builtin_hlsl_lerp' must have the same type}}
}
float2 test_builtin_lerp_float2_splat3(double p0, double2 p1) {
return __builtin_hlsl_lerp(p1, p1, p0);
- // expected-error@-1 {{all arguments to '__builtin_hlsl_lerp' must be vectors}}
+ // expected-error@-1 {{all arguments to '__builtin_hlsl_lerp' must have the same type}}
}
float3 test_builtin_lerp_float3_splat(float p0, float3 p1) {
return __builtin_hlsl_lerp(p0, p1, p1);
- // expected-error@-1 {{all arguments to '__builtin_hlsl_lerp' must be vectors}}
+ // expected-error@-1 {{all arguments to '__builtin_hlsl_lerp' must have the same type}}
}
float4 test_builtin_lerp_float4_splat(float p0, float4 p1) {
return __builtin_hlsl_lerp(p0, p1, p1);
- // expected-error@-1 {{all arguments to '__builtin_hlsl_lerp' must be vectors}}
+ // expected-error@-1 {{all arguments to '__builtin_hlsl_lerp' must have the same type}}
}
float2 test_lerp_float2_int_splat(float2 p0, int p1) {
return __builtin_hlsl_lerp(p0, p1, p1);
- // expected-error@-1 {{all arguments to '__builtin_hlsl_lerp' must be vectors}}
+ // expected-error@-1 {{all arguments to '__builtin_hlsl_lerp' must have the same type}}
}
float3 test_lerp_float3_int_splat(float3 p0, int p1) {
return __builtin_hlsl_lerp(p0, p1, p1);
- // expected-error@-1 {{all arguments to '__builtin_hlsl_lerp' must be vectors}}
+ // expected-error@-1 {{all arguments to '__builtin_hlsl_lerp' must have the same type}}
}
float2 test_builtin_lerp_int_vect_to_float_vec_promotion(int2 p0, float p1) {
return __builtin_hlsl_lerp(p0, p1, p1);
- // expected-error@-1 {{all arguments to '__builtin_hlsl_lerp' must be vectors}}
+ // expected-error@-1 {{all arguments to '__builtin_hlsl_lerp' must have the same type}}
}
float test_builtin_lerp_bool_type_promotion(bool p0) {
@@ -107,17 +107,17 @@ float test_builtin_lerp_bool_type_promotion(bool p0) {
float builtin_bool_to_float_type_promotion(float p0, bool p1) {
return __builtin_hlsl_lerp(p0, p0, p1);
- // expected-error@-1 {{3rd argument must be a scalar or vector of floating-point types (was 'bool')}}
+ // expected-error@-1 {{all arguments to '__builtin_hlsl_lerp' must have the same type}}
}
float builtin_bool_to_float_type_promotion2(bool p0, float p1) {
return __builtin_hlsl_lerp(p1, p0, p1);
- // expected-error@-1 {{2nd argument must be a scalar or vector of floating-point types (was 'bool')}}
+ // expected-error@-1 {{all arguments to '__builtin_hlsl_lerp' must have the same type}}
}
float builtin_lerp_int_to_float_promotion(float p0, int p1) {
return __builtin_hlsl_lerp(p0, p0, p1);
- // expected-error@-1 {{3rd argument must be a scalar or vector of floating-point types (was 'int')}}
+ // expected-error@-1 {{all arguments to '__builtin_hlsl_lerp' must have the same type}}
}
float4 test_lerp_int4(int4 p0, int4 p1, int4 p2) {
From b333286839a504f07c9a0c4b47d5221044a5330c Mon Sep 17 00:00:00 2001
From: Justin Bogner <mail@justinbogner.com>
Date: Tue, 29 Apr 2025 16:51:55 -0700
Subject: [PATCH 2/2] clean up empty lines
---
.../CodeGenHLSL/builtins/lerp-overloads.hlsl | 19 -------------------
1 file changed, 19 deletions(-)
diff --git a/clang/test/CodeGenHLSL/builtins/lerp-overloads.hlsl b/clang/test/CodeGenHLSL/builtins/lerp-overloads.hlsl
index aac19f70fe263..43ef9d3eda2ae 100644
--- a/clang/test/CodeGenHLSL/builtins/lerp-overloads.hlsl
+++ b/clang/test/CodeGenHLSL/builtins/lerp-overloads.hlsl
@@ -22,7 +22,6 @@ float2 test_lerp_double2(double2 p0) { return lerp(p0, p0, p0); }
// CHECK-NEXT: [[CONV:%.*]] = fptrunc {{.*}} <3 x double> [[P0:%.*]] to <3 x float>
// CHECK-NEXT: [[LERP:%.*]] = tail call {{.*}} <3 x float> @llvm.[[TARGET]].lerp.v3f32(<3 x float> [[CONV]], <3 x float> [[CONV]], <3 x float> [[CONV]])
// CHECK-NEXT: ret <3 x float> [[LERP]]
-//
float3 test_lerp_double3(double3 p0) { return lerp(p0, p0, p0); }
// CHECK: define [[FNATTRS]] <4 x float> @_Z17test_lerp_double4Dv4_d(
@@ -30,7 +29,6 @@ float3 test_lerp_double3(double3 p0) { return lerp(p0, p0, p0); }
// CHECK-NEXT: [[CONV:%.*]] = fptrunc {{.*}} <4 x double> [[P0:%.*]] to <4 x float>
// CHECK-NEXT: [[LERP:%.*]] = tail call {{.*}} <4 x float> @llvm.[[TARGET]].lerp.v4f32(<4 x float> [[CONV]], <4 x float> [[CONV]], <4 x float> [[CONV]])
// CHECK-NEXT: ret <4 x float> [[LERP]]
-//
float4 test_lerp_double4(double4 p0) { return lerp(p0, p0, p0); }
// CHECK: define [[FNATTRS]] float @_Z13test_lerp_inti(
@@ -38,7 +36,6 @@ float4 test_lerp_double4(double4 p0) { return lerp(p0, p0, p0); }
// CHECK-NEXT: [[CONV:%.*]] = sitofp i32 [[P0:%.*]] to float
// CHECK-NEXT: [[LERP:%.*]] = tail call {{.*}} float @llvm.[[TARGET]].lerp.f32(float [[CONV]], float [[CONV]], float [[CONV]])
// CHECK-NEXT: ret float [[LERP]]
-//
float test_lerp_int(int p0) { return lerp(p0, p0, p0); }
// CHECK: define [[FNATTRS]] <2 x float> @_Z14test_lerp_int2Dv2_i(
@@ -46,7 +43,6 @@ float test_lerp_int(int p0) { return lerp(p0, p0, p0); }
// CHECK-NEXT: [[CONV:%.*]] = sitofp <2 x i32> [[P0:%.*]] to <2 x float>
// CHECK-NEXT: [[LERP:%.*]] = tail call {{.*}} <2 x float> @llvm.[[TARGET]].lerp.v2f32(<2 x float> [[CONV]], <2 x float> [[CONV]], <2 x float> [[CONV]])
// CHECK-NEXT: ret <2 x float> [[LERP]]
-//
float2 test_lerp_int2(int2 p0) { return lerp(p0, p0, p0); }
// CHECK: define [[FNATTRS]] <3 x float> @_Z14test_lerp_int3Dv3_i(
@@ -54,7 +50,6 @@ float2 test_lerp_int2(int2 p0) { return lerp(p0, p0, p0); }
// CHECK-NEXT: [[CONV:%.*]] = sitofp <3 x i32> [[P0:%.*]] to <3 x float>
// CHECK-NEXT: [[LERP:%.*]] = tail call {{.*}} <3 x float> @llvm.[[TARGET]].lerp.v3f32(<3 x float> [[CONV]], <3 x float> [[CONV]], <3 x float> [[CONV]])
// CHECK-NEXT: ret <3 x float> [[LERP]]
-//
float3 test_lerp_int3(int3 p0) { return lerp(p0, p0, p0); }
// CHECK: define [[FNATTRS]] <4 x float> @_Z14test_lerp_int4Dv4_i(
@@ -62,7 +57,6 @@ float3 test_lerp_int3(int3 p0) { return lerp(p0, p0, p0); }
// CHECK-NEXT: [[CONV:%.*]] = sitofp <4 x i32> [[P0:%.*]] to <4 x float>
// CHECK-NEXT: [[LERP:%.*]] = tail call {{.*}} <4 x float> @llvm.[[TARGET]].lerp.v4f32(<4 x float> [[CONV]], <4 x float> [[CONV]], <4 x float> [[CONV]])
// CHECK-NEXT: ret <4 x float> [[LERP]]
-//
float4 test_lerp_int4(int4 p0) { return lerp(p0, p0, p0); }
// CHECK: define [[FNATTRS]] float @_Z14test_lerp_uintj(
@@ -70,7 +64,6 @@ float4 test_lerp_int4(int4 p0) { return lerp(p0, p0, p0); }
// CHECK-NEXT: [[CONV:%.*]] = uitofp i32 [[P0:%.*]] to float
// CHECK-NEXT: [[LERP:%.*]] = tail call {{.*}} float @llvm.[[TARGET]].lerp.f32(float [[CONV]], float [[CONV]], float [[CONV]])
// CHECK-NEXT: ret float [[LERP]]
-//
float test_lerp_uint(uint p0) { return lerp(p0, p0, p0); }
// CHECK: define [[FNATTRS]] <2 x float> @_Z15test_lerp_uint2Dv2_j(
@@ -78,7 +71,6 @@ float test_lerp_uint(uint p0) { return lerp(p0, p0, p0); }
// CHECK-NEXT: [[CONV:%.*]] = uitofp <2 x i32> [[P0:%.*]] to <2 x float>
// CHECK-NEXT: [[LERP:%.*]] = tail call {{.*}} <2 x float> @llvm.[[TARGET]].lerp.v2f32(<2 x float> [[CONV]], <2 x float> [[CONV]], <2 x float> [[CONV]])
// CHECK-NEXT: ret <2 x float> [[LERP]]
-//
float2 test_lerp_uint2(uint2 p0) { return lerp(p0, p0, p0); }
// CHECK: define [[FNATTRS]] <3 x float> @_Z15test_lerp_uint3Dv3_j(
@@ -86,7 +78,6 @@ float2 test_lerp_uint2(uint2 p0) { return lerp(p0, p0, p0); }
// CHECK-NEXT: [[CONV:%.*]] = uitofp <3 x i32> [[P0:%.*]] to <3 x float>
// CHECK-NEXT: [[LERP:%.*]] = tail call {{.*}} <3 x float> @llvm.[[TARGET]].lerp.v3f32(<3 x float> [[CONV]], <3 x float> [[CONV]], <3 x float> [[CONV]])
// CHECK-NEXT: ret <3 x float> [[LERP]]
-//
float3 test_lerp_uint3(uint3 p0) { return lerp(p0, p0, p0); }
// CHECK: define [[FNATTRS]] <4 x float> @_Z15test_lerp_uint4Dv4_j(
@@ -94,7 +85,6 @@ float3 test_lerp_uint3(uint3 p0) { return lerp(p0, p0, p0); }
// CHECK-NEXT: [[CONV:%.*]] = uitofp <4 x i32> [[P0:%.*]] to <4 x float>
// CHECK-NEXT: [[LERP:%.*]] = tail call {{.*}} <4 x float> @llvm.[[TARGET]].lerp.v4f32(<4 x float> [[CONV]], <4 x float> [[CONV]], <4 x float> [[CONV]])
// CHECK-NEXT: ret <4 x float> [[LERP]]
-//
float4 test_lerp_uint4(uint4 p0) { return lerp(p0, p0, p0); }
// CHECK: define [[FNATTRS]] float @_Z17test_lerp_int64_tl(
@@ -102,7 +92,6 @@ float4 test_lerp_uint4(uint4 p0) { return lerp(p0, p0, p0); }
// CHECK-NEXT: [[CONV:%.*]] = sitofp i64 [[P0:%.*]] to float
// CHECK-NEXT: [[LERP:%.*]] = tail call {{.*}} float @llvm.[[TARGET]].lerp.f32(float [[CONV]], float [[CONV]], float [[CONV]])
// CHECK-NEXT: ret float [[LERP]]
-//
float test_lerp_int64_t(int64_t p0) { return lerp(p0, p0, p0); }
// CHECK: define [[FNATTRS]] <2 x float> @_Z18test_lerp_int64_t2Dv2_l(
@@ -110,7 +99,6 @@ float test_lerp_int64_t(int64_t p0) { return lerp(p0, p0, p0); }
// CHECK-NEXT: [[CONV:%.*]] = sitofp <2 x i64> [[P0:%.*]] to <2 x float>
// CHECK-NEXT: [[LERP:%.*]] = tail call {{.*}} <2 x float> @llvm.[[TARGET]].lerp.v2f32(<2 x float> [[CONV]], <2 x float> [[CONV]], <2 x float> [[CONV]])
// CHECK-NEXT: ret <2 x float> [[LERP]]
-//
float2 test_lerp_int64_t2(int64_t2 p0) { return lerp(p0, p0, p0); }
// CHECK: define [[FNATTRS]] <3 x float> @_Z18test_lerp_int64_t3Dv3_l(
@@ -118,7 +106,6 @@ float2 test_lerp_int64_t2(int64_t2 p0) { return lerp(p0, p0, p0); }
// CHECK-NEXT: [[CONV:%.*]] = sitofp <3 x i64> [[P0:%.*]] to <3 x float>
// CHECK-NEXT: [[LERP:%.*]] = tail call {{.*}} <3 x float> @llvm.[[TARGET]].lerp.v3f32(<3 x float> [[CONV]], <3 x float> [[CONV]], <3 x float> [[CONV]])
// CHECK-NEXT: ret <3 x float> [[LERP]]
-//
float3 test_lerp_int64_t3(int64_t3 p0) { return lerp(p0, p0, p0); }
// CHECK: define [[FNATTRS]] <4 x float> @_Z18test_lerp_int64_t4Dv4_l(
@@ -126,7 +113,6 @@ float3 test_lerp_int64_t3(int64_t3 p0) { return lerp(p0, p0, p0); }
// CHECK-NEXT: [[CONV:%.*]] = sitofp <4 x i64> [[P0:%.*]] to <4 x float>
// CHECK-NEXT: [[LERP:%.*]] = tail call {{.*}} <4 x float> @llvm.[[TARGET]].lerp.v4f32(<4 x float> [[CONV]], <4 x float> [[CONV]], <4 x float> [[CONV]])
// CHECK-NEXT: ret <4 x float> [[LERP]]
-//
float4 test_lerp_int64_t4(int64_t4 p0) { return lerp(p0, p0, p0); }
// CHECK: define [[FNATTRS]] float @_Z18test_lerp_uint64_tm(
@@ -134,7 +120,6 @@ float4 test_lerp_int64_t4(int64_t4 p0) { return lerp(p0, p0, p0); }
// CHECK-NEXT: [[CONV:%.*]] = uitofp i64 [[P0:%.*]] to float
// CHECK-NEXT: [[LERP:%.*]] = tail call {{.*}} float @llvm.[[TARGET]].lerp.f32(float [[CONV]], float [[CONV]], float [[CONV]])
// CHECK-NEXT: ret float [[LERP]]
-//
float test_lerp_uint64_t(uint64_t p0) { return lerp(p0, p0, p0); }
// CHECK: define [[FNATTRS]] <2 x float> @_Z19test_lerp_uint64_t2Dv2_m(
@@ -142,7 +127,6 @@ float test_lerp_uint64_t(uint64_t p0) { return lerp(p0, p0, p0); }
// CHECK-NEXT: [[CONV:%.*]] = uitofp <2 x i64> [[P0:%.*]] to <2 x float>
// CHECK-NEXT: [[LERP:%.*]] = tail call {{.*}} <2 x float> @llvm.[[TARGET]].lerp.v2f32(<2 x float> [[CONV]], <2 x float> [[CONV]], <2 x float> [[CONV]])
// CHECK-NEXT: ret <2 x float> [[LERP]]
-//
float2 test_lerp_uint64_t2(uint64_t2 p0) { return lerp(p0, p0, p0); }
// CHECK: define [[FNATTRS]] <3 x float> @_Z19test_lerp_uint64_t3Dv3_m(
@@ -150,7 +134,6 @@ float2 test_lerp_uint64_t2(uint64_t2 p0) { return lerp(p0, p0, p0); }
// CHECK-NEXT: [[CONV:%.*]] = uitofp <3 x i64> [[P0:%.*]] to <3 x float>
// CHECK-NEXT: [[LERP:%.*]] = tail call {{.*}} <3 x float> @llvm.[[TARGET]].lerp.v3f32(<3 x float> [[CONV]], <3 x float> [[CONV]], <3 x float> [[CONV]])
// CHECK-NEXT: ret <3 x float> [[LERP]]
-//
float3 test_lerp_uint64_t3(uint64_t3 p0) { return lerp(p0, p0, p0); }
// CHECK: define [[FNATTRS]] <4 x float> @_Z19test_lerp_uint64_t4Dv4_m(
@@ -158,7 +141,6 @@ float3 test_lerp_uint64_t3(uint64_t3 p0) { return lerp(p0, p0, p0); }
// CHECK-NEXT: [[CONV:%.*]] = uitofp <4 x i64> [[P0:%.*]] to <4 x float>
// CHECK-NEXT: [[LERP:%.*]] = tail call {{.*}} <4 x float> @llvm.[[TARGET]].lerp.v4f32(<4 x float> [[CONV]], <4 x float> [[CONV]], <4 x float> [[CONV]])
// CHECK-NEXT: ret <4 x float> [[LERP]]
-//
float4 test_lerp_uint64_t4(uint64_t4 p0) { return lerp(p0, p0, p0); }
// NATIVE_HALF: define [[FNATTRS]] <3 x half> @_Z21test_lerp_half_scalarDv3_DhS_Dh{{.*}}(
@@ -180,7 +162,6 @@ half3 test_lerp_half_scalar(half3 x, half3 y, half s) { return lerp(x, y, s); }
// CHECK-NEXT: [[SPLAT:%.*]] = shufflevector <3 x float> [[SPLATINSERT]], <3 x float> poison, <3 x i32> zeroinitializer
// CHECK-NEXT: [[LERP:%.*]] = tail call {{.*}} <3 x float> @llvm.[[TARGET]].lerp.v3f32(<3 x float> {{.*}}, <3 x float> {{.*}}, <3 x float> [[SPLAT]])
// CHECK-NEXT: ret <3 x float> [[LERP]]
-//
float3 test_lerp_float_scalar(float3 x, float3 y, float s) {
return lerp(x, y, s);
}