[clang] [llvm] [HLSL][DXIL][SPIRV] QuadReadAcrossDiagonal intrinsic support (PR #188567)
via llvm-commits
llvm-commits at lists.llvm.org
Tue Mar 31 01:19:54 PDT 2026
https://github.com/kcloudy0717 updated https://github.com/llvm/llvm-project/pull/188567
>From f7f8136311186ee2a5ce90d5650eea745e17ec39 Mon Sep 17 00:00:00 2001
From: Kai Huang <kcloudy0717 at gmail.com>
Date: Sat, 7 Mar 2026 21:43:03 +0800
Subject: [PATCH 1/6] [HLSL][DXIL][SPIRV] QuadReadAcrossDiagonal intrinsic
support
---
clang/include/clang/Basic/Builtins.td | 6 +
clang/lib/CodeGen/CGHLSLBuiltins.cpp | 8 +
clang/lib/CodeGen/CGHLSLRuntime.h | 2 +
.../lib/Headers/hlsl/hlsl_alias_intrinsics.h | 99 ++++++++++
clang/lib/Sema/SemaHLSL.cpp | 3 +-
.../builtins/QuadReadAcrossDiagonal.hlsl | 171 ++++++++++++++++++
.../QuadReadAcrossDiagonal-errors.hlsl | 28 +++
llvm/include/llvm/IR/IntrinsicsDirectX.td | 1 +
llvm/include/llvm/IR/IntrinsicsSPIRV.td | 1 +
llvm/lib/Target/DirectX/DXIL.td | 4 +
llvm/lib/Target/DirectX/DXILShaderFlags.cpp | 1 +
.../DirectX/DirectXTargetTransformInfo.cpp | 1 +
.../Target/SPIRV/SPIRVInstructionSelector.cpp | 3 +
.../CodeGen/DirectX/QuadReadAcrossDiagonal.ll | 87 +++++++++
.../CodeGen/DirectX/ShaderFlags/wave-ops.ll | 7 +
.../hlsl-intrinsics/QuadReadAcrossDiagonal.ll | 44 +++++
16 files changed, 465 insertions(+), 1 deletion(-)
create mode 100644 clang/test/CodeGenHLSL/builtins/QuadReadAcrossDiagonal.hlsl
create mode 100644 clang/test/SemaHLSL/BuiltIns/QuadReadAcrossDiagonal-errors.hlsl
create mode 100644 llvm/test/CodeGen/DirectX/QuadReadAcrossDiagonal.ll
create mode 100644 llvm/test/CodeGen/SPIRV/hlsl-intrinsics/QuadReadAcrossDiagonal.ll
diff --git a/clang/include/clang/Basic/Builtins.td b/clang/include/clang/Basic/Builtins.td
index f1743c7286def..21070761d289f 100644
--- a/clang/include/clang/Basic/Builtins.td
+++ b/clang/include/clang/Basic/Builtins.td
@@ -5288,6 +5288,12 @@ def HLSLQuadReadAcrossY : LangBuiltin<"HLSL_LANG"> {
let Prototype = "void(...)";
}
+def HLSLQuadReadAcrossDiagonal : LangBuiltin<"HLSL_LANG"> {
+ let Spellings = ["__builtin_hlsl_quad_read_across_diagonal"];
+ let Attributes = [NoThrow, Const];
+ let Prototype = "void(...)";
+}
+
def HLSLClamp : LangBuiltin<"HLSL_LANG"> {
let Spellings = ["__builtin_hlsl_elementwise_clamp"];
let Attributes = [NoThrow, Const, CustomTypeChecking];
diff --git a/clang/lib/CodeGen/CGHLSLBuiltins.cpp b/clang/lib/CodeGen/CGHLSLBuiltins.cpp
index 6803d6471979d..ed7af6941d279 100644
--- a/clang/lib/CodeGen/CGHLSLBuiltins.cpp
+++ b/clang/lib/CodeGen/CGHLSLBuiltins.cpp
@@ -1455,6 +1455,14 @@ Value *CodeGenFunction::EmitHLSLBuiltinExpr(unsigned BuiltinID,
&CGM.getModule(), ID, {OpExpr->getType()}),
ArrayRef{OpExpr}, "hlsl.quad.read.across.y");
}
+ case Builtin::BI__builtin_hlsl_quad_read_across_diagonal: {
+ Value *OpExpr = EmitScalarExpr(E->getArg(0));
+ Intrinsic::ID ID =
+ CGM.getHLSLRuntime().getQuadReadAcrossDiagonalIntrinsic();
+ return EmitRuntimeCall(Intrinsic::getOrInsertDeclaration(
+ &CGM.getModule(), ID, {OpExpr->getType()}),
+ ArrayRef{OpExpr}, "hlsl.quad.read.across.diagonal");
+ }
case Builtin::BI__builtin_hlsl_elementwise_sign: {
auto *Arg0 = E->getArg(0);
Value *Op0 = EmitScalarExpr(Arg0);
diff --git a/clang/lib/CodeGen/CGHLSLRuntime.h b/clang/lib/CodeGen/CGHLSLRuntime.h
index 691b9e54dce61..c3c26d4e27565 100644
--- a/clang/lib/CodeGen/CGHLSLRuntime.h
+++ b/clang/lib/CodeGen/CGHLSLRuntime.h
@@ -160,6 +160,8 @@ class CGHLSLRuntime {
GENERATE_HLSL_INTRINSIC_FUNCTION(WaveReadLaneAt, wave_readlane)
GENERATE_HLSL_INTRINSIC_FUNCTION(QuadReadAcrossX, quad_read_across_x)
GENERATE_HLSL_INTRINSIC_FUNCTION(QuadReadAcrossY, quad_read_across_y)
+ GENERATE_HLSL_INTRINSIC_FUNCTION(QuadReadAcrossDiagonal,
+ quad_read_across_diagonal)
GENERATE_HLSL_INTRINSIC_FUNCTION(FirstBitUHigh, firstbituhigh)
GENERATE_HLSL_INTRINSIC_FUNCTION(FirstBitSHigh, firstbitshigh)
GENERATE_HLSL_INTRINSIC_FUNCTION(FirstBitLow, firstbitlow)
diff --git a/clang/lib/Headers/hlsl/hlsl_alias_intrinsics.h b/clang/lib/Headers/hlsl/hlsl_alias_intrinsics.h
index 80c415ef66644..372f67cef2993 100644
--- a/clang/lib/Headers/hlsl/hlsl_alias_intrinsics.h
+++ b/clang/lib/Headers/hlsl/hlsl_alias_intrinsics.h
@@ -3794,6 +3794,105 @@ __attribute__((convergent)) double3 QuadReadAcrossY(double3);
_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_y)
__attribute__((convergent)) double4 QuadReadAcrossY(double4);
+//===----------------------------------------------------------------------===//
+// QuadReadAcrossDiagonal builtins
+//===----------------------------------------------------------------------===//
+
+_HLSL_16BIT_AVAILABILITY_SHADERMODEL_DEFAULT()
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_diagonal)
+__attribute__((convergent)) half QuadReadAcrossDiagonal(half);
+_HLSL_16BIT_AVAILABILITY_SHADERMODEL_DEFAULT()
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_diagonal)
+__attribute__((convergent)) half2 QuadReadAcrossDiagonal(half2);
+_HLSL_16BIT_AVAILABILITY_SHADERMODEL_DEFAULT()
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_diagonal)
+__attribute__((convergent)) half3 QuadReadAcrossDiagonal(half3);
+_HLSL_16BIT_AVAILABILITY_SHADERMODEL_DEFAULT()
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_diagonal)
+__attribute__((convergent)) half4 QuadReadAcrossDiagonal(half4);
+
+#ifdef __HLSL_ENABLE_16_BIT
+_HLSL_16BIT_AVAILABILITY_SHADERMODEL_DEFAULT()
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_diagonal)
+__attribute__((convergent)) int16_t QuadReadAcrossDiagonal(int16_t);
+_HLSL_16BIT_AVAILABILITY_SHADERMODEL_DEFAULT()
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_diagonal)
+__attribute__((convergent)) int16_t2 QuadReadAcrossDiagonal(int16_t2);
+_HLSL_16BIT_AVAILABILITY_SHADERMODEL_DEFAULT()
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_diagonal)
+__attribute__((convergent)) int16_t3 QuadReadAcrossDiagonal(int16_t3);
+_HLSL_16BIT_AVAILABILITY_SHADERMODEL_DEFAULT()
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_diagonal)
+__attribute__((convergent)) int16_t4 QuadReadAcrossDiagonal(int16_t4);
+
+_HLSL_16BIT_AVAILABILITY_SHADERMODEL_DEFAULT()
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_diagonal)
+__attribute__((convergent)) uint16_t QuadReadAcrossDiagonal(uint16_t);
+_HLSL_16BIT_AVAILABILITY_SHADERMODEL_DEFAULT()
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_diagonal)
+__attribute__((convergent)) uint16_t2 QuadReadAcrossDiagonal(uint16_t2);
+_HLSL_16BIT_AVAILABILITY_SHADERMODEL_DEFAULT()
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_diagonal)
+__attribute__((convergent)) uint16_t3 QuadReadAcrossDiagonal(uint16_t3);
+_HLSL_16BIT_AVAILABILITY_SHADERMODEL_DEFAULT()
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_diagonal)
+__attribute__((convergent)) uint16_t4 QuadReadAcrossDiagonal(uint16_t4);
+#endif
+
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_diagonal)
+__attribute__((convergent)) int QuadReadAcrossDiagonal(int);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_diagonal)
+__attribute__((convergent)) int2 QuadReadAcrossDiagonal(int2);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_diagonal)
+__attribute__((convergent)) int3 QuadReadAcrossDiagonal(int3);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_diagonal)
+__attribute__((convergent)) int4 QuadReadAcrossDiagonal(int4);
+
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_diagonal)
+__attribute__((convergent)) uint QuadReadAcrossDiagonal(uint);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_diagonal)
+__attribute__((convergent)) uint2 QuadReadAcrossDiagonal(uint2);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_diagonal)
+__attribute__((convergent)) uint3 QuadReadAcrossDiagonal(uint3);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_diagonal)
+__attribute__((convergent)) uint4 QuadReadAcrossDiagonal(uint4);
+
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_diagonal)
+__attribute__((convergent)) int64_t QuadReadAcrossDiagonal(int64_t);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_diagonal)
+__attribute__((convergent)) int64_t2 QuadReadAcrossDiagonal(int64_t2);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_diagonal)
+__attribute__((convergent)) int64_t3 QuadReadAcrossDiagonal(int64_t3);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_diagonal)
+__attribute__((convergent)) int64_t4 QuadReadAcrossDiagonal(int64_t4);
+
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_diagonal)
+__attribute__((convergent)) uint64_t QuadReadAcrossDiagonal(uint64_t);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_diagonal)
+__attribute__((convergent)) uint64_t2 QuadReadAcrossDiagonal(uint64_t2);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_diagonal)
+__attribute__((convergent)) uint64_t3 QuadReadAcrossDiagonal(uint64_t3);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_diagonal)
+__attribute__((convergent)) uint64_t4 QuadReadAcrossDiagonal(uint64_t4);
+
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_diagonal)
+__attribute__((convergent)) float QuadReadAcrossDiagonal(float);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_diagonal)
+__attribute__((convergent)) float2 QuadReadAcrossDiagonal(float2);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_diagonal)
+__attribute__((convergent)) float3 QuadReadAcrossDiagonal(float3);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_diagonal)
+__attribute__((convergent)) float4 QuadReadAcrossDiagonal(float4);
+
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_diagonal)
+__attribute__((convergent)) double QuadReadAcrossDiagonal(double);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_diagonal)
+__attribute__((convergent)) double2 QuadReadAcrossDiagonal(double2);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_diagonal)
+__attribute__((convergent)) double3 QuadReadAcrossDiagonal(double3);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_diagonal)
+__attribute__((convergent)) double4 QuadReadAcrossDiagonal(double4);
+
//===----------------------------------------------------------------------===//
// sign builtins
//===----------------------------------------------------------------------===//
diff --git a/clang/lib/Sema/SemaHLSL.cpp b/clang/lib/Sema/SemaHLSL.cpp
index 2b977b2793efe..2fe784e594f20 100644
--- a/clang/lib/Sema/SemaHLSL.cpp
+++ b/clang/lib/Sema/SemaHLSL.cpp
@@ -4343,7 +4343,8 @@ bool SemaHLSL::CheckBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
break;
}
case Builtin::BI__builtin_hlsl_quad_read_across_x:
- case Builtin::BI__builtin_hlsl_quad_read_across_y: {
+ case Builtin::BI__builtin_hlsl_quad_read_across_y:
+ case Builtin::BI__builtin_hlsl_quad_read_across_diagonal: {
if (SemaRef.checkArgCount(TheCall, 1))
return true;
diff --git a/clang/test/CodeGenHLSL/builtins/QuadReadAcrossDiagonal.hlsl b/clang/test/CodeGenHLSL/builtins/QuadReadAcrossDiagonal.hlsl
new file mode 100644
index 0000000000000..f89be574e8e75
--- /dev/null
+++ b/clang/test/CodeGenHLSL/builtins/QuadReadAcrossDiagonal.hlsl
@@ -0,0 +1,171 @@
+// RUN: %clang_cc1 -finclude-default-header -x hlsl -triple \
+// RUN: dxil-pc-shadermodel6.3-compute %s -fnative-half-type -fnative-int16-type \
+// RUN: -emit-llvm -disable-llvm-passes -o - | FileCheck %s \
+// RUN: --check-prefixes=CHECK,CHECK-DXIL,CHECK-NATIVE_HALF
+// RUN: %clang_cc1 -finclude-default-header -x hlsl -triple \
+// RUN: dxil-pc-shadermodel6.3-compute %s -emit-llvm -disable-llvm-passes \
+// RUN: -o - | FileCheck %s --check-prefixes=CHECK,CHECK-DXIL,CHECK-NO_HALF
+
+// RUN: %clang_cc1 -finclude-default-header -x hlsl -triple \
+// RUN: spirv-unknown-vulkan-compute %s -fnative-half-type -fnative-int16-type \
+// RUN: -emit-llvm -disable-llvm-passes -o - | FileCheck %s \
+// RUN: --check-prefixes=CHECK,CHECK-SPIRV,CHECK-NATIVE_HALF
+// RUN: %clang_cc1 -finclude-default-header -x hlsl -triple \
+// RUN: spirv-unknown-vulkan-compute %s -emit-llvm -disable-llvm-passes \
+// RUN: -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SPIRV,CHECK-NO_HALF
+
+// Capture the expected interchange format so not every check needs to be duplicated
+// CHECK-DXIL: %[[RET:.*]] = call [[CC:]]i32 @llvm.[[ICF:dx]].quad.read.across.diagonal.i32(i32 %[[#]])
+// CHECK-SPIRV: %[[RET:.*]] = call [[CC:spir_func ]]i32 @llvm.[[ICF:spv]].quad.read.across.diagonal.i32(i32 %[[#]])
+// CHECK: ret i32 %[[RET]]
+int test_int(int expr) { return QuadReadAcrossDiagonal(expr); }
+
+// CHECK: %[[RET:.*]] = call [[CC]]<2 x i32> @llvm.[[ICF]].quad.read.across.diagonal.v2i32(<2 x i32> %[[#]])
+// CHECK: ret <2 x i32> %[[RET]]
+int2 test_int2(int2 expr) { return QuadReadAcrossDiagonal(expr); }
+
+// CHECK: %[[RET:.*]] = call [[CC]]<3 x i32> @llvm.[[ICF]].quad.read.across.diagonal.v3i32(<3 x i32> %[[#]])
+// CHECK: ret <3 x i32> %[[RET]]
+int3 test_int3(int3 expr) { return QuadReadAcrossDiagonal(expr); }
+
+// CHECK: %[[RET:.*]] = call [[CC]]<4 x i32> @llvm.[[ICF]].quad.read.across.diagonal.v4i32(<4 x i32> %[[#]])
+// CHECK: ret <4 x i32> %[[RET]]
+int4 test_int4(int4 expr) { return QuadReadAcrossDiagonal(expr); }
+
+// CHECK: %[[RET:.*]] = call [[CC]]i32 @llvm.[[ICF]].quad.read.across.diagonal.i32(i32 %[[#]])
+// CHECK: ret i32 %[[RET]]
+uint test_uint(uint expr) { return QuadReadAcrossDiagonal(expr); }
+
+// CHECK: %[[RET:.*]] = call [[CC]]<2 x i32> @llvm.[[ICF]].quad.read.across.diagonal.v2i32(<2 x i32> %[[#]])
+// CHECK: ret <2 x i32> %[[RET]]
+uint2 test_uint2(uint2 expr) { return QuadReadAcrossDiagonal(expr); }
+
+// CHECK: %[[RET:.*]] = call [[CC]]<3 x i32> @llvm.[[ICF]].quad.read.across.diagonal.v3i32(<3 x i32> %[[#]])
+// CHECK: ret <3 x i32> %[[RET]]
+uint3 test_uint3(uint3 expr) { return QuadReadAcrossDiagonal(expr); }
+
+// CHECK: %[[RET:.*]] = call [[CC]]<4 x i32> @llvm.[[ICF]].quad.read.across.diagonal.v4i32(<4 x i32> %[[#]])
+// CHECK: ret <4 x i32> %[[RET]]
+uint4 test_uint4(uint4 expr) { return QuadReadAcrossDiagonal(expr); }
+
+// CHECK: %[[RET:.*]] = call [[CC]]i64 @llvm.[[ICF]].quad.read.across.diagonal.i64(i64 %[[#]])
+// CHECK: ret i64 %[[RET]]
+int64_t test_int64_t(int64_t expr) { return QuadReadAcrossDiagonal(expr); }
+
+// CHECK: %[[RET:.*]] = call [[CC]]<2 x i64> @llvm.[[ICF]].quad.read.across.diagonal.v2i64(<2 x i64> %[[#]])
+// CHECK: ret <2 x i64> %[[RET]]
+int64_t2 test_int64_t2(int64_t2 expr) { return QuadReadAcrossDiagonal(expr); }
+
+// CHECK: %[[RET:.*]] = call [[CC]]<3 x i64> @llvm.[[ICF]].quad.read.across.diagonal.v3i64(<3 x i64> %[[#]])
+// CHECK: ret <3 x i64> %[[RET]]
+int64_t3 test_int64_t3(int64_t3 expr) { return QuadReadAcrossDiagonal(expr); }
+
+// CHECK: %[[RET:.*]] = call [[CC]]<4 x i64> @llvm.[[ICF]].quad.read.across.diagonal.v4i64(<4 x i64> %[[#]])
+// CHECK: ret <4 x i64> %[[RET]]
+int64_t4 test_int64_t4(int64_t4 expr) { return QuadReadAcrossDiagonal(expr); }
+
+// CHECK: %[[RET:.*]] = call [[CC]]i64 @llvm.[[ICF]].quad.read.across.diagonal.i64(i64 %[[#]])
+// CHECK: ret i64 %[[RET]]
+uint64_t test_uint64_t(uint64_t expr) { return QuadReadAcrossDiagonal(expr); }
+
+// CHECK: %[[RET:.*]] = call [[CC]]<2 x i64> @llvm.[[ICF]].quad.read.across.diagonal.v2i64(<2 x i64> %[[#]])
+// CHECK: ret <2 x i64> %[[RET]]
+uint64_t2 test_uint64_t2(uint64_t2 expr) { return QuadReadAcrossDiagonal(expr); }
+
+// CHECK: %[[RET:.*]] = call [[CC]]<3 x i64> @llvm.[[ICF]].quad.read.across.diagonal.v3i64(<3 x i64> %[[#]])
+// CHECK: ret <3 x i64> %[[RET]]
+uint64_t3 test_uint64_t3(uint64_t3 expr) { return QuadReadAcrossDiagonal(expr); }
+
+// CHECK: %[[RET:.*]] = call [[CC]]<4 x i64> @llvm.[[ICF]].quad.read.across.diagonal.v4i64(<4 x i64> %[[#]])
+// CHECK: ret <4 x i64> %[[RET]]
+uint64_t4 test_uint64_t4(uint64_t4 expr) { return QuadReadAcrossDiagonal(expr); }
+
+// CHECK: %[[RET:.*]] = call reassoc nnan ninf nsz arcp afn [[CC]]float @llvm.[[ICF]].quad.read.across.diagonal.f32(float %[[#]])
+// CHECK: ret float %[[RET]]
+float test_float(float expr) { return QuadReadAcrossDiagonal(expr); }
+
+// CHECK: %[[RET:.*]] = call reassoc nnan ninf nsz arcp afn [[CC]]<2 x float> @llvm.[[ICF]].quad.read.across.diagonal.v2f32(<2 x float> %[[#]])
+// CHECK: ret <2 x float> %[[RET]]
+float2 test_float2(float2 expr) { return QuadReadAcrossDiagonal(expr); }
+
+// CHECK: %[[RET:.*]] = call reassoc nnan ninf nsz arcp afn [[CC]]<3 x float> @llvm.[[ICF]].quad.read.across.diagonal.v3f32(<3 x float> %[[#]])
+// CHECK: ret <3 x float> %[[RET]]
+float3 test_float3(float3 expr) { return QuadReadAcrossDiagonal(expr); }
+
+// CHECK: %[[RET:.*]] = call reassoc nnan ninf nsz arcp afn [[CC]]<4 x float> @llvm.[[ICF]].quad.read.across.diagonal.v4f32(<4 x float> %[[#]])
+// CHECK: ret <4 x float> %[[RET]]
+float4 test_float4(float4 expr) { return QuadReadAcrossDiagonal(expr); }
+
+// CHECK: %[[RET:.*]] = call reassoc nnan ninf nsz arcp afn [[CC]]double @llvm.[[ICF]].quad.read.across.diagonal.f64(double %[[#]])
+// CHECK: ret double %[[RET]]
+double test_double(double expr) { return QuadReadAcrossDiagonal(expr); }
+
+// CHECK: %[[RET:.*]] = call reassoc nnan ninf nsz arcp afn [[CC]]<2 x double> @llvm.[[ICF]].quad.read.across.diagonal.v2f64(<2 x double> %[[#]])
+// CHECK: ret <2 x double> %[[RET]]
+double2 test_double2(double2 expr) { return QuadReadAcrossDiagonal(expr); }
+
+// CHECK: %[[RET:.*]] = call reassoc nnan ninf nsz arcp afn [[CC]]<3 x double> @llvm.[[ICF]].quad.read.across.diagonal.v3f64(<3 x double> %[[#]])
+// CHECK: ret <3 x double> %[[RET]]
+double3 test_double3(double3 expr) { return QuadReadAcrossDiagonal(expr); }
+
+// CHECK: %[[RET:.*]] = call reassoc nnan ninf nsz arcp afn [[CC]]<4 x double> @llvm.[[ICF]].quad.read.across.diagonal.v4f64(<4 x double> %[[#]])
+// CHECK: ret <4 x double> %[[RET]]
+double4 test_double4(double4 expr) { return QuadReadAcrossDiagonal(expr); }
+
+// CHECK-NATIVE_HALF: %[[RET:.*]] = call reassoc nnan ninf nsz arcp afn [[CC]]half @llvm.[[ICF]].quad.read.across.diagonal.f16(half %[[#]])
+// CHECK-NATIVE_HALF: ret half %[[RET]]
+// CHECK-NO_HALF: %[[RET:.*]] = call reassoc nnan ninf nsz arcp afn [[CC]]float @llvm.[[ICF]].quad.read.across.diagonal.f32(float %[[#]])
+// CHECK-NO_HALF: ret float %[[RET]]
+half test_half(half expr) { return QuadReadAcrossDiagonal(expr); }
+
+// CHECK-NATIVE_HALF: %[[RET:.*]] = call reassoc nnan ninf nsz arcp afn [[CC]]<2 x half> @llvm.[[ICF]].quad.read.across.diagonal.v2f16(<2 x half> %[[#]])
+// CHECK-NATIVE_HALF: ret <2 x half> %[[RET]]
+// CHECK-NO_HALF: %[[RET:.*]] = call reassoc nnan ninf nsz arcp afn [[CC]]<2 x float> @llvm.[[ICF]].quad.read.across.diagonal.v2f32(<2 x float> %[[#]])
+// CHECK-NO_HALF: ret <2 x float> %[[RET]]
+half2 test_half2(half2 expr) { return QuadReadAcrossDiagonal(expr); }
+
+// CHECK-NATIVE_HALF: %[[RET:.*]] = call reassoc nnan ninf nsz arcp afn [[CC]]<3 x half> @llvm.[[ICF]].quad.read.across.diagonal.v3f16(<3 x half> %[[#]])
+// CHECK-NATIVE_HALF: ret <3 x half> %[[RET]]
+// CHECK-NO_HALF: %[[RET:.*]] = call reassoc nnan ninf nsz arcp afn [[CC]]<3 x float> @llvm.[[ICF]].quad.read.across.diagonal.v3f32(<3 x float> %[[#]])
+// CHECK-NO_HALF: ret <3 x float> %[[RET]]
+half3 test_half3(half3 expr) { return QuadReadAcrossDiagonal(expr); }
+
+// CHECK-NATIVE_HALF: %[[RET:.*]] = call reassoc nnan ninf nsz arcp afn [[CC]]<4 x half> @llvm.[[ICF]].quad.read.across.diagonal.v4f16(<4 x half> %[[#]])
+// CHECK-NATIVE_HALF: ret <4 x half> %[[RET]]
+// CHECK-NO_HALF: %[[RET:.*]] = call reassoc nnan ninf nsz arcp afn [[CC]]<4 x float> @llvm.[[ICF]].quad.read.across.diagonal.v4f32(<4 x float> %[[#]])
+// CHECK-NO_HALF: ret <4 x float> %[[RET]]
+half4 test_half4(half4 expr) { return QuadReadAcrossDiagonal(expr); }
+
+#ifdef __HLSL_ENABLE_16_BIT
+// CHECK-NATIVE_HALF: %[[RET:.*]] = call [[CC]]i16 @llvm.[[ICF]].quad.read.across.diagonal.i16(i16 %[[#]])
+// CHECK-NATIVE_HALF: ret i16 %[[RET]]
+int16_t test_int16_t(int16_t expr) { return QuadReadAcrossDiagonal(expr); }
+
+// CHECK-NATIVE_HALF: %[[RET:.*]] = call [[CC]]<2 x i16> @llvm.[[ICF]].quad.read.across.diagonal.v2i16(<2 x i16> %[[#]])
+// CHECK-NATIVE_HALF: ret <2 x i16> %[[RET]]
+int16_t2 test_int16_t2(int16_t2 expr) { return QuadReadAcrossDiagonal(expr); }
+
+// CHECK-NATIVE_HALF: %[[RET:.*]] = call [[CC]]<3 x i16> @llvm.[[ICF]].quad.read.across.diagonal.v3i16(<3 x i16> %[[#]])
+// CHECK-NATIVE_HALF: ret <3 x i16> %[[RET]]
+int16_t3 test_int16_t3(int16_t3 expr) { return QuadReadAcrossDiagonal(expr); }
+
+// CHECK-NATIVE_HALF: %[[RET:.*]] = call [[CC]]<4 x i16> @llvm.[[ICF]].quad.read.across.diagonal.v4i16(<4 x i16> %[[#]])
+// CHECK-NATIVE_HALF: ret <4 x i16> %[[RET]]
+int16_t4 test_int16_t4(int16_t4 expr) { return QuadReadAcrossDiagonal(expr); }
+
+// CHECK-NATIVE_HALF: %[[RET:.*]] = call [[CC]]i16 @llvm.[[ICF]].quad.read.across.diagonal.i16(i16 %[[#]])
+// CHECK-NATIVE_HALF: ret i16 %[[RET]]
+uint16_t test_uint16_t(uint16_t expr) { return QuadReadAcrossDiagonal(expr); }
+
+// CHECK-NATIVE_HALF: %[[RET:.*]] = call [[CC]]<2 x i16> @llvm.[[ICF]].quad.read.across.diagonal.v2i16(<2 x i16> %[[#]])
+// CHECK-NATIVE_HALF: ret <2 x i16> %[[RET]]
+uint16_t2 test_uint16_t2(uint16_t2 expr) { return QuadReadAcrossDiagonal(expr); }
+
+// CHECK-NATIVE_HALF: %[[RET:.*]] = call [[CC]]<3 x i16> @llvm.[[ICF]].quad.read.across.diagonal.v3i16(<3 x i16> %[[#]])
+// CHECK-NATIVE_HALF: ret <3 x i16> %[[RET]]
+uint16_t3 test_uint16_t3(uint16_t3 expr) { return QuadReadAcrossDiagonal(expr); }
+
+// CHECK-NATIVE_HALF: %[[RET:.*]] = call [[CC]]<4 x i16> @llvm.[[ICF]].quad.read.across.diagonal.v4i16(<4 x i16> %[[#]])
+// CHECK-NATIVE_HALF: ret <4 x i16> %[[RET]]
+uint16_t4 test_uint16_t4(uint16_t4 expr) { return QuadReadAcrossDiagonal(expr); }
+#endif
diff --git a/clang/test/SemaHLSL/BuiltIns/QuadReadAcrossDiagonal-errors.hlsl b/clang/test/SemaHLSL/BuiltIns/QuadReadAcrossDiagonal-errors.hlsl
new file mode 100644
index 0000000000000..322eacd7ca798
--- /dev/null
+++ b/clang/test/SemaHLSL/BuiltIns/QuadReadAcrossDiagonal-errors.hlsl
@@ -0,0 +1,28 @@
+// RUN: %clang_cc1 -finclude-default-header -triple dxil-pc-shadermodel6.6-library %s -verify
+
+int test_too_few_arg() {
+ return __builtin_hlsl_quad_read_across_diagonal();
+ // expected-error at -1 {{too few arguments to function call, expected 1, have 0}}
+}
+
+float2 test_too_many_arg(float2 p0) {
+ return __builtin_hlsl_quad_read_across_diagonal(p0, p0);
+ // expected-error at -1 {{too many arguments to function call, expected 1, have 2}}
+}
+
+bool test_expr_bool_type_check(bool p0) {
+ return __builtin_hlsl_quad_read_across_diagonal(p0);
+ // expected-error at -1 {{invalid operand of type 'bool'}}
+}
+
+bool2 test_expr_bool_vec_type_check(bool2 p0) {
+ return __builtin_hlsl_quad_read_across_diagonal(p0);
+ // expected-error at -1 {{invalid operand of type 'bool2' (aka 'vector<bool, 2>')}}
+}
+
+struct S { float f; };
+
+S test_expr_struct_type_check(S p0) {
+ return __builtin_hlsl_quad_read_across_diagonal(p0);
+ // expected-error at -1 {{invalid operand of type 'S' where a scalar or vector is required}}
+}
diff --git a/llvm/include/llvm/IR/IntrinsicsDirectX.td b/llvm/include/llvm/IR/IntrinsicsDirectX.td
index 86d1e1f045c14..d61231a1b6581 100644
--- a/llvm/include/llvm/IR/IntrinsicsDirectX.td
+++ b/llvm/include/llvm/IR/IntrinsicsDirectX.td
@@ -257,6 +257,7 @@ def int_dx_wave_prefix_product : DefaultAttrsIntrinsic<[llvm_any_ty], [LLVMMatch
def int_dx_wave_prefix_uproduct : DefaultAttrsIntrinsic<[llvm_anyint_ty], [LLVMMatchType<0>], [IntrConvergent, IntrNoMem]>;
def int_dx_quad_read_across_x : DefaultAttrsIntrinsic<[llvm_any_ty], [LLVMMatchType<0>], [IntrConvergent, IntrNoMem]>;
def int_dx_quad_read_across_y : DefaultAttrsIntrinsic<[llvm_any_ty], [LLVMMatchType<0>], [IntrConvergent, IntrNoMem]>;
+def int_dx_quad_read_across_diagonal : DefaultAttrsIntrinsic<[llvm_any_ty], [LLVMMatchType<0>], [IntrConvergent, IntrNoMem]>;
def int_dx_sign : DefaultAttrsIntrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i32_ty>], [llvm_any_ty], [IntrNoMem]>;
def int_dx_step : DefaultAttrsIntrinsic<[LLVMMatchType<0>], [llvm_anyfloat_ty, LLVMMatchType<0>], [IntrNoMem]>;
def int_dx_splitdouble : DefaultAttrsIntrinsic<[llvm_anyint_ty, LLVMMatchType<0>],
diff --git a/llvm/include/llvm/IR/IntrinsicsSPIRV.td b/llvm/include/llvm/IR/IntrinsicsSPIRV.td
index 3e57e67b51124..b57687a5fc4fe 100644
--- a/llvm/include/llvm/IR/IntrinsicsSPIRV.td
+++ b/llvm/include/llvm/IR/IntrinsicsSPIRV.td
@@ -155,6 +155,7 @@ def int_spv_rsqrt : DefaultAttrsIntrinsic<[LLVMMatchType<0>], [llvm_anyfloat_ty]
def int_spv_wave_prefix_product : DefaultAttrsIntrinsic<[llvm_any_ty], [LLVMMatchType<0>], [IntrConvergent, IntrNoMem]>;
def int_spv_quad_read_across_x : DefaultAttrsIntrinsic<[llvm_any_ty], [LLVMMatchType<0>], [IntrConvergent, IntrNoMem]>;
def int_spv_quad_read_across_y : DefaultAttrsIntrinsic<[llvm_any_ty], [LLVMMatchType<0>], [IntrConvergent, IntrNoMem]>;
+def int_spv_quad_read_across_diagonal : DefaultAttrsIntrinsic<[llvm_any_ty], [LLVMMatchType<0>], [IntrConvergent, IntrNoMem]>;
def int_spv_sign : DefaultAttrsIntrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i32_ty>], [llvm_any_ty], [IntrNoMem]>;
def int_spv_radians : DefaultAttrsIntrinsic<[LLVMMatchType<0>], [llvm_anyfloat_ty], [IntrNoMem]>;
def int_spv_group_memory_barrier : DefaultAttrsIntrinsic<[], [], [IntrConvergent]>;
diff --git a/llvm/lib/Target/DirectX/DXIL.td b/llvm/lib/Target/DirectX/DXIL.td
index 0a1e0114aa3bb..e16601fac4a5f 100644
--- a/llvm/lib/Target/DirectX/DXIL.td
+++ b/llvm/lib/Target/DirectX/DXIL.td
@@ -1234,6 +1234,10 @@ def QuadOp : DXILOp<123, quadOp> {
[
IntrinArgIndex<0>, IntrinArgI8<QuadOpKind_ReadAcrossY>
]>,
+ IntrinSelect<int_dx_quad_read_across_diagonal,
+ [
+ IntrinArgIndex<0>, IntrinArgI8<QuadOpKind_ReadAcrossDiagonal>
+ ]>,
];
let arguments = [OverloadTy, Int8Ty];
diff --git a/llvm/lib/Target/DirectX/DXILShaderFlags.cpp b/llvm/lib/Target/DirectX/DXILShaderFlags.cpp
index 997d441121970..3a9ff9f62361a 100644
--- a/llvm/lib/Target/DirectX/DXILShaderFlags.cpp
+++ b/llvm/lib/Target/DirectX/DXILShaderFlags.cpp
@@ -109,6 +109,7 @@ static bool checkWaveOps(Intrinsic::ID IID) {
// Quad Op Variants
case Intrinsic::dx_quad_read_across_x:
case Intrinsic::dx_quad_read_across_y:
+ case Intrinsic::dx_quad_read_across_diagonal:
return true;
}
}
diff --git a/llvm/lib/Target/DirectX/DirectXTargetTransformInfo.cpp b/llvm/lib/Target/DirectX/DirectXTargetTransformInfo.cpp
index 2badca6ae9bed..53eabf6fa5411 100644
--- a/llvm/lib/Target/DirectX/DirectXTargetTransformInfo.cpp
+++ b/llvm/lib/Target/DirectX/DirectXTargetTransformInfo.cpp
@@ -79,6 +79,7 @@ bool DirectXTTIImpl::isTargetIntrinsicTriviallyScalarizable(
case Intrinsic::dx_wave_prefix_uproduct:
case Intrinsic::dx_quad_read_across_x:
case Intrinsic::dx_quad_read_across_y:
+ case Intrinsic::dx_quad_read_across_diagonal:
case Intrinsic::dx_imad:
case Intrinsic::dx_umad:
case Intrinsic::dx_ddx_coarse:
diff --git a/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp b/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp
index 9e75932b20f59..269eb1c0ed1ba 100644
--- a/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp
@@ -4602,6 +4602,9 @@ bool SPIRVInstructionSelector::selectIntrinsic(Register ResVReg,
case Intrinsic::spv_quad_read_across_y: {
return selectQuadSwap(ResVReg, ResType, I, /*Direction*/ 1);
}
+ case Intrinsic::spv_quad_read_across_diagonal: {
+ return selectQuadSwap(ResVReg, ResType, I, /*Direction*/ 2);
+ }
case Intrinsic::spv_step:
return selectExtInst(ResVReg, ResType, I, CL::step, GL::Step);
case Intrinsic::spv_radians:
diff --git a/llvm/test/CodeGen/DirectX/QuadReadAcrossDiagonal.ll b/llvm/test/CodeGen/DirectX/QuadReadAcrossDiagonal.ll
new file mode 100644
index 0000000000000..274d5a01c8182
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/QuadReadAcrossDiagonal.ll
@@ -0,0 +1,87 @@
+; RUN: opt -S -scalarizer -dxil-op-lower -mtriple=dxil-pc-shadermodel6.3-library < %s | FileCheck %s
+
+; Test that for scalar values, QuadReadAcrossDiagonal maps down to the DirectX op
+
+define noundef half @quad_read_across_diagonal_half(half noundef %expr) {
+entry:
+; CHECK: call half @dx.op.quadOp.f16(i32 123, half %expr, i8 2)
+ %ret = call half @llvm.dx.quad.read.across.diagonal.f16(half %expr)
+ ret half %ret
+}
+
+define noundef float @quad_read_across_diagonal_float(float noundef %expr) {
+entry:
+; CHECK: call float @dx.op.quadOp.f32(i32 123, float %expr, i8 2)
+ %ret = call float @llvm.dx.quad.read.across.diagonal.f32(float %expr)
+ ret float %ret
+}
+
+define noundef double @quad_read_across_diagonal_double(double noundef %expr) {
+entry:
+; CHECK: call double @dx.op.quadOp.f64(i32 123, double %expr, i8 2)
+ %ret = call double @llvm.dx.quad.read.across.diagonal.f64(double %expr)
+ ret double %ret
+}
+
+define noundef i16 @quad_read_across_diagonal_i16(i16 noundef %expr) {
+entry:
+; CHECK: call i16 @dx.op.quadOp.i16(i32 123, i16 %expr, i8 2)
+ %ret = call i16 @llvm.dx.quad.read.across.diagonal.i16(i16 %expr)
+ ret i16 %ret
+}
+
+define noundef i32 @quad_read_across_diagonal_i32(i32 noundef %expr) {
+entry:
+; CHECK: call i32 @dx.op.quadOp.i32(i32 123, i32 %expr, i8 2)
+ %ret = call i32 @llvm.dx.quad.read.across.diagonal.i32(i32 %expr)
+ ret i32 %ret
+}
+
+define noundef i64 @quad_read_across_diagonal_i64(i64 noundef %expr) {
+entry:
+; CHECK: call i64 @dx.op.quadOp.i64(i32 123, i64 %expr, i8 2)
+ %ret = call i64 @llvm.dx.quad.read.across.diagonal.i64(i64 %expr)
+ ret i64 %ret
+}
+
+declare half @llvm.dx.quad.read.across.diagonal.f16(half)
+declare float @llvm.dx.quad.read.across.diagonal.f32(float)
+declare double @llvm.dx.quad.read.across.diagonal.f64(double)
+
+declare i16 @llvm.dx.quad.read.across.diagonal.i16(i16)
+declare i32 @llvm.dx.quad.read.across.diagonal.i32(i32)
+declare i64 @llvm.dx.quad.read.across.diagonal.i64(i64)
+
+; Test that for vector values, QuadReadAcrossDiagonal scalarizes and maps down to the
+; DirectX op
+
+define noundef <2 x half> @quad_read_across_diagonal_v2half(<2 x half> noundef %expr) {
+entry:
+; CHECK: call half @dx.op.quadOp.f16(i32 123, half %expr.i0, i8 2)
+; CHECK: call half @dx.op.quadOp.f16(i32 123, half %expr.i1, i8 2)
+ %ret = call <2 x half> @llvm.dx.quad.read.across.diagonal.v2f16(<2 x half> %expr)
+ ret <2 x half> %ret
+}
+
+define noundef <3 x i32> @quad_read_across_diagonal_v3i32(<3 x i32> noundef %expr) {
+entry:
+; CHECK: call i32 @dx.op.quadOp.i32(i32 123, i32 %expr.i0, i8 2)
+; CHECK: call i32 @dx.op.quadOp.i32(i32 123, i32 %expr.i1, i8 2)
+; CHECK: call i32 @dx.op.quadOp.i32(i32 123, i32 %expr.i2, i8 2)
+ %ret = call <3 x i32> @llvm.dx.quad.read.across.diagonal.v3i32(<3 x i32> %expr)
+ ret <3 x i32> %ret
+}
+
+define noundef <4 x double> @quad_read_across_diagonal_v4f64(<4 x double> noundef %expr) {
+entry:
+; CHECK: call double @dx.op.quadOp.f64(i32 123, double %expr.i0, i8 2)
+; CHECK: call double @dx.op.quadOp.f64(i32 123, double %expr.i1, i8 2)
+; CHECK: call double @dx.op.quadOp.f64(i32 123, double %expr.i2, i8 2)
+; CHECK: call double @dx.op.quadOp.f64(i32 123, double %expr.i3, i8 2)
+ %ret = call <4 x double> @llvm.dx.quad.read.across.diagonal.v4f64(<4 x double> %expr)
+ ret <4 x double> %ret
+}
+
+declare <2 x half> @llvm.dx.quad.read.across.diagonal.v2f16(<2 x half>)
+declare <3 x i32> @llvm.dx.quad.read.across.diagonal.v3i32(<3 x i32>)
+declare <4 x double> @llvm.dx.quad.read.across.diagonal.v4f64(<4 x double>)
diff --git a/llvm/test/CodeGen/DirectX/ShaderFlags/wave-ops.ll b/llvm/test/CodeGen/DirectX/ShaderFlags/wave-ops.ll
index 31b7618f62df2..e5141c57b3124 100644
--- a/llvm/test/CodeGen/DirectX/ShaderFlags/wave-ops.ll
+++ b/llvm/test/CodeGen/DirectX/ShaderFlags/wave-ops.ll
@@ -195,3 +195,10 @@ entry:
%ret = call i32 @llvm.dx.quad.read.across.y.i32(i32 %expr)
ret i32 %ret
}
+
+define noundef i32 @quad_read_across_diagonal_i32(i32 noundef %expr) {
+entry:
+ ; CHECK: Function quad_read_across_diagonal_i32 : [[WAVE_FLAG]]
+ %ret = call i32 @llvm.dx.quad.read.across.diagonal.i32(i32 %expr)
+ ret i32 %ret
+}
diff --git a/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/QuadReadAcrossDiagonal.ll b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/QuadReadAcrossDiagonal.ll
new file mode 100644
index 0000000000000..49862ee9f3b62
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/QuadReadAcrossDiagonal.ll
@@ -0,0 +1,44 @@
+; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv-vulkan-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-vulkan-unknown %s -o - -filetype=obj | spirv-val %}
+
+; Test lowering to spir-v backend for various types and scalar/vector
+
+; CHECK: OpCapability GroupNonUniformQuad
+
+; CHECK-DAG: %[[#f16:]] = OpTypeFloat 16
+; CHECK-DAG: %[[#f32:]] = OpTypeFloat 32
+; CHECK-DAG: %[[#uint:]] = OpTypeInt 32 0
+; CHECK-DAG: %[[#v4_half:]] = OpTypeVector %[[#f16]] 4
+; CHECK-DAG: %[[#scope:]] = OpConstant %[[#uint]] 3
+; CHECK-DAG: %[[#direction:]] = OpConstant %[[#uint]] 2
+
+; CHECK-LABEL: Begin function test_float
+; CHECK: %[[#fexpr:]] = OpFunctionParameter %[[#f32]]
+define float @test_float(float %fexpr) {
+entry:
+; CHECK: %[[#fret:]] = OpGroupNonUniformQuadSwap %[[#f32]] %[[#scope]] %[[#fexpr]] %[[#direction]]
+ %0 = call float @llvm.spv.quad.read.across.diagonal.f32(float %fexpr)
+ ret float %0
+}
+
+; CHECK-LABEL: Begin function test_int
+; CHECK: %[[#iexpr:]] = OpFunctionParameter %[[#uint]]
+define i32 @test_int(i32 %iexpr) {
+entry:
+; CHECK: %[[#iret:]] = OpGroupNonUniformQuadSwap %[[#uint]] %[[#scope]] %[[#iexpr]] %[[#direction]]
+ %0 = call i32 @llvm.spv.quad.read.across.diagonal.i32(i32 %iexpr)
+ ret i32 %0
+}
+
+; CHECK-LABEL: Begin function test_vhalf
+; CHECK: %[[#vbexpr:]] = OpFunctionParameter %[[#v4_half]]
+define <4 x half> @test_vhalf(<4 x half> %vbexpr) {
+entry:
+; CHECK: %[[#vhalfret:]] = OpGroupNonUniformQuadSwap %[[#v4_half]] %[[#scope]] %[[#vbexpr]] %[[#direction]]
+ %0 = call <4 x half> @llvm.spv.quad.read.across.diagonal.v4half(<4 x half> %vbexpr)
+ ret <4 x half> %0
+}
+
+declare float @llvm.spv.quad.read.across.diagonal.f32(float)
+declare i32 @llvm.spv.quad.read.across.diagonal.i32(i32)
+declare <4 x half> @llvm.spv.quad.read.across.diagonal.v4half(<4 x half>)
>From dae8bd7fcc5cc54e4aa8e73b3723a68d7ed74852 Mon Sep 17 00:00:00 2001
From: Kai Huang <kcloudy0717 at gmail.com>
Date: Thu, 26 Mar 2026 14:18:19 +0800
Subject: [PATCH 2/6] Removed hand-written intrinsic alias and use the new
tablegen approach
---
clang/include/clang/Basic/HLSLIntrinsics.td | 6 ++
.../lib/Headers/hlsl/hlsl_alias_intrinsics.h | 99 -------------------
2 files changed, 6 insertions(+), 99 deletions(-)
diff --git a/clang/include/clang/Basic/HLSLIntrinsics.td b/clang/include/clang/Basic/HLSLIntrinsics.td
index b205491e6ca78..bd33ed64396c1 100644
--- a/clang/include/clang/Basic/HLSLIntrinsics.td
+++ b/clang/include/clang/Basic/HLSLIntrinsics.td
@@ -318,3 +318,9 @@ class HLSLOneArgInlineBuiltin<string name> : HLSLBuiltin<name> {
// Include "hlsl_alias_intrinsics_gen.inc" in hlsl_alias_intrinsics.h
// Include "hlsl_inline_intrinsics_gen.inc" in hlsl_intrinsics.h
+def hlsl_quad_read_across_diagonal : HLSLOneArgBuiltin<"QuadReadAcrossDiagonal", "__builtin_hlsl_quad_read_across_diagonal"> {
+ let Doc = "Returns the value from the lane diagonally opposite from the current lane in the quad.";
+ let VaryingTypes = AllNumericTypes;
+ let VaryingMatDims = [];
+ let IsConvergent = 1;
+}
diff --git a/clang/lib/Headers/hlsl/hlsl_alias_intrinsics.h b/clang/lib/Headers/hlsl/hlsl_alias_intrinsics.h
index 372f67cef2993..80c415ef66644 100644
--- a/clang/lib/Headers/hlsl/hlsl_alias_intrinsics.h
+++ b/clang/lib/Headers/hlsl/hlsl_alias_intrinsics.h
@@ -3794,105 +3794,6 @@ __attribute__((convergent)) double3 QuadReadAcrossY(double3);
_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_y)
__attribute__((convergent)) double4 QuadReadAcrossY(double4);
-//===----------------------------------------------------------------------===//
-// QuadReadAcrossDiagonal builtins
-//===----------------------------------------------------------------------===//
-
-_HLSL_16BIT_AVAILABILITY_SHADERMODEL_DEFAULT()
-_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_diagonal)
-__attribute__((convergent)) half QuadReadAcrossDiagonal(half);
-_HLSL_16BIT_AVAILABILITY_SHADERMODEL_DEFAULT()
-_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_diagonal)
-__attribute__((convergent)) half2 QuadReadAcrossDiagonal(half2);
-_HLSL_16BIT_AVAILABILITY_SHADERMODEL_DEFAULT()
-_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_diagonal)
-__attribute__((convergent)) half3 QuadReadAcrossDiagonal(half3);
-_HLSL_16BIT_AVAILABILITY_SHADERMODEL_DEFAULT()
-_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_diagonal)
-__attribute__((convergent)) half4 QuadReadAcrossDiagonal(half4);
-
-#ifdef __HLSL_ENABLE_16_BIT
-_HLSL_16BIT_AVAILABILITY_SHADERMODEL_DEFAULT()
-_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_diagonal)
-__attribute__((convergent)) int16_t QuadReadAcrossDiagonal(int16_t);
-_HLSL_16BIT_AVAILABILITY_SHADERMODEL_DEFAULT()
-_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_diagonal)
-__attribute__((convergent)) int16_t2 QuadReadAcrossDiagonal(int16_t2);
-_HLSL_16BIT_AVAILABILITY_SHADERMODEL_DEFAULT()
-_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_diagonal)
-__attribute__((convergent)) int16_t3 QuadReadAcrossDiagonal(int16_t3);
-_HLSL_16BIT_AVAILABILITY_SHADERMODEL_DEFAULT()
-_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_diagonal)
-__attribute__((convergent)) int16_t4 QuadReadAcrossDiagonal(int16_t4);
-
-_HLSL_16BIT_AVAILABILITY_SHADERMODEL_DEFAULT()
-_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_diagonal)
-__attribute__((convergent)) uint16_t QuadReadAcrossDiagonal(uint16_t);
-_HLSL_16BIT_AVAILABILITY_SHADERMODEL_DEFAULT()
-_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_diagonal)
-__attribute__((convergent)) uint16_t2 QuadReadAcrossDiagonal(uint16_t2);
-_HLSL_16BIT_AVAILABILITY_SHADERMODEL_DEFAULT()
-_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_diagonal)
-__attribute__((convergent)) uint16_t3 QuadReadAcrossDiagonal(uint16_t3);
-_HLSL_16BIT_AVAILABILITY_SHADERMODEL_DEFAULT()
-_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_diagonal)
-__attribute__((convergent)) uint16_t4 QuadReadAcrossDiagonal(uint16_t4);
-#endif
-
-_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_diagonal)
-__attribute__((convergent)) int QuadReadAcrossDiagonal(int);
-_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_diagonal)
-__attribute__((convergent)) int2 QuadReadAcrossDiagonal(int2);
-_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_diagonal)
-__attribute__((convergent)) int3 QuadReadAcrossDiagonal(int3);
-_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_diagonal)
-__attribute__((convergent)) int4 QuadReadAcrossDiagonal(int4);
-
-_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_diagonal)
-__attribute__((convergent)) uint QuadReadAcrossDiagonal(uint);
-_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_diagonal)
-__attribute__((convergent)) uint2 QuadReadAcrossDiagonal(uint2);
-_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_diagonal)
-__attribute__((convergent)) uint3 QuadReadAcrossDiagonal(uint3);
-_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_diagonal)
-__attribute__((convergent)) uint4 QuadReadAcrossDiagonal(uint4);
-
-_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_diagonal)
-__attribute__((convergent)) int64_t QuadReadAcrossDiagonal(int64_t);
-_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_diagonal)
-__attribute__((convergent)) int64_t2 QuadReadAcrossDiagonal(int64_t2);
-_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_diagonal)
-__attribute__((convergent)) int64_t3 QuadReadAcrossDiagonal(int64_t3);
-_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_diagonal)
-__attribute__((convergent)) int64_t4 QuadReadAcrossDiagonal(int64_t4);
-
-_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_diagonal)
-__attribute__((convergent)) uint64_t QuadReadAcrossDiagonal(uint64_t);
-_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_diagonal)
-__attribute__((convergent)) uint64_t2 QuadReadAcrossDiagonal(uint64_t2);
-_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_diagonal)
-__attribute__((convergent)) uint64_t3 QuadReadAcrossDiagonal(uint64_t3);
-_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_diagonal)
-__attribute__((convergent)) uint64_t4 QuadReadAcrossDiagonal(uint64_t4);
-
-_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_diagonal)
-__attribute__((convergent)) float QuadReadAcrossDiagonal(float);
-_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_diagonal)
-__attribute__((convergent)) float2 QuadReadAcrossDiagonal(float2);
-_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_diagonal)
-__attribute__((convergent)) float3 QuadReadAcrossDiagonal(float3);
-_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_diagonal)
-__attribute__((convergent)) float4 QuadReadAcrossDiagonal(float4);
-
-_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_diagonal)
-__attribute__((convergent)) double QuadReadAcrossDiagonal(double);
-_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_diagonal)
-__attribute__((convergent)) double2 QuadReadAcrossDiagonal(double2);
-_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_diagonal)
-__attribute__((convergent)) double3 QuadReadAcrossDiagonal(double3);
-_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_diagonal)
-__attribute__((convergent)) double4 QuadReadAcrossDiagonal(double4);
-
//===----------------------------------------------------------------------===//
// sign builtins
//===----------------------------------------------------------------------===//
>From 3c6a3ff91c931c19b54e58e2688ad79cc28ccddd Mon Sep 17 00:00:00 2001
From: Kai Huang <kcloudy0717 at gmail.com>
Date: Thu, 26 Mar 2026 14:30:57 +0800
Subject: [PATCH 3/6] Add `--target-env` option to spirv-val
---
.../CodeGen/SPIRV/hlsl-intrinsics/QuadReadAcrossDiagonal.ll | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/QuadReadAcrossDiagonal.ll b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/QuadReadAcrossDiagonal.ll
index 49862ee9f3b62..e5374453ab869 100644
--- a/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/QuadReadAcrossDiagonal.ll
+++ b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/QuadReadAcrossDiagonal.ll
@@ -1,5 +1,5 @@
-; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv-vulkan-unknown %s -o - | FileCheck %s
-; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-vulkan-unknown %s -o - -filetype=obj | spirv-val %}
+; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv-vulkan1.3-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-vulkan1.3-unknown %s -o - -filetype=obj | spirv-val --target-env vulkan1.3 %}
; Test lowering to spir-v backend for various types and scalar/vector
>From 7086b2602e8a068a202a271e1e790bf6f4c48764 Mon Sep 17 00:00:00 2001
From: Kai Huang <kcloudy0717 at gmail.com>
Date: Thu, 26 Mar 2026 19:15:07 +0800
Subject: [PATCH 4/6] Fix Linkage capability error
---
.../SPIRV/hlsl-intrinsics/QuadReadAcrossDiagonal.ll | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/QuadReadAcrossDiagonal.ll b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/QuadReadAcrossDiagonal.ll
index e5374453ab869..4d4327662ac42 100644
--- a/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/QuadReadAcrossDiagonal.ll
+++ b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/QuadReadAcrossDiagonal.ll
@@ -1,5 +1,5 @@
-; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv-vulkan1.3-unknown %s -o - | FileCheck %s
-; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-vulkan1.3-unknown %s -o - -filetype=obj | spirv-val --target-env vulkan1.3 %}
+; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv-unknown-vulkan1.3 %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-unknown-vulkan1.3 %s -o - -filetype=obj | spirv-val --target-env vulkan1.3 %}
; Test lowering to spir-v backend for various types and scalar/vector
@@ -14,7 +14,7 @@
; CHECK-LABEL: Begin function test_float
; CHECK: %[[#fexpr:]] = OpFunctionParameter %[[#f32]]
-define float @test_float(float %fexpr) {
+define internal float @test_float(float %fexpr) {
entry:
; CHECK: %[[#fret:]] = OpGroupNonUniformQuadSwap %[[#f32]] %[[#scope]] %[[#fexpr]] %[[#direction]]
%0 = call float @llvm.spv.quad.read.across.diagonal.f32(float %fexpr)
@@ -23,7 +23,7 @@ entry:
; CHECK-LABEL: Begin function test_int
; CHECK: %[[#iexpr:]] = OpFunctionParameter %[[#uint]]
-define i32 @test_int(i32 %iexpr) {
+define internal i32 @test_int(i32 %iexpr) {
entry:
; CHECK: %[[#iret:]] = OpGroupNonUniformQuadSwap %[[#uint]] %[[#scope]] %[[#iexpr]] %[[#direction]]
%0 = call i32 @llvm.spv.quad.read.across.diagonal.i32(i32 %iexpr)
@@ -32,7 +32,7 @@ entry:
; CHECK-LABEL: Begin function test_vhalf
; CHECK: %[[#vbexpr:]] = OpFunctionParameter %[[#v4_half]]
-define <4 x half> @test_vhalf(<4 x half> %vbexpr) {
+define internal <4 x half> @test_vhalf(<4 x half> %vbexpr) {
entry:
; CHECK: %[[#vhalfret:]] = OpGroupNonUniformQuadSwap %[[#v4_half]] %[[#scope]] %[[#vbexpr]] %[[#direction]]
%0 = call <4 x half> @llvm.spv.quad.read.across.diagonal.v4half(<4 x half> %vbexpr)
>From 5167063dd319f95ccce50ccd4f9c9bcf08e265ba Mon Sep 17 00:00:00 2001
From: Kai Huang <kcloudy0717 at gmail.com>
Date: Fri, 27 Mar 2026 01:32:49 +0800
Subject: [PATCH 5/6] Implement a main function for the test to resolve linkage
error
---
.../CodeGen/SPIRV/hlsl-intrinsics/QuadReadAcrossDiagonal.ll | 6 ++++++
1 file changed, 6 insertions(+)
diff --git a/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/QuadReadAcrossDiagonal.ll b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/QuadReadAcrossDiagonal.ll
index 4d4327662ac42..dd1f16eeceb32 100644
--- a/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/QuadReadAcrossDiagonal.ll
+++ b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/QuadReadAcrossDiagonal.ll
@@ -39,6 +39,12 @@ entry:
ret <4 x half> %0
}
+define void @main() #0 {
+ ret void
+}
+
declare float @llvm.spv.quad.read.across.diagonal.f32(float)
declare i32 @llvm.spv.quad.read.across.diagonal.i32(i32)
declare <4 x half> @llvm.spv.quad.read.across.diagonal.v4half(<4 x half>)
+
+attributes #0 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" }
>From c7cded9011e61a5a3f994b2afca9c506dbf73202 Mon Sep 17 00:00:00 2001
From: Kai Huang <kcloudy0717 at gmail.com>
Date: Sun, 29 Mar 2026 16:53:24 +0800
Subject: [PATCH 6/6] Clean up tests by passing target/cc as part of FileCheck
cmd -D
---
.../builtins/QuadReadAcrossDiagonal.hlsl | 90 +++++++++----------
.../CodeGen/DirectX/QuadReadAcrossDiagonal.ll | 2 +-
2 files changed, 45 insertions(+), 47 deletions(-)
diff --git a/clang/test/CodeGenHLSL/builtins/QuadReadAcrossDiagonal.hlsl b/clang/test/CodeGenHLSL/builtins/QuadReadAcrossDiagonal.hlsl
index f89be574e8e75..0cb9167437574 100644
--- a/clang/test/CodeGenHLSL/builtins/QuadReadAcrossDiagonal.hlsl
+++ b/clang/test/CodeGenHLSL/builtins/QuadReadAcrossDiagonal.hlsl
@@ -1,171 +1,169 @@
// RUN: %clang_cc1 -finclude-default-header -x hlsl -triple \
// RUN: dxil-pc-shadermodel6.3-compute %s -fnative-half-type -fnative-int16-type \
// RUN: -emit-llvm -disable-llvm-passes -o - | FileCheck %s \
-// RUN: --check-prefixes=CHECK,CHECK-DXIL,CHECK-NATIVE_HALF
+// RUN: --check-prefixes=CHECK,CHECK-NATIVE_HALF -DTARGET=dx -DCC=""
// RUN: %clang_cc1 -finclude-default-header -x hlsl -triple \
// RUN: dxil-pc-shadermodel6.3-compute %s -emit-llvm -disable-llvm-passes \
-// RUN: -o - | FileCheck %s --check-prefixes=CHECK,CHECK-DXIL,CHECK-NO_HALF
+// RUN: -o - | FileCheck %s --check-prefixes=CHECK,CHECK-NO_HALF -DTARGET=dx -DCC=""
// RUN: %clang_cc1 -finclude-default-header -x hlsl -triple \
// RUN: spirv-unknown-vulkan-compute %s -fnative-half-type -fnative-int16-type \
// RUN: -emit-llvm -disable-llvm-passes -o - | FileCheck %s \
-// RUN: --check-prefixes=CHECK,CHECK-SPIRV,CHECK-NATIVE_HALF
+// RUN: --check-prefixes=CHECK,CHECK-NATIVE_HALF -DTARGET=spv -DCC="spir_func "
// RUN: %clang_cc1 -finclude-default-header -x hlsl -triple \
// RUN: spirv-unknown-vulkan-compute %s -emit-llvm -disable-llvm-passes \
-// RUN: -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SPIRV,CHECK-NO_HALF
+// RUN: -o - | FileCheck %s --check-prefixes=CHECK,CHECK-NO_HALF -DTARGET=spv -DCC="spir_func "
-// Capture the expected interchange format so not every check needs to be duplicated
-// CHECK-DXIL: %[[RET:.*]] = call [[CC:]]i32 @llvm.[[ICF:dx]].quad.read.across.diagonal.i32(i32 %[[#]])
-// CHECK-SPIRV: %[[RET:.*]] = call [[CC:spir_func ]]i32 @llvm.[[ICF:spv]].quad.read.across.diagonal.i32(i32 %[[#]])
+// CHECK: %[[RET:.*]] = call [[CC]]i32 @llvm.[[TARGET]].quad.read.across.diagonal.i32(i32 %[[#]])
// CHECK: ret i32 %[[RET]]
int test_int(int expr) { return QuadReadAcrossDiagonal(expr); }
-// CHECK: %[[RET:.*]] = call [[CC]]<2 x i32> @llvm.[[ICF]].quad.read.across.diagonal.v2i32(<2 x i32> %[[#]])
+// CHECK: %[[RET:.*]] = call [[CC]]<2 x i32> @llvm.[[TARGET]].quad.read.across.diagonal.v2i32(<2 x i32> %[[#]])
// CHECK: ret <2 x i32> %[[RET]]
int2 test_int2(int2 expr) { return QuadReadAcrossDiagonal(expr); }
-// CHECK: %[[RET:.*]] = call [[CC]]<3 x i32> @llvm.[[ICF]].quad.read.across.diagonal.v3i32(<3 x i32> %[[#]])
+// CHECK: %[[RET:.*]] = call [[CC]]<3 x i32> @llvm.[[TARGET]].quad.read.across.diagonal.v3i32(<3 x i32> %[[#]])
// CHECK: ret <3 x i32> %[[RET]]
int3 test_int3(int3 expr) { return QuadReadAcrossDiagonal(expr); }
-// CHECK: %[[RET:.*]] = call [[CC]]<4 x i32> @llvm.[[ICF]].quad.read.across.diagonal.v4i32(<4 x i32> %[[#]])
+// CHECK: %[[RET:.*]] = call [[CC]]<4 x i32> @llvm.[[TARGET]].quad.read.across.diagonal.v4i32(<4 x i32> %[[#]])
// CHECK: ret <4 x i32> %[[RET]]
int4 test_int4(int4 expr) { return QuadReadAcrossDiagonal(expr); }
-// CHECK: %[[RET:.*]] = call [[CC]]i32 @llvm.[[ICF]].quad.read.across.diagonal.i32(i32 %[[#]])
+// CHECK: %[[RET:.*]] = call [[CC]]i32 @llvm.[[TARGET]].quad.read.across.diagonal.i32(i32 %[[#]])
// CHECK: ret i32 %[[RET]]
uint test_uint(uint expr) { return QuadReadAcrossDiagonal(expr); }
-// CHECK: %[[RET:.*]] = call [[CC]]<2 x i32> @llvm.[[ICF]].quad.read.across.diagonal.v2i32(<2 x i32> %[[#]])
+// CHECK: %[[RET:.*]] = call [[CC]]<2 x i32> @llvm.[[TARGET]].quad.read.across.diagonal.v2i32(<2 x i32> %[[#]])
// CHECK: ret <2 x i32> %[[RET]]
uint2 test_uint2(uint2 expr) { return QuadReadAcrossDiagonal(expr); }
-// CHECK: %[[RET:.*]] = call [[CC]]<3 x i32> @llvm.[[ICF]].quad.read.across.diagonal.v3i32(<3 x i32> %[[#]])
+// CHECK: %[[RET:.*]] = call [[CC]]<3 x i32> @llvm.[[TARGET]].quad.read.across.diagonal.v3i32(<3 x i32> %[[#]])
// CHECK: ret <3 x i32> %[[RET]]
uint3 test_uint3(uint3 expr) { return QuadReadAcrossDiagonal(expr); }
-// CHECK: %[[RET:.*]] = call [[CC]]<4 x i32> @llvm.[[ICF]].quad.read.across.diagonal.v4i32(<4 x i32> %[[#]])
+// CHECK: %[[RET:.*]] = call [[CC]]<4 x i32> @llvm.[[TARGET]].quad.read.across.diagonal.v4i32(<4 x i32> %[[#]])
// CHECK: ret <4 x i32> %[[RET]]
uint4 test_uint4(uint4 expr) { return QuadReadAcrossDiagonal(expr); }
-// CHECK: %[[RET:.*]] = call [[CC]]i64 @llvm.[[ICF]].quad.read.across.diagonal.i64(i64 %[[#]])
+// CHECK: %[[RET:.*]] = call [[CC]]i64 @llvm.[[TARGET]].quad.read.across.diagonal.i64(i64 %[[#]])
// CHECK: ret i64 %[[RET]]
int64_t test_int64_t(int64_t expr) { return QuadReadAcrossDiagonal(expr); }
-// CHECK: %[[RET:.*]] = call [[CC]]<2 x i64> @llvm.[[ICF]].quad.read.across.diagonal.v2i64(<2 x i64> %[[#]])
+// CHECK: %[[RET:.*]] = call [[CC]]<2 x i64> @llvm.[[TARGET]].quad.read.across.diagonal.v2i64(<2 x i64> %[[#]])
// CHECK: ret <2 x i64> %[[RET]]
int64_t2 test_int64_t2(int64_t2 expr) { return QuadReadAcrossDiagonal(expr); }
-// CHECK: %[[RET:.*]] = call [[CC]]<3 x i64> @llvm.[[ICF]].quad.read.across.diagonal.v3i64(<3 x i64> %[[#]])
+// CHECK: %[[RET:.*]] = call [[CC]]<3 x i64> @llvm.[[TARGET]].quad.read.across.diagonal.v3i64(<3 x i64> %[[#]])
// CHECK: ret <3 x i64> %[[RET]]
int64_t3 test_int64_t3(int64_t3 expr) { return QuadReadAcrossDiagonal(expr); }
-// CHECK: %[[RET:.*]] = call [[CC]]<4 x i64> @llvm.[[ICF]].quad.read.across.diagonal.v4i64(<4 x i64> %[[#]])
+// CHECK: %[[RET:.*]] = call [[CC]]<4 x i64> @llvm.[[TARGET]].quad.read.across.diagonal.v4i64(<4 x i64> %[[#]])
// CHECK: ret <4 x i64> %[[RET]]
int64_t4 test_int64_t4(int64_t4 expr) { return QuadReadAcrossDiagonal(expr); }
-// CHECK: %[[RET:.*]] = call [[CC]]i64 @llvm.[[ICF]].quad.read.across.diagonal.i64(i64 %[[#]])
+// CHECK: %[[RET:.*]] = call [[CC]]i64 @llvm.[[TARGET]].quad.read.across.diagonal.i64(i64 %[[#]])
// CHECK: ret i64 %[[RET]]
uint64_t test_uint64_t(uint64_t expr) { return QuadReadAcrossDiagonal(expr); }
-// CHECK: %[[RET:.*]] = call [[CC]]<2 x i64> @llvm.[[ICF]].quad.read.across.diagonal.v2i64(<2 x i64> %[[#]])
+// CHECK: %[[RET:.*]] = call [[CC]]<2 x i64> @llvm.[[TARGET]].quad.read.across.diagonal.v2i64(<2 x i64> %[[#]])
// CHECK: ret <2 x i64> %[[RET]]
uint64_t2 test_uint64_t2(uint64_t2 expr) { return QuadReadAcrossDiagonal(expr); }
-// CHECK: %[[RET:.*]] = call [[CC]]<3 x i64> @llvm.[[ICF]].quad.read.across.diagonal.v3i64(<3 x i64> %[[#]])
+// CHECK: %[[RET:.*]] = call [[CC]]<3 x i64> @llvm.[[TARGET]].quad.read.across.diagonal.v3i64(<3 x i64> %[[#]])
// CHECK: ret <3 x i64> %[[RET]]
uint64_t3 test_uint64_t3(uint64_t3 expr) { return QuadReadAcrossDiagonal(expr); }
-// CHECK: %[[RET:.*]] = call [[CC]]<4 x i64> @llvm.[[ICF]].quad.read.across.diagonal.v4i64(<4 x i64> %[[#]])
+// CHECK: %[[RET:.*]] = call [[CC]]<4 x i64> @llvm.[[TARGET]].quad.read.across.diagonal.v4i64(<4 x i64> %[[#]])
// CHECK: ret <4 x i64> %[[RET]]
uint64_t4 test_uint64_t4(uint64_t4 expr) { return QuadReadAcrossDiagonal(expr); }
-// CHECK: %[[RET:.*]] = call reassoc nnan ninf nsz arcp afn [[CC]]float @llvm.[[ICF]].quad.read.across.diagonal.f32(float %[[#]])
+// CHECK: %[[RET:.*]] = call reassoc nnan ninf nsz arcp afn [[CC]]float @llvm.[[TARGET]].quad.read.across.diagonal.f32(float %[[#]])
// CHECK: ret float %[[RET]]
float test_float(float expr) { return QuadReadAcrossDiagonal(expr); }
-// CHECK: %[[RET:.*]] = call reassoc nnan ninf nsz arcp afn [[CC]]<2 x float> @llvm.[[ICF]].quad.read.across.diagonal.v2f32(<2 x float> %[[#]])
+// CHECK: %[[RET:.*]] = call reassoc nnan ninf nsz arcp afn [[CC]]<2 x float> @llvm.[[TARGET]].quad.read.across.diagonal.v2f32(<2 x float> %[[#]])
// CHECK: ret <2 x float> %[[RET]]
float2 test_float2(float2 expr) { return QuadReadAcrossDiagonal(expr); }
-// CHECK: %[[RET:.*]] = call reassoc nnan ninf nsz arcp afn [[CC]]<3 x float> @llvm.[[ICF]].quad.read.across.diagonal.v3f32(<3 x float> %[[#]])
+// CHECK: %[[RET:.*]] = call reassoc nnan ninf nsz arcp afn [[CC]]<3 x float> @llvm.[[TARGET]].quad.read.across.diagonal.v3f32(<3 x float> %[[#]])
// CHECK: ret <3 x float> %[[RET]]
float3 test_float3(float3 expr) { return QuadReadAcrossDiagonal(expr); }
-// CHECK: %[[RET:.*]] = call reassoc nnan ninf nsz arcp afn [[CC]]<4 x float> @llvm.[[ICF]].quad.read.across.diagonal.v4f32(<4 x float> %[[#]])
+// CHECK: %[[RET:.*]] = call reassoc nnan ninf nsz arcp afn [[CC]]<4 x float> @llvm.[[TARGET]].quad.read.across.diagonal.v4f32(<4 x float> %[[#]])
// CHECK: ret <4 x float> %[[RET]]
float4 test_float4(float4 expr) { return QuadReadAcrossDiagonal(expr); }
-// CHECK: %[[RET:.*]] = call reassoc nnan ninf nsz arcp afn [[CC]]double @llvm.[[ICF]].quad.read.across.diagonal.f64(double %[[#]])
+// CHECK: %[[RET:.*]] = call reassoc nnan ninf nsz arcp afn [[CC]]double @llvm.[[TARGET]].quad.read.across.diagonal.f64(double %[[#]])
// CHECK: ret double %[[RET]]
double test_double(double expr) { return QuadReadAcrossDiagonal(expr); }
-// CHECK: %[[RET:.*]] = call reassoc nnan ninf nsz arcp afn [[CC]]<2 x double> @llvm.[[ICF]].quad.read.across.diagonal.v2f64(<2 x double> %[[#]])
+// CHECK: %[[RET:.*]] = call reassoc nnan ninf nsz arcp afn [[CC]]<2 x double> @llvm.[[TARGET]].quad.read.across.diagonal.v2f64(<2 x double> %[[#]])
// CHECK: ret <2 x double> %[[RET]]
double2 test_double2(double2 expr) { return QuadReadAcrossDiagonal(expr); }
-// CHECK: %[[RET:.*]] = call reassoc nnan ninf nsz arcp afn [[CC]]<3 x double> @llvm.[[ICF]].quad.read.across.diagonal.v3f64(<3 x double> %[[#]])
+// CHECK: %[[RET:.*]] = call reassoc nnan ninf nsz arcp afn [[CC]]<3 x double> @llvm.[[TARGET]].quad.read.across.diagonal.v3f64(<3 x double> %[[#]])
// CHECK: ret <3 x double> %[[RET]]
double3 test_double3(double3 expr) { return QuadReadAcrossDiagonal(expr); }
-// CHECK: %[[RET:.*]] = call reassoc nnan ninf nsz arcp afn [[CC]]<4 x double> @llvm.[[ICF]].quad.read.across.diagonal.v4f64(<4 x double> %[[#]])
+// CHECK: %[[RET:.*]] = call reassoc nnan ninf nsz arcp afn [[CC]]<4 x double> @llvm.[[TARGET]].quad.read.across.diagonal.v4f64(<4 x double> %[[#]])
// CHECK: ret <4 x double> %[[RET]]
double4 test_double4(double4 expr) { return QuadReadAcrossDiagonal(expr); }
-// CHECK-NATIVE_HALF: %[[RET:.*]] = call reassoc nnan ninf nsz arcp afn [[CC]]half @llvm.[[ICF]].quad.read.across.diagonal.f16(half %[[#]])
+// CHECK-NATIVE_HALF: %[[RET:.*]] = call reassoc nnan ninf nsz arcp afn [[CC]]half @llvm.[[TARGET]].quad.read.across.diagonal.f16(half %[[#]])
// CHECK-NATIVE_HALF: ret half %[[RET]]
-// CHECK-NO_HALF: %[[RET:.*]] = call reassoc nnan ninf nsz arcp afn [[CC]]float @llvm.[[ICF]].quad.read.across.diagonal.f32(float %[[#]])
+// CHECK-NO_HALF: %[[RET:.*]] = call reassoc nnan ninf nsz arcp afn [[CC]]float @llvm.[[TARGET]].quad.read.across.diagonal.f32(float %[[#]])
// CHECK-NO_HALF: ret float %[[RET]]
half test_half(half expr) { return QuadReadAcrossDiagonal(expr); }
-// CHECK-NATIVE_HALF: %[[RET:.*]] = call reassoc nnan ninf nsz arcp afn [[CC]]<2 x half> @llvm.[[ICF]].quad.read.across.diagonal.v2f16(<2 x half> %[[#]])
+// CHECK-NATIVE_HALF: %[[RET:.*]] = call reassoc nnan ninf nsz arcp afn [[CC]]<2 x half> @llvm.[[TARGET]].quad.read.across.diagonal.v2f16(<2 x half> %[[#]])
// CHECK-NATIVE_HALF: ret <2 x half> %[[RET]]
-// CHECK-NO_HALF: %[[RET:.*]] = call reassoc nnan ninf nsz arcp afn [[CC]]<2 x float> @llvm.[[ICF]].quad.read.across.diagonal.v2f32(<2 x float> %[[#]])
+// CHECK-NO_HALF: %[[RET:.*]] = call reassoc nnan ninf nsz arcp afn [[CC]]<2 x float> @llvm.[[TARGET]].quad.read.across.diagonal.v2f32(<2 x float> %[[#]])
// CHECK-NO_HALF: ret <2 x float> %[[RET]]
half2 test_half2(half2 expr) { return QuadReadAcrossDiagonal(expr); }
-// CHECK-NATIVE_HALF: %[[RET:.*]] = call reassoc nnan ninf nsz arcp afn [[CC]]<3 x half> @llvm.[[ICF]].quad.read.across.diagonal.v3f16(<3 x half> %[[#]])
+// CHECK-NATIVE_HALF: %[[RET:.*]] = call reassoc nnan ninf nsz arcp afn [[CC]]<3 x half> @llvm.[[TARGET]].quad.read.across.diagonal.v3f16(<3 x half> %[[#]])
// CHECK-NATIVE_HALF: ret <3 x half> %[[RET]]
-// CHECK-NO_HALF: %[[RET:.*]] = call reassoc nnan ninf nsz arcp afn [[CC]]<3 x float> @llvm.[[ICF]].quad.read.across.diagonal.v3f32(<3 x float> %[[#]])
+// CHECK-NO_HALF: %[[RET:.*]] = call reassoc nnan ninf nsz arcp afn [[CC]]<3 x float> @llvm.[[TARGET]].quad.read.across.diagonal.v3f32(<3 x float> %[[#]])
// CHECK-NO_HALF: ret <3 x float> %[[RET]]
half3 test_half3(half3 expr) { return QuadReadAcrossDiagonal(expr); }
-// CHECK-NATIVE_HALF: %[[RET:.*]] = call reassoc nnan ninf nsz arcp afn [[CC]]<4 x half> @llvm.[[ICF]].quad.read.across.diagonal.v4f16(<4 x half> %[[#]])
+// CHECK-NATIVE_HALF: %[[RET:.*]] = call reassoc nnan ninf nsz arcp afn [[CC]]<4 x half> @llvm.[[TARGET]].quad.read.across.diagonal.v4f16(<4 x half> %[[#]])
// CHECK-NATIVE_HALF: ret <4 x half> %[[RET]]
-// CHECK-NO_HALF: %[[RET:.*]] = call reassoc nnan ninf nsz arcp afn [[CC]]<4 x float> @llvm.[[ICF]].quad.read.across.diagonal.v4f32(<4 x float> %[[#]])
+// CHECK-NO_HALF: %[[RET:.*]] = call reassoc nnan ninf nsz arcp afn [[CC]]<4 x float> @llvm.[[TARGET]].quad.read.across.diagonal.v4f32(<4 x float> %[[#]])
// CHECK-NO_HALF: ret <4 x float> %[[RET]]
half4 test_half4(half4 expr) { return QuadReadAcrossDiagonal(expr); }
#ifdef __HLSL_ENABLE_16_BIT
-// CHECK-NATIVE_HALF: %[[RET:.*]] = call [[CC]]i16 @llvm.[[ICF]].quad.read.across.diagonal.i16(i16 %[[#]])
+// CHECK-NATIVE_HALF: %[[RET:.*]] = call [[CC]]i16 @llvm.[[TARGET]].quad.read.across.diagonal.i16(i16 %[[#]])
// CHECK-NATIVE_HALF: ret i16 %[[RET]]
int16_t test_int16_t(int16_t expr) { return QuadReadAcrossDiagonal(expr); }
-// CHECK-NATIVE_HALF: %[[RET:.*]] = call [[CC]]<2 x i16> @llvm.[[ICF]].quad.read.across.diagonal.v2i16(<2 x i16> %[[#]])
+// CHECK-NATIVE_HALF: %[[RET:.*]] = call [[CC]]<2 x i16> @llvm.[[TARGET]].quad.read.across.diagonal.v2i16(<2 x i16> %[[#]])
// CHECK-NATIVE_HALF: ret <2 x i16> %[[RET]]
int16_t2 test_int16_t2(int16_t2 expr) { return QuadReadAcrossDiagonal(expr); }
-// CHECK-NATIVE_HALF: %[[RET:.*]] = call [[CC]]<3 x i16> @llvm.[[ICF]].quad.read.across.diagonal.v3i16(<3 x i16> %[[#]])
+// CHECK-NATIVE_HALF: %[[RET:.*]] = call [[CC]]<3 x i16> @llvm.[[TARGET]].quad.read.across.diagonal.v3i16(<3 x i16> %[[#]])
// CHECK-NATIVE_HALF: ret <3 x i16> %[[RET]]
int16_t3 test_int16_t3(int16_t3 expr) { return QuadReadAcrossDiagonal(expr); }
-// CHECK-NATIVE_HALF: %[[RET:.*]] = call [[CC]]<4 x i16> @llvm.[[ICF]].quad.read.across.diagonal.v4i16(<4 x i16> %[[#]])
+// CHECK-NATIVE_HALF: %[[RET:.*]] = call [[CC]]<4 x i16> @llvm.[[TARGET]].quad.read.across.diagonal.v4i16(<4 x i16> %[[#]])
// CHECK-NATIVE_HALF: ret <4 x i16> %[[RET]]
int16_t4 test_int16_t4(int16_t4 expr) { return QuadReadAcrossDiagonal(expr); }
-// CHECK-NATIVE_HALF: %[[RET:.*]] = call [[CC]]i16 @llvm.[[ICF]].quad.read.across.diagonal.i16(i16 %[[#]])
+// CHECK-NATIVE_HALF: %[[RET:.*]] = call [[CC]]i16 @llvm.[[TARGET]].quad.read.across.diagonal.i16(i16 %[[#]])
// CHECK-NATIVE_HALF: ret i16 %[[RET]]
uint16_t test_uint16_t(uint16_t expr) { return QuadReadAcrossDiagonal(expr); }
-// CHECK-NATIVE_HALF: %[[RET:.*]] = call [[CC]]<2 x i16> @llvm.[[ICF]].quad.read.across.diagonal.v2i16(<2 x i16> %[[#]])
+// CHECK-NATIVE_HALF: %[[RET:.*]] = call [[CC]]<2 x i16> @llvm.[[TARGET]].quad.read.across.diagonal.v2i16(<2 x i16> %[[#]])
// CHECK-NATIVE_HALF: ret <2 x i16> %[[RET]]
uint16_t2 test_uint16_t2(uint16_t2 expr) { return QuadReadAcrossDiagonal(expr); }
-// CHECK-NATIVE_HALF: %[[RET:.*]] = call [[CC]]<3 x i16> @llvm.[[ICF]].quad.read.across.diagonal.v3i16(<3 x i16> %[[#]])
+// CHECK-NATIVE_HALF: %[[RET:.*]] = call [[CC]]<3 x i16> @llvm.[[TARGET]].quad.read.across.diagonal.v3i16(<3 x i16> %[[#]])
// CHECK-NATIVE_HALF: ret <3 x i16> %[[RET]]
uint16_t3 test_uint16_t3(uint16_t3 expr) { return QuadReadAcrossDiagonal(expr); }
-// CHECK-NATIVE_HALF: %[[RET:.*]] = call [[CC]]<4 x i16> @llvm.[[ICF]].quad.read.across.diagonal.v4i16(<4 x i16> %[[#]])
+// CHECK-NATIVE_HALF: %[[RET:.*]] = call [[CC]]<4 x i16> @llvm.[[TARGET]].quad.read.across.diagonal.v4i16(<4 x i16> %[[#]])
// CHECK-NATIVE_HALF: ret <4 x i16> %[[RET]]
uint16_t4 test_uint16_t4(uint16_t4 expr) { return QuadReadAcrossDiagonal(expr); }
#endif
diff --git a/llvm/test/CodeGen/DirectX/QuadReadAcrossDiagonal.ll b/llvm/test/CodeGen/DirectX/QuadReadAcrossDiagonal.ll
index 274d5a01c8182..68c518af5ddb0 100644
--- a/llvm/test/CodeGen/DirectX/QuadReadAcrossDiagonal.ll
+++ b/llvm/test/CodeGen/DirectX/QuadReadAcrossDiagonal.ll
@@ -52,7 +52,7 @@ declare i16 @llvm.dx.quad.read.across.diagonal.i16(i16)
declare i32 @llvm.dx.quad.read.across.diagonal.i32(i32)
declare i64 @llvm.dx.quad.read.across.diagonal.i64(i64)
-; Test that for vector values, QuadReadAcrossX scalarizes and maps down to the
+; Test that for vector values, QuadReadAcrossDiagonal scalarizes and maps down to the
; DirectX op
define noundef <2 x half> @quad_read_across_diagonal_v2half(<2 x half> noundef %expr) {
More information about the llvm-commits
mailing list