[clang] [llvm] [HLSL] Implement elementwise firstbitlow builtin (PR #116858)
Ashley Coleman via cfe-commits
cfe-commits at lists.llvm.org
Tue Nov 19 10:57:00 PST 2024
https://github.com/V-FEXrt created https://github.com/llvm/llvm-project/pull/116858
Closes https://github.com/llvm/llvm-project/issues/99116
- [x] Implement firstbitlow clang builtin (see the usage sketch after this list)
- [x] Link firstbitlow clang builtin with hlsl_intrinsics.h
- [x] Add sema checks for firstbitlow to CheckHLSLBuiltinFunctionCall in SemaChecking.cpp
- [x] Add codegen for firstbitlow to EmitHLSLBuiltinExpr in CGBuiltin.cpp
- [x] Add codegen tests to clang/test/CodeGenHLSL/builtins/firstbitlow.hlsl
- [x] Add sema tests to clang/test/SemaHLSL/BuiltIns/firstbitlow-errors.hlsl
- [x] Create the int_dx_firstbitlow intrinsic in IntrinsicsDirectX.td
- [x] Create the DXILOpMapping of int_dx_firstbitlow to 32 in DXIL.td
- [x] Create the firstbitlow.ll and firstbitlow_errors.ll tests in llvm/test/CodeGen/DirectX/
- [x] Create the int_spv_firstbitlow intrinsic in IntrinsicsSPIRV.td
- [x] In SPIRVInstructionSelector.cpp create the firstbitlow lowering and map it to int_spv_firstbitlow in SPIRVInstructionSelector::selectIntrinsic.
- [x] Create SPIR-V backend test case in llvm/test/CodeGen/SPIRV/hlsl-intrinsics/firstbitlow.ll
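
For reference, a minimal HLSL usage sketch of the new builtin (the entry point, resource binding, and literal values are illustrative, not part of this patch):

    RWStructuredBuffer<uint> Out : register(u0);

    [numthreads(1, 1, 1)]
    void main(uint3 tid : SV_DispatchThreadID) {
      // firstbitlow returns the zero-based index of the lowest set bit, per component.
      uint s  = firstbitlow(8u);                   // 0b1000 -> bit 3, so s == 3
      uint4 v = firstbitlow(uint4(1, 2, 4, 8));    // elementwise: (0, 1, 2, 3)
      Out[tid.x] = s + v.w;
    }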
From 49021b33222e0553a3ea52d9802bf133cfa97700 Mon Sep 17 00:00:00 2001
From: Ashley Coleman <ascoleman at microsoft.com>
Date: Thu, 14 Nov 2024 11:53:39 -0700
Subject: [PATCH 1/3] [HLSL] Implement elementwise firstbitlow builtin
---
clang/include/clang/Basic/Builtins.td | 6 +
clang/lib/CodeGen/CGBuiltin.cpp | 9 +-
clang/lib/CodeGen/CGHLSLRuntime.h | 1 +
clang/lib/Headers/hlsl/hlsl_intrinsics.h | 72 ++++++++
clang/lib/Sema/SemaHLSL.cpp | 3 +-
.../CodeGenHLSL/builtins/firstbitlow.hlsl | 153 ++++++++++++++++
.../BuiltIns/firstbithigh-errors.hlsl | 6 +-
.../SemaHLSL/BuiltIns/firstbitlow-errors.hlsl | 26 +++
llvm/include/llvm/IR/IntrinsicsDirectX.td | 2 +
llvm/include/llvm/IR/IntrinsicsSPIRV.td | 1 +
llvm/lib/Target/DirectX/DXIL.td | 13 ++
.../DirectX/DirectXTargetTransformInfo.cpp | 1 +
.../Target/SPIRV/SPIRVInstructionSelector.cpp | 169 ++++++++++++++++++
llvm/test/CodeGen/DirectX/firstbitlow.ll | 47 +++++
.../test/CodeGen/DirectX/firstbitlow_error.ll | 10 ++
.../SPIRV/hlsl-intrinsics/firstbitlow.ll | 104 +++++++++++
16 files changed, 617 insertions(+), 6 deletions(-)
create mode 100644 clang/test/CodeGenHLSL/builtins/firstbitlow.hlsl
create mode 100644 clang/test/SemaHLSL/BuiltIns/firstbitlow-errors.hlsl
create mode 100644 llvm/test/CodeGen/DirectX/firstbitlow.ll
create mode 100644 llvm/test/CodeGen/DirectX/firstbitlow_error.ll
create mode 100644 llvm/test/CodeGen/SPIRV/hlsl-intrinsics/firstbitlow.ll
diff --git a/clang/include/clang/Basic/Builtins.td b/clang/include/clang/Basic/Builtins.td
index e866605ac05c09..afe0a311f236d8 100644
--- a/clang/include/clang/Basic/Builtins.td
+++ b/clang/include/clang/Basic/Builtins.td
@@ -4810,6 +4810,12 @@ def HLSLFirstBitHigh : LangBuiltin<"HLSL_LANG"> {
let Prototype = "void(...)";
}
+def HLSLFirstBitLow : LangBuiltin<"HLSL_LANG"> {
+ let Spellings = ["__builtin_hlsl_elementwise_firstbitlow"];
+ let Attributes = [NoThrow, Const];
+ let Prototype = "void(...)";
+}
+
def HLSLFrac : LangBuiltin<"HLSL_LANG"> {
let Spellings = ["__builtin_hlsl_elementwise_frac"];
let Attributes = [NoThrow, Const];
diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
index 9e0c0bff0125c0..a65b96684f18eb 100644
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -18956,7 +18956,6 @@ Value *CodeGenFunction::EmitHLSLBuiltinExpr(unsigned BuiltinID,
"hlsl.dot4add.u8packed");
}
case Builtin::BI__builtin_hlsl_elementwise_firstbithigh: {
-
Value *X = EmitScalarExpr(E->getArg(0));
return Builder.CreateIntrinsic(
@@ -18964,6 +18963,14 @@ Value *CodeGenFunction::EmitHLSLBuiltinExpr(unsigned BuiltinID,
getFirstBitHighIntrinsic(CGM.getHLSLRuntime(), E->getArg(0)->getType()),
ArrayRef<Value *>{X}, nullptr, "hlsl.firstbithigh");
}
+ case Builtin::BI__builtin_hlsl_elementwise_firstbitlow: {
+ Value *X = EmitScalarExpr(E->getArg(0));
+
+ return Builder.CreateIntrinsic(
+ /*ReturnType=*/ConvertType(E->getType()),
+ CGM.getHLSLRuntime().getFirstBitLowIntrinsic(), ArrayRef<Value *>{X},
+ nullptr, "hlsl.firstbitlow");
+ }
case Builtin::BI__builtin_hlsl_lerp: {
Value *X = EmitScalarExpr(E->getArg(0));
Value *Y = EmitScalarExpr(E->getArg(1));
diff --git a/clang/lib/CodeGen/CGHLSLRuntime.h b/clang/lib/CodeGen/CGHLSLRuntime.h
index 381a5959ec098e..c9eb1b08ff6ba6 100644
--- a/clang/lib/CodeGen/CGHLSLRuntime.h
+++ b/clang/lib/CodeGen/CGHLSLRuntime.h
@@ -96,6 +96,7 @@ class CGHLSLRuntime {
GENERATE_HLSL_INTRINSIC_FUNCTION(WaveReadLaneAt, wave_readlane)
GENERATE_HLSL_INTRINSIC_FUNCTION(FirstBitUHigh, firstbituhigh)
GENERATE_HLSL_INTRINSIC_FUNCTION(FirstBitSHigh, firstbitshigh)
+ GENERATE_HLSL_INTRINSIC_FUNCTION(FirstBitLow, firstbitlow)
GENERATE_HLSL_INTRINSIC_FUNCTION(NClamp, nclamp)
GENERATE_HLSL_INTRINSIC_FUNCTION(SClamp, sclamp)
GENERATE_HLSL_INTRINSIC_FUNCTION(UClamp, uclamp)
diff --git a/clang/lib/Headers/hlsl/hlsl_intrinsics.h b/clang/lib/Headers/hlsl/hlsl_intrinsics.h
index 2ee3827d720495..610ddcea203d90 100644
--- a/clang/lib/Headers/hlsl/hlsl_intrinsics.h
+++ b/clang/lib/Headers/hlsl/hlsl_intrinsics.h
@@ -1086,6 +1086,78 @@ uint3 firstbithigh(uint64_t3);
_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_firstbithigh)
uint4 firstbithigh(uint64_t4);
+//===----------------------------------------------------------------------===//
+// firstbitlow builtins
+//===----------------------------------------------------------------------===//
+
+/// \fn T firstbitlow(T Val)
+/// \brief Returns the location of the first set bit starting from the lowest
+/// order bit and working upward, per component.
+/// \param Val the input value.
+
+#ifdef __HLSL_ENABLE_16_BIT
+_HLSL_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_firstbitlow)
+uint firstbitlow(int16_t);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_firstbitlow)
+uint2 firstbitlow(int16_t2);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_firstbitlow)
+uint3 firstbitlow(int16_t3);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_firstbitlow)
+uint4 firstbitlow(int16_t4);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_firstbitlow)
+uint firstbitlow(uint16_t);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_firstbitlow)
+uint2 firstbitlow(uint16_t2);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_firstbitlow)
+uint3 firstbitlow(uint16_t3);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_firstbitlow)
+uint4 firstbitlow(uint16_t4);
+#endif
+
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_firstbitlow)
+uint firstbitlow(int);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_firstbitlow)
+uint2 firstbitlow(int2);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_firstbitlow)
+uint3 firstbitlow(int3);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_firstbitlow)
+uint4 firstbitlow(int4);
+
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_firstbitlow)
+uint firstbitlow(uint);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_firstbitlow)
+uint2 firstbitlow(uint2);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_firstbitlow)
+uint3 firstbitlow(uint3);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_firstbitlow)
+uint4 firstbitlow(uint4);
+
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_firstbitlow)
+uint firstbitlow(int64_t);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_firstbitlow)
+uint2 firstbitlow(int64_t2);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_firstbitlow)
+uint3 firstbitlow(int64_t3);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_firstbitlow)
+uint4 firstbitlow(int64_t4);
+
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_firstbitlow)
+uint firstbitlow(uint64_t);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_firstbitlow)
+uint2 firstbitlow(uint64_t2);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_firstbitlow)
+uint3 firstbitlow(uint64_t3);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_firstbitlow)
+uint4 firstbitlow(uint64_t4);
+
//===----------------------------------------------------------------------===//
// floor builtins
//===----------------------------------------------------------------------===//
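
Note that the 16-bit overloads declared above are guarded by __HLSL_ENABLE_16_BIT and the shader model 6.2 availability attribute, and they still return 32-bit results. A small sketch, assuming a typical DXC-style invocation with native 16-bit types enabled (profile and flag are assumptions, not part of this patch):

    // Assumed to be compiled with e.g. -T cs_6_2 -enable-16bit-types.
    uint4 lowest_set_bits(uint16_t4 v) {
      return firstbitlow(v); // uint16_t4 in, uint4 out, per the overloads above
    }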
diff --git a/clang/lib/Sema/SemaHLSL.cpp b/clang/lib/Sema/SemaHLSL.cpp
index 65b0d9cd65637f..c90ba9ae9e0f84 100644
--- a/clang/lib/Sema/SemaHLSL.cpp
+++ b/clang/lib/Sema/SemaHLSL.cpp
@@ -1947,7 +1947,8 @@ bool SemaHLSL::CheckBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
return true;
break;
}
- case Builtin::BI__builtin_hlsl_elementwise_firstbithigh: {
+ case Builtin::BI__builtin_hlsl_elementwise_firstbithigh:
+ case Builtin::BI__builtin_hlsl_elementwise_firstbitlow: {
if (SemaRef.PrepareBuiltinElementwiseMathOneArgCall(TheCall))
return true;
diff --git a/clang/test/CodeGenHLSL/builtins/firstbitlow.hlsl b/clang/test/CodeGenHLSL/builtins/firstbitlow.hlsl
new file mode 100644
index 00000000000000..5d490fabc5bc8d
--- /dev/null
+++ b/clang/test/CodeGenHLSL/builtins/firstbitlow.hlsl
@@ -0,0 +1,153 @@
+// RUN: %clang_cc1 -finclude-default-header -x hlsl -triple \
+// RUN: dxil-pc-shadermodel6.3-library %s -fnative-half-type \
+// RUN: -emit-llvm -disable-llvm-passes -o - | FileCheck %s -DTARGET=dx
+// RUN: %clang_cc1 -finclude-default-header -x hlsl -triple \
+// RUN: spirv-unknown-vulkan-compute %s -fnative-half-type \
+// RUN: -emit-llvm -disable-llvm-passes \
+// RUN: -o - | FileCheck %s -DTARGET=spv
+
+#ifdef __HLSL_ENABLE_16_BIT
+// CHECK-LABEL: test_firstbitlow_ushort
+// CHECK: call i32 @llvm.[[TARGET]].firstbitlow.i16
+uint test_firstbitlow_ushort(uint16_t p0) {
+ return firstbitlow(p0);
+}
+
+// CHECK-LABEL: test_firstbitlow_ushort2
+// CHECK: call <2 x i32> @llvm.[[TARGET]].firstbitlow.v2i16
+uint2 test_firstbitlow_ushort2(uint16_t2 p0) {
+ return firstbitlow(p0);
+}
+
+// CHECK-LABEL: test_firstbitlow_ushort3
+// CHECK: call <3 x i32> @llvm.[[TARGET]].firstbitlow.v3i16
+uint3 test_firstbitlow_ushort3(uint16_t3 p0) {
+ return firstbitlow(p0);
+}
+
+// CHECK-LABEL: test_firstbitlow_ushort4
+// CHECK: call <4 x i32> @llvm.[[TARGET]].firstbitlow.v4i16
+uint4 test_firstbitlow_ushort4(uint16_t4 p0) {
+ return firstbitlow(p0);
+}
+
+// CHECK-LABEL: test_firstbitlow_short
+// CHECK: call i32 @llvm.[[TARGET]].firstbitlow.i16
+uint test_firstbitlow_short(int16_t p0) {
+ return firstbitlow(p0);
+}
+
+// CHECK-LABEL: test_firstbitlow_short2
+// CHECK: call <2 x i32> @llvm.[[TARGET]].firstbitlow.v2i16
+uint2 test_firstbitlow_short2(int16_t2 p0) {
+ return firstbitlow(p0);
+}
+
+// CHECK-LABEL: test_firstbitlow_short3
+// CHECK: call <3 x i32> @llvm.[[TARGET]].firstbitlow.v3i16
+uint3 test_firstbitlow_short3(int16_t3 p0) {
+ return firstbitlow(p0);
+}
+
+// CHECK-LABEL: test_firstbitlow_short4
+// CHECK: call <4 x i32> @llvm.[[TARGET]].firstbitlow.v4i16
+uint4 test_firstbitlow_short4(int16_t4 p0) {
+ return firstbitlow(p0);
+}
+#endif // __HLSL_ENABLE_16_BIT
+
+// CHECK-LABEL: test_firstbitlow_uint
+// CHECK: call i32 @llvm.[[TARGET]].firstbitlow.i32
+uint test_firstbitlow_uint(uint p0) {
+ return firstbitlow(p0);
+}
+
+// CHECK-LABEL: test_firstbitlow_uint2
+// CHECK: call <2 x i32> @llvm.[[TARGET]].firstbitlow.v2i32
+uint2 test_firstbitlow_uint2(uint2 p0) {
+ return firstbitlow(p0);
+}
+
+// CHECK-LABEL: test_firstbitlow_uint3
+// CHECK: call <3 x i32> @llvm.[[TARGET]].firstbitlow.v3i32
+uint3 test_firstbitlow_uint3(uint3 p0) {
+ return firstbitlow(p0);
+}
+
+// CHECK-LABEL: test_firstbitlow_uint4
+// CHECK: call <4 x i32> @llvm.[[TARGET]].firstbitlow.v4i32
+uint4 test_firstbitlow_uint4(uint4 p0) {
+ return firstbitlow(p0);
+}
+
+// CHECK-LABEL: test_firstbitlow_ulong
+// CHECK: call i32 @llvm.[[TARGET]].firstbitlow.i64
+uint test_firstbitlow_ulong(uint64_t p0) {
+ return firstbitlow(p0);
+}
+
+// CHECK-LABEL: test_firstbitlow_ulong2
+// CHECK: call <2 x i32> @llvm.[[TARGET]].firstbitlow.v2i64
+uint2 test_firstbitlow_ulong2(uint64_t2 p0) {
+ return firstbitlow(p0);
+}
+
+// CHECK-LABEL: test_firstbitlow_ulong3
+// CHECK: call <3 x i32> @llvm.[[TARGET]].firstbitlow.v3i64
+uint3 test_firstbitlow_ulong3(uint64_t3 p0) {
+ return firstbitlow(p0);
+}
+
+// CHECK-LABEL: test_firstbitlow_ulong4
+// CHECK: call <4 x i32> @llvm.[[TARGET]].firstbitlow.v4i64
+uint4 test_firstbitlow_ulong4(uint64_t4 p0) {
+ return firstbitlow(p0);
+}
+
+// CHECK-LABEL: test_firstbitlow_int
+// CHECK: call i32 @llvm.[[TARGET]].firstbitlow.i32
+uint test_firstbitlow_int(int p0) {
+ return firstbitlow(p0);
+}
+
+// CHECK-LABEL: test_firstbitlow_int2
+// CHECK: call <2 x i32> @llvm.[[TARGET]].firstbitlow.v2i32
+uint2 test_firstbitlow_int2(int2 p0) {
+ return firstbitlow(p0);
+}
+
+// CHECK-LABEL: test_firstbitlow_int3
+// CHECK: call <3 x i32> @llvm.[[TARGET]].firstbitlow.v3i32
+uint3 test_firstbitlow_int3(int3 p0) {
+ return firstbitlow(p0);
+}
+
+// CHECK-LABEL: test_firstbitlow_int4
+// CHECK: call <4 x i32> @llvm.[[TARGET]].firstbitlow.v4i32
+uint4 test_firstbitlow_int4(int4 p0) {
+ return firstbitlow(p0);
+}
+
+// CHECK-LABEL: test_firstbitlow_long
+// CHECK: call i32 @llvm.[[TARGET]].firstbitlow.i64
+uint test_firstbitlow_long(int64_t p0) {
+ return firstbitlow(p0);
+}
+
+// CHECK-LABEL: test_firstbitlow_long2
+// CHECK: call <2 x i32> @llvm.[[TARGET]].firstbitlow.v2i64
+uint2 test_firstbitlow_long2(int64_t2 p0) {
+ return firstbitlow(p0);
+}
+
+// CHECK-LABEL: test_firstbitlow_long3
+// CHECK: call <3 x i32> @llvm.[[TARGET]].firstbitlow.v3i64
+uint3 test_firstbitlow_long3(int64_t3 p0) {
+ return firstbitlow(p0);
+}
+
+// CHECK-LABEL: test_firstbitlow_long4
+// CHECK: call <4 x i32> @llvm.[[TARGET]].firstbitlow.v4i64
+uint4 test_firstbitlow_long4(int64_t4 p0) {
+ return firstbitlow(p0);
+}
diff --git a/clang/test/SemaHLSL/BuiltIns/firstbithigh-errors.hlsl b/clang/test/SemaHLSL/BuiltIns/firstbithigh-errors.hlsl
index 1912ab3ae806b3..b4024418dbba4f 100644
--- a/clang/test/SemaHLSL/BuiltIns/firstbithigh-errors.hlsl
+++ b/clang/test/SemaHLSL/BuiltIns/firstbithigh-errors.hlsl
@@ -17,12 +17,10 @@ double test_int_builtin(double p0) {
double2 test_int_builtin_2(double2 p0) {
return __builtin_hlsl_elementwise_firstbithigh(p0);
- // expected-error at -1 {{1st argument must be a vector of integers
- // (was 'double2' (aka 'vector<double, 2>'))}}
+ // expected-error at -1 {{1st argument must be a vector of integers (was 'double2' (aka 'vector<double, 2>'))}}
}
float test_int_builtin_3(float p0) {
return __builtin_hlsl_elementwise_firstbithigh(p0);
- // expected-error at -1 {{1st argument must be a vector of integers
- // (was 'float')}}
+ // expected-error at -1 {{1st argument must be a vector of integers (was 'double')}}
}
diff --git a/clang/test/SemaHLSL/BuiltIns/firstbitlow-errors.hlsl b/clang/test/SemaHLSL/BuiltIns/firstbitlow-errors.hlsl
new file mode 100644
index 00000000000000..95c25e9e2fb60d
--- /dev/null
+++ b/clang/test/SemaHLSL/BuiltIns/firstbitlow-errors.hlsl
@@ -0,0 +1,26 @@
+// RUN: %clang_cc1 -finclude-default-header -triple dxil-pc-shadermodel6.6-library %s -fnative-half-type -emit-llvm-only -disable-llvm-passes -verify -verify-ignore-unexpected
+
+int test_too_few_arg() {
+ return firstbitlow();
+ // expected-error at -1 {{no matching function for call to 'firstbitlow'}}
+}
+
+int test_too_many_arg(int p0) {
+ return firstbitlow(p0, p0);
+ // expected-error at -1 {{no matching function for call to 'firstbitlow'}}
+}
+
+double test_int_builtin(double p0) {
+ return firstbitlow(p0);
+ // expected-error at -1 {{call to 'firstbitlow' is ambiguous}}
+}
+
+double2 test_int_builtin_2(double2 p0) {
+ return __builtin_hlsl_elementwise_firstbitlow(p0);
+ // expected-error at -1 {{1st argument must be a vector of integers (was 'double2' (aka 'vector<double, 2>'))}}
+}
+
+float test_int_builtin_3(float p0) {
+ return __builtin_hlsl_elementwise_firstbitlow(p0);
+ // expected-error at -1 {{1st argument must be a vector of integers (was 'double')}}
+}
diff --git a/llvm/include/llvm/IR/IntrinsicsDirectX.td b/llvm/include/llvm/IR/IntrinsicsDirectX.td
index 6093664c908dc5..f2f393a02d19c7 100644
--- a/llvm/include/llvm/IR/IntrinsicsDirectX.td
+++ b/llvm/include/llvm/IR/IntrinsicsDirectX.td
@@ -103,4 +103,6 @@ def int_dx_splitdouble : DefaultAttrsIntrinsic<[llvm_anyint_ty, LLVMMatchType<0>
def int_dx_radians : DefaultAttrsIntrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem]>;
def int_dx_firstbituhigh : DefaultAttrsIntrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i32_ty>], [llvm_anyint_ty], [IntrNoMem]>;
def int_dx_firstbitshigh : DefaultAttrsIntrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i32_ty>], [llvm_anyint_ty], [IntrNoMem]>;
+// TODO: check this against the spec
+def int_dx_firstbitlow : DefaultAttrsIntrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i32_ty>], [llvm_anyint_ty], [IntrNoMem]>;
}
diff --git a/llvm/include/llvm/IR/IntrinsicsSPIRV.td b/llvm/include/llvm/IR/IntrinsicsSPIRV.td
index f29eb7ee22b2d2..63e99524511143 100644
--- a/llvm/include/llvm/IR/IntrinsicsSPIRV.td
+++ b/llvm/include/llvm/IR/IntrinsicsSPIRV.td
@@ -106,6 +106,7 @@ let TargetPrefix = "spv" in {
[IntrNoMem]>;
def int_spv_firstbituhigh : DefaultAttrsIntrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i32_ty>], [llvm_anyint_ty], [IntrNoMem]>;
def int_spv_firstbitshigh : DefaultAttrsIntrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i32_ty>], [llvm_anyint_ty], [IntrNoMem]>;
+ def int_spv_firstbitlow : DefaultAttrsIntrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i32_ty>], [llvm_anyint_ty], [IntrNoMem]>;
// Read a value from the image buffer. It does not translate directly to a
// single OpImageRead because the result type is not necessarily a 4 element
diff --git a/llvm/lib/Target/DirectX/DXIL.td b/llvm/lib/Target/DirectX/DXIL.td
index 078f0591a67515..5b65d45a742d9d 100644
--- a/llvm/lib/Target/DirectX/DXIL.td
+++ b/llvm/lib/Target/DirectX/DXIL.td
@@ -564,6 +564,19 @@ def CountBits : DXILOp<31, unaryBits> {
let attributes = [Attributes<DXIL1_0, [ReadNone]>];
}
+def FirstbitLo : DXILOp<32, unaryBits> {
+ let Doc = "Returns the location of the first set bit starting from "
+ "the lowest order bit and working upward.";
+ let LLVMIntrinsic = int_dx_firstbitlow;
+ let arguments = [OverloadTy];
+ let result = Int32Ty;
+ let overloads =
+ [Overloads<DXIL1_0, [Int16Ty, Int32Ty, Int64Ty]>];
+ let stages = [Stages<DXIL1_0, [all_stages]>];
+ // TODO: check these
+ let attributes = [Attributes<DXIL1_0, [ReadNone]>];
+}
+
def FirstbitHi : DXILOp<33, unaryBits> {
let Doc = "Returns the location of the first set bit starting from "
"the highest order bit and working downward.";
diff --git a/llvm/lib/Target/DirectX/DirectXTargetTransformInfo.cpp b/llvm/lib/Target/DirectX/DirectXTargetTransformInfo.cpp
index b0436a39423405..c37e0dbb82e5a0 100644
--- a/llvm/lib/Target/DirectX/DirectXTargetTransformInfo.cpp
+++ b/llvm/lib/Target/DirectX/DirectXTargetTransformInfo.cpp
@@ -34,6 +34,7 @@ bool DirectXTTIImpl::isTargetIntrinsicTriviallyScalarizable(
case Intrinsic::dx_splitdouble:
case Intrinsic::dx_firstbituhigh:
case Intrinsic::dx_firstbitshigh:
+ case Intrinsic::dx_firstbitlow:
return true;
default:
return false;
diff --git a/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp b/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp
index 8a8835e0269200..8c2336df3e5b3b 100644
--- a/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp
@@ -106,6 +106,18 @@ class SPIRVInstructionSelector : public InstructionSelector {
bool selectFirstBitHigh64(Register ResVReg, const SPIRVType *ResType,
MachineInstr &I, bool IsSigned) const;
+ bool selectFirstBitLow(Register ResVReg, const SPIRVType *ResType,
+ MachineInstr &I) const;
+
+ bool selectFirstBitLow16(Register ResVReg, const SPIRVType *ResType,
+ MachineInstr &I) const;
+
+ bool selectFirstBitLow32(Register ResVReg, const SPIRVType *ResType,
+ MachineInstr &I, Register SrcReg) const;
+
+ bool selectFirstBitLow64(Register ResVReg, const SPIRVType *ResType,
+ MachineInstr &I) const;
+
bool selectGlobalValue(Register ResVReg, MachineInstr &I,
const MachineInstr *Init = nullptr) const;
@@ -2786,6 +2798,9 @@ bool SPIRVInstructionSelector::selectIntrinsic(Register ResVReg,
return selectFirstBitHigh(ResVReg, ResType, I, /*IsSigned=*/false);
case Intrinsic::spv_firstbitshigh: // There is no CL equivalent of FindSMsb
return selectFirstBitHigh(ResVReg, ResType, I, /*IsSigned=*/true);
+ case Intrinsic::spv_firstbitlow: // There is no CL equivalent of FindILsb
+ // (true?)
+ return selectFirstBitLow(ResVReg, ResType, I);
case Intrinsic::spv_group_memory_barrier_with_group_sync: {
bool Result = true;
auto MemSemConstant =
@@ -3158,6 +3173,160 @@ bool SPIRVInstructionSelector::selectFirstBitHigh(Register ResVReg,
}
}
+bool SPIRVInstructionSelector::selectFirstBitLow16(Register ResVReg,
+ const SPIRVType *ResType,
+ MachineInstr &I) const {
+ // OpUConvert treats the operand bits as an unsigned i16 and zero extends it
+ // to an unsigned i32. As this leaves all the least significant bits unchanged
+ // the first set bit from the LSB side doesn't change.
+ Register ExtReg = MRI->createVirtualRegister(GR.getRegClass(ResType));
+ bool Result = selectNAryOpWithSrcs(ExtReg, ResType, I, {I.getOperand(2).getReg()},
+ SPIRV::OpUConvert);
+ return Result && selectFirstBitLow32(ResVReg, ResType, I, ExtReg);
+}
+
+bool SPIRVInstructionSelector::selectFirstBitLow32(Register ResVReg,
+ const SPIRVType *ResType,
+ MachineInstr &I,
+ Register SrcReg) const {
+ return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
+ .addDef(ResVReg)
+ .addUse(GR.getSPIRVTypeID(ResType))
+ .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::GLSL_std_450))
+ .addImm(GL::FindILsb)
+ .addUse(SrcReg)
+ .constrainAllUses(TII, TRI, RBI);
+}
+
+bool SPIRVInstructionSelector::selectFirstBitLow64(Register ResVReg,
+ const SPIRVType *ResType,
+ MachineInstr &I) const {
+ Register OpReg = I.getOperand(2).getReg();
+
+ // 1. Split int64 into 2 pieces using a bitcast
+ unsigned ComponentCount = GR.getScalarOrVectorComponentCount(ResType);
+ SPIRVType *BaseType = GR.retrieveScalarOrVectorIntType(ResType);
+ MachineIRBuilder MIRBuilder(I);
+ SPIRVType *PostCastType =
+ GR.getOrCreateSPIRVVectorType(BaseType, 2 * ComponentCount, MIRBuilder);
+ Register BitcastReg = MRI->createVirtualRegister(GR.getRegClass(PostCastType));
+ bool Result =
+ selectUnOpWithSrc(BitcastReg, PostCastType, I, OpReg, SPIRV::OpBitcast);
+
+ // 2. Find the first set bit from the LSB side for all the pieces in #1
+ Register FBLReg = MRI->createVirtualRegister(GR.getRegClass(PostCastType));
+ Result = Result && selectFirstBitLow32(FBLReg, PostCastType, I, BitcastReg);
+
+ // 3. Split result vector into high bits and low bits
+ Register HighReg = MRI->createVirtualRegister(GR.getRegClass(ResType));
+ Register LowReg = MRI->createVirtualRegister(GR.getRegClass(ResType));
+
+ bool ZeroAsNull = STI.isOpenCLEnv();
+ bool IsScalarRes = ResType->getOpcode() != SPIRV::OpTypeVector;
+ if (IsScalarRes) {
+ // if scalar do a vector extract
+ Result = Result && selectNAryOpWithSrcs(
+ HighReg, ResType, I,
+ {FBLReg, GR.getOrCreateConstInt(0, I, ResType, TII, ZeroAsNull)},
+ SPIRV::OpVectorExtractDynamic);
+ Result = Result && selectNAryOpWithSrcs(
+ LowReg, ResType, I,
+ {FBLReg, GR.getOrCreateConstInt(1, I, ResType, TII, ZeroAsNull)},
+ SPIRV::OpVectorExtractDynamic);
+ } else {
+ // if vector do a shufflevector
+ auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(),
+ TII.get(SPIRV::OpVectorShuffle))
+ .addDef(HighReg)
+ .addUse(GR.getSPIRVTypeID(ResType))
+ .addUse(FBLReg)
+ // Per the spec, repeat the vector if only one vec is needed
+ .addUse(FBLReg);
+
+ // high bits are store in even indexes. Extract them from FBLReg
+ for (unsigned j = 0; j < ComponentCount * 2; j += 2) {
+ MIB.addImm(j);
+ }
+ Result = Result && MIB.constrainAllUses(TII, TRI, RBI);
+
+ MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(),
+ TII.get(SPIRV::OpVectorShuffle))
+ .addDef(LowReg)
+ .addUse(GR.getSPIRVTypeID(ResType))
+ .addUse(FBLReg)
+ // Per the spec, repeat the vector if only one vec is needed
+ .addUse(FBLReg);
+
+ // low bits are store in odd indexes. Extract them from FBLReg
+ for (unsigned j = 1; j < ComponentCount * 2; j += 2) {
+ MIB.addImm(j);
+ }
+ Result = Result && MIB.constrainAllUses(TII, TRI, RBI);
+ }
+
+ // 4. Check if result of each bottom 32 bits is == -1
+ SPIRVType *BoolType = GR.getOrCreateSPIRVBoolType(I, TII);
+ Register NegOneReg;
+ Register Reg0;
+ Register Reg32;
+ unsigned SelectOp;
+ unsigned AddOp;
+
+ if (IsScalarRes) {
+ NegOneReg =
+ GR.getOrCreateConstInt((unsigned)-1, I, ResType, TII, ZeroAsNull);
+ Reg0 = GR.getOrCreateConstInt(0, I, ResType, TII, ZeroAsNull);
+ Reg32 = GR.getOrCreateConstInt(32, I, ResType, TII, ZeroAsNull);
+ SelectOp = SPIRV::OpSelectSISCond;
+ AddOp = SPIRV::OpIAddS;
+ } else {
+ BoolType = GR.getOrCreateSPIRVVectorType(BoolType, ComponentCount, MIRBuilder);
+ NegOneReg =
+ GR.getOrCreateConstVector((unsigned)-1, I, ResType, TII, ZeroAsNull);
+ Reg0 = GR.getOrCreateConstVector(0, I, ResType, TII, ZeroAsNull);
+ Reg32 = GR.getOrCreateConstVector(32, I, ResType, TII, ZeroAsNull);
+ SelectOp = SPIRV::OpSelectVIVCond;
+ AddOp = SPIRV::OpIAddV;
+ }
+
+ // Check if the low bits are == -1; true if -1
+ Register BReg = MRI->createVirtualRegister(GR.getRegClass(BoolType));
+ Result = Result && selectNAryOpWithSrcs(BReg, BoolType, I, {LowReg, NegOneReg},
+ SPIRV::OpIEqual);
+
+ // Select high bits if true in BReg, otherwise low bits
+ Register TmpReg = MRI->createVirtualRegister(GR.getRegClass(ResType));
+ Result = Result && selectNAryOpWithSrcs(TmpReg, ResType, I, {BReg, HighReg, LowReg},
+ SelectOp);
+
+ // Add 32 for high bits, 0 for low bits
+ Register ValReg = MRI->createVirtualRegister(GR.getRegClass(ResType));
+ Result = Result &&
+ selectNAryOpWithSrcs(ValReg, ResType, I, {BReg, Reg32, Reg0}, SelectOp);
+
+ return Result &&
+ selectNAryOpWithSrcs(ResVReg, ResType, I, {ValReg, TmpReg}, AddOp);
+}
+
+bool SPIRVInstructionSelector::selectFirstBitLow(Register ResVReg,
+ const SPIRVType *ResType,
+ MachineInstr &I) const {
+ // FindILsb intrinsic only supports 32 bit integers
+ Register OpReg = I.getOperand(2).getReg();
+ SPIRVType *OpType = GR.getSPIRVTypeForVReg(OpReg);
+
+ switch (GR.getScalarOrVectorBitWidth(OpType)) {
+ case 16:
+ return selectFirstBitLow16(ResVReg, ResType, I);
+ case 32:
+ return selectFirstBitLow32(ResVReg, ResType, I, OpReg);
+ case 64:
+ return selectFirstBitLow64(ResVReg, ResType, I);
+ default:
+ report_fatal_error("spv_firstbitlow only supports 16,32,64 bits.");
+ }
+}
+
bool SPIRVInstructionSelector::selectAllocaArray(Register ResVReg,
const SPIRVType *ResType,
MachineInstr &I) const {
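
Stepping back from the diff: FindILsb is limited to 32-bit components, so the 16-bit path above zero-extends its operand to i32 (which cannot move the lowest set bit), and the 64-bit path bitcasts each i64 into two 32-bit words, applies FindILsb to both, then selects and offsets the results. The intended computation, written as an HLSL-style sketch (the helper name and the explicit shift/mask splitting are illustrative; the generated SPIR-V splits via OpBitcast and OpVectorShuffle, and this assumes firstbitlow(0) yields 0xFFFFFFFF, matching FindILsb on a zero operand):

    uint firstbitlow_u64_sketch(uint64_t v) {
      uint lsb_lo = firstbitlow(uint(v & 0xFFFFFFFFu)); // FindILsb on the low 32 bits
      uint lsb_hi = firstbitlow(uint(v >> 32));         // FindILsb on the high 32 bits
      // FindILsb reports -1 (0xFFFFFFFF) for a zero operand, so when the low word
      // has no set bit, fall back to the high word and add a 32-bit offset.
      bool low_word_empty = (lsb_lo == 0xFFFFFFFFu);
      uint bits   = low_word_empty ? lsb_hi : lsb_lo;   // mirrors the OpSelect of the two results
      uint offset = low_word_empty ? 32u : 0u;          // mirrors the OpSelect of 32 vs 0
      return offset + bits;                             // mirrors the final OpIAdd
    }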
diff --git a/llvm/test/CodeGen/DirectX/firstbitlow.ll b/llvm/test/CodeGen/DirectX/firstbitlow.ll
new file mode 100644
index 00000000000000..884ec1164fc992
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/firstbitlow.ll
@@ -0,0 +1,47 @@
+; RUN: opt -S -scalarizer -dxil-op-lower -mtriple=dxil-pc-shadermodel6.3-library %s | FileCheck %s
+
+; Make sure dxil operation function calls for firstbitlow are generated for all integer types.
+
+define noundef i32 @test_firstbitlow_short(i16 noundef %a) {
+entry:
+; CHECK: call i32 @dx.op.unaryBits.i16(i32 32, i16 %{{.*}})
+ %elt.firstbitlow = call i32 @llvm.dx.firstbitlow.i16(i16 %a)
+ ret i32 %elt.firstbitlow
+}
+
+define noundef i32 @test_firstbitlow_int(i32 noundef %a) {
+entry:
+; CHECK: call i32 @dx.op.unaryBits.i32(i32 32, i32 %{{.*}})
+ %elt.firstbitlow = call i32 @llvm.dx.firstbitlow.i32(i32 %a)
+ ret i32 %elt.firstbitlow
+}
+
+define noundef i32 @test_firstbitlow_long(i64 noundef %a) {
+entry:
+; CHECK: call i32 @dx.op.unaryBits.i64(i32 32, i64 %{{.*}})
+ %elt.firstbitlow = call i32 @llvm.dx.firstbitlow.i64(i64 %a)
+ ret i32 %elt.firstbitlow
+}
+
+define noundef <4 x i32> @test_firstbitlow_vec4_i32(<4 x i32> noundef %a) {
+entry:
+ ; CHECK: [[ee0:%.*]] = extractelement <4 x i32> %a, i64 0
+ ; CHECK: [[ie0:%.*]] = call i32 @dx.op.unaryBits.i32(i32 32, i32 [[ee0]])
+ ; CHECK: [[ee1:%.*]] = extractelement <4 x i32> %a, i64 1
+ ; CHECK: [[ie1:%.*]] = call i32 @dx.op.unaryBits.i32(i32 32, i32 [[ee1]])
+ ; CHECK: [[ee2:%.*]] = extractelement <4 x i32> %a, i64 2
+ ; CHECK: [[ie2:%.*]] = call i32 @dx.op.unaryBits.i32(i32 32, i32 [[ee2]])
+ ; CHECK: [[ee3:%.*]] = extractelement <4 x i32> %a, i64 3
+ ; CHECK: [[ie3:%.*]] = call i32 @dx.op.unaryBits.i32(i32 32, i32 [[ee3]])
+ ; CHECK: insertelement <4 x i32> poison, i32 [[ie0]], i64 0
+ ; CHECK: insertelement <4 x i32> %{{.*}}, i32 [[ie1]], i64 1
+ ; CHECK: insertelement <4 x i32> %{{.*}}, i32 [[ie2]], i64 2
+ ; CHECK: insertelement <4 x i32> %{{.*}}, i32 [[ie3]], i64 3
+ %2 = call <4 x i32> @llvm.dx.firstbitlow.v4i32(<4 x i32> %a)
+ ret <4 x i32> %2
+}
+
+declare i32 @llvm.dx.firstbitlow.i16(i16)
+declare i32 @llvm.dx.firstbitlow.i32(i32)
+declare i32 @llvm.dx.firstbitlow.i64(i64)
+declare <4 x i32> @llvm.dx.firstbitlow.v4i32(<4 x i32>)
diff --git a/llvm/test/CodeGen/DirectX/firstbitlow_error.ll b/llvm/test/CodeGen/DirectX/firstbitlow_error.ll
new file mode 100644
index 00000000000000..d8b9333067f4ac
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/firstbitlow_error.ll
@@ -0,0 +1,10 @@
+; RUN: not opt -S -dxil-op-lower -mtriple=dxil-pc-shadermodel6.3-library %s 2>&1 | FileCheck %s
+
+; DXIL operation firstbitlow does not support double overload type
+; CHECK: invalid intrinsic signature
+
+define noundef double @firstbitlow_double(double noundef %a) {
+entry:
+ %1 = call double @llvm.dx.firstbitlow.f64(double %a)
+ ret double %1
+}
diff --git a/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/firstbitlow.ll b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/firstbitlow.ll
new file mode 100644
index 00000000000000..9ebd8cc511eb6c
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/firstbitlow.ll
@@ -0,0 +1,104 @@
+; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK-DAG: [[glsl_450_ext:%.+]] = OpExtInstImport "GLSL.std.450"
+; CHECK-DAG: OpMemoryModel Logical GLSL450
+; CHECK-DAG: [[u32_t:%.+]] = OpTypeInt 32 0
+; CHECK-DAG: [[u32x2_t:%.+]] = OpTypeVector [[u32_t]] 2
+; CHECK-DAG: [[u32x4_t:%.+]] = OpTypeVector [[u32_t]] 4
+; CHECK-DAG: [[const_zero:%.*]] = OpConstant [[u32_t]] 0
+; CHECK-DAG: [[const_zerox2:%.*]] = OpConstantComposite [[u32x2_t]] [[const_zero]] [[const_zero]]
+; CHECK-DAG: [[const_one:%.*]] = OpConstant [[u32_t]] 1
+; CHECK-DAG: [[const_thirty_two:%.*]] = OpConstant [[u32_t]] 32
+; CHECK-DAG: [[const_thirty_twox2:%.*]] = OpConstantComposite [[u32x2_t]] [[const_thirty_two]] [[const_thirty_two]]
+; CHECK-DAG: [[const_neg_one:%.*]] = OpConstant [[u32_t]] 4294967295
+; CHECK-DAG: [[const_neg_onex2:%.*]] = OpConstantComposite [[u32x2_t]] [[const_neg_one]] [[const_neg_one]]
+; CHECK-DAG: [[u16_t:%.+]] = OpTypeInt 16 0
+; CHECK-DAG: [[u16x2_t:%.+]] = OpTypeVector [[u16_t]] 2
+; CHECK-DAG: [[u64_t:%.+]] = OpTypeInt 64 0
+; CHECK-DAG: [[u64x2_t:%.+]] = OpTypeVector [[u64_t]] 2
+; CHECK-DAG: [[bool_t:%.+]] = OpTypeBool
+; CHECK-DAG: [[boolx2_t:%.+]] = OpTypeVector [[bool_t]] 2
+
+; CHECK-LABEL: Begin function firstbitlow_i32
+define noundef i32 @firstbitlow_i32(i32 noundef %a) {
+entry:
+; CHECK: [[a:%.+]] = OpFunctionParameter [[u32_t]]
+; CHECK: [[ret:%.+]] = OpExtInst [[u32_t]] [[glsl_450_ext]] FindILsb [[a]]
+; CHECK: OpReturnValue [[ret]]
+ %elt.firstbitlow = call i32 @llvm.spv.firstbitlow.i32(i32 %a)
+ ret i32 %elt.firstbitlow
+}
+
+; CHECK-LABEL: Begin function firstbitlow_2xi32
+define noundef <2 x i32> @firstbitlow_2xi32(<2 x i32> noundef %a) {
+entry:
+; CHECK: [[a:%.+]] = OpFunctionParameter [[u32x2_t]]
+; CHECK: [[ret:%.+]] = OpExtInst [[u32x2_t]] [[glsl_450_ext]] FindILsb [[a]]
+; CHECK: OpReturnValue [[ret]]
+ %elt.firstbitlow = call <2 x i32> @llvm.spv.firstbitlow.v2i32(<2 x i32> %a)
+ ret <2 x i32> %elt.firstbitlow
+}
+
+; CHECK-LABEL: Begin function firstbitlow_i16
+define noundef i32 @firstbitlow_i16(i16 noundef %a) {
+entry:
+; CHECK: [[a16:%.+]] = OpFunctionParameter [[u16_t]]
+; CHECK: [[a32:%.+]] = OpUConvert [[u32_t]] [[a16]]
+; CHECK: [[ret:%.+]] = OpExtInst [[u32_t]] [[glsl_450_ext]] FindILsb [[a32]]
+; CHECK: OpReturnValue [[ret]]
+ %elt.firstbitlow = call i32 @llvm.spv.firstbitlow.i16(i16 %a)
+ ret i32 %elt.firstbitlow
+}
+
+; CHECK-LABEL: Begin function firstbitlow_v2i16
+define noundef <2 x i32> @firstbitlow_v2i16(<2 x i16> noundef %a) {
+entry:
+; CHECK: [[a16:%.+]] = OpFunctionParameter [[u16x2_t]]
+; CHECK: [[a32:%.+]] = OpUConvert [[u32x2_t]] [[a16]]
+; CHECK: [[ret:%.+]] = OpExtInst [[u32x2_t]] [[glsl_450_ext]] FindILsb [[a32]]
+; CHECK: OpReturnValue [[ret]]
+ %elt.firstbitlow = call <2 x i32> @llvm.spv.firstbitlow.v2i16(<2 x i16> %a)
+ ret <2 x i32> %elt.firstbitlow
+}
+
+; CHECK-LABEL: Begin function firstbitlow_i64
+define noundef i32 @firstbitlow_i64(i64 noundef %a) {
+entry:
+; CHECK: [[a64:%.+]] = OpFunctionParameter [[u64_t]]
+; CHECK: [[a32x2:%.+]] = OpBitcast [[u32x2_t]] [[a64]]
+; CHECK: [[lsb_bits:%.+]] = OpExtInst [[u32x2_t]] [[glsl_450_ext]] FindILsb [[a32x2]]
+; CHECK: [[high_bits:%.+]] = OpVectorExtractDynamic [[u32_t]] [[lsb_bits]] [[const_zero]]
+; CHECK: [[low_bits:%.+]] = OpVectorExtractDynamic [[u32_t]] [[lsb_bits]] [[const_one]]
+; CHECK: [[should_use_high:%.+]] = OpIEqual [[bool_t]] [[low_bits]] [[const_neg_one]]
+; CHECK: [[ans_bits:%.+]] = OpSelect [[u32_t]] [[should_use_high]] [[high_bits]] [[low_bits]]
+; CHECK: [[ans_offset:%.+]] = OpSelect [[u32_t]] [[should_use_high]] [[const_thirty_two]] [[const_zero]]
+; CHECK: [[ret:%.+]] = OpIAdd [[u32_t]] [[ans_offset]] [[ans_bits]]
+; CHECK: OpReturnValue [[ret]]
+ %elt.firstbitlow = call i32 @llvm.spv.firstbitlow.i64(i64 %a)
+ ret i32 %elt.firstbitlow
+}
+
+; CHECK-LABEL: Begin function firstbitlow_v2i64
+define noundef <2 x i32> @firstbitlow_v2i64(<2 x i64> noundef %a) {
+entry:
+; CHECK: [[a64x2:%.+]] = OpFunctionParameter [[u64x2_t]]
+; CHECK: [[a32x4:%.+]] = OpBitcast [[u32x4_t]] [[a64x2]]
+; CHECK: [[lsb_bits:%.+]] = OpExtInst [[u32x4_t]] [[glsl_450_ext]] FindILsb [[a32x4]]
+; CHECK: [[high_bits:%.+]] = OpVectorShuffle [[u32x2_t]] [[lsb_bits]] [[lsb_bits]] 0 2
+; CHECK: [[low_bits:%.+]] = OpVectorShuffle [[u32x2_t]] [[lsb_bits]] [[lsb_bits]] 1 3
+; CHECK: [[should_use_high:%.+]] = OpIEqual [[boolx2_t]] [[low_bits]] [[const_neg_onex2]]
+; CHECK: [[ans_bits:%.+]] = OpSelect [[u32x2_t]] [[should_use_high]] [[high_bits]] [[low_bits]]
+; CHECK: [[ans_offset:%.+]] = OpSelect [[u32x2_t]] [[should_use_high]] [[const_thirty_twox2]] [[const_zerox2]]
+; CHECK: [[ret:%.+]] = OpIAdd [[u32x2_t]] [[ans_offset]] [[ans_bits]]
+; CHECK: OpReturnValue [[ret]]
+ %elt.firstbitlow = call <2 x i32> @llvm.spv.firstbitlow.v2i64(<2 x i64> %a)
+ ret <2 x i32> %elt.firstbitlow
+}
+
+;declare i16 @llvm.spv.firstbitlow.i16(i16)
+;declare i32 @llvm.spv.firstbitlow.i32(i32)
+;declare i64 @llvm.spv.firstbitlow.i64(i64)
+;declare i16 @llvm.spv.firstbitlow.v2i16(<2 x i16>)
+;declare i32 @llvm.spv.firstbitlow.v2i32(<2 x i32>)
+;declare i64 @llvm.spv.firstbitlow.v2i64(<2 x i64>)
From 6480d1546351627b824f2241ea427ac9d5980d6e Mon Sep 17 00:00:00 2001
From: Ashley Coleman <ascoleman at microsoft.com>
Date: Tue, 19 Nov 2024 10:35:52 -0700
Subject: [PATCH 2/3] format
---
.../Target/SPIRV/SPIRVInstructionSelector.cpp | 42 +++++++++++--------
1 file changed, 24 insertions(+), 18 deletions(-)
diff --git a/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp b/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp
index 8c2336df3e5b3b..77aa69e6e463ee 100644
--- a/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp
@@ -3180,8 +3180,8 @@ bool SPIRVInstructionSelector::selectFirstBitLow16(Register ResVReg,
// to an unsigned i32. As this leaves all the least significant bits unchanged
// the first set bit from the LSB side doesn't change.
Register ExtReg = MRI->createVirtualRegister(GR.getRegClass(ResType));
- bool Result = selectNAryOpWithSrcs(ExtReg, ResType, I, {I.getOperand(2).getReg()},
- SPIRV::OpUConvert);
+ bool Result = selectNAryOpWithSrcs(
+ ExtReg, ResType, I, {I.getOperand(2).getReg()}, SPIRV::OpUConvert);
return Result && selectFirstBitLow32(ResVReg, ResType, I, ExtReg);
}
@@ -3209,7 +3209,8 @@ bool SPIRVInstructionSelector::selectFirstBitLow64(Register ResVReg,
MachineIRBuilder MIRBuilder(I);
SPIRVType *PostCastType =
GR.getOrCreateSPIRVVectorType(BaseType, 2 * ComponentCount, MIRBuilder);
- Register BitcastReg = MRI->createVirtualRegister(GR.getRegClass(PostCastType));
+ Register BitcastReg =
+ MRI->createVirtualRegister(GR.getRegClass(PostCastType));
bool Result =
selectUnOpWithSrc(BitcastReg, PostCastType, I, OpReg, SPIRV::OpBitcast);
@@ -3225,14 +3226,18 @@ bool SPIRVInstructionSelector::selectFirstBitLow64(Register ResVReg,
bool IsScalarRes = ResType->getOpcode() != SPIRV::OpTypeVector;
if (IsScalarRes) {
// if scalar do a vector extract
- Result = Result && selectNAryOpWithSrcs(
- HighReg, ResType, I,
- {FBLReg, GR.getOrCreateConstInt(0, I, ResType, TII, ZeroAsNull)},
- SPIRV::OpVectorExtractDynamic);
- Result = Result && selectNAryOpWithSrcs(
- LowReg, ResType, I,
- {FBLReg, GR.getOrCreateConstInt(1, I, ResType, TII, ZeroAsNull)},
- SPIRV::OpVectorExtractDynamic);
+ Result =
+ Result &&
+ selectNAryOpWithSrcs(
+ HighReg, ResType, I,
+ {FBLReg, GR.getOrCreateConstInt(0, I, ResType, TII, ZeroAsNull)},
+ SPIRV::OpVectorExtractDynamic);
+ Result =
+ Result &&
+ selectNAryOpWithSrcs(
+ LowReg, ResType, I,
+ {FBLReg, GR.getOrCreateConstInt(1, I, ResType, TII, ZeroAsNull)},
+ SPIRV::OpVectorExtractDynamic);
} else {
// if vector do a shufflevector
auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(),
@@ -3280,7 +3285,8 @@ bool SPIRVInstructionSelector::selectFirstBitLow64(Register ResVReg,
SelectOp = SPIRV::OpSelectSISCond;
AddOp = SPIRV::OpIAddS;
} else {
- BoolType = GR.getOrCreateSPIRVVectorType(BoolType, ComponentCount, MIRBuilder);
+ BoolType =
+ GR.getOrCreateSPIRVVectorType(BoolType, ComponentCount, MIRBuilder);
NegOneReg =
GR.getOrCreateConstVector((unsigned)-1, I, ResType, TII, ZeroAsNull);
Reg0 = GR.getOrCreateConstVector(0, I, ResType, TII, ZeroAsNull);
@@ -3291,18 +3297,18 @@ bool SPIRVInstructionSelector::selectFirstBitLow64(Register ResVReg,
// Check if the low bits are == -1; true if -1
Register BReg = MRI->createVirtualRegister(GR.getRegClass(BoolType));
- Result = Result && selectNAryOpWithSrcs(BReg, BoolType, I, {LowReg, NegOneReg},
- SPIRV::OpIEqual);
+ Result = Result && selectNAryOpWithSrcs(BReg, BoolType, I,
+ {LowReg, NegOneReg}, SPIRV::OpIEqual);
// Select high bits if true in BReg, otherwise low bits
Register TmpReg = MRI->createVirtualRegister(GR.getRegClass(ResType));
- Result = Result && selectNAryOpWithSrcs(TmpReg, ResType, I, {BReg, HighReg, LowReg},
- SelectOp);
+ Result = Result && selectNAryOpWithSrcs(TmpReg, ResType, I,
+ {BReg, HighReg, LowReg}, SelectOp);
// Add 32 for high bits, 0 for low bits
Register ValReg = MRI->createVirtualRegister(GR.getRegClass(ResType));
- Result = Result &&
- selectNAryOpWithSrcs(ValReg, ResType, I, {BReg, Reg32, Reg0}, SelectOp);
+ Result = Result && selectNAryOpWithSrcs(ValReg, ResType, I,
+ {BReg, Reg32, Reg0}, SelectOp);
return Result &&
selectNAryOpWithSrcs(ResVReg, ResType, I, {ValReg, TmpReg}, AddOp);
From 417133d1b0dd3f5c7dd5db65942e023f09e74e42 Mon Sep 17 00:00:00 2001
From: Ashley Coleman <ascoleman at microsoft.com>
Date: Tue, 19 Nov 2024 11:56:07 -0700
Subject: [PATCH 3/3] cleanup
---
llvm/include/llvm/IR/IntrinsicsDirectX.td | 1 -
llvm/lib/Target/DirectX/DXIL.td | 1 -
.../Target/SPIRV/SPIRVInstructionSelector.cpp | 11 ++++----
.../SPIRV/hlsl-intrinsics/firstbitlow.ll | 26 +++++++++----------
4 files changed, 18 insertions(+), 21 deletions(-)
diff --git a/llvm/include/llvm/IR/IntrinsicsDirectX.td b/llvm/include/llvm/IR/IntrinsicsDirectX.td
index f2f393a02d19c7..6148dcaff64470 100644
--- a/llvm/include/llvm/IR/IntrinsicsDirectX.td
+++ b/llvm/include/llvm/IR/IntrinsicsDirectX.td
@@ -103,6 +103,5 @@ def int_dx_splitdouble : DefaultAttrsIntrinsic<[llvm_anyint_ty, LLVMMatchType<0>
def int_dx_radians : DefaultAttrsIntrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem]>;
def int_dx_firstbituhigh : DefaultAttrsIntrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i32_ty>], [llvm_anyint_ty], [IntrNoMem]>;
def int_dx_firstbitshigh : DefaultAttrsIntrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i32_ty>], [llvm_anyint_ty], [IntrNoMem]>;
-// TODO: check this against the spec
def int_dx_firstbitlow : DefaultAttrsIntrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i32_ty>], [llvm_anyint_ty], [IntrNoMem]>;
}
diff --git a/llvm/lib/Target/DirectX/DXIL.td b/llvm/lib/Target/DirectX/DXIL.td
index 5b65d45a742d9d..efdd91c9c27f62 100644
--- a/llvm/lib/Target/DirectX/DXIL.td
+++ b/llvm/lib/Target/DirectX/DXIL.td
@@ -573,7 +573,6 @@ def FirstbitLo : DXILOp<32, unaryBits> {
let overloads =
[Overloads<DXIL1_0, [Int16Ty, Int32Ty, Int64Ty]>];
let stages = [Stages<DXIL1_0, [all_stages]>];
- // TODO: check these
let attributes = [Attributes<DXIL1_0, [ReadNone]>];
}
diff --git a/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp b/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp
index 77aa69e6e463ee..c033f04305d0f0 100644
--- a/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp
@@ -2799,7 +2799,6 @@ bool SPIRVInstructionSelector::selectIntrinsic(Register ResVReg,
case Intrinsic::spv_firstbitshigh: // There is no CL equivalent of FindSMsb
return selectFirstBitHigh(ResVReg, ResType, I, /*IsSigned=*/true);
case Intrinsic::spv_firstbitlow: // There is no CL equivalent of FindILsb
- // (true?)
return selectFirstBitLow(ResVReg, ResType, I);
case Intrinsic::spv_group_memory_barrier_with_group_sync: {
bool Result = true;
@@ -3248,7 +3247,7 @@ bool SPIRVInstructionSelector::selectFirstBitLow64(Register ResVReg,
// Per the spec, repeat the vector if only one vec is needed
.addUse(FBLReg);
- // high bits are store in even indexes. Extract them from FBLReg
+ // high bits are stored in even indexes. Extract them from FBLReg
for (unsigned j = 0; j < ComponentCount * 2; j += 2) {
MIB.addImm(j);
}
@@ -3262,14 +3261,14 @@ bool SPIRVInstructionSelector::selectFirstBitLow64(Register ResVReg,
// Per the spec, repeat the vector if only one vec is needed
.addUse(FBLReg);
- // low bits are store in odd indexes. Extract them from FBLReg
+ // low bits are stored in odd indexes. Extract them from FBLReg
for (unsigned j = 1; j < ComponentCount * 2; j += 2) {
MIB.addImm(j);
}
Result = Result && MIB.constrainAllUses(TII, TRI, RBI);
}
- // 4. Check if result of each bottom 32 bits is == -1
+ // 4. Check the result. When low bits == -1 use high, otherwise use low
SPIRVType *BoolType = GR.getOrCreateSPIRVBoolType(I, TII);
Register NegOneReg;
Register Reg0;
@@ -3295,7 +3294,7 @@ bool SPIRVInstructionSelector::selectFirstBitLow64(Register ResVReg,
AddOp = SPIRV::OpIAddV;
}
- // Check if the low bits are == -1; true if -1
+ // Check if the low bits are == -1
Register BReg = MRI->createVirtualRegister(GR.getRegClass(BoolType));
Result = Result && selectNAryOpWithSrcs(BReg, BoolType, I,
{LowReg, NegOneReg}, SPIRV::OpIEqual);
@@ -3305,7 +3304,7 @@ bool SPIRVInstructionSelector::selectFirstBitLow64(Register ResVReg,
Result = Result && selectNAryOpWithSrcs(TmpReg, ResType, I,
{BReg, HighReg, LowReg}, SelectOp);
- // Add 32 for high bits, 0 for low bits
+ // 5. Add 32 when high bits are used, otherwise 0 for low bits
Register ValReg = MRI->createVirtualRegister(GR.getRegClass(ResType));
Result = Result && selectNAryOpWithSrcs(ValReg, ResType, I,
{BReg, Reg32, Reg0}, SelectOp);
diff --git a/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/firstbitlow.ll b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/firstbitlow.ll
index 9ebd8cc511eb6c..05488479e5bd0f 100644
--- a/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/firstbitlow.ll
+++ b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/firstbitlow.ll
@@ -6,13 +6,13 @@
; CHECK-DAG: [[u32_t:%.+]] = OpTypeInt 32 0
; CHECK-DAG: [[u32x2_t:%.+]] = OpTypeVector [[u32_t]] 2
; CHECK-DAG: [[u32x4_t:%.+]] = OpTypeVector [[u32_t]] 4
-; CHECK-DAG: [[const_zero:%.*]] = OpConstant [[u32_t]] 0
-; CHECK-DAG: [[const_zerox2:%.*]] = OpConstantComposite [[u32x2_t]] [[const_zero]] [[const_zero]]
-; CHECK-DAG: [[const_one:%.*]] = OpConstant [[u32_t]] 1
-; CHECK-DAG: [[const_thirty_two:%.*]] = OpConstant [[u32_t]] 32
-; CHECK-DAG: [[const_thirty_twox2:%.*]] = OpConstantComposite [[u32x2_t]] [[const_thirty_two]] [[const_thirty_two]]
-; CHECK-DAG: [[const_neg_one:%.*]] = OpConstant [[u32_t]] 4294967295
-; CHECK-DAG: [[const_neg_onex2:%.*]] = OpConstantComposite [[u32x2_t]] [[const_neg_one]] [[const_neg_one]]
+; CHECK-DAG: [[const_0:%.*]] = OpConstant [[u32_t]] 0
+; CHECK-DAG: [[const_0x2:%.*]] = OpConstantComposite [[u32x2_t]] [[const_0]] [[const_0]]
+; CHECK-DAG: [[const_1:%.*]] = OpConstant [[u32_t]] 1
+; CHECK-DAG: [[const_32:%.*]] = OpConstant [[u32_t]] 32
+; CHECK-DAG: [[const_32x2:%.*]] = OpConstantComposite [[u32x2_t]] [[const_32]] [[const_32]]
+; CHECK-DAG: [[const_neg1:%.*]] = OpConstant [[u32_t]] 4294967295
+; CHECK-DAG: [[const_neg1x2:%.*]] = OpConstantComposite [[u32x2_t]] [[const_neg1]] [[const_neg1]]
; CHECK-DAG: [[u16_t:%.+]] = OpTypeInt 16 0
; CHECK-DAG: [[u16x2_t:%.+]] = OpTypeVector [[u16_t]] 2
; CHECK-DAG: [[u64_t:%.+]] = OpTypeInt 64 0
@@ -68,11 +68,11 @@ entry:
; CHECK: [[a64:%.+]] = OpFunctionParameter [[u64_t]]
; CHECK: [[a32x2:%.+]] = OpBitcast [[u32x2_t]] [[a64]]
; CHECK: [[lsb_bits:%.+]] = OpExtInst [[u32x2_t]] [[glsl_450_ext]] FindILsb [[a32x2]]
-; CHECK: [[high_bits:%.+]] = OpVectorExtractDynamic [[u32_t]] [[lsb_bits]] [[const_zero]]
-; CHECK: [[low_bits:%.+]] = OpVectorExtractDynamic [[u32_t]] [[lsb_bits]] [[const_one]]
-; CHECK: [[should_use_high:%.+]] = OpIEqual [[bool_t]] [[low_bits]] [[const_neg_one]]
+; CHECK: [[high_bits:%.+]] = OpVectorExtractDynamic [[u32_t]] [[lsb_bits]] [[const_0]]
+; CHECK: [[low_bits:%.+]] = OpVectorExtractDynamic [[u32_t]] [[lsb_bits]] [[const_1]]
+; CHECK: [[should_use_high:%.+]] = OpIEqual [[bool_t]] [[low_bits]] [[const_neg1]]
; CHECK: [[ans_bits:%.+]] = OpSelect [[u32_t]] [[should_use_high]] [[high_bits]] [[low_bits]]
-; CHECK: [[ans_offset:%.+]] = OpSelect [[u32_t]] [[should_use_high]] [[const_thirty_two]] [[const_zero]]
+; CHECK: [[ans_offset:%.+]] = OpSelect [[u32_t]] [[should_use_high]] [[const_32]] [[const_0]]
; CHECK: [[ret:%.+]] = OpIAdd [[u32_t]] [[ans_offset]] [[ans_bits]]
; CHECK: OpReturnValue [[ret]]
%elt.firstbitlow = call i32 @llvm.spv.firstbitlow.i64(i64 %a)
@@ -87,9 +87,9 @@ entry:
; CHECK: [[lsb_bits:%.+]] = OpExtInst [[u32x4_t]] [[glsl_450_ext]] FindILsb [[a32x4]]
; CHECK: [[high_bits:%.+]] = OpVectorShuffle [[u32x2_t]] [[lsb_bits]] [[lsb_bits]] 0 2
; CHECK: [[low_bits:%.+]] = OpVectorShuffle [[u32x2_t]] [[lsb_bits]] [[lsb_bits]] 1 3
-; CHECK: [[should_use_high:%.+]] = OpIEqual [[boolx2_t]] [[low_bits]] [[const_neg_onex2]]
+; CHECK: [[should_use_high:%.+]] = OpIEqual [[boolx2_t]] [[low_bits]] [[const_neg1x2]]
; CHECK: [[ans_bits:%.+]] = OpSelect [[u32x2_t]] [[should_use_high]] [[high_bits]] [[low_bits]]
-; CHECK: [[ans_offset:%.+]] = OpSelect [[u32x2_t]] [[should_use_high]] [[const_thirty_twox2]] [[const_zerox2]]
+; CHECK: [[ans_offset:%.+]] = OpSelect [[u32x2_t]] [[should_use_high]] [[const_32x2]] [[const_0x2]]
; CHECK: [[ret:%.+]] = OpIAdd [[u32x2_t]] [[ans_offset]] [[ans_bits]]
; CHECK: OpReturnValue [[ret]]
%elt.firstbitlow = call <2 x i32> @llvm.spv.firstbitlow.v2i64(<2 x i64> %a)