[llvm] 3f53300 - AMDGPU: Emit llvm.fshr for __builtin_amdgcn_alignbit

Matt Arsenault via llvm-commits llvm-commits at lists.llvm.org
Mon Mar 23 13:51:33 PDT 2020


Author: Matt Arsenault
Date: 2020-03-23T16:51:25-04:00
New Revision: 3f533006ba8c8ae6f3596f49f480aa794ed4e347

URL: https://github.com/llvm/llvm-project/commit/3f533006ba8c8ae6f3596f49f480aa794ed4e347
DIFF: https://github.com/llvm/llvm-project/commit/3f533006ba8c8ae6f3596f49f480aa794ed4e347.diff

LOG: AMDGPU: Emit llvm.fshr for __builtin_amdgcn_alignbit

These are equivalent. The generic rotate builtins do not directly map
to the fshr intrinsic.
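
For reference, both the alignbit operation and llvm.fshr.i32 select 32 bits out of
the 64-bit concatenation of the two data operands, shifted right by the low five
bits of the third operand. A minimal sketch of that shared semantics in C++
(alignbit_ref is a hypothetical reference helper, not part of this change):

    #include <cstdint>

    // Concatenate hi:lo into a 64-bit value, shift right by shift mod 32,
    // and keep the low 32 bits. A rotate is the special case hi == lo,
    // which is why the generic rotate builtins are not a direct substitute.
    static uint32_t alignbit_ref(uint32_t hi, uint32_t lo, uint32_t shift) {
      uint64_t concat = (static_cast<uint64_t>(hi) << 32) | lo;
      return static_cast<uint32_t>(concat >> (shift & 31));
    }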

Added: 
    

Modified: 
    clang/lib/CodeGen/CGBuiltin.cpp
    clang/test/CodeGenOpenCL/builtins-amdgcn.cl
    llvm/include/llvm/IR/IntrinsicsAMDGPU.td

Removed: 
    


################################################################################
diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
index 8d86424c60a9..754d95d1ab81 100644
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -13609,6 +13609,13 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
     return emitRangedBuiltin(*this, Intrinsic::r600_read_tidig_y, 0, 1024);
   case AMDGPU::BI__builtin_r600_read_tidig_z:
     return emitRangedBuiltin(*this, Intrinsic::r600_read_tidig_z, 0, 1024);
+  case AMDGPU::BI__builtin_amdgcn_alignbit: {
+    llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
+    llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
+    llvm::Value *Src2 = EmitScalarExpr(E->getArg(2));
+    Function *F = CGM.getIntrinsic(Intrinsic::fshr, Src0->getType());
+    return Builder.CreateCall(F, { Src0, Src1, Src2 });
+  }
   default:
     return nullptr;
   }

diff --git a/clang/test/CodeGenOpenCL/builtins-amdgcn.cl b/clang/test/CodeGenOpenCL/builtins-amdgcn.cl
index 85e921cbe12a..0aa3e4144c52 100644
--- a/clang/test/CodeGenOpenCL/builtins-amdgcn.cl
+++ b/clang/test/CodeGenOpenCL/builtins-amdgcn.cl
@@ -596,7 +596,7 @@ kernel void test_mbcnt_hi(global uint* out, uint src0, uint src1) {
 }
 
 // CHECK-LABEL: @test_alignbit(
-// CHECK: tail call i32 @llvm.amdgcn.alignbit(i32 %src0, i32 %src1, i32 %src2)
+// CHECK: tail call i32 @llvm.fshr.i32(i32 %src0, i32 %src1, i32 %src2)
 kernel void test_alignbit(global uint* out, uint src0, uint src1, uint src2) {
   *out = __builtin_amdgcn_alignbit(src0, src1, src2);
 }

diff --git a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
index 3f962cc667c5..c01db52b1622 100644
--- a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
+++ b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
@@ -1371,8 +1371,8 @@ def int_amdgcn_writelane :
   [IntrNoMem, IntrConvergent]
 >;
 
-def int_amdgcn_alignbit :
-  GCCBuiltin<"__builtin_amdgcn_alignbit">, Intrinsic<[llvm_i32_ty],
+// FIXME: Deprecated. This is equivalent to llvm.fshr
+def int_amdgcn_alignbit : Intrinsic<[llvm_i32_ty],
   [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
   [IntrNoMem, IntrSpeculatable]
 >;


        

