[clang] [llvm] AMDGPU: Add v_permlane16_swap_b32 and v_permlane32_swap_b32 for gfx950 (PR #117260)
Matt Arsenault via cfe-commits
cfe-commits at lists.llvm.org
Fri Nov 22 13:09:41 PST 2024
https://github.com/arsenm updated https://github.com/llvm/llvm-project/pull/117260
From d008e0375c6c16e00ff091d0e2eaeb007ccbce77 Mon Sep 17 00:00:00 2001
From: Matt Arsenault <Matthew.Arsenault at amd.com>
Date: Mon, 22 Jan 2024 12:40:54 +0700
Subject: [PATCH] AMDGPU: Add v_permlane16_swap_b32 and v_permlane32_swap_b32
for gfx950
This was a bit annoying because these instructions introduce a new
special-case encoding usage: op_sel is repurposed as a subset of the DPP
controls, and the instruction remains eligible for VOP3->VOP1 shrinking.
For some reason fi also uses an enum value, so the raw boolean needs to
be converted to 1 instead of -1.

The two registers are swapped, so the instruction has two defs. Ideally
the builtin would return a pair, but that is awkward to express, so it
returns a vector instead. This would make a hypothetical builtin that
supports v2f16 directly uglier.
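
As an illustration only (not part of the patch), a kernel would use the
new builtin roughly as sketched below; the uint2 typedef mirrors the one
used in the clang tests further down:

  typedef unsigned int uint2 __attribute__((ext_vector_type(2)));

  kernel void swap_rows(global uint2 *out, uint vdst_old, uint vsrc_old) {
    // Swap odd rows of vdst_old with even rows of vsrc_old
    // (fi = false, bound_ctrl = false). Element 0 of the result is the
    // new vdst value, element 1 the new vsrc value.
    *out = __builtin_amdgcn_permlane16_swap(vdst_old, vsrc_old, false, false);
  }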
---
clang/include/clang/Basic/BuiltinsAMDGPU.def | 3 +
clang/lib/CodeGen/CGBuiltin.cpp | 26 ++++
clang/test/CodeGenOpenCL/amdgpu-features.cl | 2 +-
.../builtins-amdgcn-gfx950-err.cl | 6 +-
.../CodeGenOpenCL/builtins-amdgcn-gfx950.cl | 87 +++++++++++++
.../builtins-amdgcn-error-gfx950-param.cl | 10 ++
.../builtins-amdgcn-error-gfx950.cl | 5 +-
llvm/docs/AMDGPUUsage.rst | 13 ++
llvm/include/llvm/IR/IntrinsicsAMDGPU.td | 14 ++
llvm/lib/Target/AMDGPU/AMDGPU.td | 23 +++-
llvm/lib/Target/AMDGPU/AMDGPUGISel.td | 3 +
llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp | 25 ++++
.../AMDGPU/AMDGPUInstructionSelector.cpp | 32 +++++
.../Target/AMDGPU/AMDGPUInstructionSelector.h | 3 +
.../Target/AMDGPU/AMDGPURegisterBankInfo.cpp | 9 ++
.../Target/AMDGPU/AMDGPUSearchableTables.td | 2 +
llvm/lib/Target/AMDGPU/GCNSubtarget.h | 5 +
llvm/lib/Target/AMDGPU/SIInstrInfo.cpp | 6 +-
llvm/lib/Target/AMDGPU/SIInstrInfo.td | 4 +
llvm/lib/Target/AMDGPU/VOP1Instructions.td | 46 +++++++
llvm/lib/Target/AMDGPU/VOPInstructions.td | 12 ++
llvm/lib/TargetParser/TargetParser.cpp | 2 +
.../UniformityAnalysis/AMDGPU/intrinsics.ll | 16 +++
.../AMDGPU/llvm.amdgcn.permlane16.swap.ll | 121 ++++++++++++++++++
.../AMDGPU/llvm.amdgcn.permlane32.swap.ll | 121 ++++++++++++++++++
llvm/test/MC/AMDGPU/gfx950_asm_features.s | 82 ++++++++++++
llvm/test/MC/AMDGPU/gfx950_err.s | 31 +++++
llvm/test/MC/Disassembler/AMDGPU/gfx950.txt | 32 +++++
28 files changed, 736 insertions(+), 5 deletions(-)
create mode 100644 llvm/test/CodeGen/AMDGPU/llvm.amdgcn.permlane16.swap.ll
create mode 100644 llvm/test/CodeGen/AMDGPU/llvm.amdgcn.permlane32.swap.ll
create mode 100644 llvm/test/MC/AMDGPU/gfx950_err.s
diff --git a/clang/include/clang/Basic/BuiltinsAMDGPU.def b/clang/include/clang/Basic/BuiltinsAMDGPU.def
index 51a5b1dbad495c..548bcc8ad55f48 100644
--- a/clang/include/clang/Basic/BuiltinsAMDGPU.def
+++ b/clang/include/clang/Basic/BuiltinsAMDGPU.def
@@ -459,6 +459,9 @@ TARGET_BUILTIN(__builtin_amdgcn_smfmac_f32_32x32x64_bf8_fp8, "V16fV4iV8iV16fiIiI
TARGET_BUILTIN(__builtin_amdgcn_smfmac_f32_32x32x64_fp8_bf8, "V16fV4iV8iV16fiIiIi", "nc", "gfx950-insts")
TARGET_BUILTIN(__builtin_amdgcn_smfmac_f32_32x32x64_fp8_fp8, "V16fV4iV8iV16fiIiIi", "nc", "gfx950-insts")
+TARGET_BUILTIN(__builtin_amdgcn_permlane16_swap, "V2UiUiUiIbIb", "nc", "permlane16-swap")
+TARGET_BUILTIN(__builtin_amdgcn_permlane32_swap, "V2UiUiUiIbIb", "nc", "permlane32-swap")
+
//===----------------------------------------------------------------------===//
// GFX12+ only builtins.
//===----------------------------------------------------------------------===//
diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
index e9c9be907e31df..8f754953d28998 100644
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -20200,6 +20200,32 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
CGM.getIntrinsic(Intrinsic::amdgcn_s_sendmsg_rtn, {ResultType});
return Builder.CreateCall(F, {Arg});
}
+ case AMDGPU::BI__builtin_amdgcn_permlane16_swap:
+ case AMDGPU::BI__builtin_amdgcn_permlane32_swap: {
+ // Because builtin types are limited, and the intrinsic uses a struct/pair
+ // output, marshal the pair-of-i32 to <2 x i32>.
+ Value *VDstOld = EmitScalarExpr(E->getArg(0));
+ Value *VSrcOld = EmitScalarExpr(E->getArg(1));
+ Value *FI = EmitScalarExpr(E->getArg(2));
+ Value *BoundCtrl = EmitScalarExpr(E->getArg(3));
+ Function *F =
+ CGM.getIntrinsic(BuiltinID == AMDGPU::BI__builtin_amdgcn_permlane16_swap
+ ? Intrinsic::amdgcn_permlane16_swap
+ : Intrinsic::amdgcn_permlane32_swap);
+ llvm::CallInst *Call =
+ Builder.CreateCall(F, {VDstOld, VSrcOld, FI, BoundCtrl});
+
+ llvm::Value *Elt0 = Builder.CreateExtractValue(Call, 0);
+ llvm::Value *Elt1 = Builder.CreateExtractValue(Call, 1);
+
+ llvm::Type *ResultType = ConvertType(E->getType());
+
+ llvm::Value *Insert0 = Builder.CreateInsertElement(
+ llvm::PoisonValue::get(ResultType), Elt0, UINT64_C(0));
+ llvm::Value *AsVector =
+ Builder.CreateInsertElement(Insert0, Elt1, UINT64_C(1));
+ return AsVector;
+ }
case AMDGPU::BI__builtin_amdgcn_make_buffer_rsrc:
return emitBuiltinWithOneOverloadedType<4>(
*this, E, Intrinsic::amdgcn_make_buffer_rsrc);
diff --git a/clang/test/CodeGenOpenCL/amdgpu-features.cl b/clang/test/CodeGenOpenCL/amdgpu-features.cl
index 61cbf5e65d0d21..f9e07fbc6b0480 100644
--- a/clang/test/CodeGenOpenCL/amdgpu-features.cl
+++ b/clang/test/CodeGenOpenCL/amdgpu-features.cl
@@ -89,7 +89,7 @@
// GFX941: "target-features"="+16-bit-insts,+atomic-buffer-global-pk-add-f16-insts,+atomic-ds-pk-add-16-insts,+atomic-fadd-rtn-insts,+atomic-flat-pk-add-16-insts,+atomic-global-pk-add-bf16-inst,+ci-insts,+dl-insts,+dot1-insts,+dot10-insts,+dot2-insts,+dot3-insts,+dot4-insts,+dot5-insts,+dot6-insts,+dot7-insts,+dpp,+fp8-conversion-insts,+fp8-insts,+gfx8-insts,+gfx9-insts,+gfx90a-insts,+gfx940-insts,+mai-insts,+s-memrealtime,+s-memtime-inst,+wavefrontsize64,+xf32-insts"
// GFX942: "target-features"="+16-bit-insts,+atomic-buffer-global-pk-add-f16-insts,+atomic-ds-pk-add-16-insts,+atomic-fadd-rtn-insts,+atomic-flat-pk-add-16-insts,+atomic-global-pk-add-bf16-inst,+ci-insts,+dl-insts,+dot1-insts,+dot10-insts,+dot2-insts,+dot3-insts,+dot4-insts,+dot5-insts,+dot6-insts,+dot7-insts,+dpp,+fp8-conversion-insts,+fp8-insts,+gfx8-insts,+gfx9-insts,+gfx90a-insts,+gfx940-insts,+mai-insts,+s-memrealtime,+s-memtime-inst,+wavefrontsize64,+xf32-insts"
// GFX9_4_Generic: "target-features"="+16-bit-insts,+atomic-buffer-global-pk-add-f16-insts,+atomic-ds-pk-add-16-insts,+atomic-fadd-rtn-insts,+atomic-flat-pk-add-16-insts,+atomic-global-pk-add-bf16-inst,+ci-insts,+dl-insts,+dot1-insts,+dot10-insts,+dot2-insts,+dot3-insts,+dot4-insts,+dot5-insts,+dot6-insts,+dot7-insts,+dpp,+gfx8-insts,+gfx9-insts,+gfx90a-insts,+gfx940-insts,+mai-insts,+s-memrealtime,+s-memtime-inst,+wavefrontsize64"
-// GFX950: "target-features"="+16-bit-insts,+atomic-buffer-global-pk-add-f16-insts,+atomic-ds-pk-add-16-insts,+atomic-fadd-rtn-insts,+atomic-flat-pk-add-16-insts,+atomic-global-pk-add-bf16-inst,+ci-insts,+dl-insts,+dot1-insts,+dot10-insts,+dot2-insts,+dot3-insts,+dot4-insts,+dot5-insts,+dot6-insts,+dot7-insts,+dpp,+fp8-conversion-insts,+fp8-insts,+gfx8-insts,+gfx9-insts,+gfx90a-insts,+gfx940-insts,+gfx950-insts,+mai-insts,+prng-inst,+s-memrealtime,+s-memtime-inst,+wavefrontsize64"
+// GFX950: "target-features"="+16-bit-insts,+atomic-buffer-global-pk-add-f16-insts,+atomic-ds-pk-add-16-insts,+atomic-fadd-rtn-insts,+atomic-flat-pk-add-16-insts,+atomic-global-pk-add-bf16-inst,+ci-insts,+dl-insts,+dot1-insts,+dot10-insts,+dot2-insts,+dot3-insts,+dot4-insts,+dot5-insts,+dot6-insts,+dot7-insts,+dpp,+fp8-conversion-insts,+fp8-insts,+gfx8-insts,+gfx9-insts,+gfx90a-insts,+gfx940-insts,+gfx950-insts,+mai-insts,+permlane16-swap,+permlane32-swap,+prng-inst,+s-memrealtime,+s-memtime-inst,+wavefrontsize64"
// GFX1010: "target-features"="+16-bit-insts,+ci-insts,+dl-insts,+dpp,+gfx10-insts,+gfx8-insts,+gfx9-insts,+s-memrealtime,+s-memtime-inst,+wavefrontsize32"
// GFX1011: "target-features"="+16-bit-insts,+ci-insts,+dl-insts,+dot1-insts,+dot10-insts,+dot2-insts,+dot5-insts,+dot6-insts,+dot7-insts,+dpp,+gfx10-insts,+gfx8-insts,+gfx9-insts,+s-memrealtime,+s-memtime-inst,+wavefrontsize32"
// GFX1012: "target-features"="+16-bit-insts,+ci-insts,+dl-insts,+dot1-insts,+dot10-insts,+dot2-insts,+dot5-insts,+dot6-insts,+dot7-insts,+dpp,+gfx10-insts,+gfx8-insts,+gfx9-insts,+s-memrealtime,+s-memtime-inst,+wavefrontsize32"
diff --git a/clang/test/CodeGenOpenCL/builtins-amdgcn-gfx950-err.cl b/clang/test/CodeGenOpenCL/builtins-amdgcn-gfx950-err.cl
index 86f4f73c81c0fc..5b75ee417e545b 100644
--- a/clang/test/CodeGenOpenCL/builtins-amdgcn-gfx950-err.cl
+++ b/clang/test/CodeGenOpenCL/builtins-amdgcn-gfx950-err.cl
@@ -11,6 +11,10 @@
// REQUIRES: amdgpu-registered-target
typedef unsigned int uint;
-void test_prng_b32(global uint* out, uint a) {
+typedef unsigned int uint2 __attribute__((ext_vector_type(2)));
+
+void test(global uint* out, global uint2* out_v2u32, uint a, uint b) {
*out = __builtin_amdgcn_prng_b32(a); // expected-error{{'__builtin_amdgcn_prng_b32' needs target feature prng-inst}}
+ *out_v2u32 = __builtin_amdgcn_permlane16_swap(a, b, false, false); // expected-error{{'__builtin_amdgcn_permlane16_swap' needs target feature permlane16-swap}}
+ *out_v2u32 = __builtin_amdgcn_permlane32_swap(a, b, false, false); // expected-error{{'__builtin_amdgcn_permlane32_swap' needs target feature permlane32-swap}}
}
diff --git a/clang/test/CodeGenOpenCL/builtins-amdgcn-gfx950.cl b/clang/test/CodeGenOpenCL/builtins-amdgcn-gfx950.cl
index f31ba85a52a7ad..49f85982faf5a5 100644
--- a/clang/test/CodeGenOpenCL/builtins-amdgcn-gfx950.cl
+++ b/clang/test/CodeGenOpenCL/builtins-amdgcn-gfx950.cl
@@ -3,6 +3,7 @@
// REQUIRES: amdgpu-registered-target
typedef unsigned int uint;
+typedef unsigned int __attribute__((ext_vector_type(2))) uint2;
// CHECK-LABEL: @test_prng_b32(
// CHECK-NEXT: entry:
@@ -19,3 +20,89 @@ typedef unsigned int uint;
void test_prng_b32(global uint* out, uint a) {
*out = __builtin_amdgcn_prng_b32(a);
}
+
+// CHECK-LABEL: @test_permlane16_swap(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[OUT_ADDR:%.*]] = alloca ptr addrspace(1), align 8, addrspace(5)
+// CHECK-NEXT: [[OLD_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
+// CHECK-NEXT: [[SRC_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
+// CHECK-NEXT: store ptr addrspace(1) [[OUT:%.*]], ptr addrspace(5) [[OUT_ADDR]], align 8
+// CHECK-NEXT: store i32 [[OLD:%.*]], ptr addrspace(5) [[OLD_ADDR]], align 4
+// CHECK-NEXT: store i32 [[SRC:%.*]], ptr addrspace(5) [[SRC_ADDR]], align 4
+// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr addrspace(5) [[OLD_ADDR]], align 4
+// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(5) [[SRC_ADDR]], align 4
+// CHECK-NEXT: [[TMP2:%.*]] = call { i32, i32 } @llvm.amdgcn.permlane16.swap(i32 [[TMP0]], i32 [[TMP1]], i1 false, i1 false)
+// CHECK-NEXT: [[TMP3:%.*]] = extractvalue { i32, i32 } [[TMP2]], 0
+// CHECK-NEXT: [[TMP4:%.*]] = extractvalue { i32, i32 } [[TMP2]], 1
+// CHECK-NEXT: [[TMP5:%.*]] = insertelement <2 x i32> poison, i32 [[TMP3]], i64 0
+// CHECK-NEXT: [[TMP6:%.*]] = insertelement <2 x i32> [[TMP5]], i32 [[TMP4]], i64 1
+// CHECK-NEXT: [[TMP7:%.*]] = load ptr addrspace(1), ptr addrspace(5) [[OUT_ADDR]], align 8
+// CHECK-NEXT: store <2 x i32> [[TMP6]], ptr addrspace(1) [[TMP7]], align 8
+// CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr addrspace(5) [[OLD_ADDR]], align 4
+// CHECK-NEXT: [[TMP9:%.*]] = load i32, ptr addrspace(5) [[SRC_ADDR]], align 4
+// CHECK-NEXT: [[TMP10:%.*]] = call { i32, i32 } @llvm.amdgcn.permlane16.swap(i32 [[TMP8]], i32 [[TMP9]], i1 true, i1 false)
+// CHECK-NEXT: [[TMP11:%.*]] = extractvalue { i32, i32 } [[TMP10]], 0
+// CHECK-NEXT: [[TMP12:%.*]] = extractvalue { i32, i32 } [[TMP10]], 1
+// CHECK-NEXT: [[TMP13:%.*]] = insertelement <2 x i32> poison, i32 [[TMP11]], i64 0
+// CHECK-NEXT: [[TMP14:%.*]] = insertelement <2 x i32> [[TMP13]], i32 [[TMP12]], i64 1
+// CHECK-NEXT: [[TMP15:%.*]] = load ptr addrspace(1), ptr addrspace(5) [[OUT_ADDR]], align 8
+// CHECK-NEXT: store <2 x i32> [[TMP14]], ptr addrspace(1) [[TMP15]], align 8
+// CHECK-NEXT: [[TMP16:%.*]] = load i32, ptr addrspace(5) [[OLD_ADDR]], align 4
+// CHECK-NEXT: [[TMP17:%.*]] = load i32, ptr addrspace(5) [[SRC_ADDR]], align 4
+// CHECK-NEXT: [[TMP18:%.*]] = call { i32, i32 } @llvm.amdgcn.permlane16.swap(i32 [[TMP16]], i32 [[TMP17]], i1 false, i1 true)
+// CHECK-NEXT: [[TMP19:%.*]] = extractvalue { i32, i32 } [[TMP18]], 0
+// CHECK-NEXT: [[TMP20:%.*]] = extractvalue { i32, i32 } [[TMP18]], 1
+// CHECK-NEXT: [[TMP21:%.*]] = insertelement <2 x i32> poison, i32 [[TMP19]], i64 0
+// CHECK-NEXT: [[TMP22:%.*]] = insertelement <2 x i32> [[TMP21]], i32 [[TMP20]], i64 1
+// CHECK-NEXT: [[TMP23:%.*]] = load ptr addrspace(1), ptr addrspace(5) [[OUT_ADDR]], align 8
+// CHECK-NEXT: store <2 x i32> [[TMP22]], ptr addrspace(1) [[TMP23]], align 8
+// CHECK-NEXT: ret void
+//
+void test_permlane16_swap(global uint2* out, uint old, uint src) {
+ *out = __builtin_amdgcn_permlane16_swap(old, src, false, false);
+ *out = __builtin_amdgcn_permlane16_swap(old, src, true, false);
+ *out = __builtin_amdgcn_permlane16_swap(old, src, false, true);
+}
+
+// CHECK-LABEL: @test_permlane32_swap(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[OUT_ADDR:%.*]] = alloca ptr addrspace(1), align 8, addrspace(5)
+// CHECK-NEXT: [[OLD_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
+// CHECK-NEXT: [[SRC_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
+// CHECK-NEXT: store ptr addrspace(1) [[OUT:%.*]], ptr addrspace(5) [[OUT_ADDR]], align 8
+// CHECK-NEXT: store i32 [[OLD:%.*]], ptr addrspace(5) [[OLD_ADDR]], align 4
+// CHECK-NEXT: store i32 [[SRC:%.*]], ptr addrspace(5) [[SRC_ADDR]], align 4
+// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr addrspace(5) [[OLD_ADDR]], align 4
+// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(5) [[SRC_ADDR]], align 4
+// CHECK-NEXT: [[TMP2:%.*]] = call { i32, i32 } @llvm.amdgcn.permlane32.swap(i32 [[TMP0]], i32 [[TMP1]], i1 false, i1 false)
+// CHECK-NEXT: [[TMP3:%.*]] = extractvalue { i32, i32 } [[TMP2]], 0
+// CHECK-NEXT: [[TMP4:%.*]] = extractvalue { i32, i32 } [[TMP2]], 1
+// CHECK-NEXT: [[TMP5:%.*]] = insertelement <2 x i32> poison, i32 [[TMP3]], i64 0
+// CHECK-NEXT: [[TMP6:%.*]] = insertelement <2 x i32> [[TMP5]], i32 [[TMP4]], i64 1
+// CHECK-NEXT: [[TMP7:%.*]] = load ptr addrspace(1), ptr addrspace(5) [[OUT_ADDR]], align 8
+// CHECK-NEXT: store <2 x i32> [[TMP6]], ptr addrspace(1) [[TMP7]], align 8
+// CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr addrspace(5) [[OLD_ADDR]], align 4
+// CHECK-NEXT: [[TMP9:%.*]] = load i32, ptr addrspace(5) [[SRC_ADDR]], align 4
+// CHECK-NEXT: [[TMP10:%.*]] = call { i32, i32 } @llvm.amdgcn.permlane32.swap(i32 [[TMP8]], i32 [[TMP9]], i1 true, i1 false)
+// CHECK-NEXT: [[TMP11:%.*]] = extractvalue { i32, i32 } [[TMP10]], 0
+// CHECK-NEXT: [[TMP12:%.*]] = extractvalue { i32, i32 } [[TMP10]], 1
+// CHECK-NEXT: [[TMP13:%.*]] = insertelement <2 x i32> poison, i32 [[TMP11]], i64 0
+// CHECK-NEXT: [[TMP14:%.*]] = insertelement <2 x i32> [[TMP13]], i32 [[TMP12]], i64 1
+// CHECK-NEXT: [[TMP15:%.*]] = load ptr addrspace(1), ptr addrspace(5) [[OUT_ADDR]], align 8
+// CHECK-NEXT: store <2 x i32> [[TMP14]], ptr addrspace(1) [[TMP15]], align 8
+// CHECK-NEXT: [[TMP16:%.*]] = load i32, ptr addrspace(5) [[OLD_ADDR]], align 4
+// CHECK-NEXT: [[TMP17:%.*]] = load i32, ptr addrspace(5) [[SRC_ADDR]], align 4
+// CHECK-NEXT: [[TMP18:%.*]] = call { i32, i32 } @llvm.amdgcn.permlane32.swap(i32 [[TMP16]], i32 [[TMP17]], i1 false, i1 true)
+// CHECK-NEXT: [[TMP19:%.*]] = extractvalue { i32, i32 } [[TMP18]], 0
+// CHECK-NEXT: [[TMP20:%.*]] = extractvalue { i32, i32 } [[TMP18]], 1
+// CHECK-NEXT: [[TMP21:%.*]] = insertelement <2 x i32> poison, i32 [[TMP19]], i64 0
+// CHECK-NEXT: [[TMP22:%.*]] = insertelement <2 x i32> [[TMP21]], i32 [[TMP20]], i64 1
+// CHECK-NEXT: [[TMP23:%.*]] = load ptr addrspace(1), ptr addrspace(5) [[OUT_ADDR]], align 8
+// CHECK-NEXT: store <2 x i32> [[TMP22]], ptr addrspace(1) [[TMP23]], align 8
+// CHECK-NEXT: ret void
+//
+void test_permlane32_swap(global uint2* out, uint old, uint src) {
+ *out = __builtin_amdgcn_permlane32_swap(old, src, false, false);
+ *out = __builtin_amdgcn_permlane32_swap(old, src, true, false);
+ *out = __builtin_amdgcn_permlane32_swap(old, src, false, true);
+}
diff --git a/clang/test/SemaOpenCL/builtins-amdgcn-error-gfx950-param.cl b/clang/test/SemaOpenCL/builtins-amdgcn-error-gfx950-param.cl
index b3b359a1e0c65b..2f1d312da7786c 100644
--- a/clang/test/SemaOpenCL/builtins-amdgcn-error-gfx950-param.cl
+++ b/clang/test/SemaOpenCL/builtins-amdgcn-error-gfx950-param.cl
@@ -148,3 +148,13 @@ void test_smfmac_f32_32x32x64_fp8_fp8(global float16* out, int4 a, int8 b, float
*out = __builtin_amdgcn_smfmac_f32_32x32x64_fp8_fp8(a, b, c, idx, d, 0); // expected-error{{argument to '__builtin_amdgcn_smfmac_f32_32x32x64_fp8_fp8' must be a constant integer}}
*out = __builtin_amdgcn_smfmac_f32_32x32x64_fp8_fp8(a, b, c, idx, 0, d); // expected-error{{argument to '__builtin_amdgcn_smfmac_f32_32x32x64_fp8_fp8' must be a constant integer}}
}
+
+void test_permlane16_swap(__global int* out, int old, int src, bool X) {
+ *out = __builtin_amdgcn_permlane16_swap(old, src, X, false); // expected-error{{argument to '__builtin_amdgcn_permlane16_swap' must be a constant integer}}
+ *out = __builtin_amdgcn_permlane16_swap(old, src, false, X); // expected-error{{argument to '__builtin_amdgcn_permlane16_swap' must be a constant integer}}
+}
+
+void test_permlane32_swap(__global int* out, int old, int src, bool X) {
+ *out = __builtin_amdgcn_permlane32_swap(old, src, X, false); // expected-error{{argument to '__builtin_amdgcn_permlane32_swap' must be a constant integer}}
+ *out = __builtin_amdgcn_permlane32_swap(old, src, false, X); // expected-error{{argument to '__builtin_amdgcn_permlane32_swap' must be a constant integer}}
+}
diff --git a/clang/test/SemaOpenCL/builtins-amdgcn-error-gfx950.cl b/clang/test/SemaOpenCL/builtins-amdgcn-error-gfx950.cl
index 57523cf0af1b18..e0cde1d3ad87bb 100644
--- a/clang/test/SemaOpenCL/builtins-amdgcn-error-gfx950.cl
+++ b/clang/test/SemaOpenCL/builtins-amdgcn-error-gfx950.cl
@@ -27,7 +27,8 @@ void test(__global float4* out0, half8 a0, half8 b0, float4 c0,
__global float4* out12, int4 a12, int8 b12, float4 c12,
__global float16* out13, int4 a13, int8 b13, float16 c13,
__global float4* out14, int8 a14, int8 b14, float4 c14, int d14, int e14,
- __global float16* out15, int8 a15, int8 b15, float16 c15, int d15, int e15) {
+ __global float16* out15, int8 a15, int8 b15, float16 c15, int d15, int e15,
+ __global uint2* out16, int a16, int b16) {
*out0 = __builtin_amdgcn_mfma_f32_16x16x32_f16(a0, b0, c0, 0, 0, 0); // expected-error{{'__builtin_amdgcn_mfma_f32_16x16x32_f16' needs target feature gfx950-insts}}
*out1 = __builtin_amdgcn_mfma_f32_32x32x16_f16(a1, b1, c1, 0, 0, 0); // expected-error{{'__builtin_amdgcn_mfma_f32_32x32x16_f16' needs target feature gfx950-insts}}
*out2 = __builtin_amdgcn_mfma_f32_32x32x16_bf16(a2, b2, c2, 0, 0, 0); // expected-error{{'__builtin_amdgcn_mfma_f32_32x32x16_bf16' needs target feature gfx950-insts}}
@@ -50,4 +51,6 @@ void test(__global float4* out0, half8 a0, half8 b0, float4 c0,
*out13 = __builtin_amdgcn_smfmac_f32_32x32x64_fp8_fp8(a13, b13, c13, 0, 0, 0); // expected-error{{'__builtin_amdgcn_smfmac_f32_32x32x64_fp8_fp8' needs target feature gfx950-insts}}
*out14 = __builtin_amdgcn_mfma_scale_f32_16x16x128_f8f6f4(a14, b14, c14, 0, 0, 0, d14, 0, e14); // expected-error{{'__builtin_amdgcn_mfma_scale_f32_16x16x128_f8f6f4' needs target feature gfx950-insts}}
*out15 = __builtin_amdgcn_mfma_scale_f32_32x32x64_f8f6f4(a15, b15, c15, 0, 0, 0, d15, 0, e15); // expected-error{{'__builtin_amdgcn_mfma_scale_f32_32x32x64_f8f6f4' needs target feature gfx950-insts}}
+ *out16 = __builtin_amdgcn_permlane16_swap(a16, b16, false, false); // expected-error{{'__builtin_amdgcn_permlane16_swap' needs target feature permlane16-swap}}
+ *out16 = __builtin_amdgcn_permlane32_swap(a16, b16, false, false); // expected-error{{'__builtin_amdgcn_permlane32_swap' needs target feature permlane32-swap}}
}
diff --git a/llvm/docs/AMDGPUUsage.rst b/llvm/docs/AMDGPUUsage.rst
index 161363e0dd6bcc..411a1209ef947e 100644
--- a/llvm/docs/AMDGPUUsage.rst
+++ b/llvm/docs/AMDGPUUsage.rst
@@ -1407,6 +1407,19 @@ The AMDGPU backend implements the following LLVM IR intrinsics.
llvm.amdgcn.mfma.scale.f32.32x32x64.f8f6f4 Emit `v_mfma_scale_f32_32x32x64_f8f6f4`
+ llvm.amdgcn.permlane16.swap Provide direct access to `v_permlane16_swap_b32` instruction on supported targets.
+ Swaps the values across lanes of first 2 operands. Odd rows of the first operand are
+ swapped with even rows of the second operand (one row is 16 lanes).
+ Returns a pair for the swapped registers. The first element of the return corresponds
+ to the swapped element of the first argument.
+
+
+ llvm.amdgcn.permlane32.swap Provide direct access to `v_permlane32_swap_b32` instruction on supported targets.
+ Swaps the values across lanes of first 2 operands. Rows 2 and 3 of the first operand are
+ swapped with rows 0 and 1 of the second operand (one row is 16 lanes).
+ Returns a pair for the swapped registers. The first element of the return
+ corresponds to the swapped element of the first argument.
+
============================================== ==========================================================
.. TODO::
diff --git a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
index f9a4f1899cc606..73599851f50003 100644
--- a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
+++ b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
@@ -3166,6 +3166,20 @@ def int_amdgcn_smfmac_f32_32x32x64_fp8_bf8 : AMDGPUMSmfmacIntrinsic<llvm_v16f32_
def int_amdgcn_smfmac_f32_32x32x64_fp8_fp8 : AMDGPUMSmfmacIntrinsic<llvm_v16f32_ty, llvm_v4i32_ty, llvm_v8i32_ty>;
}
+// { vdst_new, vsrc_new } llvm.amdgcn.permlane16.swap <vdst_old> <vsrc_old> <fi> <bound_control>
+def int_amdgcn_permlane16_swap :
+ Intrinsic<[llvm_i32_ty, llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty,
+ llvm_i1_ty, llvm_i1_ty],
+ [IntrNoMem, IntrConvergent, IntrWillReturn,
+ ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<3>>, IntrNoCallback, IntrNoFree]>;
+
+// { vdst_new, vsrc_new } llvm.amdgcn.permlane32.swap <vdst_old> <vsrc_old> <fi> <bound_control>
+def int_amdgcn_permlane32_swap :
+ Intrinsic<[llvm_i32_ty, llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty,
+ llvm_i1_ty, llvm_i1_ty],
+ [IntrNoMem, IntrConvergent, IntrWillReturn,
+ ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<3>>, IntrNoCallback, IntrNoFree]>;
+
//===----------------------------------------------------------------------===//
// Special Intrinsics for backend internal use only. No frontend
// should emit calls to these.
diff --git a/llvm/lib/Target/AMDGPU/AMDGPU.td b/llvm/lib/Target/AMDGPU/AMDGPU.td
index d3543015d667f9..60b1e58832dad4 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPU.td
+++ b/llvm/lib/Target/AMDGPU/AMDGPU.td
@@ -372,10 +372,23 @@ def FeatureGFX940Insts : SubtargetFeature<"gfx940-insts",
"Additional instructions for GFX940+"
>;
+def FeaturePermlane16Swap : SubtargetFeature<"permlane16-swap",
+ "HasPermlane16Swap",
+ "true",
+ "Has v_permlane16_swap_b32 instructions"
+>;
+
+def FeaturePermlane32Swap : SubtargetFeature<"permlane32-swap",
+ "HasPermlane32Swap",
+ "true",
+ "Has v_permlane32_swap_b32 instructions"
+>;
+
def FeatureGFX950Insts : SubtargetFeature<"gfx950-insts",
"GFX950Insts",
"true",
- "Additional instructions for GFX950+"
+ "Additional instructions for GFX950+",
+ [FeaturePermlane16Swap, FeaturePermlane32Swap]
>;
def FeatureGFX10Insts : SubtargetFeature<"gfx10-insts",
@@ -1987,6 +2000,14 @@ def HasGFX950Insts :
Predicate<"Subtarget->hasGFX950Insts()">,
AssemblerPredicate<(all_of FeatureGFX950Insts)>;
+def HasPermlane16Swap :
+ Predicate<"Subtarget->hasPermlane16Swap()">,
+ AssemblerPredicate<(all_of FeaturePermlane16Swap)>;
+
+def HasPermlane32Swap :
+ Predicate<"Subtarget->hasPermlane32Swap()">,
+ AssemblerPredicate<(all_of FeaturePermlane32Swap)>;
+
def isGFX8GFX9NotGFX940 :
Predicate<"!Subtarget->hasGFX940Insts() &&"
"(Subtarget->getGeneration() == AMDGPUSubtarget::VOLCANIC_ISLANDS ||"
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUGISel.td b/llvm/lib/Target/AMDGPU/AMDGPUGISel.td
index 88fa96bd049f29..1b909568fc555c 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUGISel.td
+++ b/llvm/lib/Target/AMDGPU/AMDGPUGISel.td
@@ -395,6 +395,9 @@ def gi_as_i8timm : GICustomOperandRenderer<"renderTruncTImm">,
def gi_as_i1timm : GICustomOperandRenderer<"renderTruncTImm">,
GISDNodeXFormEquiv<as_i1timm>;
+def gi_as_i1timm_zext : GICustomOperandRenderer<"renderZextBoolTImm">,
+ GISDNodeXFormEquiv<as_i1timm_zext>;
+
def gi_NegateImm : GICustomOperandRenderer<"renderNegateImm">,
GISDNodeXFormEquiv<NegateImm>;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
index 151d56292b53d6..1c738e352b07b9 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
@@ -2777,6 +2777,31 @@ void AMDGPUDAGToDAGISel::SelectINTRINSIC_WO_CHAIN(SDNode *N) {
case Intrinsic::amdgcn_interp_p1_f16:
SelectInterpP1F16(N);
return;
+ case Intrinsic::amdgcn_permlane16_swap:
+ case Intrinsic::amdgcn_permlane32_swap: {
+ if ((IntrID == Intrinsic::amdgcn_permlane16_swap &&
+ !Subtarget->hasPermlane16Swap()) ||
+ (IntrID == Intrinsic::amdgcn_permlane32_swap &&
+ !Subtarget->hasPermlane32Swap())) {
+ SelectCode(N); // Hit the default error
+ return;
+ }
+
+ Opcode = IntrID == Intrinsic::amdgcn_permlane16_swap
+ ? AMDGPU::V_PERMLANE16_SWAP_B32_e64
+ : AMDGPU::V_PERMLANE32_SWAP_B32_e64;
+
+ SmallVector<SDValue, 4> NewOps(N->op_begin() + 1, N->op_end());
+ if (ConvGlueNode)
+ NewOps.push_back(SDValue(ConvGlueNode, 0));
+
+ bool FI = N->getConstantOperandVal(3);
+ NewOps[2] = CurDAG->getTargetConstant(
+ FI ? AMDGPU::DPP::DPP_FI_1 : AMDGPU::DPP::DPP_FI_0, SDLoc(), MVT::i32);
+
+ CurDAG->SelectNodeTo(N, Opcode, N->getVTList(), NewOps);
+ return;
+ }
default:
SelectCode(N);
break;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
index 6dbe1fa62e7481..39bec6c7f2f56d 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
@@ -1105,6 +1105,9 @@ bool AMDGPUInstructionSelector::selectG_INTRINSIC(MachineInstr &I) const {
case Intrinsic::amdgcn_smfmac_f32_32x32x64_fp8_bf8:
case Intrinsic::amdgcn_smfmac_f32_32x32x64_fp8_fp8:
return selectSMFMACIntrin(I);
+ case Intrinsic::amdgcn_permlane16_swap:
+ case Intrinsic::amdgcn_permlane32_swap:
+ return selectPermlaneSwapIntrin(I, IntrinsicID);
default:
return selectImpl(I, *CoverageInfo);
}
@@ -3581,6 +3584,29 @@ bool AMDGPUInstructionSelector::selectSMFMACIntrin(MachineInstr &MI) const {
return true;
}
+bool AMDGPUInstructionSelector::selectPermlaneSwapIntrin(
+ MachineInstr &MI, Intrinsic::ID IntrID) const {
+ if (IntrID == Intrinsic::amdgcn_permlane16_swap &&
+ !Subtarget->hasPermlane16Swap())
+ return false;
+ if (IntrID == Intrinsic::amdgcn_permlane32_swap &&
+ !Subtarget->hasPermlane32Swap())
+ return false;
+
+ unsigned Opcode = IntrID == Intrinsic::amdgcn_permlane16_swap
+ ? AMDGPU::V_PERMLANE16_SWAP_B32_e64
+ : AMDGPU::V_PERMLANE32_SWAP_B32_e64;
+
+ MI.removeOperand(2);
+ MI.setDesc(TII.get(Opcode));
+ MI.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
+
+ MachineOperand &FI = MI.getOperand(4);
+ FI.setImm(FI.getImm() ? AMDGPU::DPP::DPP_FI_1 : AMDGPU::DPP::DPP_FI_0);
+
+ return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
+}
+
bool AMDGPUInstructionSelector::selectWaveAddress(MachineInstr &MI) const {
Register DstReg = MI.getOperand(0).getReg();
Register SrcReg = MI.getOperand(1).getReg();
@@ -5769,6 +5795,12 @@ void AMDGPUInstructionSelector::renderTruncTImm(MachineInstrBuilder &MIB,
MIB.addImm(Op.getImm());
}
+void AMDGPUInstructionSelector::renderZextBoolTImm(MachineInstrBuilder &MIB,
+ const MachineInstr &MI,
+ int OpIdx) const {
+ MIB.addImm(MI.getOperand(OpIdx).getImm() != 0);
+}
+
void AMDGPUInstructionSelector::renderOpSelTImm(MachineInstrBuilder &MIB,
const MachineInstr &MI,
int OpIdx) const {
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.h b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.h
index 563e40267f04b1..5b31cb827c9715 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.h
@@ -145,6 +145,7 @@ class AMDGPUInstructionSelector final : public InstructionSelector {
bool selectGlobalLoadLds(MachineInstr &MI) const;
bool selectBVHIntrinsic(MachineInstr &I) const;
bool selectSMFMACIntrin(MachineInstr &I) const;
+ bool selectPermlaneSwapIntrin(MachineInstr &I, Intrinsic::ID IntrID) const;
bool selectWaveAddress(MachineInstr &I) const;
bool selectStackRestore(MachineInstr &MI) const;
bool selectNamedBarrierInit(MachineInstr &I, Intrinsic::ID IID) const;
@@ -328,6 +329,8 @@ class AMDGPUInstructionSelector final : public InstructionSelector {
void renderTruncTImm(MachineInstrBuilder &MIB, const MachineInstr &MI,
int OpIdx) const;
+ void renderZextBoolTImm(MachineInstrBuilder &MIB, const MachineInstr &MI,
+ int OpIdx) const;
void renderOpSelTImm(MachineInstrBuilder &MIB, const MachineInstr &MI,
int OpIdx) const;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
index 2d12b0c316a533..8c050348f753bb 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
@@ -3146,6 +3146,8 @@ void AMDGPURegisterBankInfo::applyMappingImpl(
case Intrinsic::amdgcn_interp_inreg_p2_f16:
case Intrinsic::amdgcn_interp_p10_rtz_f16:
case Intrinsic::amdgcn_interp_p2_rtz_f16:
+ case Intrinsic::amdgcn_permlane16_swap:
+ case Intrinsic::amdgcn_permlane32_swap:
applyDefaultMapping(OpdMapper);
return;
case Intrinsic::amdgcn_permlane16:
@@ -4860,6 +4862,13 @@ AMDGPURegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
OpdsMapping[4] = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, 32);
break;
}
+ case Intrinsic::amdgcn_permlane16_swap:
+ case Intrinsic::amdgcn_permlane32_swap: {
+ unsigned DstSize = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
+ OpdsMapping[0] = OpdsMapping[1] = OpdsMapping[3] = OpdsMapping[4] =
+ AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, DstSize);
+ break;
+ }
case Intrinsic::amdgcn_ballot: {
unsigned DstSize = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
unsigned SrcSize = MRI.getType(MI.getOperand(2).getReg()).getSizeInBits();
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUSearchableTables.td b/llvm/lib/Target/AMDGPU/AMDGPUSearchableTables.td
index 2ea254e64b8cb8..bc8b373d06e01a 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUSearchableTables.td
+++ b/llvm/lib/Target/AMDGPU/AMDGPUSearchableTables.td
@@ -326,6 +326,8 @@ def : SourceOfDivergence<int_amdgcn_mov_dpp8>;
def : SourceOfDivergence<int_amdgcn_update_dpp>;
def : SourceOfDivergence<int_amdgcn_writelane>;
def : SourceOfDivergence<int_amdgcn_init_whole_wave>;
+def : SourceOfDivergence<int_amdgcn_permlane16_swap>;
+def : SourceOfDivergence<int_amdgcn_permlane32_swap>;
foreach intr = AMDGPUMFMAIntrinsics908 in
def : SourceOfDivergence<intr>;
diff --git a/llvm/lib/Target/AMDGPU/GCNSubtarget.h b/llvm/lib/Target/AMDGPU/GCNSubtarget.h
index f3f96940c1f44b..c7022ce78650ea 100644
--- a/llvm/lib/Target/AMDGPU/GCNSubtarget.h
+++ b/llvm/lib/Target/AMDGPU/GCNSubtarget.h
@@ -221,6 +221,8 @@ class GCNSubtarget final : public AMDGPUGenSubtargetInfo,
bool HasPseudoScalarTrans = false;
bool HasRestrictedSOffset = false;
bool HasPrngInst = false;
+ bool HasPermlane16Swap = false;
+ bool HasPermlane32Swap = false;
bool HasVcmpxPermlaneHazard = false;
bool HasVMEMtoScalarWriteHazard = false;
bool HasSMEMtoVectorWriteHazard = false;
@@ -1319,6 +1321,9 @@ class GCNSubtarget final : public AMDGPUGenSubtargetInfo,
/// \returns true if the target has instructions with xf32 format support.
bool hasXF32Insts() const { return HasXF32Insts; }
+ bool hasPermlane16Swap() const { return HasPermlane16Swap; }
+ bool hasPermlane32Swap() const { return HasPermlane32Swap; }
+
bool hasMinimum3Maximum3F32() const {
return HasMinimum3Maximum3F32;
}
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
index b7c008235fb7ae..2c30bfcb5522a8 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -4468,7 +4468,11 @@ bool SIInstrInfo::canShrink(const MachineInstr &MI,
// Check output modifiers
return !hasModifiersSet(MI, AMDGPU::OpName::omod) &&
!hasModifiersSet(MI, AMDGPU::OpName::clamp) &&
- !hasModifiersSet(MI, AMDGPU::OpName::byte_sel);
+ !hasModifiersSet(MI, AMDGPU::OpName::byte_sel) &&
+ // TODO: Can we avoid checking bound_ctrl/fi here?
+ // They are only used by permlane*_swap special case.
+ !hasModifiersSet(MI, AMDGPU::OpName::bound_ctrl) &&
+ !hasModifiersSet(MI, AMDGPU::OpName::fi);
}
// Set VCC operand with all flags from \p Orig, except for setting it as
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.td b/llvm/lib/Target/AMDGPU/SIInstrInfo.td
index 885f72494a8f68..67d9e4138753f7 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.td
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.td
@@ -807,6 +807,10 @@ def as_i1timm : SDNodeXForm<timm, [{
return CurDAG->getTargetConstant(N->getZExtValue(), SDLoc(N), MVT::i1);
}]>;
+def as_i1timm_zext : SDNodeXForm<timm, [{
+ return CurDAG->getTargetConstant(N->getZExtValue(), SDLoc(N), MVT::i32);
+}]>;
+
def as_i8imm : SDNodeXForm<imm, [{
return CurDAG->getTargetConstant(N->getZExtValue(), SDLoc(N), MVT::i8);
}]>;
diff --git a/llvm/lib/Target/AMDGPU/VOP1Instructions.td b/llvm/lib/Target/AMDGPU/VOP1Instructions.td
index 3cda173207dfb1..1dd39be9e8d9c7 100644
--- a/llvm/lib/Target/AMDGPU/VOP1Instructions.td
+++ b/llvm/lib/Target/AMDGPU/VOP1Instructions.td
@@ -380,6 +380,24 @@ def VOP_MOVRELS : VOPProfile<[i32, i32, untyped, untyped]> {
let Src0RC64 = VRegSrc_32;
}
+def VOP_PERMLANE_SWAP : VOPProfile<[i32, i32, untyped, untyped]> {
+ let Outs32 = (outs DstRC:$vdst, VRegSrc_32:$src0_out);
+ let Outs64 = (outs DstRC64:$vdst, VRegSrc_32:$src0_out);
+
+ let Src0RC32 = VRegSrc_32;
+ let Src0RC64 = VRegSrc_32;
+ let HasClamp = 0;
+ let HasExtVOP3DPP = 0;
+ let HasExtDPP = 0;
+ let HasExtSDWA = 0;
+
+ let Ins32 = (ins Src0RC64:$vdst_in, Src0RC32:$src0);
+ let Ins64 = (ins Src0RC64:$vdst_in, Src0RC64:$src0, Dpp16FI:$fi, DppBoundCtrl:$bound_ctrl);
+ let InsVOP3OpSel = (ins Src0RC64:$vdst_in, Src0RC64:$src0, Dpp16FI:$fi, DppBoundCtrl:$bound_ctrl);
+ let Asm64 = "$vdst, $src0$bound_ctrl$fi";
+ let AsmVOP3OpSel = "$vdst, $src0$bound_ctrl$fi";
+}
+
// Special case because there are no true output operands. Hack vdst
// to be a src operand. The custom inserter must add a tied implicit
// def and use of the super register since there seems to be no way to
@@ -767,6 +785,18 @@ let SubtargetPredicate = isGFX11Plus in {
let SubtargetPredicate = HasPrngInst in
defm V_PRNG_B32 : VOP1Inst <"v_prng_b32", VOP_I32_I32, int_amdgcn_prng_b32>;
+let Constraints = "$vdst = $vdst_in, $src0_out = $src0",
+ DisableEncoding="$vdst_in,$src0_out",
+ SchedRW = [Write32Bit, Write32Bit] in {
+let SubtargetPredicate = HasPermlane16Swap in {
+defm V_PERMLANE16_SWAP_B32 : VOP1Inst<"v_permlane16_swap_b32", VOP_PERMLANE_SWAP>;
+}
+
+let SubtargetPredicate = HasPermlane32Swap in {
+defm V_PERMLANE32_SWAP_B32 : VOP1Inst<"v_permlane32_swap_b32", VOP_PERMLANE_SWAP>;
+}
+}
+
foreach vt = Reg32Types.types in {
def : GCNPat<(int_amdgcn_permlane64 (vt VRegSrc_32:$src0)),
(vt (V_PERMLANE64_B32 (vt VRegSrc_32:$src0)))
@@ -1512,6 +1542,20 @@ let DecoderNamespace = "GFX9" in {
}
}
+/// Special case of VOP1 instructions, with a VOP3 form where op_sel
+/// is used for DPP operands.
+multiclass VOP1_OpSel_Real_e32e64_gfx9 <bits<10> op> {
+ let AssemblerPredicate = isGFX9Only, DecoderNamespace = "GFX9" in {
+ def _e32_gfx9 :
+ VOP1_Real<!cast<VOP1_Pseudo>(NAME#"_e32"), SIEncodingFamily.GFX9>,
+ VOP1e<op{7-0}, !cast<VOP1_Pseudo>(NAME#"_e32").Pfl>;
+
+ def _e64_gfx9 :
+ VOP3_Real<!cast<VOP3_Pseudo>(NAME#"_e64"), SIEncodingFamily.GFX9>,
+ VOP3OpSelIsDPP_gfx9<!add(0x140, op), !cast<VOP3_Pseudo>(NAME#"_e64").Pfl>;
+ }
+}
+
defm V_SCREEN_PARTITION_4SE_B32 : VOP1_Real_gfx9 <0x37>;
let AssemblerPredicate = isGFX940Plus in
@@ -1525,6 +1569,8 @@ defm V_CVT_PK_F32_FP8 : VOP1_Real_NoDstSel_SDWA_gfx9<0x56>;
defm V_CVT_PK_F32_BF8 : VOP1_Real_NoDstSel_SDWA_gfx9<0x57>;
defm V_PRNG_B32 : VOP1_Real_gfx9 <0x58>;
+defm V_PERMLANE16_SWAP_B32 : VOP1_OpSel_Real_e32e64_gfx9<0x059>;
+defm V_PERMLANE32_SWAP_B32 : VOP1_OpSel_Real_e32e64_gfx9<0x05a>;
class MovDPP8Pattern<Predicate Pred, Instruction Inst, ValueType vt> : GCNPat <
(vt (int_amdgcn_mov_dpp8 vt:$src, timm:$dpp8)),
diff --git a/llvm/lib/Target/AMDGPU/VOPInstructions.td b/llvm/lib/Target/AMDGPU/VOPInstructions.td
index eb9d00972468c2..34c7989b9d0b86 100644
--- a/llvm/lib/Target/AMDGPU/VOPInstructions.td
+++ b/llvm/lib/Target/AMDGPU/VOPInstructions.td
@@ -324,6 +324,18 @@ class VOP3OpSel_gfx9 <bits<10> op, VOPProfile P> : VOP3e_vi <op, P> {
let Inst{14} = !if(P.HasDst, src0_modifiers{3}, 0);
}
+// Special case for v_permlane16_swap_b32/v_permlane32_swap_b32
+// op_sel[0]/op_sel[1] are treated as bound_ctrl and fi dpp operands.
+class VOP3OpSelIsDPP_gfx9 <bits<10> op, VOPProfile P> : VOP3e_vi <op, P> {
+ bits<1> fi;
+ bits<1> bound_ctrl;
+
+ // OPSEL[0] specifies FI
+ let Inst{11} = fi;
+ // OPSEL[1] specifies BOUND_CTRL
+ let Inst{12} = bound_ctrl;
+}
+
class VOP3OpSel_gfx10<bits<10> op, VOPProfile p> : VOP3e_gfx10<op, p> {
let Inst{11} = !if(p.HasSrc0, src0_modifiers{2}, 0);
let Inst{12} = !if(p.HasSrc1, src1_modifiers{2}, 0);
diff --git a/llvm/lib/TargetParser/TargetParser.cpp b/llvm/lib/TargetParser/TargetParser.cpp
index b236e26f495dfd..c60c5a0fc2bb78 100644
--- a/llvm/lib/TargetParser/TargetParser.cpp
+++ b/llvm/lib/TargetParser/TargetParser.cpp
@@ -471,6 +471,8 @@ void AMDGPU::fillAMDGPUFeatureMap(StringRef GPU, const Triple &T,
break;
case GK_GFX950:
Features["prng-inst"] = true;
+ Features["permlane16-swap"] = true;
+ Features["permlane32-swap"] = true;
Features["gfx950-insts"] = true;
[[fallthrough]];
case GK_GFX942:
diff --git a/llvm/test/Analysis/UniformityAnalysis/AMDGPU/intrinsics.ll b/llvm/test/Analysis/UniformityAnalysis/AMDGPU/intrinsics.ll
index 3810938a5a52fd..0dfd1d880f9cf2 100644
--- a/llvm/test/Analysis/UniformityAnalysis/AMDGPU/intrinsics.ll
+++ b/llvm/test/Analysis/UniformityAnalysis/AMDGPU/intrinsics.ll
@@ -440,6 +440,22 @@ define amdgpu_kernel void @smfmac_f32_32x32x64_fp8_fp8(<4 x i32> %arg0, <8 x i32
ret void
}
+; CHECK: DIVERGENT: %v = call { i32, i32 } @llvm.amdgcn.permlane16.swap(i32 %src0, i32 %src1, i1 false, i1 false)
+define amdgpu_kernel void @v_permlane16_swap(ptr addrspace(1) %out, i32 %src0, i32 %src1) #0 {
+ %v = call { i32, i32 } @llvm.amdgcn.permlane16.swap(i32 %src0, i32 %src1, i1 false, i1 false)
+ store { i32, i32 } %v, ptr addrspace(1) %out
+ ret void
+}
+
+; CHECK: DIVERGENT: %v = call { i32, i32 } @llvm.amdgcn.permlane32.swap(i32 %src0, i32 %src1, i1 false, i1 false)
+define amdgpu_kernel void @v_permlane32_swap(ptr addrspace(1) %out, i32 %src0, i32 %src1) #0 {
+ %v = call { i32, i32 } @llvm.amdgcn.permlane32.swap(i32 %src0, i32 %src1, i1 false, i1 false)
+ store { i32, i32 } %v, ptr addrspace(1) %out
+ ret void
+}
+
+
+
declare i32 @llvm.amdgcn.ds.swizzle(i32, i32) #1
declare i32 @llvm.amdgcn.permlane16.i32(i32, i32, i32, i32, i1, i1) #1
declare i32 @llvm.amdgcn.permlanex16.i32(i32, i32, i32, i32, i1, i1) #1
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.permlane16.swap.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.permlane16.swap.ll
new file mode 100644
index 00000000000000..0d5dfa46c2c260
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.permlane16.swap.ll
@@ -0,0 +1,121 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -global-isel=0 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx950 < %s | FileCheck -check-prefix=GCN %s
+; RUN: llc -global-isel=1 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx950 < %s | FileCheck -check-prefix=GCN %s
+
+; RUN: not --crash llc -global-isel=0 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx940 -filetype=null %s 2>&1 | FileCheck -check-prefix=ERR-SDAG %s
+; RUN: not --crash llc -global-isel=1 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx940 -filetype=null %s 2>&1 | FileCheck -check-prefix=ERR-GISEL %s
+
+; ERR-SDAG: LLVM ERROR: Cannot select: intrinsic %llvm.amdgcn.permlane16.swap
+; ERR-GISEL: LLVM ERROR: cannot select: %{{[0-9]+}}:vgpr_32(s32), %{{[0-9]+}}:vgpr_32(s32) = G_INTRINSIC_CONVERGENT intrinsic(@llvm.amdgcn.permlane16.swap)
+
+
+declare { i32, i32 } @llvm.amdgcn.permlane16.swap(i32, i32, i1 immarg, i1 immarg)
+
+define { i32, i32 } @v_permlane16_swap_b32_vv(i32 %vdst_old, i32 %src0_old) {
+; GCN-LABEL: v_permlane16_swap_b32_vv:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_permlane16_swap_b32_e32 v0, v1
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %v = call { i32, i32 } @llvm.amdgcn.permlane16.swap(i32 %vdst_old, i32 %src0_old, i1 false, i1 false)
+ ret { i32, i32 } %v
+}
+
+define { i32, i32 } @v_permlane16_swap_b32_vi(i32 %vdst_old) {
+; GCN-LABEL: v_permlane16_swap_b32_vi:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_mov_b32_e32 v1, 1
+; GCN-NEXT: v_permlane16_swap_b32_e32 v0, v1
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %v = call { i32, i32 } @llvm.amdgcn.permlane16.swap(i32 %vdst_old, i32 1, i1 false, i1 false)
+ ret { i32, i32 } %v
+}
+
+define { i32, i32 } @v_permlane16_swap_b32_vl(i32 %vdst_old) {
+; GCN-LABEL: v_permlane16_swap_b32_vl:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_mov_b32_e32 v1, 0xc1d1
+; GCN-NEXT: v_permlane16_swap_b32_e32 v0, v1
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %v = call { i32, i32 } @llvm.amdgcn.permlane16.swap(i32 %vdst_old, i32 49617, i1 false, i1 false)
+ ret { i32, i32 } %v
+}
+
+define { i32, i32 } @v_permlane16_swap_b32_iv(i32 %src0_old) {
+; GCN-LABEL: v_permlane16_swap_b32_iv:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_mov_b32_e32 v1, v0
+; GCN-NEXT: v_mov_b32_e32 v0, 1
+; GCN-NEXT: v_permlane16_swap_b32_e32 v0, v1
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %v = call { i32, i32 } @llvm.amdgcn.permlane16.swap(i32 1, i32 %src0_old, i1 false, i1 false)
+ ret { i32, i32 } %v
+}
+
+define { i32, i32 } @v_permlane16_swap_b32_ss(i32 inreg %vdst_old, i32 inreg %src0_old) {
+; GCN-LABEL: v_permlane16_swap_b32_ss:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: v_mov_b32_e32 v1, s1
+; GCN-NEXT: v_permlane16_swap_b32_e32 v0, v1
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %v = call { i32, i32 } @llvm.amdgcn.permlane16.swap(i32 %vdst_old, i32 %src0_old, i1 false, i1 false)
+ ret { i32, i32 } %v
+}
+
+define { i32, i32 } @v_permlane16_swap_b32_sv(i32 inreg %vdst_old, i32 %src0_old) {
+; GCN-LABEL: v_permlane16_swap_b32_sv:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_mov_b32_e32 v1, v0
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: v_permlane16_swap_b32_e32 v0, v1
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %v = call { i32, i32 } @llvm.amdgcn.permlane16.swap(i32 %vdst_old, i32 %src0_old, i1 false, i1 false)
+ ret { i32, i32 } %v
+}
+
+define { i32, i32 } @v_permlane16_swap_b32_vs(i32 %vdst_old, i32 inreg %src0_old) {
+; GCN-LABEL: v_permlane16_swap_b32_vs:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_mov_b32_e32 v1, s0
+; GCN-NEXT: v_permlane16_swap_b32_e32 v0, v1
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %v = call { i32, i32 } @llvm.amdgcn.permlane16.swap(i32 %vdst_old, i32 %src0_old, i1 false, i1 false)
+ ret { i32, i32 } %v
+}
+
+define { i32, i32 } @v_permlane16_swap_b32_vv_fi(i32 %vdst_old, i32 %src0_old) {
+; GCN-LABEL: v_permlane16_swap_b32_vv_fi:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_permlane16_swap_b32_e64 v0, v1 fi:1
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %v = call { i32, i32 } @llvm.amdgcn.permlane16.swap(i32 %vdst_old, i32 %src0_old, i1 true, i1 false)
+ ret { i32, i32 } %v
+}
+
+define { i32, i32 } @v_permlane16_swap_b32_vv_bc(i32 %vdst_old, i32 %src0_old) {
+; GCN-LABEL: v_permlane16_swap_b32_vv_bc:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_permlane16_swap_b32_e64 v0, v1 bound_ctrl:1
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %v = call { i32, i32 } @llvm.amdgcn.permlane16.swap(i32 %vdst_old, i32 %src0_old, i1 false, i1 true)
+ ret { i32, i32 } %v
+}
+
+define { i32, i32 } @v_permlane16_swap_b32_vv_fi_bc(i32 %vdst_old, i32 %src0_old) {
+; GCN-LABEL: v_permlane16_swap_b32_vv_fi_bc:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_permlane16_swap_b32_e64 v0, v1 bound_ctrl:1 fi:1
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %v = call { i32, i32 } @llvm.amdgcn.permlane16.swap(i32 %vdst_old, i32 %src0_old, i1 true, i1 true)
+ ret { i32, i32 } %v
+}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.permlane32.swap.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.permlane32.swap.ll
new file mode 100644
index 00000000000000..e3b0879af4307d
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.permlane32.swap.ll
@@ -0,0 +1,121 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -global-isel=0 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx950 < %s | FileCheck -check-prefix=GCN %s
+; RUN: llc -global-isel=1 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx950 < %s | FileCheck -check-prefix=GCN %s
+
+; RUN: not --crash llc -global-isel=0 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx940 -filetype=null %s 2>&1 | FileCheck -check-prefix=ERR-SDAG %s
+; RUN: not --crash llc -global-isel=1 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx940 -filetype=null %s 2>&1 | FileCheck -check-prefix=ERR-GISEL %s
+
+; ERR-SDAG: LLVM ERROR: Cannot select: intrinsic %llvm.amdgcn.permlane32.swap
+; ERR-GISEL: LLVM ERROR: cannot select: %{{[0-9]+}}:vgpr_32(s32), %{{[0-9]+}}:vgpr_32(s32) = G_INTRINSIC_CONVERGENT intrinsic(@llvm.amdgcn.permlane32.swap)
+
+
+declare { i32, i32 } @llvm.amdgcn.permlane32.swap(i32, i32, i1 immarg, i1 immarg)
+
+define { i32, i32 } @v_permlane32_swap_b32_vv(i32 %vdst_old, i32 %src0_old) {
+; GCN-LABEL: v_permlane32_swap_b32_vv:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_permlane32_swap_b32_e32 v0, v1
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %v = call { i32, i32 } @llvm.amdgcn.permlane32.swap(i32 %vdst_old, i32 %src0_old, i1 false, i1 false)
+ ret { i32, i32 } %v
+}
+
+define { i32, i32 } @v_permlane32_swap_b32_vi(i32 %vdst_old) {
+; GCN-LABEL: v_permlane32_swap_b32_vi:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_mov_b32_e32 v1, 1
+; GCN-NEXT: v_permlane32_swap_b32_e32 v0, v1
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %v = call { i32, i32 } @llvm.amdgcn.permlane32.swap(i32 %vdst_old, i32 1, i1 false, i1 false)
+ ret { i32, i32 } %v
+}
+
+define { i32, i32 } @v_permlane32_swap_b32_vl(i32 %vdst_old) {
+; GCN-LABEL: v_permlane32_swap_b32_vl:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_mov_b32_e32 v1, 0xc1d1
+; GCN-NEXT: v_permlane32_swap_b32_e32 v0, v1
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %v = call { i32, i32 } @llvm.amdgcn.permlane32.swap(i32 %vdst_old, i32 49617, i1 false, i1 false)
+ ret { i32, i32 } %v
+}
+
+define { i32, i32 } @v_permlane32_swap_b32_iv(i32 %src0_old) {
+; GCN-LABEL: v_permlane32_swap_b32_iv:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_mov_b32_e32 v1, v0
+; GCN-NEXT: v_mov_b32_e32 v0, 1
+; GCN-NEXT: v_permlane32_swap_b32_e32 v0, v1
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %v = call { i32, i32 } @llvm.amdgcn.permlane32.swap(i32 1, i32 %src0_old, i1 false, i1 false)
+ ret { i32, i32 } %v
+}
+
+define { i32, i32 } @v_permlane32_swap_b32_ss(i32 inreg %vdst_old, i32 inreg %src0_old) {
+; GCN-LABEL: v_permlane32_swap_b32_ss:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: v_mov_b32_e32 v1, s1
+; GCN-NEXT: v_permlane32_swap_b32_e32 v0, v1
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %v = call { i32, i32 } @llvm.amdgcn.permlane32.swap(i32 %vdst_old, i32 %src0_old, i1 false, i1 false)
+ ret { i32, i32 } %v
+}
+
+define { i32, i32 } @v_permlane32_swap_b32_sv(i32 inreg %vdst_old, i32 %src0_old) {
+; GCN-LABEL: v_permlane32_swap_b32_sv:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_mov_b32_e32 v1, v0
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: v_permlane32_swap_b32_e32 v0, v1
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %v = call { i32, i32 } @llvm.amdgcn.permlane32.swap(i32 %vdst_old, i32 %src0_old, i1 false, i1 false)
+ ret { i32, i32 } %v
+}
+
+define { i32, i32 } @v_permlane32_swap_b32_vs(i32 %vdst_old, i32 inreg %src0_old) {
+; GCN-LABEL: v_permlane32_swap_b32_vs:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_mov_b32_e32 v1, s0
+; GCN-NEXT: v_permlane32_swap_b32_e32 v0, v1
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %v = call { i32, i32 } @llvm.amdgcn.permlane32.swap(i32 %vdst_old, i32 %src0_old, i1 false, i1 false)
+ ret { i32, i32 } %v
+}
+
+define { i32, i32 } @v_permlane32_swap_b32_vv_fi(i32 %vdst_old, i32 %src0_old) {
+; GCN-LABEL: v_permlane32_swap_b32_vv_fi:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_permlane32_swap_b32_e64 v0, v1 fi:1
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %v = call { i32, i32 } @llvm.amdgcn.permlane32.swap(i32 %vdst_old, i32 %src0_old, i1 true, i1 false)
+ ret { i32, i32 } %v
+}
+
+define { i32, i32 } @v_permlane32_swap_b32_vv_bc(i32 %vdst_old, i32 %src0_old) {
+; GCN-LABEL: v_permlane32_swap_b32_vv_bc:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_permlane32_swap_b32_e64 v0, v1 bound_ctrl:1
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %v = call { i32, i32 } @llvm.amdgcn.permlane32.swap(i32 %vdst_old, i32 %src0_old, i1 false, i1 true)
+ ret { i32, i32 } %v
+}
+
+define { i32, i32 } @v_permlane32_swap_b32_vv_fi_bc(i32 %vdst_old, i32 %src0_old) {
+; GCN-LABEL: v_permlane32_swap_b32_vv_fi_bc:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_permlane32_swap_b32_e64 v0, v1 bound_ctrl:1 fi:1
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %v = call { i32, i32 } @llvm.amdgcn.permlane32.swap(i32 %vdst_old, i32 %src0_old, i1 true, i1 true)
+ ret { i32, i32 } %v
+}
diff --git a/llvm/test/MC/AMDGPU/gfx950_asm_features.s b/llvm/test/MC/AMDGPU/gfx950_asm_features.s
index 405d152c93d867..ad1ce40ddd6a45 100644
--- a/llvm/test/MC/AMDGPU/gfx950_asm_features.s
+++ b/llvm/test/MC/AMDGPU/gfx950_asm_features.s
@@ -35,3 +35,85 @@ global_load_lds_dwordx4 v[2:3], off offset:4
// NOT-GFX950: :[[@LINE+2]]:{{[0-9]+}}: error:
// GFX950: global_load_lds_dwordx4 v2, s[4:5] offset:4 ; encoding: [0x04,0x80,0xf4,0xdd,0x02,0x00,0x04,0x00]
global_load_lds_dwordx4 v2, s[4:5] offset:4
+
+
+// NOT-GFX950: :[[@LINE+2]]:{{[0-9]+}}: error:
+// GFX950: v_permlane16_swap_b32_e32 v1, v2 ; encoding: [0x02,0xb3,0x02,0x7e]
+v_permlane16_swap_b32 v1, v2
+
+// NOT-GFX950: :[[@LINE+2]]:{{[0-9]+}}: error:
+// GFX950: v_permlane16_swap_b32_e32 v1, v2 ; encoding: [0x02,0xb3,0x02,0x7e]
+v_permlane16_swap_b32_e32 v1, v2
+
+// NOT-GFX950: :[[@LINE+2]]:{{[0-9]+}}: error:
+// GFX950: v_permlane16_swap_b32_e64 v1, v2 ; encoding: [0x01,0x00,0x99,0xd1,0x02,0x01,0x00,0x00]
+v_permlane16_swap_b32_e64 v1, v2
+
+// FIXME: Parsed as bound_ctrl:1?
+// NOT-GFX950: :[[@LINE+2]]:{{[0-9]+}}: error:
+// GFX950: v_permlane16_swap_b32_e64 v1, v2 bound_ctrl:1 ; encoding: [0x01,0x10,0x99,0xd1,0x02,0x01,0x00,0x00]
+v_permlane16_swap_b32 v1, v2 bound_ctrl:0
+
+// NOT-GFX950: :[[@LINE+2]]:{{[0-9]+}}: error:
+// GFX950: v_permlane16_swap_b32_e64 v1, v2 ; encoding: [0x01,0x00,0x99,0xd1,0x02,0x01,0x00,0x00]
+v_permlane16_swap_b32 v1, v2 fi:0
+
+// NOT-GFX950: :[[@LINE+2]]:{{[0-9]+}}: error:
+// GFX950: v_permlane16_swap_b32_e64 v1, v2 bound_ctrl:1 ; encoding: [0x01,0x10,0x99,0xd1,0x02,0x01,0x00,0x00]
+v_permlane16_swap_b32 v1, v2 bound_ctrl:1
+
+// NOT-GFX950: :[[@LINE+2]]:{{[0-9]+}}: error:
+// GFX950: v_permlane16_swap_b32_e64 v1, v2 fi:1 ; encoding: [0x01,0x08,0x99,0xd1,0x02,0x01,0x00,0x00]
+v_permlane16_swap_b32 v1, v2 fi:1
+
+// NOT-GFX950: :[[@LINE+2]]:{{[0-9]+}}: error:
+// GFX950: v_permlane16_swap_b32_e64 v1, v2 bound_ctrl:1 fi:1 ; encoding: [0x01,0x18,0x99,0xd1,0x02,0x01,0x00,0x00]
+v_permlane16_swap_b32 v1, v2 bound_ctrl:1 fi:1
+
+// NOT-GFX950: :[[@LINE+2]]:{{[0-9]+}}: error:
+// GFX950: v_permlane16_swap_b32_e64 v1, v2 bound_ctrl:1 fi:1 ; encoding: [0x01,0x18,0x99,0xd1,0x02,0x01,0x00,0x00]
+v_permlane16_swap_b32_e64 v1, v2 bound_ctrl:1 fi:1
+
+// FIXME: Swapped order not accepted
+// v_permlane16_swap_b32 v1, v2 fi:1 bound_ctrl:1
+
+
+// NOT-GFX950: :[[@LINE+2]]:{{[0-9]+}}: error:
+// GFX950: v_permlane32_swap_b32_e32 v1, v2 ; encoding: [0x02,0xb5,0x02,0x7e]
+v_permlane32_swap_b32 v1, v2
+
+// NOT-GFX950: :[[@LINE+2]]:{{[0-9]+}}: error:
+// GFX950: v_permlane32_swap_b32_e32 v1, v2 ; encoding: [0x02,0xb5,0x02,0x7e]
+v_permlane32_swap_b32_e32 v1, v2
+
+// NOT-GFX950: :[[@LINE+2]]:{{[0-9]+}}: error:
+// GFX950: v_permlane32_swap_b32_e64 v1, v2 ; encoding: [0x01,0x00,0x9a,0xd1,0x02,0x01,0x00,0x00]
+v_permlane32_swap_b32_e64 v1, v2
+
+// FIXME: Parsed as bound_ctrl:1?
+// NOT-GFX950: :[[@LINE+2]]:{{[0-9]+}}: error:
+// GFX950: v_permlane32_swap_b32_e64 v1, v2 bound_ctrl:1 ; encoding: [0x01,0x10,0x9a,0xd1,0x02,0x01,0x00,0x00]
+v_permlane32_swap_b32 v1, v2 bound_ctrl:0
+
+// NOT-GFX950: :[[@LINE+2]]:{{[0-9]+}}: error:
+// GFX950: v_permlane32_swap_b32_e64 v1, v2 ; encoding: [0x01,0x00,0x9a,0xd1,0x02,0x01,0x00,0x00]
+v_permlane32_swap_b32 v1, v2 fi:0
+
+// NOT-GFX950: :[[@LINE+2]]:{{[0-9]+}}: error:
+// GFX950: v_permlane32_swap_b32_e64 v1, v2 bound_ctrl:1 ; encoding: [0x01,0x10,0x9a,0xd1,0x02,0x01,0x00,0x00]
+v_permlane32_swap_b32 v1, v2 bound_ctrl:1
+
+// NOT-GFX950: :[[@LINE+2]]:{{[0-9]+}}: error:
+// GFX950: v_permlane32_swap_b32_e64 v1, v2 fi:1 ; encoding: [0x01,0x08,0x9a,0xd1,0x02,0x01,0x00,0x00]
+v_permlane32_swap_b32 v1, v2 fi:1
+
+// NOT-GFX950: :[[@LINE+2]]:{{[0-9]+}}: error:
+// GFX950: v_permlane32_swap_b32_e64 v1, v2 bound_ctrl:1 fi:1 ; encoding: [0x01,0x18,0x9a,0xd1,0x02,0x01,0x00,0x00]
+v_permlane32_swap_b32 v1, v2 bound_ctrl:1 fi:1
+
+// NOT-GFX950: :[[@LINE+2]]:{{[0-9]+}}: error:
+// GFX950: v_permlane32_swap_b32_e64 v1, v2 bound_ctrl:1 fi:1 ; encoding: [0x01,0x18,0x9a,0xd1,0x02,0x01,0x00,0x00]
+v_permlane32_swap_b32_e64 v1, v2 bound_ctrl:1 fi:1
+
+// FIXME: Swapped order not accepted
+// v_permlane32_swap_b32 v1, v2 fi:1 bound_ctrl:1
diff --git a/llvm/test/MC/AMDGPU/gfx950_err.s b/llvm/test/MC/AMDGPU/gfx950_err.s
new file mode 100644
index 00000000000000..3f9bf3beef3aac
--- /dev/null
+++ b/llvm/test/MC/AMDGPU/gfx950_err.s
@@ -0,0 +1,31 @@
+// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx950 %s 2>&1 | FileCheck --check-prefix=GFX950 --implicit-check-not=error: %s
+
+// GFX950: :[[@LINE+1]]:27: error: invalid operand for instruction
+v_permlane16_swap_b32 v0, s0
+
+// GFX950: :[[@LINE+1]]:27: error: invalid operand for instruction
+v_permlane16_swap_b32 v0, m0
+
+// GFX950: :[[@LINE+1]]:27: error: invalid operand for instruction
+v_permlane16_swap_b32 v0, vcc
+
+// GFX950: :[[@LINE+1]]:27: error: invalid operand for instruction
+v_permlane16_swap_b32 v0, vcc_lo
+
+// GFX950: :[[@LINE+1]]:23: error: invalid operand for instruction
+v_permlane16_swap_b32 s0, v0
+
+// GFX950: :[[@LINE+1]]:34: error: invalid operand for instruction
+v_permlane16_swap_b32_e32 v1, v2 bound_ctrl:1
+
+// GFX950: :[[@LINE+1]]:34: error: invalid operand for instruction
+v_permlane16_swap_b32_e32 v1, v2 bound_ctrl:0
+
+// GFX950: :[[@LINE+1]]:34: error: invalid operand for instruction
+v_permlane16_swap_b32_e32 v1, v2 fi:1
+
+// GFX950: :[[@LINE+1]]:34: error: invalid operand for instruction
+v_permlane16_swap_b32_e32 v1, v2 fi:0
+
+// GFX950: :[[@LINE+1]]:34: error: invalid operand for instruction
+v_permlane16_swap_b32_e32 v1, v2 bound_ctrl:1 fi:1
diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx950.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx950.txt
index ce37e228f03fa3..3852845d308834 100644
--- a/llvm/test/MC/Disassembler/AMDGPU/gfx950.txt
+++ b/llvm/test/MC/Disassembler/AMDGPU/gfx950.txt
@@ -42,3 +42,35 @@
# GFX950: buffer_load_dwordx4 v0, s[8:11], s101 offen lds ; encoding: [0x00,0x10,0x5d,0xe0,0x00,0x00,0x02,0x65]
0x00,0x10,0x5d,0xe0,0x00,0x00,0x02,0x65
+
+
+# GFX950: v_permlane16_swap_b32_e32 v1, v2 ; encoding: [0x02,0xb3,0x02,0x7e]
+0x02,0xb3,0x02,0x7e
+
+# GFX950: v_permlane16_swap_b32_e64 v1, v2 ; encoding: [0x01,0x00,0x99,0xd1,0x02,0x01,0x00,0x00]
+0x01,0x00,0x99,0xd1,0x02,0x01,0x00,0x00
+
+# GFX950: v_permlane16_swap_b32_e64 v1, v2 bound_ctrl:1 ; encoding: [0x01,0x10,0x99,0xd1,0x02,0x01,0x00,0x00]
+0x01,0x10,0x99,0xd1,0x02,0x01,0x00,0x00
+
+# GFX950: v_permlane16_swap_b32_e64 v1, v2 bound_ctrl:1 fi:1 ; encoding: [0x01,0x18,0x99,0xd1,0x02,0x01,0x00,0x00]
+0x01,0x18,0x99,0xd1,0x02,0x01,0x00,0x00
+
+# GFX950: v_permlane16_swap_b32_e64 v1, v2 fi:1 ; encoding: [0x01,0x08,0x99,0xd1,0x02,0x01,0x00,0x00]
+0x01,0x08,0x99,0xd1,0x02,0x01,0x00,0x00
+
+
+# GFX950: v_permlane32_swap_b32_e32 v1, v2 ; encoding: [0x02,0xb5,0x02,0x7e]
+0x02,0xb5,0x02,0x7e
+
+# GFX950: v_permlane32_swap_b32_e64 v1, v2 ; encoding: [0x01,0x00,0x9a,0xd1,0x02,0x01,0x00,0x00]
+0x01,0x00,0x9a,0xd1,0x02,0x01,0x00,0x00
+
+# GFX950: v_permlane32_swap_b32_e64 v1, v2 bound_ctrl:1 ; encoding: [0x01,0x10,0x9a,0xd1,0x02,0x01,0x00,0x00]
+0x01,0x10,0x9a,0xd1,0x02,0x01,0x00,0x00
+
+# GFX950: v_permlane32_swap_b32_e64 v1, v2 bound_ctrl:1 fi:1 ; encoding: [0x01,0x18,0x9a,0xd1,0x02,0x01,0x00,0x00]
+0x01,0x18,0x9a,0xd1,0x02,0x01,0x00,0x00
+
+# GFX950: v_permlane32_swap_b32_e64 v1, v2 fi:1 ; encoding: [0x01,0x08,0x9a,0xd1,0x02,0x01,0x00,0x00]
+0x01,0x08,0x9a,0xd1,0x02,0x01,0x00,0x00