[Mlir-commits] [mlir] [AMDGPU] Make S_MOV_B64_IMM_PSEUDO foldable (PR #69483)
Stanislav Mekhanoshin
llvmlistbot at llvm.org
Wed Oct 18 11:28:00 PDT 2023
https://github.com/rampitec updated https://github.com/llvm/llvm-project/pull/69483
From 27ab57359ea876c0ce78e42d8ab1ffc47348efb1 Mon Sep 17 00:00:00 2001
From: Stanislav Mekhanoshin <Stanislav.Mekhanoshin at amd.com>
Date: Wed, 18 Oct 2023 09:50:44 -0700
Subject: [PATCH] [AMDGPU] Make S_MOV_B64_IMM_PSEUDO foldable
With the legality checks in place it is now safe to do so. S_MOV_B64
shall not be used with wide literals, so the test is updated accordingly.
---
llvm/lib/Target/AMDGPU/SIInstrInfo.cpp | 1 +
.../AMDGPU/fold-short-64-bit-literals.mir | 23 ++++++++++++++++-------
2 files changed, 17 insertions(+), 7 deletions(-)
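
For readers following along: SIInstrInfo::isFoldableCopy() tells the immediate-folding machinery (e.g. the SIFoldOperands pass) which materializing moves may have their immediate substituted directly into user instructions, subject to per-operand legality checks. The toy C++ program below is only a conceptual sketch of that fold-through-copies idea under simplified assumptions; ToyInst, canEncodeLiteral and the "fits in a sign-extended 32-bit literal" rule are invented for illustration and are not the LLVM APIs or the actual AMDGPU operand rules.

#include <cstdint>
#include <iostream>
#include <optional>
#include <string>
#include <unordered_map>
#include <vector>

// Toy stand-ins for MachineInstr/virtual registers; not the LLVM types.
struct ToyInst {
  std::string Opcode;          // e.g. "S_MOV_B64_IMM_PSEUDO", "S_AND_B64"
  int Def = -1;                // virtual register defined (-1 if none)
  std::vector<int> Uses;       // virtual registers read
  std::optional<int64_t> Imm;  // immediate operand, if any
};

// A "foldable copy" merely materializes a value, so its users may be able
// to consume the immediate directly instead of reading the register.
static bool isFoldableCopy(const ToyInst &I) {
  return I.Opcode == "S_MOV_B32" || I.Opcode == "S_MOV_B64" ||
         I.Opcode == "S_MOV_B64_IMM_PSEUDO";
}

// Stand-in legality rule: fold only immediates that survive a round trip
// through a sign-extended 32-bit literal. The real checks (inline
// constants, operand encodings, ...) are considerably richer.
static bool canEncodeLiteral(int64_t Imm) {
  return Imm == static_cast<int64_t>(static_cast<int32_t>(Imm));
}

int main() {
  std::vector<ToyInst> Block = {
      {"S_MOV_B64_IMM_PSEUDO", 1, {}, int64_t{0x123456789ABCDEF0}}, // wide
      {"S_MOV_B64_IMM_PSEUDO", 2, {}, int64_t{42}},                 // narrow
      {"S_AND_B64", 3, {0, 1}, std::nullopt},
      {"S_AND_B64", 4, {0, 2}, std::nullopt},
  };

  // Pass 1: remember immediates produced by foldable copies.
  std::unordered_map<int, int64_t> ImmDefs;
  for (const ToyInst &I : Block)
    if (isFoldableCopy(I) && I.Imm)
      ImmDefs[I.Def] = *I.Imm;

  // Pass 2: replace register uses with the immediate when it is legal.
  for (ToyInst &I : Block) {
    if (isFoldableCopy(I))
      continue;
    for (int &R : I.Uses) {
      auto It = ImmDefs.find(R);
      if (It == ImmDefs.end())
        continue;
      if (canEncodeLiteral(It->second)) {
        std::cout << I.Opcode << ": folded %" << R << " -> literal "
                  << It->second << "\n";
        R = -1;              // the use is now the immediate (toy model)
        I.Imm = It->second;
      } else {
        std::cout << I.Opcode << ": kept %" << R
                  << " (64-bit literal not encodable)\n";
      }
    }
  }
  return 0;
}

In this sketch the wide 64-bit immediate stays in a register while the narrow one is folded into its user, which is the same distinction the updated .mir test below exercises for S_MOV_B64_IMM_PSEUDO.
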
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
index d6733bfa058acee..4ff7b462f0f3295 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -3170,6 +3170,7 @@ bool SIInstrInfo::isFoldableCopy(const MachineInstr &MI) {
case AMDGPU::V_MOV_B64_e64:
case AMDGPU::S_MOV_B32:
case AMDGPU::S_MOV_B64:
+ case AMDGPU::S_MOV_B64_IMM_PSEUDO:
case AMDGPU::COPY:
case AMDGPU::WWM_COPY:
case AMDGPU::V_ACCVGPR_WRITE_B32_e64:
diff --git a/llvm/test/CodeGen/AMDGPU/fold-short-64-bit-literals.mir b/llvm/test/CodeGen/AMDGPU/fold-short-64-bit-literals.mir
index 328ee991da8f4a6..6e975c8a5370758 100644
--- a/llvm/test/CodeGen/AMDGPU/fold-short-64-bit-literals.mir
+++ b/llvm/test/CodeGen/AMDGPU/fold-short-64-bit-literals.mir
@@ -9,11 +9,11 @@ body: |
; GCN-LABEL: name: no_fold_fp_64bit_literal_sgpr
; GCN: [[DEF:%[0-9]+]]:vreg_64 = IMPLICIT_DEF
- ; GCN-NEXT: [[S_MOV_B64_:%[0-9]+]]:sreg_64 = S_MOV_B64 1311768467750121200
- ; GCN-NEXT: [[V_ADD_F64_e64_:%[0-9]+]]:vreg_64 = V_ADD_F64_e64 0, [[S_MOV_B64_]], 0, [[DEF]], 0, 0, implicit $mode, implicit $exec
+ ; GCN-NEXT: [[S_MOV_B:%[0-9]+]]:sreg_64 = S_MOV_B64_IMM_PSEUDO 1311768467750121200
+ ; GCN-NEXT: [[V_ADD_F64_e64_:%[0-9]+]]:vreg_64 = V_ADD_F64_e64 0, [[S_MOV_B]], 0, [[DEF]], 0, 0, implicit $mode, implicit $exec
; GCN-NEXT: SI_RETURN_TO_EPILOG [[V_ADD_F64_e64_]]
%0:vreg_64 = IMPLICIT_DEF
- %1:sreg_64 = S_MOV_B64 1311768467750121200
+ %1:sreg_64 = S_MOV_B64_IMM_PSEUDO 1311768467750121200
%2:vreg_64 = V_ADD_F64_e64 0, %1, 0, %0, 0, 0, implicit $mode, implicit $exec
SI_RETURN_TO_EPILOG %2
...
@@ -46,7 +46,7 @@ body: |
; GCN-NEXT: [[V_ADD_F64_e64_:%[0-9]+]]:vreg_64 = V_ADD_F64_e64 0, 4636737291354636288, 0, [[DEF]], 0, 0, implicit $mode, implicit $exec
; GCN-NEXT: SI_RETURN_TO_EPILOG [[V_ADD_F64_e64_]]
%0:vreg_64 = IMPLICIT_DEF
- %1:sreg_64 = S_MOV_B64 4636737291354636288
+ %1:sreg_64 = S_MOV_B64_IMM_PSEUDO 4636737291354636288
%2:vreg_64 = V_ADD_F64_e64 0, %1, 0, %0, 0, 0, implicit $mode, implicit $exec
SI_RETURN_TO_EPILOG %2
...
@@ -59,11 +59,11 @@ body: |
; GCN-LABEL: name: no_fold_int_64bit_literal_sgpr
; GCN: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
- ; GCN-NEXT: [[S_MOV_B64_:%[0-9]+]]:sreg_64 = S_MOV_B64 1311768467750121200
- ; GCN-NEXT: [[S_AND_B64_:%[0-9]+]]:sreg_64 = S_AND_B64 [[DEF]], [[S_MOV_B64_]], implicit-def $scc
+ ; GCN-NEXT: [[S_MOV_B:%[0-9]+]]:sreg_64 = S_MOV_B64_IMM_PSEUDO 1311768467750121200
+ ; GCN-NEXT: [[S_AND_B64_:%[0-9]+]]:sreg_64 = S_AND_B64 [[DEF]], [[S_MOV_B]], implicit-def $scc
; GCN-NEXT: SI_RETURN_TO_EPILOG [[S_AND_B64_]]
%0:sreg_64 = IMPLICIT_DEF
- %1:sreg_64 = S_MOV_B64 1311768467750121200
+ %1:sreg_64 = S_MOV_B64_IMM_PSEUDO 1311768467750121200
%2:sreg_64 = S_AND_B64 %0, %1, implicit-def $scc
SI_RETURN_TO_EPILOG %2
...
@@ -106,6 +106,11 @@ tracksRegLiveness: true
body: |
bb.0:
+ ; GCN-LABEL: name: no_fold_v2fp_64bit_literal_sgpr
+ ; GCN: [[DEF:%[0-9]+]]:vreg_64 = IMPLICIT_DEF
+ ; GCN-NEXT: [[V_MOV_B:%[0-9]+]]:vreg_64 = V_MOV_B64_PSEUDO 4629700418019000320, implicit $exec
+ ; GCN-NEXT: [[V_PK_ADD_F32_:%[0-9]+]]:vreg_64 = V_PK_ADD_F32 0, [[DEF]], 0, [[V_MOV_B]], 0, 0, 0, 0, 0, implicit $mode, implicit $exec
+ ; GCN-NEXT: SI_RETURN_TO_EPILOG [[V_PK_ADD_F32_]]
%0:vreg_64 = IMPLICIT_DEF
%1:vreg_64 = V_MOV_B64_PSEUDO 4629700418019000320, implicit $exec
%2:vreg_64 = V_PK_ADD_F32 0, %0, 0, %1, 0, 0, 0, 0, 0, implicit $mode, implicit $exec
@@ -118,6 +123,10 @@ tracksRegLiveness: true
body: |
bb.0:
+ ; GCN-LABEL: name: fold_v2fp_32bit_literal_sgpr
+ ; GCN: [[DEF:%[0-9]+]]:vreg_64 = IMPLICIT_DEF
+ ; GCN-NEXT: [[V_PK_ADD_F32_:%[0-9]+]]:vreg_64 = V_PK_ADD_F32 0, [[DEF]], 0, 1065353216, 0, 0, 0, 0, 0, implicit $mode, implicit $exec
+ ; GCN-NEXT: SI_RETURN_TO_EPILOG [[V_PK_ADD_F32_]]
%0:vreg_64 = IMPLICIT_DEF
%1:vreg_64 = V_MOV_B64_PSEUDO 1065353216, implicit $exec
%2:vreg_64 = V_PK_ADD_F32 0, %0, 0, %1, 0, 0, 0, 0, 0, implicit $mode, implicit $exec