[llvm] ff97187 - [GlobalISel] Fix legality checks for G_UBFX combines

Jay Foad via llvm-commits llvm-commits at lists.llvm.org
Sat Jan 8 01:32:46 PST 2022


Author: Jay Foad
Date: 2022-01-08T09:20:44Z
New Revision: ff971873b3fcb8f96fdc53f0140257b2c7979ec5

URL: https://github.com/llvm/llvm-project/commit/ff971873b3fcb8f96fdc53f0140257b2c7979ec5
DIFF: https://github.com/llvm/llvm-project/commit/ff971873b3fcb8f96fdc53f0140257b2c7979ec5.diff

LOG: [GlobalISel] Fix legality checks for G_UBFX combines

1. Fix CombinerHelper::matchBitfieldExtractFromAnd to check legality
   with the correct types for the G_UBFX that it builds.
2. Fix AMDGPUTargetLowering::isConstantUnsignedBitfieldExtractLegal to
   match the legality rules: result and first operand can be s32 or s64
   but the "shift amount" operands are always s32.
3. Add AMDGPU tests where the post-legalizer combiner would create
   illegal MIR without the above fixes.

Differential Revision: https://reviews.llvm.org/D116802
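
As context for the fixes above: G_UBFX takes a source operand plus constant
"offset" and "width" operands, and the point of the patch is that the
legality query and the instruction actually built must name the same operand
types. A minimal sketch of building the only 64-bit form AMDGPU accepts under
these rules, assuming an in-scope MachineIRBuilder B and a source register
Src (both illustrative, not from the patch):

  // Illustrative only: result/source are s64, offset/width are s32, per the
  // rule that the "shift amount" operands are always s32 on AMDGPU.
  LLT S64 = LLT::scalar(64);
  LLT S32 = LLT::scalar(32);              // type of the offset/width operands
  auto Offset = B.buildConstant(S32, 8);  // first bit of the field
  auto Width = B.buildConstant(S32, 10);  // number of bits extracted
  // Gives: %dst:_(s64) = G_UBFX %src:_(s64), %offset:_(s32), %width:_(s32)
  B.buildInstr(TargetOpcode::G_UBFX, {S64}, {Src, Offset, Width});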

Added: 
    

Modified: 
    llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
    llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
    llvm/test/CodeGen/AMDGPU/GlobalISel/postlegalizercombiner-ubfx.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/ubfx.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
index 7bef8a88427e3..8a65f2552dfec 100644
--- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -4127,8 +4127,9 @@ bool CombinerHelper::matchBitfieldExtractFromAnd(
   assert(MI.getOpcode() == TargetOpcode::G_AND);
   Register Dst = MI.getOperand(0).getReg();
   LLT Ty = MRI.getType(Dst);
+  LLT ExtractTy = getTargetLowering().getPreferredShiftAmountTy(Ty);
   if (!getTargetLowering().isConstantUnsignedBitfieldExtractLegal(
-          TargetOpcode::G_UBFX, Ty, Ty))
+          TargetOpcode::G_UBFX, Ty, ExtractTy))
     return false;
 
   int64_t AndImm, LSBImm;
@@ -4148,7 +4149,6 @@ bool CombinerHelper::matchBitfieldExtractFromAnd(
   if (static_cast<uint64_t>(LSBImm) >= Size)
     return false;
 
-  LLT ExtractTy = getTargetLowering().getPreferredShiftAmountTy(Ty);
   uint64_t Width = APInt(Size, AndImm).countTrailingOnes();
   MatchInfo = [=](MachineIRBuilder &B) {
     auto WidthCst = B.buildConstant(ExtractTy, Width);
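
The practical effect of moving the ExtractTy computation above the query:
previously the helper asked about a G_UBFX whose offset/width had type Ty,
then built those constants with ExtractTy, so on any target whose preferred
shift-amount type differs from the value type the instruction it created was
never the one it had checked. A hedged sketch of the two queries, assuming a
TargetLowering reference TLI and a target where getPreferredShiftAmountTy(s64)
returns s32 (both assumptions, for illustration):

  LLT Ty = LLT::scalar(64);
  LLT ExtractTy = TLI.getPreferredShiftAmountTy(Ty); // s32 by assumption
  // Old query: checked a form with s64 offset/width operands...
  bool OldOK = TLI.isConstantUnsignedBitfieldExtractLegal(
      TargetOpcode::G_UBFX, Ty, Ty);
  // New query: checks the form the combine will actually emit, since the
  // built width/offset constants use ExtractTy (see the hunk above).
  bool NewOK = TLI.isConstantUnsignedBitfieldExtractLegal(
      TargetOpcode::G_UBFX, Ty, ExtractTy);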

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index 4912ddc29c6f5..cec7458eb9c29 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -4906,5 +4906,6 @@ AMDGPUTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const {
 
 bool AMDGPUTargetLowering::isConstantUnsignedBitfieldExtractLegal(
     unsigned Opc, LLT Ty1, LLT Ty2) const {
-  return Ty1 == Ty2 && (Ty1 == LLT::scalar(32) || Ty1 == LLT::scalar(64));
+  return (Ty1 == LLT::scalar(32) || Ty1 == LLT::scalar(64)) &&
+         Ty2 == LLT::scalar(32);
 }
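
Spelled out, the rewritten hook accepts exactly the combinations the log
describes. A quick illustration (hypothetical assert-style checks, not part
of the patch), given an AMDGPUTargetLowering reference TLI:

  const LLT S32 = LLT::scalar(32), S64 = LLT::scalar(64);
  const unsigned Opc = TargetOpcode::G_UBFX;
  // Result and first operand may be s32 or s64...
  assert(TLI.isConstantUnsignedBitfieldExtractLegal(Opc, S32, S32));
  assert(TLI.isConstantUnsignedBitfieldExtractLegal(Opc, S64, S32));
  // ...but the offset/width operands must be s32. The old predicate
  // (Ty1 == Ty2) wrongly accepted the (s64, s64) form.
  assert(!TLI.isConstantUnsignedBitfieldExtractLegal(Opc, S64, S64));
  assert(!TLI.isConstantUnsignedBitfieldExtractLegal(Opc, S32, S64));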

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/postlegalizercombiner-ubfx.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/postlegalizercombiner-ubfx.mir
index 6b20ace6812a6..24485f1239a46 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/postlegalizercombiner-ubfx.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/postlegalizercombiner-ubfx.mir
@@ -105,3 +105,56 @@ body: |
     $vgpr0 = COPY %4(s32)
 
 ...
+
+---
+name:            bfe_lshr_and_s32
+legalized:       true
+tracksRegLiveness: true
+
+body: |
+  bb.0.entry:
+  liveins: $vgpr0
+
+    ; GCN-LABEL: name: bfe_lshr_and_s32
+    ; GCN: liveins: $vgpr0
+    ; GCN-NEXT: {{  $}}
+    ; GCN-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+    ; GCN-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 5
+    ; GCN-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
+    ; GCN-NEXT: [[UBFX:%[0-9]+]]:_(s32) = G_UBFX [[COPY]], [[C1]](s32), [[C]]
+    ; GCN-NEXT: $vgpr0 = COPY [[UBFX]](s32)
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s32) = G_CONSTANT i32 7936 ; 31 << 8
+    %2:_(s32) = G_AND %0, %1
+    %3:_(s32) = G_CONSTANT i32 8
+    %4:_(s32) = G_LSHR %2, %3(s32)
+    $vgpr0 = COPY %4(s32)
+
+...
+
+---
+name:            bfe_lshr_and_s64
+legalized:       true
+tracksRegLiveness: true
+
+body: |
+  bb.0.entry:
+  liveins: $vgpr0_vgpr1
+
+    ; GCN-LABEL: name: bfe_lshr_and_s64
+    ; GCN: liveins: $vgpr0_vgpr1
+    ; GCN-NEXT: {{  $}}
+    ; GCN-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
+    ; GCN-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 261888
+    ; GCN-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[C]]
+    ; GCN-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
+    ; GCN-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND]], [[C1]](s32)
+    ; GCN-NEXT: $vgpr0_vgpr1 = COPY [[LSHR]](s64)
+    %0:_(s64) = COPY $vgpr0_vgpr1
+    %1:_(s64) = G_CONSTANT i64 261888 ; 1023 << 8
+    %2:_(s64) = G_AND %0, %1
+    %3:_(s32) = G_CONSTANT i32 8
+    %4:_(s64) = G_LSHR %2, %3(s32)
+    $vgpr0_vgpr1 = COPY %4(s64)
+
+...

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/ubfx.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/ubfx.ll
index bb312b038ddfe..1c2a95a592919 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/ubfx.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/ubfx.ll
@@ -44,6 +44,36 @@ define amdgpu_ps i32 @s_srl_big_mask_i32(i32 inreg %value) {
  ret i32 %2
 }
 
+; Test vector bitfield extract.
+define i32 @v_mask_srl_i32(i32 %value) {
+; GFX89-LABEL: v_mask_srl_i32:
+; GFX89:       ; %bb.0:
+; GFX89-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX89-NEXT:    v_bfe_u32 v0, v0, 8, 5
+; GFX89-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_mask_srl_i32:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    v_bfe_u32 v0, v0, 8, 5
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+ %1 = and i32 %value, 7936 ; 31 << 8
+ %2 = lshr i32 %1, 8
+ ret i32 %2
+}
+
+; Test scalar bitfield extract.
+define amdgpu_ps i32 @s_mask_srl_i32(i32 inreg %value) {
+; GCN-LABEL: s_mask_srl_i32:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_bfe_u32 s0, s0, 0x50008
+; GCN-NEXT:    ; return to shader part epilog
+ %1 = and i32 %value, 7936 ; 31 << 8
+ %2 = lshr i32 %1, 8
+ ret i32 %2
+}
+
 ; Test vector bitfield extract for 64-bits.
 define i64 @v_srl_mask_i64(i64 %value) {
 ; GFX89-LABEL: v_srl_mask_i64:
@@ -89,3 +119,40 @@ define amdgpu_ps i64 @s_srl_big_mask_i64(i64 inreg %value) {
  %2 = and i64 %1, 63
  ret i64 %2
 }
+
+; Test vector bitfield extract for 64-bits.
+define i64 @v_mask_srl_i64(i64 %value) {
+; GFX89-LABEL: v_mask_srl_i64:
+; GFX89:       ; %bb.0:
+; GFX89-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX89-NEXT:    v_and_b32_e32 v0, 0xfe000000, v0
+; GFX89-NEXT:    v_and_b32_e32 v1, 7, v1
+; GFX89-NEXT:    v_lshrrev_b64 v[0:1], 25, v[0:1]
+; GFX89-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_mask_srl_i64:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    v_and_b32_e32 v0, 0xfe000000, v0
+; GFX10-NEXT:    v_and_b32_e32 v1, 7, v1
+; GFX10-NEXT:    v_lshrrev_b64 v[0:1], 25, v[0:1]
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+ %1 = and i64 %value, 34326183936 ; 1023 << 25
+ %2 = lshr i64 %1, 25
+ ret i64 %2
+}
+
+; Test scalar bitfield extract for 64-bits.
+define amdgpu_ps i64 @s_mask_srl_i64(i64 inreg %value) {
+; GCN-LABEL: s_mask_srl_i64:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_mov_b32 s2, 0xfe000000
+; GCN-NEXT:    s_mov_b32 s3, 7
+; GCN-NEXT:    s_and_b64 s[0:1], s[0:1], s[2:3]
+; GCN-NEXT:    s_lshr_b64 s[0:1], s[0:1], 25
+; GCN-NEXT:    ; return to shader part epilog
+ %1 = and i64 %value, 34326183936 ; 1023 << 25
+ %2 = lshr i64 %1, 25
+ ret i64 %2
+}
