[llvm] [GlobalIsel][NFC] Modernize UBFX combine (PR #97513)

via llvm-commits llvm-commits at lists.llvm.org
Tue Jul 2 21:15:18 PDT 2024


llvmbot wrote:



@llvm/pr-subscribers-llvm-globalisel

Author: Thorsten Schütt (tschuett)

Changes:

Credits: https://reviews.llvm.org/D99283
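
For context, the combine implements the rule stated in the test header: `and (lshr x, cst), mask -> ubfx x, cst, width`. A minimal standalone sketch of that identity (illustration only, not code from this patch; `ubfx32` is an ad-hoc helper name):

```cpp
// Illustration: ubfx(x, lsb, width) extracts `width` bits of x starting at
// bit `lsb`, which is exactly (x >> lsb) masked with `width` trailing ones.
#include <cassert>
#include <cstdint>

uint32_t ubfx32(uint32_t X, unsigned Lsb, unsigned Width) {
  uint32_t Mask = Width >= 32 ? ~0u : ((1u << Width) - 1);
  return (X >> Lsb) & Mask;
}

int main() {
  // Mirrors the ubfx_s32 test below: lsb = 5, mask = 255 -> width = 8.
  for (uint32_t X : {0u, 0xDEADBEEFu, ~0u})
    assert(((X >> 5) & 255u) == ubfx32(X, 5, 8));
}
```

The extract width is the number of trailing ones in the mask, which is why the negative tests below reject AND immediates that are not of the form 2^n - 1.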

---
Full diff: https://github.com/llvm/llvm-project/pull/97513.diff


3 Files Affected:

- (modified) llvm/include/llvm/Target/GlobalISel/Combine.td (+4-1) 
- (modified) llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp (+7-5) 
- (modified) llvm/test/CodeGen/AArch64/GlobalISel/form-bitfield-extract-from-and.mir (+99-83) 


``````````diff
diff --git a/llvm/include/llvm/Target/GlobalISel/Combine.td b/llvm/include/llvm/Target/GlobalISel/Combine.td
index b0789fca630e8..3ef0636ebf1c7 100644
--- a/llvm/include/llvm/Target/GlobalISel/Combine.td
+++ b/llvm/include/llvm/Target/GlobalISel/Combine.td
@@ -1026,7 +1026,10 @@ def and_or_disjoint_mask : GICombineRule<
 
 def bitfield_extract_from_and : GICombineRule<
   (defs root:$root, build_fn_matchinfo:$info),
-  (match (wip_match_opcode G_AND):$root,
+  (match (G_CONSTANT $mask, $imm2),
+         (G_CONSTANT $lsb, $imm1),
+         (G_LSHR $shift, $x, $lsb),
+         (G_AND $root, $shift, $mask):$root,
     [{ return Helper.matchBitfieldExtractFromAnd(*${root}, ${info}); }]),
   (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;
 
diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
index d57dd6fca0140..693e664a8d257 100644
--- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -4521,19 +4521,21 @@ bool CombinerHelper::matchBitfieldExtractFromSExtInReg(
 }
 
 /// Form a G_UBFX from "(a srl b) & mask", where b and mask are constants.
-bool CombinerHelper::matchBitfieldExtractFromAnd(
-    MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
-  assert(MI.getOpcode() == TargetOpcode::G_AND);
-  Register Dst = MI.getOperand(0).getReg();
+bool CombinerHelper::matchBitfieldExtractFromAnd(MachineInstr &MI,
+                                                 BuildFnTy &MatchInfo) {
+  GAnd *And = cast<GAnd>(&MI);
+  Register Dst = And->getReg(0);
   LLT Ty = MRI.getType(Dst);
   LLT ExtractTy = getTargetLowering().getPreferredShiftAmountTy(Ty);
+  // Note that isLegalOrBeforeLegalizer is stricter and does not take custom
+  // into account.
   if (LI && !LI->isLegalOrCustom({TargetOpcode::G_UBFX, {Ty, ExtractTy}}))
     return false;
 
   int64_t AndImm, LSBImm;
   Register ShiftSrc;
   const unsigned Size = Ty.getScalarSizeInBits();
-  if (!mi_match(MI.getOperand(0).getReg(), MRI,
+  if (!mi_match(And->getReg(0), MRI,
                 m_GAnd(m_OneNonDBGUse(m_GLShr(m_Reg(ShiftSrc), m_ICst(LSBImm))),
                        m_ICst(AndImm))))
     return false;
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/form-bitfield-extract-from-and.mir b/llvm/test/CodeGen/AArch64/GlobalISel/form-bitfield-extract-from-and.mir
index 141e6c4d47038..16e84a6c1af80 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/form-bitfield-extract-from-and.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/form-bitfield-extract-from-and.mir
@@ -6,6 +6,10 @@
 #
 # and (lshr x, cst), mask -> ubfx x, cst, width
 
+# LSB = 5
+# Width = LSB + trailing_ones(255) - 1 =
+#         5 + 8 - 1 = 12
+
 ...
 ---
 name:            ubfx_s32
@@ -15,18 +19,16 @@ body:             |
   bb.0:
     liveins: $w0
 
-    ; LSB = 5
-    ; Width = LSB + trailing_ones(255) - 1 =
-    ;         5 + 8 - 1 = 12
 
     ; CHECK-LABEL: name: ubfx_s32
     ; CHECK: liveins: $w0
-    ; CHECK: %x:_(s32) = COPY $w0
-    ; CHECK: %lsb:_(s32) = G_CONSTANT i32 5
-    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
-    ; CHECK: %and:_(s32) = G_UBFX %x, %lsb(s32), [[C]]
-    ; CHECK: $w0 = COPY %and(s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %x:_(s32) = COPY $w0
+    ; CHECK-NEXT: %lsb:_(s32) = G_CONSTANT i32 5
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
+    ; CHECK-NEXT: %and:_(s32) = G_UBFX %x, %lsb(s32), [[C]]
+    ; CHECK-NEXT: $w0 = COPY %and(s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %x:_(s32) = COPY $w0
     %lsb:_(s32) = G_CONSTANT i32 5
     %mask:_(s32) = G_CONSTANT i32 255
@@ -35,6 +37,10 @@ body:             |
     $w0 = COPY %and
     RET_ReallyLR implicit $w0
 
+# LSB = 5
+# Width = LSB + trailing_ones(1) - 1 =
+#          5 + 1 - 1 = 5
+
 ...
 ---
 name:            ubfx_s64
@@ -44,18 +50,15 @@ body:             |
   bb.0:
     liveins: $x0
 
-    ; LSB = 5
-    ; Width = LSB + trailing_ones(1) - 1 =
-    ;         5 + 1 - 1 = 5
-
     ; CHECK-LABEL: name: ubfx_s64
     ; CHECK: liveins: $x0
-    ; CHECK: %x:_(s64) = COPY $x0
-    ; CHECK: %lsb:_(s64) = G_CONSTANT i64 5
-    ; CHECK: %mask:_(s64) = G_CONSTANT i64 1
-    ; CHECK: %and:_(s64) = G_UBFX %x, %lsb(s64), %mask
-    ; CHECK: $x0 = COPY %and(s64)
-    ; CHECK: RET_ReallyLR implicit $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %x:_(s64) = COPY $x0
+    ; CHECK-NEXT: %lsb:_(s64) = G_CONSTANT i64 5
+    ; CHECK-NEXT: %mask:_(s64) = G_CONSTANT i64 1
+    ; CHECK-NEXT: %and:_(s64) = G_UBFX %x, %lsb(s64), %mask
+    ; CHECK-NEXT: $x0 = COPY %and(s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %x:_(s64) = COPY $x0
     %lsb:_(s64) = G_CONSTANT i64 5
     %mask:_(s64) = G_CONSTANT i64 1
@@ -64,6 +67,8 @@ body:             |
     $x0 = COPY %and
     RET_ReallyLR implicit $x0
 
+# UBFX needs to be selected to UBFMWri/UBFMXri, so we need constants.
+
 ...
 ---
 name:            dont_combine_no_and_cst
@@ -73,17 +78,17 @@ body:             |
   bb.0:
     liveins: $w0, $w1
 
-    ; UBFX needs to be selected to UBFMWri/UBFMXri, so we need constants.
 
     ; CHECK-LABEL: name: dont_combine_no_and_cst
     ; CHECK: liveins: $w0, $w1
-    ; CHECK: %x:_(s32) = COPY $w0
-    ; CHECK: %y:_(s32) = COPY $w1
-    ; CHECK: %lsb:_(s32) = G_CONSTANT i32 5
-    ; CHECK: %shift:_(s32) = G_LSHR %x, %lsb(s32)
-    ; CHECK: %and:_(s32) = G_AND %shift, %y
-    ; CHECK: $w0 = COPY %and(s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %x:_(s32) = COPY $w0
+    ; CHECK-NEXT: %y:_(s32) = COPY $w1
+    ; CHECK-NEXT: %lsb:_(s32) = G_CONSTANT i32 5
+    ; CHECK-NEXT: %shift:_(s32) = G_LSHR %x, %lsb(s32)
+    ; CHECK-NEXT: %and:_(s32) = G_AND %shift, %y
+    ; CHECK-NEXT: $w0 = COPY %and(s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %x:_(s32) = COPY $w0
     %y:_(s32) = COPY $w1
     %lsb:_(s32) = G_CONSTANT i32 5
@@ -102,13 +107,14 @@ body:             |
     liveins: $w0
     ; CHECK-LABEL: name: dont_combine_and_cst_not_mask
     ; CHECK: liveins: $w0
-    ; CHECK: %x:_(s32) = COPY $w0
-    ; CHECK: %lsb:_(s32) = G_CONSTANT i32 5
-    ; CHECK: %not_a_mask:_(s32) = G_CONSTANT i32 2
-    ; CHECK: %shift:_(s32) = G_LSHR %x, %lsb(s32)
-    ; CHECK: %and:_(s32) = G_AND %shift, %not_a_mask
-    ; CHECK: $w0 = COPY %and(s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %x:_(s32) = COPY $w0
+    ; CHECK-NEXT: %lsb:_(s32) = G_CONSTANT i32 5
+    ; CHECK-NEXT: %not_a_mask:_(s32) = G_CONSTANT i32 2
+    ; CHECK-NEXT: %shift:_(s32) = G_LSHR %x, %lsb(s32)
+    ; CHECK-NEXT: %and:_(s32) = G_AND %shift, %not_a_mask
+    ; CHECK-NEXT: $w0 = COPY %and(s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %x:_(s32) = COPY $w0
     %lsb:_(s32) = G_CONSTANT i32 5
     %not_a_mask:_(s32) = G_CONSTANT i32 2
@@ -127,14 +133,15 @@ body:             |
     liveins: $x0
     ; CHECK-LABEL: name: dont_combine_shift_more_than_one_use
     ; CHECK: liveins: $x0
-    ; CHECK: %x:_(s64) = COPY $x0
-    ; CHECK: %lsb:_(s64) = G_CONSTANT i64 5
-    ; CHECK: %mask:_(s64) = G_CONSTANT i64 1
-    ; CHECK: %shift:_(s64) = G_LSHR %x, %lsb(s64)
-    ; CHECK: %and:_(s64) = G_AND %shift, %mask
-    ; CHECK: %sub:_(s64) = G_SUB %and, %shift
-    ; CHECK: $x0 = COPY %sub(s64)
-    ; CHECK: RET_ReallyLR implicit $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %x:_(s64) = COPY $x0
+    ; CHECK-NEXT: %lsb:_(s64) = G_CONSTANT i64 5
+    ; CHECK-NEXT: %mask:_(s64) = G_CONSTANT i64 1
+    ; CHECK-NEXT: %shift:_(s64) = G_LSHR %x, %lsb(s64)
+    ; CHECK-NEXT: %and:_(s64) = G_AND %shift, %mask
+    ; CHECK-NEXT: %sub:_(s64) = G_SUB %and, %shift
+    ; CHECK-NEXT: $x0 = COPY %sub(s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %x:_(s64) = COPY $x0
     %lsb:_(s64) = G_CONSTANT i64 5
     %mask:_(s64) = G_CONSTANT i64 1
@@ -144,6 +151,8 @@ body:             |
     $x0 = COPY %sub
     RET_ReallyLR implicit $x0
 
+# LSB must be in [0, reg_size)
+
 ...
 ---
 name:            dont_combine_negative_lsb
@@ -153,17 +162,17 @@ body:             |
   bb.0:
     liveins: $w0
 
-    ; LSB must be in [0, reg_size)
 
     ; CHECK-LABEL: name: dont_combine_negative_lsb
     ; CHECK: liveins: $w0
-    ; CHECK: %x:_(s32) = COPY $w0
-    ; CHECK: %negative:_(s32) = G_CONSTANT i32 -1
-    ; CHECK: %mask:_(s32) = G_CONSTANT i32 255
-    ; CHECK: %shift:_(s32) = G_LSHR %x, %negative(s32)
-    ; CHECK: %and:_(s32) = G_AND %shift, %mask
-    ; CHECK: $w0 = COPY %and(s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %x:_(s32) = COPY $w0
+    ; CHECK-NEXT: %negative:_(s32) = G_CONSTANT i32 -1
+    ; CHECK-NEXT: %mask:_(s32) = G_CONSTANT i32 255
+    ; CHECK-NEXT: %shift:_(s32) = G_LSHR %x, %negative(s32)
+    ; CHECK-NEXT: %and:_(s32) = G_AND %shift, %mask
+    ; CHECK-NEXT: $w0 = COPY %and(s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %x:_(s32) = COPY $w0
     %negative:_(s32) = G_CONSTANT i32 -1
     %mask:_(s32) = G_CONSTANT i32 255
@@ -172,6 +181,8 @@ body:             |
     $w0 = COPY %and
     RET_ReallyLR implicit $w0
 
+# LSB must be in [0, reg_size)
+
 ...
 ---
 name:            dont_combine_lsb_too_large
@@ -181,17 +192,17 @@ body:             |
   bb.0:
     liveins: $w0
 
-    ; LSB must be in [0, reg_size)
 
     ; CHECK-LABEL: name: dont_combine_lsb_too_large
     ; CHECK: liveins: $w0
-    ; CHECK: %x:_(s32) = COPY $w0
-    ; CHECK: %too_large:_(s32) = G_CONSTANT i32 32
-    ; CHECK: %mask:_(s32) = G_CONSTANT i32 255
-    ; CHECK: %shift:_(s32) = G_LSHR %x, %too_large(s32)
-    ; CHECK: %and:_(s32) = G_AND %shift, %mask
-    ; CHECK: $w0 = COPY %and(s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %x:_(s32) = COPY $w0
+    ; CHECK-NEXT: %too_large:_(s32) = G_CONSTANT i32 32
+    ; CHECK-NEXT: %mask:_(s32) = G_CONSTANT i32 255
+    ; CHECK-NEXT: %shift:_(s32) = G_LSHR %x, %too_large(s32)
+    ; CHECK-NEXT: %and:_(s32) = G_AND %shift, %mask
+    ; CHECK-NEXT: $w0 = COPY %and(s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %x:_(s32) = COPY $w0
     %too_large:_(s32) = G_CONSTANT i32 32
     %mask:_(s32) = G_CONSTANT i32 255
@@ -210,15 +221,16 @@ body:             |
     liveins: $d0
     ; CHECK-LABEL: name: dont_combine_vector
     ; CHECK: liveins: $d0
-    ; CHECK: %x:_(<2 x s32>) = COPY $d0
-    ; CHECK: %lsb_cst:_(s32) = G_CONSTANT i32 5
-    ; CHECK: %lsb:_(<2 x s32>) = G_BUILD_VECTOR %lsb_cst(s32), %lsb_cst(s32)
-    ; CHECK: %mask_cst:_(s32) = G_CONSTANT i32 255
-    ; CHECK: %mask:_(<2 x s32>) = G_BUILD_VECTOR %mask_cst(s32), %mask_cst(s32)
-    ; CHECK: %shift:_(<2 x s32>) = G_LSHR %x, %lsb(<2 x s32>)
-    ; CHECK: %and:_(<2 x s32>) = G_AND %shift, %mask
-    ; CHECK: $d0 = COPY %and(<2 x s32>)
-    ; CHECK: RET_ReallyLR implicit $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %x:_(<2 x s32>) = COPY $d0
+    ; CHECK-NEXT: %lsb_cst:_(s32) = G_CONSTANT i32 5
+    ; CHECK-NEXT: %lsb:_(<2 x s32>) = G_BUILD_VECTOR %lsb_cst(s32), %lsb_cst(s32)
+    ; CHECK-NEXT: %mask_cst:_(s32) = G_CONSTANT i32 255
+    ; CHECK-NEXT: %mask:_(<2 x s32>) = G_BUILD_VECTOR %mask_cst(s32), %mask_cst(s32)
+    ; CHECK-NEXT: %shift:_(<2 x s32>) = G_LSHR %x, %lsb(<2 x s32>)
+    ; CHECK-NEXT: %and:_(<2 x s32>) = G_AND %shift, %mask
+    ; CHECK-NEXT: $d0 = COPY %and(<2 x s32>)
+    ; CHECK-NEXT: RET_ReallyLR implicit $d0
     %x:_(<2 x s32>) = COPY $d0
     %lsb_cst:_(s32) = G_CONSTANT i32 5
     %lsb:_(<2 x s32>) = G_BUILD_VECTOR %lsb_cst, %lsb_cst
@@ -229,6 +241,9 @@ body:             |
     $d0 = COPY %and
     RET_ReallyLR implicit $d0
 
+# mask = 0111 1111 1111 ... 1111
+# mask + 1 = 1000 0000 0000 ... 0000
+
 ...
 ---
 name:            max_signed_int_mask
@@ -237,16 +252,15 @@ legalized: true
 body:             |
   bb.0:
     liveins: $x0
-    ; mask = 0111 1111 1111 ... 1111
-    ; mask + 1 = 1000 0000 0000 ... 0000
     ; CHECK-LABEL: name: max_signed_int_mask
     ; CHECK: liveins: $x0
-    ; CHECK: %x:_(s64) = COPY $x0
-    ; CHECK: %lsb:_(s64) = G_CONSTANT i64 0
-    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 63
-    ; CHECK: %and:_(s64) = G_UBFX %x, %lsb(s64), [[C]]
-    ; CHECK: $x0 = COPY %and(s64)
-    ; CHECK: RET_ReallyLR implicit $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %x:_(s64) = COPY $x0
+    ; CHECK-NEXT: %lsb:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 63
+    ; CHECK-NEXT: %and:_(s64) = G_UBFX %x, %lsb(s64), [[C]]
+    ; CHECK-NEXT: $x0 = COPY %and(s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %x:_(s64) = COPY $x0
     %lsb:_(s64) = G_CONSTANT i64 0
     %mask:_(s64) = G_CONSTANT i64 9223372036854775807
@@ -255,6 +269,9 @@ body:             |
     $x0 = COPY %and
     RET_ReallyLR implicit $x0
 
+# mask = 1111 1111 1111 ... 1111
+# mask + 1 = 0000 0000 0000 ... 000
+
 ...
 ---
 name:            max_unsigned_int_mask
@@ -263,16 +280,15 @@ legalized: true
 body:             |
   bb.0:
     liveins: $x0
-    ; mask = 1111 1111 1111 ... 1111
-    ; mask + 1 = 0000 0000 0000 ... 000
     ; CHECK-LABEL: name: max_unsigned_int_mask
     ; CHECK: liveins: $x0
-    ; CHECK: %x:_(s64) = COPY $x0
-    ; CHECK: %lsb:_(s64) = G_CONSTANT i64 5
-    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 64
-    ; CHECK: %and:_(s64) = G_UBFX %x, %lsb(s64), [[C]]
-    ; CHECK: $x0 = COPY %and(s64)
-    ; CHECK: RET_ReallyLR implicit $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %x:_(s64) = COPY $x0
+    ; CHECK-NEXT: %lsb:_(s64) = G_CONSTANT i64 5
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 64
+    ; CHECK-NEXT: %and:_(s64) = G_UBFX %x, %lsb(s64), [[C]]
+    ; CHECK-NEXT: $x0 = COPY %and(s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %x:_(s64) = COPY $x0
     %lsb:_(s64) = G_CONSTANT i64 5
     %mask:_(s64) = G_CONSTANT i64 18446744073709551615

``````````
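
It may help to spell out the checks that the negative tests above exercise. A hedged sketch in plain C++ (paraphrasing the matcher's logic; `matchUbfxWidth` is an illustrative name, not an LLVM API):

```cpp
#include <bit>
#include <cstdint>
#include <optional>

// Sketch: decide whether "(x lshr LSBImm) & AndImm" on a Size-bit scalar is
// expressible as ubfx(x, LSBImm, Width), and if so return Width.
std::optional<unsigned> matchUbfxWidth(uint64_t LSBImm, uint64_t AndImm,
                                       unsigned Size) {
  // LSB must be in [0, reg_size): rejects dont_combine_negative_lsb
  // (-1 wraps to a huge unsigned value) and dont_combine_lsb_too_large.
  if (LSBImm >= Size)
    return std::nullopt;
  // The AND immediate must be a low-bits mask 0...01...1; adding 1 to such
  // a mask clears every set bit. Rejects dont_combine_and_cst_not_mask.
  if (AndImm & (AndImm + 1))
    return std::nullopt;
  // Width = trailing_ones(mask): trailing_ones(255) = 8 in ubfx_s32,
  // and trailing_ones(all-ones) = 64 in max_unsigned_int_mask.
  return std::countr_one(AndImm);
}
```

Note also that the rewritten TableGen pattern binds `$lsb` and `$mask` to scalar `G_CONSTANT`s, so the non-constant (`dont_combine_no_and_cst`) and `G_BUILD_VECTOR` (`dont_combine_vector`) cases presumably never reach the C++ predicate at all.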



https://github.com/llvm/llvm-project/pull/97513

