[llvm] 49ecd66 - [AArch64][GlobalISel] Legalize unhandled G_BITREVERSE by lowering.

Amara Emerson via llvm-commits llvm-commits at lists.llvm.org
Sat Dec 21 20:01:45 PST 2024


Author: Amara Emerson
Date: 2024-12-21T20:01:35-08:00
New Revision: 49ecd665fcc311f7cd61e81d3f7a112cd287e292

URL: https://github.com/llvm/llvm-project/commit/49ecd665fcc311f7cd61e81d3f7a112cd287e292
DIFF: https://github.com/llvm/llvm-project/commit/49ecd665fcc311f7cd61e81d3f7a112cd287e292.diff

LOG: [AArch64][GlobalISel] Legalize unhandled G_BITREVERSE by lowering.

This fixes GlobalISel fallbacks to SelectionDAG on <4 x s16> types.
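
Any G_BITREVERSE whose type the existing rules leave unresolved is now
expanded in place by the generic LegalizerHelper lowering instead of
triggering a fallback. An annotated sketch of the rule chain and a scalar
model of the emitted expansion follow the corresponding hunks below.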

Added: 
    

Modified: 
    llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
    llvm/test/CodeGen/AArch64/GlobalISel/legalize-bitreverse.mir
    llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
index 155d98f0865f7a..4b7d4158faf069 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
@@ -1038,7 +1038,8 @@ AArch64LegalizerInfo::AArch64LegalizerInfo(const AArch64Subtarget &ST)
   getActionDefinitionsBuilder(G_BITREVERSE)
       .legalFor({s32, s64, v8s8, v16s8})
       .widenScalarToNextPow2(0, /*Min = */ 32)
-      .clampScalar(0, s32, s64);
+      .clampScalar(0, s32, s64)
+      .lower();
 
   getActionDefinitionsBuilder(G_CTTZ_ZERO_UNDEF).lower();
 

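Legalization rules are tried in order, so the new .lower() only fires for
types the earlier rules do not resolve. An annotated restatement of the rule
chain (the comments are editorial glosses tied to the tests below, not part
of the committed code):

    // Rules are matched top to bottom; the first applicable action wins.
    getActionDefinitionsBuilder(G_BITREVERSE)
        .legalFor({s32, s64, v8s8, v16s8})       // selects to scalar/vector RBIT
        .widenScalarToNextPow2(0, /*Min = */ 32) // e.g. s3, s8 -> s32 (s3_widen, s8_widen)
        .clampScalar(0, s32, s64)                // e.g. s128 -> 2 x s64 (s128_narrow)
        .lower();                                // everything else, e.g. <4 x s16>, expands in place
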
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-bitreverse.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-bitreverse.mir
index 8d8ede1596c9f7..2e6f55916547db 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-bitreverse.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-bitreverse.mir
@@ -10,10 +10,11 @@ body: |
     liveins: $w0
     ; CHECK-LABEL: name: s32_legal
     ; CHECK: liveins: $w0
-    ; CHECK: %copy:_(s32) = COPY $w0
-    ; CHECK: %bitreverse:_(s32) = G_BITREVERSE %copy
-    ; CHECK: $w0 = COPY %bitreverse(s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %copy:_(s32) = COPY $w0
+    ; CHECK-NEXT: %bitreverse:_(s32) = G_BITREVERSE %copy
+    ; CHECK-NEXT: $w0 = COPY %bitreverse(s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %copy:_(s32) = COPY $w0
     %bitreverse:_(s32) = G_BITREVERSE %copy
     $w0 = COPY %bitreverse
@@ -27,10 +28,11 @@ body: |
     liveins: $x0
     ; CHECK-LABEL: name: s64_legal
     ; CHECK: liveins: $x0
-    ; CHECK: %copy:_(s64) = COPY $x0
-    ; CHECK: %bitreverse:_(s64) = G_BITREVERSE %copy
-    ; CHECK: $x0 = COPY %bitreverse(s64)
-    ; CHECK: RET_ReallyLR implicit $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %copy:_(s64) = COPY $x0
+    ; CHECK-NEXT: %bitreverse:_(s64) = G_BITREVERSE %copy
+    ; CHECK-NEXT: $x0 = COPY %bitreverse(s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %copy:_(s64) = COPY $x0
     %bitreverse:_(s64) = G_BITREVERSE %copy
     $x0 = COPY %bitreverse
@@ -44,10 +46,11 @@ body: |
     liveins: $x0
     ; CHECK-LABEL: name: v8s8_legal
     ; CHECK: liveins: $x0
-    ; CHECK: %vec:_(<8 x s8>) = G_IMPLICIT_DEF
-    ; CHECK: %bitreverse:_(<8 x s8>) = G_BITREVERSE %vec
-    ; CHECK: $x0 = COPY %bitreverse(<8 x s8>)
-    ; CHECK: RET_ReallyLR implicit $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %vec:_(<8 x s8>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: %bitreverse:_(<8 x s8>) = G_BITREVERSE %vec
+    ; CHECK-NEXT: $x0 = COPY %bitreverse(<8 x s8>)
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %vec:_(<8 x s8>) = G_IMPLICIT_DEF
     %bitreverse:_(<8 x s8>) = G_BITREVERSE %vec
     $x0 = COPY %bitreverse
@@ -61,10 +64,11 @@ body: |
     liveins: $q0
     ; CHECK-LABEL: name: v16s8_legal
     ; CHECK: liveins: $q0
-    ; CHECK: %vec:_(<16 x s8>) = G_IMPLICIT_DEF
-    ; CHECK: %bitreverse:_(<16 x s8>) = G_BITREVERSE %vec
-    ; CHECK: $q0 = COPY %bitreverse(<16 x s8>)
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %vec:_(<16 x s8>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: %bitreverse:_(<16 x s8>) = G_BITREVERSE %vec
+    ; CHECK-NEXT: $q0 = COPY %bitreverse(<16 x s8>)
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %vec:_(<16 x s8>) = G_IMPLICIT_DEF
     %bitreverse:_(<16 x s8>) = G_BITREVERSE %vec
     $q0 = COPY %bitreverse
@@ -78,14 +82,15 @@ body: |
     liveins: $b0
     ; CHECK-LABEL: name: s8_widen
     ; CHECK: liveins: $b0
-    ; CHECK: %copy:_(s8) = COPY $b0
-    ; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT %copy(s8)
-    ; CHECK: [[BITREVERSE:%[0-9]+]]:_(s32) = G_BITREVERSE [[ANYEXT]]
-    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
-    ; CHECK: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITREVERSE]], [[C]](s64)
-    ; CHECK: %bitreverse:_(s8) = G_TRUNC [[LSHR]](s32)
-    ; CHECK: $b0 = COPY %bitreverse(s8)
-    ; CHECK: RET_ReallyLR implicit $b0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %copy:_(s8) = COPY $b0
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT %copy(s8)
+    ; CHECK-NEXT: [[BITREVERSE:%[0-9]+]]:_(s32) = G_BITREVERSE [[ANYEXT]]
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
+    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITREVERSE]], [[C]](s64)
+    ; CHECK-NEXT: %bitreverse:_(s8) = G_TRUNC [[LSHR]](s32)
+    ; CHECK-NEXT: $b0 = COPY %bitreverse(s8)
+    ; CHECK-NEXT: RET_ReallyLR implicit $b0
     %copy:_(s8) = COPY $b0
     %bitreverse:_(s8) = G_BITREVERSE %copy
     $b0 = COPY %bitreverse
@@ -99,14 +104,15 @@ body: |
     liveins: $b0
     ; CHECK-LABEL: name: s3_widen
     ; CHECK: liveins: $b0
-    ; CHECK: %copy:_(s8) = COPY $b0
-    ; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT %copy(s8)
-    ; CHECK: [[BITREVERSE:%[0-9]+]]:_(s32) = G_BITREVERSE [[ANYEXT]]
-    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 29
-    ; CHECK: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITREVERSE]], [[C]](s64)
-    ; CHECK: %ext:_(s8) = G_TRUNC [[LSHR]](s32)
-    ; CHECK: $b0 = COPY %ext(s8)
-    ; CHECK: RET_ReallyLR implicit $b0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %copy:_(s8) = COPY $b0
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT %copy(s8)
+    ; CHECK-NEXT: [[BITREVERSE:%[0-9]+]]:_(s32) = G_BITREVERSE [[ANYEXT]]
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 29
+    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITREVERSE]], [[C]](s64)
+    ; CHECK-NEXT: %ext:_(s8) = G_TRUNC [[LSHR]](s32)
+    ; CHECK-NEXT: $b0 = COPY %ext(s8)
+    ; CHECK-NEXT: RET_ReallyLR implicit $b0
     %copy:_(s8) = COPY $b0
     %trunc:_(s3) = G_TRUNC %copy
     %bitreverse:_(s3) = G_BITREVERSE %trunc
@@ -122,14 +128,61 @@ body: |
     liveins: $q0
     ; CHECK-LABEL: name: s128_narrow
     ; CHECK: liveins: $q0
-    ; CHECK: %copy:_(s128) = COPY $q0
-    ; CHECK: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES %copy(s128)
-    ; CHECK: [[BITREVERSE:%[0-9]+]]:_(s64) = G_BITREVERSE [[UV1]]
-    ; CHECK: [[BITREVERSE1:%[0-9]+]]:_(s64) = G_BITREVERSE [[UV]]
-    ; CHECK: %bitreverse:_(s128) = G_MERGE_VALUES [[BITREVERSE]](s64), [[BITREVERSE1]](s64)
-    ; CHECK: $q0 = COPY %bitreverse(s128)
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %copy:_(s128) = COPY $q0
+    ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES %copy(s128)
+    ; CHECK-NEXT: [[BITREVERSE:%[0-9]+]]:_(s64) = G_BITREVERSE [[UV1]]
+    ; CHECK-NEXT: [[BITREVERSE1:%[0-9]+]]:_(s64) = G_BITREVERSE [[UV]]
+    ; CHECK-NEXT: %bitreverse:_(s128) = G_MERGE_VALUES [[BITREVERSE]](s64), [[BITREVERSE1]](s64)
+    ; CHECK-NEXT: $q0 = COPY %bitreverse(s128)
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %copy:_(s128) = COPY $q0
     %bitreverse:_(s128) = G_BITREVERSE %copy
     $q0 = COPY %bitreverse
     RET_ReallyLR implicit $q0
+...
+---
+name:            v4s16
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $d0
+    ; CHECK-LABEL: name: v4s16
+    ; CHECK: liveins: $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %vec:_(<4 x s16>) = COPY $d0
+    ; CHECK-NEXT: [[BSWAP:%[0-9]+]]:_(<4 x s16>) = G_BSWAP %vec
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 4
+    ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s16>) = G_BUILD_VECTOR [[C]](s16), [[C]](s16), [[C]](s16), [[C]](s16)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 -3856
+    ; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s16>) = G_BUILD_VECTOR [[C1]](s16), [[C1]](s16), [[C1]](s16), [[C1]](s16)
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<4 x s16>) = G_AND [[BSWAP]], [[BUILD_VECTOR1]]
+    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(<4 x s16>) = G_LSHR [[AND]], [[BUILD_VECTOR]](<4 x s16>)
+    ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(<4 x s16>) = G_SHL [[BSWAP]], [[BUILD_VECTOR]](<4 x s16>)
+    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(<4 x s16>) = G_AND [[SHL]], [[BUILD_VECTOR1]]
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<4 x s16>) = G_OR [[LSHR]], [[AND1]]
+    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 2
+    ; CHECK-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<4 x s16>) = G_BUILD_VECTOR [[C2]](s16), [[C2]](s16), [[C2]](s16), [[C2]](s16)
+    ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s16) = G_CONSTANT i16 -13108
+    ; CHECK-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<4 x s16>) = G_BUILD_VECTOR [[C3]](s16), [[C3]](s16), [[C3]](s16), [[C3]](s16)
+    ; CHECK-NEXT: [[AND2:%[0-9]+]]:_(<4 x s16>) = G_AND [[OR]], [[BUILD_VECTOR3]]
+    ; CHECK-NEXT: [[LSHR1:%[0-9]+]]:_(<4 x s16>) = G_LSHR [[AND2]], [[BUILD_VECTOR2]](<4 x s16>)
+    ; CHECK-NEXT: [[SHL1:%[0-9]+]]:_(<4 x s16>) = G_SHL [[OR]], [[BUILD_VECTOR2]](<4 x s16>)
+    ; CHECK-NEXT: [[AND3:%[0-9]+]]:_(<4 x s16>) = G_AND [[SHL1]], [[BUILD_VECTOR3]]
+    ; CHECK-NEXT: [[OR1:%[0-9]+]]:_(<4 x s16>) = G_OR [[LSHR1]], [[AND3]]
+    ; CHECK-NEXT: [[C4:%[0-9]+]]:_(s16) = G_CONSTANT i16 1
+    ; CHECK-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<4 x s16>) = G_BUILD_VECTOR [[C4]](s16), [[C4]](s16), [[C4]](s16), [[C4]](s16)
+    ; CHECK-NEXT: [[C5:%[0-9]+]]:_(s16) = G_CONSTANT i16 -21846
+    ; CHECK-NEXT: [[BUILD_VECTOR5:%[0-9]+]]:_(<4 x s16>) = G_BUILD_VECTOR [[C5]](s16), [[C5]](s16), [[C5]](s16), [[C5]](s16)
+    ; CHECK-NEXT: [[AND4:%[0-9]+]]:_(<4 x s16>) = G_AND [[OR1]], [[BUILD_VECTOR5]]
+    ; CHECK-NEXT: [[LSHR2:%[0-9]+]]:_(<4 x s16>) = G_LSHR [[AND4]], [[BUILD_VECTOR4]](<4 x s16>)
+    ; CHECK-NEXT: [[SHL2:%[0-9]+]]:_(<4 x s16>) = G_SHL [[OR1]], [[BUILD_VECTOR4]](<4 x s16>)
+    ; CHECK-NEXT: [[AND5:%[0-9]+]]:_(<4 x s16>) = G_AND [[SHL2]], [[BUILD_VECTOR5]]
+    ; CHECK-NEXT: %bitreverse:_(<4 x s16>) = G_OR [[LSHR2]], [[AND5]]
+    ; CHECK-NEXT: $d0 = COPY %bitreverse(<4 x s16>)
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
+    %vec:_(<4 x s16>) = COPY $d0
+    %bitreverse:_(<4 x s16>) = G_BITREVERSE %vec
+    $d0 = COPY %bitreverse
+    RET_ReallyLR implicit $q0
+...

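The v4s16 test above shows what the generic lowering emits: per 16-bit lane,
a G_BSWAP followed by three masked swap stages using masks 0xF0F0 (-3856 as
i16), 0xCCCC (-13108) and 0xAAAA (-21846) with shift amounts 4, 2 and 1. A
minimal standalone C++ model of that swap network for a single lane (an
editorial illustration of the emitted sequence, not code from the tree):

    #include <cassert>
    #include <cstdint>

    // Scalar model of the lowered G_BITREVERSE for one s16 lane.
    static uint16_t lowerBitreverse16(uint16_t V) {
      V = (uint16_t)((V >> 8) | (V << 8));                         // G_BSWAP
      V = (uint16_t)(((V & 0xF0F0u) >> 4) | ((V << 4) & 0xF0F0u)); // swap nibbles
      V = (uint16_t)(((V & 0xCCCCu) >> 2) | ((V << 2) & 0xCCCCu)); // swap bit pairs
      V = (uint16_t)(((V & 0xAAAAu) >> 1) | ((V << 1) & 0xAAAAu)); // swap single bits
      return V;
    }

    int main() {
      assert(lowerBitreverse16(0x0001) == 0x8000); // bit 0 -> bit 15
      assert(lowerBitreverse16(0x8000) == 0x0001); // bit 15 -> bit 0
      assert(lowerBitreverse16(0xF00F) == 0xF00F); // palindromic pattern
      return 0;
    }
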
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir
index 7c9c958b5a8189..c2c77b9326cb64 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir
@@ -698,8 +698,8 @@
 # DEBUG-NEXT: .. the first uncovered type index: 1, OK
 # DEBUG-NEXT: .. the first uncovered imm index: 0, OK
 # DEBUG-NEXT: G_BITREVERSE (opcode {{[0-9]+}}): 1 type index, 0 imm indices
-# DEBUG-NEXT: .. the first uncovered type index: 1, OK
-# DEBUG-NEXT: .. the first uncovered imm index: 0, OK
+# DEBUG-NEXT: .. type index coverage check SKIPPED: user-defined predicate detected
+# DEBUG-NEXT: .. imm index coverage check SKIPPED: user-defined predicate detected
 # DEBUG-NEXT: G_FCEIL (opcode {{[0-9]+}}): 1 type index, 0 imm indices
 # DEBUG-NEXT: .. opcode {{[0-9]+}} is aliased to {{[0-9]+}}
 # DEBUG-NEXT: .. the first uncovered type index: 1, OK

More information about the llvm-commits mailing list