[llvm] 1700603 - [GlobalISel] Verify operand types for G_SHL, G_LSHR, G_ASHR

Jay Foad via llvm-commits llvm-commits at lists.llvm.org
Tue Dec 21 04:18:09 PST 2021


Author: Jay Foad
Date: 2021-12-21T11:59:33Z
New Revision: 17006033f9c763b80b1f59cb015cdbe934268b70

URL: https://github.com/llvm/llvm-project/commit/17006033f9c763b80b1f59cb015cdbe934268b70
DIFF: https://github.com/llvm/llvm-project/commit/17006033f9c763b80b1f59cb015cdbe934268b70.diff

LOG: [GlobalISel] Verify operand types for G_SHL, G_LSHR, G_ASHR

Differential Revision: https://reviews.llvm.org/D115868

Added: 
    llvm/test/MachineVerifier/test_g_shift.mir

Modified: 
    llvm/lib/CodeGen/MachineVerifier.cpp
    llvm/test/CodeGen/AMDGPU/GlobalISel/combine-urem-pow-2.mir
    llvm/test/MachineVerifier/test_g_rotr_rotl.mir
    llvm/unittests/CodeGen/GlobalISel/KnownBitsVectorTest.cpp

Removed: 
    


################################################################################
diff --git a/llvm/lib/CodeGen/MachineVerifier.cpp b/llvm/lib/CodeGen/MachineVerifier.cpp
index b214ab7179ac3..005d4ad1a3280 100644
--- a/llvm/lib/CodeGen/MachineVerifier.cpp
+++ b/llvm/lib/CodeGen/MachineVerifier.cpp
@@ -1608,12 +1608,16 @@ void MachineVerifier::verifyPreISelGenericInstruction(const MachineInstr *MI) {
     }
     break;
   }
+  case TargetOpcode::G_SHL:
+  case TargetOpcode::G_LSHR:
+  case TargetOpcode::G_ASHR:
   case TargetOpcode::G_ROTR:
   case TargetOpcode::G_ROTL: {
     LLT Src1Ty = MRI->getType(MI->getOperand(1).getReg());
     LLT Src2Ty = MRI->getType(MI->getOperand(2).getReg());
     if (Src1Ty.isVector() != Src2Ty.isVector()) {
-      report("Rotate requires operands to be either all scalars or all vectors",
+      report("Shifts and rotates require operands to be either all scalars or "
+             "all vectors",
              MI);
       break;
     }
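For illustration (this sketch is not part of the commit): the added check only compares scalar-vs-vector-ness of the two source operands, so the value and the shift amount may still differ in width. Forms that still pass this particular check, in MIR:

    %s32:_(s32) = G_IMPLICIT_DEF
    %s64:_(s64) = G_IMPLICIT_DEF
    %v2s32:_(<2 x s32>) = G_IMPLICIT_DEF
    %v2s64:_(<2 x s64>) = G_IMPLICIT_DEF

    ; scalar value, scalar amount of a different width: accepted
    %a:_(s32) = G_SHL %s32, %s64
    ; vector value, vector amount with matching element count: accepted
    %b:_(<2 x s64>) = G_ASHR %v2s64, %v2s32
    ; vector value, scalar amount: now reported by the verifier
    ; %c:_(<2 x s64>) = G_LSHR %v2s64, %s64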

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-urem-pow-2.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-urem-pow-2.mir
index c5e0398c7bd06..4f0cb877ced77 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-urem-pow-2.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-urem-pow-2.mir
@@ -176,15 +176,15 @@ body:             |
     ; GCN: liveins: $vgpr0, $vgpr1
     ; GCN-NEXT: {{  $}}
     ; GCN-NEXT: %var:_(<2 x s16>) = COPY $vgpr0
-    ; GCN-NEXT: %shift_amt:_(<2 x s16>) = COPY $vgpr1
+    ; GCN-NEXT: %shift_amt:_(s32) = COPY $vgpr1
     ; GCN-NEXT: %two:_(s32) = G_CONSTANT i32 2
     ; GCN-NEXT: %four:_(s32) = G_CONSTANT i32 4
-    ; GCN-NEXT: %shift:_(s32) = G_SHL %two, %shift_amt(<2 x s16>)
+    ; GCN-NEXT: %shift:_(s32) = G_SHL %two, %shift_amt(s32)
     ; GCN-NEXT: %four_vec:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC %four(s32), %shift(s32)
     ; GCN-NEXT: %rem:_(<2 x s16>) = G_UREM %var, %four_vec
     ; GCN-NEXT: $vgpr0 = COPY %rem(<2 x s16>)
     %var:_(<2 x s16>) = COPY $vgpr0
-    %shift_amt:_(<2 x s16>) = COPY $vgpr1
+    %shift_amt:_(s32) = COPY $vgpr1
     %two:_(s32) = G_CONSTANT i32 2
     %four:_(s32) = G_CONSTANT i32 4
     %shift:_(s32) = G_SHL %two, %shift_amt
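For reference, this is the part of the test that the new check forced to change: the original input shifted a scalar by a vector amount. The offending pattern in isolation, with the diagnostic it now draws (message as added to MachineVerifier.cpp above):

    %shift_amt:_(<2 x s16>) = COPY $vgpr1
    %two:_(s32) = G_CONSTANT i32 2
    ; fails verification with:
    ;   Shifts and rotates require operands to be either all scalars or all vectors
    %shift:_(s32) = G_SHL %two, %shift_amt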

diff --git a/llvm/test/MachineVerifier/test_g_rotr_rotl.mir b/llvm/test/MachineVerifier/test_g_rotr_rotl.mir
index 0d545662765ca..e1e66f883e5d4 100644
--- a/llvm/test/MachineVerifier/test_g_rotr_rotl.mir
+++ b/llvm/test/MachineVerifier/test_g_rotr_rotl.mir
@@ -7,7 +7,7 @@ body: |
     %src:_(<2 x s64>) = G_IMPLICIT_DEF
     %amt:_(s64) = G_IMPLICIT_DEF
 
-    ; CHECK: Rotate requires operands to be either all scalars or all vectors
+    ; CHECK: Shifts and rotates require operands to be either all scalars or all vectors
     %rotr:_(<2 x s64>) = G_ROTR %src, %amt
 
 ...

diff --git a/llvm/test/MachineVerifier/test_g_shift.mir b/llvm/test/MachineVerifier/test_g_shift.mir
new file mode 100644
index 0000000000000..9ab904b98f238
--- /dev/null
+++ b/llvm/test/MachineVerifier/test_g_shift.mir
@@ -0,0 +1,21 @@
+# RUN: not --crash llc -march=arm64 -verify-machineinstrs -run-pass none -o /dev/null %s 2>&1 | FileCheck %s
+# REQUIRES: aarch64-registered-target
+
+---
+name: test_shift
+body: |
+  bb.0:
+    %s32:_(s32) = G_IMPLICIT_DEF
+    %v2s32:_(<2 x s32>) = G_IMPLICIT_DEF
+    %s64:_(s64) = G_IMPLICIT_DEF
+    %v2s64:_(<2 x s64>) = G_IMPLICIT_DEF
+
+    ; CHECK: Shifts and rotates require operands to be either all scalars or all vectors
+    %shl:_(<2 x s64>) = G_SHL %v2s64, %s64
+
+    ; CHECK: Shifts and rotates require operands to be either all scalars or all vectors
+    %lshr:_(s32) = G_LSHR %s32, %v2s32
+
+    ; CHECK: Shifts and rotates require operands to be either all scalars or all vectors
+    %ashr:_(<2 x s32>) = G_ASHR %v2s32, %s64
+...
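Each of the three instructions in the new test deliberately mixes a scalar and a vector operand; llc is expected to crash under -verify-machineinstrs, hence the not --crash in the RUN line. For contrast, a sketch (not in the test) of well-typed counterparts that the verifier accepts:

    ; value and amount are both vectors
    %shl_ok:_(<2 x s64>) = G_SHL %v2s64, %v2s64
    ; value and amount are both scalars
    %lshr_ok:_(s32) = G_LSHR %s32, %s32
    %ashr_ok:_(<2 x s32>) = G_ASHR %v2s32, %v2s32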

diff --git a/llvm/unittests/CodeGen/GlobalISel/KnownBitsVectorTest.cpp b/llvm/unittests/CodeGen/GlobalISel/KnownBitsVectorTest.cpp
index ff239fdfa9c1a..dc915d5f5e216 100644
--- a/llvm/unittests/CodeGen/GlobalISel/KnownBitsVectorTest.cpp
+++ b/llvm/unittests/CodeGen/GlobalISel/KnownBitsVectorTest.cpp
@@ -201,10 +201,11 @@ TEST_F(AArch64GISelMITest, TestKnownBitsVectorDecreasingCstPHIWithLoop) {
    %10:_(s8) = G_CONSTANT i8 5
    %11:_(<2 x s8>) = G_BUILD_VECTOR %10:_(s8), %10:_(s8)
    %12:_(s8) = G_CONSTANT i8 1
+   %16:_(<2 x s8>) = G_BUILD_VECTOR %12:_(s8), %12:_(s8)
 
    bb.12:
    %13:_(<2 x s8>) = PHI %11(<2 x s8>), %bb.10, %14(<2 x s8>), %bb.12
-   %14:_(<2 x s8>) = G_LSHR %13, %12
+   %14:_(<2 x s8>) = G_LSHR %13, %16
    %15:_(<2 x s8>) = COPY %14
    G_BR %bb.12
 )";
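The unit-test fix follows the same rule: a G_LSHR of a <2 x s8> value may no longer take a plain s8 amount, so the constant is first splatted into a vector with G_BUILD_VECTOR. The pattern in isolation, as a sketch (%val stands for any <2 x s8> value):

    %amt:_(s8) = G_CONSTANT i8 1
    %amt_vec:_(<2 x s8>) = G_BUILD_VECTOR %amt(s8), %amt(s8)
    ; the shift amount is now a vector, matching the shifted value
    %res:_(<2 x s8>) = G_LSHR %val, %amt_vec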
