[llvm] c16fa78 - GlobalISel: update legalize-rotr-rotl.mir checks before change.

Amara Emerson via llvm-commits <llvm-commits at lists.llvm.org>
Wed Jul 27 22:10:16 PDT 2022


Author: Amara Emerson
Date: 2022-07-27T22:10:04-07:00
New Revision: c16fa781f47378a63a515d07c169213573ecd72e

URL: https://github.com/llvm/llvm-project/commit/c16fa781f47378a63a515d07c169213573ecd72e
DIFF: https://github.com/llvm/llvm-project/commit/c16fa781f47378a63a515d07c169213573ecd72e.diff

LOG: GlobalISel: update legalize-rotr-rotl.mir checks before change.

Added: 
    

Modified: 
    llvm/test/CodeGen/AArch64/GlobalISel/legalize-rotr-rotl.mir

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-rotr-rotl.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-rotr-rotl.mir
index add9ce8edbe26..79aaed54d7e00 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-rotr-rotl.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-rotr-rotl.mir
@@ -9,12 +9,13 @@ body:             |
     liveins: $w0, $w1
     ; CHECK-LABEL: name: rotr_s32
     ; CHECK: liveins: $w0, $w1
-    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
-    ; CHECK: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[COPY1]](s32)
-    ; CHECK: %rot:_(s32) = G_ROTR [[COPY]], [[SEXT]](s64)
-    ; CHECK: $w0 = COPY %rot(s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
+    ; CHECK-NEXT: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[COPY1]](s32)
+    ; CHECK-NEXT: %rot:_(s32) = G_ROTR [[COPY]], [[SEXT]](s64)
+    ; CHECK-NEXT: $w0 = COPY %rot(s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %0:_(s32) = COPY $w0
     %1:_(s32) = COPY $w1
     %rot:_(s32) = G_ROTR %0(s32), %1(s32)
@@ -31,11 +32,12 @@ body:             |
     liveins: $x0, $x1
     ; CHECK-LABEL: name: rotr_s64
     ; CHECK: liveins: $x0, $x1
-    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
-    ; CHECK: %rot:_(s64) = G_ROTR [[COPY]], [[COPY1]](s64)
-    ; CHECK: $x0 = COPY %rot(s64)
-    ; CHECK: RET_ReallyLR implicit $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
+    ; CHECK-NEXT: %rot:_(s64) = G_ROTR [[COPY]], [[COPY1]](s64)
+    ; CHECK-NEXT: $x0 = COPY %rot(s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %0:_(s64) = COPY $x0
     %1:_(s64) = COPY $x1
     %rot:_(s64) = G_ROTR %0(s64), %1(s64)
@@ -52,14 +54,15 @@ body:             |
     liveins: $w0, $w1
     ; CHECK-LABEL: name: rotl_s32
     ; CHECK: liveins: $w0, $w1
-    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
-    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; CHECK: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[C]], [[COPY1]]
-    ; CHECK: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[SUB]](s32)
-    ; CHECK: %rot:_(s32) = G_ROTR [[COPY]], [[SEXT]](s64)
-    ; CHECK: $w0 = COPY %rot(s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[C]], [[COPY1]]
+    ; CHECK-NEXT: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[SUB]](s32)
+    ; CHECK-NEXT: %rot:_(s32) = G_ROTR [[COPY]], [[SEXT]](s64)
+    ; CHECK-NEXT: $w0 = COPY %rot(s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %0:_(s32) = COPY $w0
     %1:_(s32) = COPY $w1
     %rot:_(s32) = G_ROTL %0(s32), %1(s32)
@@ -76,13 +79,14 @@ body:             |
     liveins: $x0, $x1
     ; CHECK-LABEL: name: rotl_s64
     ; CHECK: liveins: $x0, $x1
-    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
-    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
-    ; CHECK: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[C]], [[COPY1]]
-    ; CHECK: %rot:_(s64) = G_ROTR [[COPY]], [[SUB]](s64)
-    ; CHECK: $x0 = COPY %rot(s64)
-    ; CHECK: RET_ReallyLR implicit $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[C]], [[COPY1]]
+    ; CHECK-NEXT: %rot:_(s64) = G_ROTR [[COPY]], [[SUB]](s64)
+    ; CHECK-NEXT: $x0 = COPY %rot(s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %0:_(s64) = COPY $x0
     %1:_(s64) = COPY $x1
     %rot:_(s64) = G_ROTL %0(s64), %1(s64)
@@ -100,20 +104,21 @@ body:             |
 
     ; CHECK-LABEL: name: test_rotl_v4s32
     ; CHECK: liveins: $q0, $q1
-    ; CHECK: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $q1
-    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32)
-    ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
-    ; CHECK: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[C1]](s32), [[C1]](s32), [[C1]](s32), [[C1]](s32)
-    ; CHECK: [[SUB:%[0-9]+]]:_(<4 x s32>) = G_SUB [[BUILD_VECTOR]], [[COPY1]]
-    ; CHECK: [[AND:%[0-9]+]]:_(<4 x s32>) = G_AND [[COPY1]], [[BUILD_VECTOR1]]
-    ; CHECK: [[SHL:%[0-9]+]]:_(<4 x s32>) = G_SHL [[COPY]], [[AND]](<4 x s32>)
-    ; CHECK: [[AND1:%[0-9]+]]:_(<4 x s32>) = G_AND [[SUB]], [[BUILD_VECTOR1]]
-    ; CHECK: [[LSHR:%[0-9]+]]:_(<4 x s32>) = G_LSHR [[COPY]], [[AND1]](<4 x s32>)
-    ; CHECK: %rot:_(<4 x s32>) = G_OR [[SHL]], [[LSHR]]
-    ; CHECK: $q0 = COPY %rot(<4 x s32>)
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $q1
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
+    ; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[C1]](s32), [[C1]](s32), [[C1]](s32), [[C1]](s32)
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<4 x s32>) = G_SUB [[BUILD_VECTOR]], [[COPY1]]
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<4 x s32>) = G_AND [[COPY1]], [[BUILD_VECTOR1]]
+    ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(<4 x s32>) = G_SHL [[COPY]], [[AND]](<4 x s32>)
+    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(<4 x s32>) = G_AND [[SUB]], [[BUILD_VECTOR1]]
+    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(<4 x s32>) = G_LSHR [[COPY]], [[AND1]](<4 x s32>)
+    ; CHECK-NEXT: %rot:_(<4 x s32>) = G_OR [[SHL]], [[LSHR]]
+    ; CHECK-NEXT: $q0 = COPY %rot(<4 x s32>)
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %0:_(<4 x s32>) = COPY $q0
     %1:_(<4 x s32>) = COPY $q1
     %rot:_(<4 x s32>) = G_ROTL %0(<4 x s32>), %1(<4 x s32>)
@@ -131,20 +136,21 @@ body:             |
 
     ; CHECK-LABEL: name: test_rotr_v4s32
     ; CHECK: liveins: $q0, $q1
-    ; CHECK: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $q1
-    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32)
-    ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
-    ; CHECK: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[C1]](s32), [[C1]](s32), [[C1]](s32), [[C1]](s32)
-    ; CHECK: [[SUB:%[0-9]+]]:_(<4 x s32>) = G_SUB [[BUILD_VECTOR]], [[COPY1]]
-    ; CHECK: [[AND:%[0-9]+]]:_(<4 x s32>) = G_AND [[COPY1]], [[BUILD_VECTOR1]]
-    ; CHECK: [[LSHR:%[0-9]+]]:_(<4 x s32>) = G_LSHR [[COPY]], [[AND]](<4 x s32>)
-    ; CHECK: [[AND1:%[0-9]+]]:_(<4 x s32>) = G_AND [[SUB]], [[BUILD_VECTOR1]]
-    ; CHECK: [[SHL:%[0-9]+]]:_(<4 x s32>) = G_SHL [[COPY]], [[AND1]](<4 x s32>)
-    ; CHECK: %rot:_(<4 x s32>) = G_OR [[LSHR]], [[SHL]]
-    ; CHECK: $q0 = COPY %rot(<4 x s32>)
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $q1
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
+    ; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[C1]](s32), [[C1]](s32), [[C1]](s32), [[C1]](s32)
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<4 x s32>) = G_SUB [[BUILD_VECTOR]], [[COPY1]]
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<4 x s32>) = G_AND [[COPY1]], [[BUILD_VECTOR1]]
+    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(<4 x s32>) = G_LSHR [[COPY]], [[AND]](<4 x s32>)
+    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(<4 x s32>) = G_AND [[SUB]], [[BUILD_VECTOR1]]
+    ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(<4 x s32>) = G_SHL [[COPY]], [[AND1]](<4 x s32>)
+    ; CHECK-NEXT: %rot:_(<4 x s32>) = G_OR [[LSHR]], [[SHL]]
+    ; CHECK-NEXT: $q0 = COPY %rot(<4 x s32>)
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %0:_(<4 x s32>) = COPY $q0
     %1:_(<4 x s32>) = COPY $q1
     %rot:_(<4 x s32>) = G_ROTR %0(<4 x s32>), %1(<4 x s32>)
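
What changed, concretely: every plain "; CHECK:" line becomes "; CHECK-NEXT:", the format the check-generation scripts emit (presumably llvm/utils/update_mir_test_checks.py here). Plain CHECK lets FileCheck skip any amount of output between two matches; CHECK-NEXT requires each match to land on the line immediately after the previous one, and the added "{{  $}}" pattern matches the otherwise-blank, two-space-indented line printed after the liveins list. Regenerating the checks first means the follow-up change will show up as an explicit diff in the expected instruction sequence rather than sliding past loose checks. A minimal illustration (hypothetical test lines, not from this file):

    ; Both forms match while the two instructions are adjacent, but only
    ; plain CHECK would still pass if the legalizer started emitting an
    ; extra instruction between them:
    ; CHECK:      [[A:%[0-9]+]]:_(s32) = COPY $w0
    ; CHECK-NEXT: $w0 = COPY [[A]](s32)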

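The regenerated checks also record the two lowerings this test exercises. AArch64 has a rotate-right instruction but no rotate-left, so the scalar G_ROTL cases are legalized to G_ROTR with a negated rotate amount, using the identity rotl(x, n) == rotr(x, (0 - n) mod W). The vector cases have no native rotate at all and are expanded into masked shifts. A sketch of both in the test's own generic-MIR notation (register names are illustrative, not taken from the file):

    ; Scalar: rotate-left by %amt becomes rotate-right by (0 - %amt),
    ; as in the rotl_s64 output above.
    %zero:_(s64) = G_CONSTANT i64 0
    %neg:_(s64) = G_SUB %zero, %amt
    %rot:_(s64) = G_ROTR %x, %neg(s64)

    ; Vector: per 32-bit lane, with "& 31" keeping both shift
    ; amounts in range, as in the test_rotl_v4s32 output above:
    ;   rot = (x << (amt & 31)) | (x >> ((0 - amt) & 31))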

        

