[llvm] 912af6b - [AMDGPU][GlobalISel] Remove the post ':' part of vreg operands in fsh combine tests.

Abinav Puthan Purayil via llvm-commits llvm-commits at lists.llvm.org
Mon Jan 24 03:01:03 PST 2022


Author: Abinav Puthan Purayil
Date: 2022-01-24T16:30:40+05:30
New Revision: 912af6b570d6f70e107e4ddf54bc85cb8b63cc70

URL: https://github.com/llvm/llvm-project/commit/912af6b570d6f70e107e4ddf54bc85cb8b63cc70
DIFF: https://github.com/llvm/llvm-project/commit/912af6b570d6f70e107e4ddf54bc85cb8b63cc70.diff

LOG: [AMDGPU][GlobalISel] Remove the post ':' part of vreg operands in fsh combine tests.

Added: 
    

Modified: 
    llvm/test/CodeGen/AMDGPU/GlobalISel/combine-fsh.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/combine-rot.mir

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-fsh.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-fsh.mir
index ad93f1bf4d39..a1eabb487448 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-fsh.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-fsh.mir
@@ -20,10 +20,10 @@ body: |
     %b:_(s32) = COPY $vgpr1
     %amt:_(s32) = COPY $vgpr2
     %bw:_(s32) = G_CONSTANT i32 32
-    %shl:_(s32) = G_SHL %a:_, %amt:_(s32)
-    %sub:_(s32) = G_SUB %bw:_, %amt:_
-    %lshr:_(s32) = G_LSHR %b:_, %sub:_(s32)
-    %or:_(s32) = G_OR %shl:_, %lshr:_
+    %shl:_(s32) = G_SHL %a, %amt
+    %sub:_(s32) = G_SUB %bw, %amt
+    %lshr:_(s32) = G_LSHR %b, %sub
+    %or:_(s32) = G_OR %shl, %lshr
     $vgpr3 = COPY %or
 ...
 
@@ -46,11 +46,11 @@ body: |
     %b:_(<2 x s32>) = COPY $vgpr2_vgpr3
     %amt:_(<2 x s32>) = COPY $vgpr4_vgpr5
     %scalar_bw:_(s32) = G_CONSTANT i32 32
-    %bw:_(<2 x s32>) = G_BUILD_VECTOR %scalar_bw(s32), %scalar_bw(s32)
-    %shl:_(<2 x s32>) = G_SHL %a:_, %amt:_(<2 x s32>)
-    %sub:_(<2 x s32>) = G_SUB %bw:_, %amt:_
-    %lshr:_(<2 x s32>) = G_LSHR %b:_, %sub:_(<2 x s32>)
-    %or:_(<2 x s32>) = G_OR %shl:_, %lshr:_
+    %bw:_(<2 x s32>) = G_BUILD_VECTOR %scalar_bw, %scalar_bw
+    %shl:_(<2 x s32>) = G_SHL %a, %amt
+    %sub:_(<2 x s32>) = G_SUB %bw, %amt
+    %lshr:_(<2 x s32>) = G_LSHR %b, %sub
+    %or:_(<2 x s32>) = G_OR %shl, %lshr
     $vgpr6_vgpr7 = COPY %or
 ...
 
@@ -73,10 +73,10 @@ body: |
     %b:_(s32) = COPY $vgpr1
     %amt:_(s32) = COPY $vgpr2
     %bw:_(s32) = G_CONSTANT i32 32
-    %shl:_(s32) = G_SHL %a:_, %amt:_(s32)
-    %sub:_(s32) = G_SUB %bw:_, %amt:_
-    %lshr:_(s32) = G_LSHR %b:_, %sub:_(s32)
-    %or:_(s32) = G_OR %lshr:_, %shl:_
+    %shl:_(s32) = G_SHL %a, %amt
+    %sub:_(s32) = G_SUB %bw, %amt
+    %lshr:_(s32) = G_LSHR %b, %sub
+    %or:_(s32) = G_OR %lshr, %shl
     $vgpr3 = COPY %or
 ...
 
@@ -99,10 +99,10 @@ body: |
     %b:_(s32) = COPY $vgpr1
     %amt:_(s32) = COPY $vgpr2
     %bw:_(s32) = G_CONSTANT i32 32
-    %lshr:_(s32) = G_LSHR %b:_, %amt:_(s32)
-    %sub:_(s32) = G_SUB %bw:_, %amt:_
-    %shl:_(s32) = G_SHL %a:_, %sub:_(s32)
-    %or:_(s32) = G_OR %shl:_, %lshr:_
+    %lshr:_(s32) = G_LSHR %b, %amt
+    %sub:_(s32) = G_SUB %bw, %amt
+    %shl:_(s32) = G_SHL %a, %sub
+    %or:_(s32) = G_OR %shl, %lshr
     $vgpr3 = COPY %or
 ...
 
@@ -182,10 +182,10 @@ body: |
     %b:_(s32) = COPY $vgpr1
     %amt:_(s32) = COPY $vgpr2
     %bw:_(s32) = G_CONSTANT i32 31
-    %shl:_(s32) = G_SHL %a:_, %amt:_(s32)
-    %sub:_(s32) = G_SUB %bw:_, %amt:_
-    %lshr:_(s32) = G_LSHR %b:_, %sub:_(s32)
-    %or:_(s32) = G_OR %shl:_, %lshr:_
+    %shl:_(s32) = G_SHL %a, %amt
+    %sub:_(s32) = G_SUB %bw, %amt
+    %lshr:_(s32) = G_LSHR %b, %sub
+    %or:_(s32) = G_OR %shl, %lshr
     $vgpr3 = COPY %or
 ...
 
@@ -214,9 +214,9 @@ body: |
     %amt:_(s32) = COPY $vgpr2
     %amt1:_(s32) = COPY $vgpr3
     %bw:_(s32) = G_CONSTANT i32 32
-    %shl:_(s32) = G_SHL %a:_, %amt:_(s32)
-    %sub:_(s32) = G_SUB %bw:_, %amt1:_
-    %lshr:_(s32) = G_LSHR %b:_, %sub:_(s32)
-    %or:_(s32) = G_OR %shl:_, %lshr:_
+    %shl:_(s32) = G_SHL %a, %amt
+    %sub:_(s32) = G_SUB %bw, %amt1
+    %lshr:_(s32) = G_LSHR %b, %sub
+    %or:_(s32) = G_OR %shl, %lshr
     $vgpr4 = COPY %or
 ...

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-rot.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-rot.mir
index 2649ee4bdf72..e83bcbc293d5 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-rot.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-rot.mir
@@ -18,10 +18,10 @@ body: |
     %a:_(s32) = COPY $vgpr0
     %amt:_(s32) = COPY $vgpr1
     %bw:_(s32) = G_CONSTANT i32 32
-    %shl:_(s32) = G_SHL %a:_, %amt:_(s32)
-    %sub:_(s32) = G_SUB %bw:_, %amt:_
-    %lshr:_(s32) = G_LSHR %a:_, %sub:_(s32)
-    %or:_(s32) = G_OR %shl:_, %lshr:_
+    %shl:_(s32) = G_SHL %a, %amt
+    %sub:_(s32) = G_SUB %bw, %amt
+    %lshr:_(s32) = G_LSHR %a, %sub
+    %or:_(s32) = G_OR %shl, %lshr
     $vgpr2 = COPY %or
 ...
 
@@ -42,11 +42,11 @@ body: |
     %a:_(<2 x s32>) = COPY $vgpr0_vgpr1
     %amt:_(<2 x s32>) = COPY $vgpr2_vgpr3
     %scalar_bw:_(s32) = G_CONSTANT i32 32
-    %bw:_(<2 x s32>) = G_BUILD_VECTOR %scalar_bw(s32), %scalar_bw(s32)
-    %shl:_(<2 x s32>) = G_SHL %a:_, %amt:_(<2 x s32>)
-    %sub:_(<2 x s32>) = G_SUB %bw:_, %amt:_
-    %lshr:_(<2 x s32>) = G_LSHR %a:_, %sub:_(<2 x s32>)
-    %or:_(<2 x s32>) = G_OR %shl:_, %lshr:_
+    %bw:_(<2 x s32>) = G_BUILD_VECTOR %scalar_bw, %scalar_bw
+    %shl:_(<2 x s32>) = G_SHL %a, %amt
+    %sub:_(<2 x s32>) = G_SUB %bw, %amt
+    %lshr:_(<2 x s32>) = G_LSHR %a, %sub
+    %or:_(<2 x s32>) = G_OR %shl, %lshr
     $vgpr4_vgpr5 = COPY %or
 ...
 
@@ -67,10 +67,10 @@ body: |
     %a:_(s32) = COPY $vgpr0
     %amt:_(s32) = COPY $vgpr1
     %bw:_(s32) = G_CONSTANT i32 32
-    %shl:_(s32) = G_SHL %a:_, %amt:_(s32)
-    %sub:_(s32) = G_SUB %bw:_, %amt:_
-    %lshr:_(s32) = G_LSHR %a:_, %sub:_(s32)
-    %or:_(s32) = G_OR %lshr:_, %shl:_
+    %shl:_(s32) = G_SHL %a, %amt
+    %sub:_(s32) = G_SUB %bw, %amt
+    %lshr:_(s32) = G_LSHR %a, %sub
+    %or:_(s32) = G_OR %lshr, %shl
     $vgpr2 = COPY %or
 ...
 
@@ -91,10 +91,10 @@ body: |
     %a:_(s32) = COPY $vgpr0
     %amt:_(s32) = COPY $vgpr1
     %bw:_(s32) = G_CONSTANT i32 32
-    %lshr:_(s32) = G_LSHR %a:_, %amt:_(s32)
-    %sub:_(s32) = G_SUB %bw:_, %amt:_
-    %shl:_(s32) = G_SHL %a:_, %sub:_(s32)
-    %or:_(s32) = G_OR %shl:_, %lshr:_
+    %lshr:_(s32) = G_LSHR %a, %amt
+    %sub:_(s32) = G_SUB %bw, %amt
+    %shl:_(s32) = G_SHL %a, %sub
+    %or:_(s32) = G_OR %shl, %lshr
     $vgpr2 = COPY %or
 ...
 
@@ -169,10 +169,10 @@ body: |
     %a:_(s32) = COPY $vgpr0
     %amt:_(s32) = COPY $vgpr1
     %bw:_(s32) = G_CONSTANT i32 31
-    %shl:_(s32) = G_SHL %a:_, %amt:_(s32)
-    %sub:_(s32) = G_SUB %bw:_, %amt:_
-    %lshr:_(s32) = G_LSHR %a:_, %sub:_(s32)
-    %or:_(s32) = G_OR %shl:_, %lshr:_
+    %shl:_(s32) = G_SHL %a, %amt
+    %sub:_(s32) = G_SUB %bw, %amt
+    %lshr:_(s32) = G_LSHR %a, %sub
+    %or:_(s32) = G_OR %shl, %lshr
     $vgpr2 = COPY %or
 ...
 
@@ -199,9 +199,9 @@ body: |
     %amt:_(s32) = COPY $vgpr1
     %amt1:_(s32) = COPY $vgpr2
     %bw:_(s32) = G_CONSTANT i32 32
-    %shl:_(s32) = G_SHL %a:_, %amt:_(s32)
-    %sub:_(s32) = G_SUB %bw:_, %amt1:_
-    %lshr:_(s32) = G_LSHR %a:_, %sub:_(s32)
-    %or:_(s32) = G_OR %shl:_, %lshr:_
+    %shl:_(s32) = G_SHL %a, %amt
+    %sub:_(s32) = G_SUB %bw, %amt1
+    %lshr:_(s32) = G_LSHR %a, %sub
+    %or:_(s32) = G_OR %shl, %lshr
     $vgpr3 = COPY %or
 ...


        


More information about the llvm-commits mailing list