[llvm] e0a080d - AArch64/GlobalISel: Update tests to use correct memory types

Matt Arsenault via llvm-commits llvm-commits at lists.llvm.org
Fri Jul 16 08:41:38 PDT 2021


Author: Matt Arsenault
Date: 2021-07-16T11:41:32-04:00
New Revision: e0a080d3484bc1e0ea28cf2a815a9b9fff0cdefe

URL: https://github.com/llvm/llvm-project/commit/e0a080d3484bc1e0ea28cf2a815a9b9fff0cdefe
DIFF: https://github.com/llvm/llvm-project/commit/e0a080d3484bc1e0ea28cf2a815a9b9fff0cdefe.diff

LOG: AArch64/GlobalISel: Update tests to use correct memory types

Added: 
    

Modified: 
    llvm/test/CodeGen/AArch64/GlobalISel/artifact-find-value.mir
    llvm/test/CodeGen/AArch64/GlobalISel/legalize-shuffle-vector.mir
    llvm/test/CodeGen/AArch64/GlobalISel/store-addressing-modes.mir

Removed: 
    


################################################################################
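
These tests previously described their G_STORE/G_LOAD memory operands with a
raw size in bytes (e.g. "store 16") or with a scalar type of the same size
(e.g. "store (s128)"). The updated checks spell out the actual type being
stored or loaded, so a <2 x s64> store becomes "store (<2 x s64>)". A minimal
before/after sketch (the %val and %ptr names are illustrative, not taken from
any one test):

    ; old forms: byte size, or a same-sized scalar
    G_STORE %val(<2 x s64>), %ptr(p0) :: (store 16)
    G_STORE %val(<2 x s64>), %ptr(p0) :: (store (s128))
    ; new form: the memory type matches the stored value's type
    G_STORE %val(<2 x s64>), %ptr(p0) :: (store (<2 x s64>))
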
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/artifact-find-value.mir b/llvm/test/CodeGen/AArch64/GlobalISel/artifact-find-value.mir
index d31306892a7a..33be87e3bf2b 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/artifact-find-value.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/artifact-find-value.mir
@@ -21,9 +21,9 @@ body:             |
     ; CHECK: %v2s64_val:_(<2 x s64>) = G_BUILD_VECTOR [[COPY5]](s64), [[COPY6]](s64)
     ; CHECK: %v2s64_val2:_(<2 x s64>) = G_BUILD_VECTOR [[COPY6]](s64), [[COPY8]](s64)
     ; CHECK: [[DEF:%[0-9]+]]:_(<2 x s64>) = G_IMPLICIT_DEF
-    ; CHECK: G_STORE %v2s64_val(<2 x s64>), [[COPY2]](p0) :: (store (s128))
-    ; CHECK: G_STORE %v2s64_val2(<2 x s64>), [[COPY2]](p0) :: (store (s128))
-    ; CHECK: G_STORE %v2s64_val2(<2 x s64>), [[COPY2]](p0) :: (store (s128))
+    ; CHECK: G_STORE %v2s64_val(<2 x s64>), [[COPY2]](p0) :: (store (<2 x s64>))
+    ; CHECK: G_STORE %v2s64_val2(<2 x s64>), [[COPY2]](p0) :: (store (<2 x s64>))
+    ; CHECK: G_STORE %v2s64_val2(<2 x s64>), [[COPY2]](p0) :: (store (<2 x s64>))
     ; CHECK: RET_ReallyLR
   %0:_(s64)  = COPY $x0
   %1:_(s64)  = COPY $x1
@@ -45,9 +45,9 @@ body:             |
   %unmerge1:_(<6 x s64>), %deaddef1:_(<6 x s64>), %deaddef2:_(<6 x s64>), %deaddef3:_(<6 x s64>) = G_UNMERGE_VALUES %bigconcat:_(<24 x s64>)
   %val1:_(<2 x s64>), %val2:_(<2 x s64>), %val3:_(<2 x s64>) = G_UNMERGE_VALUES %unmerge1:_(<6 x s64>)
 
-  G_STORE %val1:_(<2 x s64>), %2:_(p0) :: (store 16)
-  G_STORE %val2:_(<2 x s64>), %2:_(p0) :: (store 16)
-  G_STORE %val3:_(<2 x s64>), %2:_(p0) :: (store 16)
+  G_STORE %val1:_(<2 x s64>), %2:_(p0) :: (store (<2 x s64>))
+  G_STORE %val2:_(<2 x s64>), %2:_(p0) :: (store (<2 x s64>))
+  G_STORE %val3:_(<2 x s64>), %2:_(p0) :: (store (<2 x s64>))
   RET_ReallyLR
 
 ...
@@ -73,9 +73,9 @@ body:             |
     ; CHECK: %v2s64_val:_(<2 x s64>) = G_BUILD_VECTOR [[COPY5]](s64), [[COPY6]](s64)
     ; CHECK: %v2s64_val2:_(<2 x s64>) = G_BUILD_VECTOR [[COPY6]](s64), [[COPY8]](s64)
     ; CHECK: [[DEF:%[0-9]+]]:_(<2 x s64>) = G_IMPLICIT_DEF
-    ; CHECK: G_STORE %v2s64_val2(<2 x s64>), [[COPY2]](p0) :: (store (s128))
-    ; CHECK: G_STORE %v2s64_val2(<2 x s64>), [[COPY2]](p0) :: (store (s128))
-    ; CHECK: G_STORE %v2s64_val(<2 x s64>), [[COPY2]](p0) :: (store (s128))
+    ; CHECK: G_STORE %v2s64_val2(<2 x s64>), [[COPY2]](p0) :: (store (<2 x s64>))
+    ; CHECK: G_STORE %v2s64_val2(<2 x s64>), [[COPY2]](p0) :: (store (<2 x s64>))
+    ; CHECK: G_STORE %v2s64_val(<2 x s64>), [[COPY2]](p0) :: (store (<2 x s64>))
     ; CHECK: RET_ReallyLR
   %0:_(s64)  = COPY $x0
   %1:_(s64)  = COPY $x1
@@ -97,9 +97,9 @@ body:             |
   %deaddef1:_(<6 x s64>), %deaddef2:_(<6 x s64>), %deaddef3:_(<6 x s64>), %unmerge1:_(<6 x s64>) = G_UNMERGE_VALUES %bigconcat:_(<24 x s64>)
   %val1:_(<2 x s64>), %val2:_(<2 x s64>), %val3:_(<2 x s64>) = G_UNMERGE_VALUES %unmerge1:_(<6 x s64>)
 
-  G_STORE %val1:_(<2 x s64>), %2:_(p0) :: (store 16)
-  G_STORE %val2:_(<2 x s64>), %2:_(p0) :: (store 16)
-  G_STORE %val3:_(<2 x s64>), %2:_(p0) :: (store 16)
+  G_STORE %val1:_(<2 x s64>), %2:_(p0) :: (store (<2 x s64>))
+  G_STORE %val2:_(<2 x s64>), %2:_(p0) :: (store (<2 x s64>))
+  G_STORE %val3:_(<2 x s64>), %2:_(p0) :: (store (<2 x s64>))
   RET_ReallyLR
 
 ...
@@ -124,8 +124,8 @@ body:             |
     ; CHECK: %v2s64_val:_(<2 x s64>) = G_BUILD_VECTOR [[COPY5]](s64), [[COPY6]](s64)
     ; CHECK: %v2s64_val2:_(<2 x s64>) = G_BUILD_VECTOR [[COPY6]](s64), [[COPY8]](s64)
     ; CHECK: [[DEF:%[0-9]+]]:_(<2 x s64>) = G_IMPLICIT_DEF
-    ; CHECK: G_STORE %v2s64_val(<2 x s64>), [[COPY2]](p0) :: (store (s128))
-    ; CHECK: G_STORE %v2s64_val2(<2 x s64>), [[COPY2]](p0) :: (store (s128))
+    ; CHECK: G_STORE %v2s64_val(<2 x s64>), [[COPY2]](p0) :: (store (<2 x s64>))
+    ; CHECK: G_STORE %v2s64_val2(<2 x s64>), [[COPY2]](p0) :: (store (<2 x s64>))
     ; CHECK: RET_ReallyLR
   %0:_(s64)  = COPY $x0
   %1:_(s64)  = COPY $x1
@@ -144,9 +144,9 @@ body:             |
   %val1:_(<2 x s64>), %val2:_(<2 x s64>), %val3:_(<2 x s64>), %val4:_(<2 x s64>) = G_UNMERGE_VALUES %insert:_(<8 x s64>)
 
   ; val1 should be <%5, %6>
-  G_STORE %val1:_(<2 x s64>), %2:_(p0) :: (store 16)
+  G_STORE %val1:_(<2 x s64>), %2:_(p0) :: (store (<2 x s64>))
   ; val2 should be <%6, %8>
-  G_STORE %val2:_(<2 x s64>), %2:_(p0) :: (store 16)
+  G_STORE %val2:_(<2 x s64>), %2:_(p0) :: (store (<2 x s64>))
   RET_ReallyLR
 
 ...
@@ -171,8 +171,8 @@ body:             |
     ; CHECK: %v2s64_val:_(<2 x s64>) = G_BUILD_VECTOR [[COPY5]](s64), [[COPY6]](s64)
     ; CHECK: %v2s64_val2:_(<2 x s64>) = G_BUILD_VECTOR [[COPY6]](s64), [[COPY8]](s64)
     ; CHECK: [[DEF:%[0-9]+]]:_(<2 x s64>) = G_IMPLICIT_DEF
-    ; CHECK: G_STORE %v2s64_val(<2 x s64>), [[COPY2]](p0) :: (store (s128))
-    ; CHECK: G_STORE %v2s64_val2(<2 x s64>), [[COPY2]](p0) :: (store (s128))
+    ; CHECK: G_STORE %v2s64_val(<2 x s64>), [[COPY2]](p0) :: (store (<2 x s64>))
+    ; CHECK: G_STORE %v2s64_val2(<2 x s64>), [[COPY2]](p0) :: (store (<2 x s64>))
     ; CHECK: RET_ReallyLR
   %0:_(s64)  = COPY $x0
   %1:_(s64)  = COPY $x1
@@ -191,9 +191,9 @@ body:             |
   %val1:_(<2 x s64>), %val2:_(<2 x s64>), %val3:_(<2 x s64>), %val4:_(<2 x s64>) = G_UNMERGE_VALUES %insert:_(<8 x s64>)
 
   ; val3 should be <%5, %6>
-  G_STORE %val3:_(<2 x s64>), %2:_(p0) :: (store 16)
+  G_STORE %val3:_(<2 x s64>), %2:_(p0) :: (store (<2 x s64>))
   ; val4 should be <%6, %8>
-  G_STORE %val4:_(<2 x s64>), %2:_(p0) :: (store 16)
+  G_STORE %val4:_(<2 x s64>), %2:_(p0) :: (store (<2 x s64>))
   RET_ReallyLR
 
 ...
@@ -218,8 +218,8 @@ body:             |
     ; CHECK: %v2s64_val:_(<2 x s64>) = G_BUILD_VECTOR [[COPY5]](s64), [[COPY6]](s64)
     ; CHECK: %v2s64_val2:_(<2 x s64>) = G_BUILD_VECTOR [[COPY6]](s64), [[COPY8]](s64)
     ; CHECK: [[DEF:%[0-9]+]]:_(<2 x s64>) = G_IMPLICIT_DEF
-    ; CHECK: G_STORE %v2s64_val(<2 x s64>), [[COPY2]](p0) :: (store (s128))
-    ; CHECK: G_STORE %v2s64_val2(<2 x s64>), [[COPY2]](p0) :: (store (s128))
+    ; CHECK: G_STORE %v2s64_val(<2 x s64>), [[COPY2]](p0) :: (store (<2 x s64>))
+    ; CHECK: G_STORE %v2s64_val2(<2 x s64>), [[COPY2]](p0) :: (store (<2 x s64>))
     ; CHECK: RET_ReallyLR
   %0:_(s64)  = COPY $x0
   %1:_(s64)  = COPY $x1
@@ -240,9 +240,9 @@ body:             |
   %val1:_(<2 x s64>), %val2:_(<2 x s64>), %val3:_(<2 x s64>), %val4:_(<2 x s64>) = G_UNMERGE_VALUES %insert:_(<8 x s64>)
 
   ; val3 should be <%5, %6>
-  G_STORE %val3:_(<2 x s64>), %2:_(p0) :: (store 16)
+  G_STORE %val3:_(<2 x s64>), %2:_(p0) :: (store (<2 x s64>))
   ; val4 should be <%6, %8>
-  G_STORE %val4:_(<2 x s64>), %2:_(p0) :: (store 16)
+  G_STORE %val4:_(<2 x s64>), %2:_(p0) :: (store (<2 x s64>))
   RET_ReallyLR
 
 ...

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-shuffle-vector.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-shuffle-vector.mir
index bdd3ebb53ef6..d90ed320e464 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-shuffle-vector.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-shuffle-vector.mir
@@ -283,9 +283,9 @@ body:             |
     %9:_(s64) = COPY $d6
     %10:_(s64) = COPY $d7
     %15:_(p0) = G_FRAME_INDEX %fixed-stack.3
-    %11:_(s64) = G_LOAD %15(p0) :: (invariant load 8 from %fixed-stack.3, align 16)
+    %11:_(s64) = G_LOAD %15(p0) :: (invariant load (s64) from %fixed-stack.3, align 16)
     %16:_(p0) = G_FRAME_INDEX %fixed-stack.2
-    %12:_(s64) = G_LOAD %16(p0) :: (invariant load 8 from %fixed-stack.2)
+    %12:_(s64) = G_LOAD %16(p0) :: (invariant load (s64) from %fixed-stack.2)
     %17:_(p0) = G_FRAME_INDEX %fixed-stack.1
     %13:_(s64) = G_LOAD %17(p0) :: (invariant load 8 from %fixed-stack.1, align 16)
     %18:_(p0) = G_FRAME_INDEX %fixed-stack.0
@@ -293,7 +293,7 @@ body:             |
     %1:_(<6 x s64>) = G_BUILD_VECTOR %9(s64), %10(s64), %11(s64), %12(s64), %13(s64), %14(s64)
     %2:_(p0) = COPY $x0
     %19:_(<6 x s64>) = G_SHUFFLE_VECTOR %0(<6 x s64>), %1, shufflemask(3, 4, 7, 0, 1, 11)
-    G_STORE %19(<6 x s64>), %2(p0) :: (store 48, align 64)
+    G_STORE %19(<6 x s64>), %2(p0) :: (store (<6 x s64>), align 64)
     RET_ReallyLR
 
 ...

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/store-addressing-modes.mir b/llvm/test/CodeGen/AArch64/GlobalISel/store-addressing-modes.mir
index ff5143a737ee..2529e4ba0daf 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/store-addressing-modes.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/store-addressing-modes.mir
@@ -208,11 +208,11 @@ body:             |
     ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
     ; CHECK: [[COPY2:%[0-9]+]]:gpr64all = COPY $x2
     ; CHECK: [[COPY3:%[0-9]+]]:gpr64 = COPY [[COPY2]]
-    ; CHECK: STRXroX [[COPY3]], [[COPY1]], [[COPY]], 0, 1 :: (store (s64) into %ir.addr)
+    ; CHECK: STRXroX [[COPY3]], [[COPY1]], [[COPY]], 0, 1 :: (store (p0) into %ir.addr)
     %0:gpr(s64) = COPY $x0
     %1:gpr(s64) = G_CONSTANT i64 3
     %2:gpr(s64) = G_SHL %0, %1(s64)
     %3:gpr(p0) = COPY $x1
     %ptr:gpr(p0) = G_PTR_ADD %3, %2
     %4:gpr(p0) = COPY $x2
-    G_STORE %4, %ptr :: (store (s64) into %ir.addr)
+    G_STORE %4, %ptr :: (store (p0) into %ir.addr)


        

