[llvm] fda691e - [AArch64] Update checks in call lowering test for signext in prep for bug fix.

Amara Emerson via llvm-commits <llvm-commits at lists.llvm.org>
Mon Sep 5 13:40:09 PDT 2022


Author: Amara Emerson
Date: 2022-09-05T21:39:57+01:00
New Revision: fda691e18da04776a89e70152e055a5d8df6a42f

URL: https://github.com/llvm/llvm-project/commit/fda691e18da04776a89e70152e055a5d8df6a42f
DIFF: https://github.com/llvm/llvm-project/commit/fda691e18da04776a89e70152e055a5d8df6a42f.diff

LOG: [AArch64] Update checks in call lowering test for signext in prep for bug fix.

Added: 
    

Modified: 
    llvm/test/CodeGen/AArch64/GlobalISel/call-lowering-signext.ll

Removed: 
    


################################################################################
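Context on the change pattern: the updated assertions replace plain ; CHECK directives with ; CHECK-NEXT. A plain ; CHECK only requires its matches to appear in order, so an instruction that a later patch inserts between two checked lines (for example, an extra sign-extension in the signext lowering this fix targets) could slip through unnoticed; ; CHECK-NEXT pins each match to the immediately following line, so any change in the lowered instruction sequence shows up as a test failure. The added {{  $}} directives are FileCheck regex patterns that match the whitespace-only separator line after each liveins list, keeping the CHECK-NEXT chain unbroken across it.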
diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/call-lowering-signext.ll b/llvm/test/CodeGen/AArch64/GlobalISel/call-lowering-signext.ll
index 9c697fe786891..b1bcc017368cd 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/call-lowering-signext.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/call-lowering-signext.ll
@@ -6,25 +6,27 @@
 define i8 @signext_param_i8(i8 signext %x) {
   ; CHECK-LABEL: name: signext_param_i8
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK:   liveins: $w0
-  ; CHECK:   [[COPY:%[0-9]+]]:_(s32) = COPY $w0
-  ; CHECK:   [[ASSERT_SEXT:%[0-9]+]]:_(s32) = G_ASSERT_SEXT [[COPY]], 8
-  ; CHECK:   [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[ASSERT_SEXT]](s32)
-  ; CHECK:   [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC]](s8)
-  ; CHECK:   $w0 = COPY [[ANYEXT]](s32)
-  ; CHECK:   RET_ReallyLR implicit $w0
+  ; CHECK-NEXT:   liveins: $w0
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+  ; CHECK-NEXT:   [[ASSERT_SEXT:%[0-9]+]]:_(s32) = G_ASSERT_SEXT [[COPY]], 8
+  ; CHECK-NEXT:   [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[ASSERT_SEXT]](s32)
+  ; CHECK-NEXT:   [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC]](s8)
+  ; CHECK-NEXT:   $w0 = COPY [[ANYEXT]](s32)
+  ; CHECK-NEXT:   RET_ReallyLR implicit $w0
   ret i8 %x
 }
 
 define i8 @no_signext_param(i8 %x) {
   ; CHECK-LABEL: name: no_signext_param
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK:   liveins: $w0
-  ; CHECK:   [[COPY:%[0-9]+]]:_(s32) = COPY $w0
-  ; CHECK:   [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
-  ; CHECK:   [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC]](s8)
-  ; CHECK:   $w0 = COPY [[ANYEXT]](s32)
-  ; CHECK:   RET_ReallyLR implicit $w0
+  ; CHECK-NEXT:   liveins: $w0
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+  ; CHECK-NEXT:   [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
+  ; CHECK-NEXT:   [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC]](s8)
+  ; CHECK-NEXT:   $w0 = COPY [[ANYEXT]](s32)
+  ; CHECK-NEXT:   RET_ReallyLR implicit $w0
   ret i8 %x
 }
 
@@ -32,10 +34,11 @@ define i8 @no_signext_param(i8 %x) {
 define i32 @signext_param_i32(i32 signext %x) {
   ; CHECK-LABEL: name: signext_param_i32
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK:   liveins: $w0
-  ; CHECK:   [[COPY:%[0-9]+]]:_(s32) = COPY $w0
-  ; CHECK:   $w0 = COPY [[COPY]](s32)
-  ; CHECK:   RET_ReallyLR implicit $w0
+  ; CHECK-NEXT:   liveins: $w0
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+  ; CHECK-NEXT:   $w0 = COPY [[COPY]](s32)
+  ; CHECK-NEXT:   RET_ReallyLR implicit $w0
   ret i32 %x
 }
 
@@ -43,26 +46,27 @@ define i32 @signext_param_i32(i32 signext %x) {
 define i32 @signext_param_stack(i64 %a, i64 %b, i64 %c, i64 %d, i64 %e, i64 %f,
   ; CHECK-LABEL: name: signext_param_stack
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK:   liveins: $x0, $x1, $x2, $x3, $x4, $x5, $x6, $x7
-  ; CHECK:   [[COPY:%[0-9]+]]:_(s64) = COPY $x0
-  ; CHECK:   [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
-  ; CHECK:   [[COPY2:%[0-9]+]]:_(s64) = COPY $x2
-  ; CHECK:   [[COPY3:%[0-9]+]]:_(s64) = COPY $x3
-  ; CHECK:   [[COPY4:%[0-9]+]]:_(s64) = COPY $x4
-  ; CHECK:   [[COPY5:%[0-9]+]]:_(s64) = COPY $x5
-  ; CHECK:   [[COPY6:%[0-9]+]]:_(s64) = COPY $x6
-  ; CHECK:   [[COPY7:%[0-9]+]]:_(s64) = COPY $x7
-  ; CHECK:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
-  ; CHECK:   [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load (s64) from %fixed-stack.1, align 16)
-  ; CHECK:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
-  ; CHECK:   [[SEXTLOAD:%[0-9]+]]:_(s32) = G_SEXTLOAD [[FRAME_INDEX1]](p0) :: (invariant load (s8) from %fixed-stack.0, align 8)
-  ; CHECK:   [[ASSERT_SEXT:%[0-9]+]]:_(s32) = G_ASSERT_SEXT [[SEXTLOAD]], 1
-  ; CHECK:   [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[ASSERT_SEXT]](s32)
-  ; CHECK:   [[ASSERT_ZEXT:%[0-9]+]]:_(s8) = G_ASSERT_ZEXT [[TRUNC]], 1
-  ; CHECK:   [[TRUNC1:%[0-9]+]]:_(s1) = G_TRUNC [[ASSERT_ZEXT]](s8)
-  ; CHECK:   [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[TRUNC1]](s1)
-  ; CHECK:   $w0 = COPY [[ZEXT]](s32)
-  ; CHECK:   RET_ReallyLR implicit $w0
+  ; CHECK-NEXT:   liveins: $x0, $x1, $x2, $x3, $x4, $x5, $x6, $x7
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s64) = COPY $x0
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(s64) = COPY $x2
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:_(s64) = COPY $x3
+  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:_(s64) = COPY $x4
+  ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:_(s64) = COPY $x5
+  ; CHECK-NEXT:   [[COPY6:%[0-9]+]]:_(s64) = COPY $x6
+  ; CHECK-NEXT:   [[COPY7:%[0-9]+]]:_(s64) = COPY $x7
+  ; CHECK-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load (s64) from %fixed-stack.1, align 16)
+  ; CHECK-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; CHECK-NEXT:   [[SEXTLOAD:%[0-9]+]]:_(s32) = G_SEXTLOAD [[FRAME_INDEX1]](p0) :: (invariant load (s8) from %fixed-stack.0, align 8)
+  ; CHECK-NEXT:   [[ASSERT_SEXT:%[0-9]+]]:_(s32) = G_ASSERT_SEXT [[SEXTLOAD]], 1
+  ; CHECK-NEXT:   [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[ASSERT_SEXT]](s32)
+  ; CHECK-NEXT:   [[ASSERT_ZEXT:%[0-9]+]]:_(s8) = G_ASSERT_ZEXT [[TRUNC]], 1
+  ; CHECK-NEXT:   [[TRUNC1:%[0-9]+]]:_(s1) = G_TRUNC [[ASSERT_ZEXT]](s8)
+  ; CHECK-NEXT:   [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[TRUNC1]](s1)
+  ; CHECK-NEXT:   $w0 = COPY [[ZEXT]](s32)
+  ; CHECK-NEXT:   RET_ReallyLR implicit $w0
                                 i64 %g, i64 %h, i64 %i, i1 signext %j) {
   %v = zext i1 %j to i32
   ret i32 %v
@@ -72,21 +76,22 @@ define i32 @signext_param_stack(i64 %a, i64 %b, i64 %c, i64 %d, i64 %e, i64 %f,
 define i32 @dont_need_assert_zext_stack(i64 %a, i64 %b, i64 %c, i64 %d, i64 %e,
   ; CHECK-LABEL: name: dont_need_assert_zext_stack
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK:   liveins: $x0, $x1, $x2, $x3, $x4, $x5, $x6, $x7
-  ; CHECK:   [[COPY:%[0-9]+]]:_(s64) = COPY $x0
-  ; CHECK:   [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
-  ; CHECK:   [[COPY2:%[0-9]+]]:_(s64) = COPY $x2
-  ; CHECK:   [[COPY3:%[0-9]+]]:_(s64) = COPY $x3
-  ; CHECK:   [[COPY4:%[0-9]+]]:_(s64) = COPY $x4
-  ; CHECK:   [[COPY5:%[0-9]+]]:_(s64) = COPY $x5
-  ; CHECK:   [[COPY6:%[0-9]+]]:_(s64) = COPY $x6
-  ; CHECK:   [[COPY7:%[0-9]+]]:_(s64) = COPY $x7
-  ; CHECK:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
-  ; CHECK:   [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load (s64) from %fixed-stack.1, align 16)
-  ; CHECK:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
-  ; CHECK:   [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load (s32) from %fixed-stack.0, align 8)
-  ; CHECK:   $w0 = COPY [[LOAD1]](s32)
-  ; CHECK:   RET_ReallyLR implicit $w0
+  ; CHECK-NEXT:   liveins: $x0, $x1, $x2, $x3, $x4, $x5, $x6, $x7
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s64) = COPY $x0
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(s64) = COPY $x2
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:_(s64) = COPY $x3
+  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:_(s64) = COPY $x4
+  ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:_(s64) = COPY $x5
+  ; CHECK-NEXT:   [[COPY6:%[0-9]+]]:_(s64) = COPY $x6
+  ; CHECK-NEXT:   [[COPY7:%[0-9]+]]:_(s64) = COPY $x7
+  ; CHECK-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load (s64) from %fixed-stack.1, align 16)
+  ; CHECK-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; CHECK-NEXT:   [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load (s32) from %fixed-stack.0, align 8)
+  ; CHECK-NEXT:   $w0 = COPY [[LOAD1]](s32)
+  ; CHECK-NEXT:   RET_ReallyLR implicit $w0
                                         i64 %f, i64 %g, i64 %h, i64 %i,
                                         i32 signext %j) {
   ret i32 %j
@@ -96,24 +101,25 @@ define i32 @dont_need_assert_zext_stack(i64 %a, i64 %b, i64 %c, i64 %d, i64 %e,
 define i8 @s8_assert_zext_stack(i64 %a, i64 %b, i64 %c, i64 %d, i64 %e,
   ; CHECK-LABEL: name: s8_assert_zext_stack
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK:   liveins: $x0, $x1, $x2, $x3, $x4, $x5, $x6, $x7
-  ; CHECK:   [[COPY:%[0-9]+]]:_(s64) = COPY $x0
-  ; CHECK:   [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
-  ; CHECK:   [[COPY2:%[0-9]+]]:_(s64) = COPY $x2
-  ; CHECK:   [[COPY3:%[0-9]+]]:_(s64) = COPY $x3
-  ; CHECK:   [[COPY4:%[0-9]+]]:_(s64) = COPY $x4
-  ; CHECK:   [[COPY5:%[0-9]+]]:_(s64) = COPY $x5
-  ; CHECK:   [[COPY6:%[0-9]+]]:_(s64) = COPY $x6
-  ; CHECK:   [[COPY7:%[0-9]+]]:_(s64) = COPY $x7
-  ; CHECK:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
-  ; CHECK:   [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load (s64) from %fixed-stack.1, align 16)
-  ; CHECK:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
-  ; CHECK:   [[SEXTLOAD:%[0-9]+]]:_(s32) = G_SEXTLOAD [[FRAME_INDEX1]](p0) :: (invariant load (s8) from %fixed-stack.0, align 8)
-  ; CHECK:   [[ASSERT_SEXT:%[0-9]+]]:_(s32) = G_ASSERT_SEXT [[SEXTLOAD]], 8
-  ; CHECK:   [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[ASSERT_SEXT]](s32)
-  ; CHECK:   [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC]](s8)
-  ; CHECK:   $w0 = COPY [[ANYEXT]](s32)
-  ; CHECK:   RET_ReallyLR implicit $w0
+  ; CHECK-NEXT:   liveins: $x0, $x1, $x2, $x3, $x4, $x5, $x6, $x7
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s64) = COPY $x0
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(s64) = COPY $x2
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:_(s64) = COPY $x3
+  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:_(s64) = COPY $x4
+  ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:_(s64) = COPY $x5
+  ; CHECK-NEXT:   [[COPY6:%[0-9]+]]:_(s64) = COPY $x6
+  ; CHECK-NEXT:   [[COPY7:%[0-9]+]]:_(s64) = COPY $x7
+  ; CHECK-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load (s64) from %fixed-stack.1, align 16)
+  ; CHECK-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; CHECK-NEXT:   [[SEXTLOAD:%[0-9]+]]:_(s32) = G_SEXTLOAD [[FRAME_INDEX1]](p0) :: (invariant load (s8) from %fixed-stack.0, align 8)
+  ; CHECK-NEXT:   [[ASSERT_SEXT:%[0-9]+]]:_(s32) = G_ASSERT_SEXT [[SEXTLOAD]], 8
+  ; CHECK-NEXT:   [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[ASSERT_SEXT]](s32)
+  ; CHECK-NEXT:   [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC]](s8)
+  ; CHECK-NEXT:   $w0 = COPY [[ANYEXT]](s32)
+  ; CHECK-NEXT:   RET_ReallyLR implicit $w0
                                         i64 %f, i64 %g, i64 %h, i64 %i,
                                         i8 signext %j) {
   ret i8 %j
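
For anyone refreshing these assertions once the follow-up fix lands: check lines in this style are normally regenerated by script rather than edited by hand. A minimal sketch, assuming the generator is llvm/utils/update_mir_test_checks.py (the test's NOTE header, which names the actual script, is not shown in this diff) and that a built llc is on PATH at the root of an llvm-project checkout:

    # Re-run the test's RUN line and rewrite its autogenerated CHECK lines.
    python3 llvm/utils/update_mir_test_checks.py \
        llvm/test/CodeGen/AArch64/GlobalISel/call-lowering-signext.ll

The script pipes the test through llc (stopped after the IRTranslator for tests like this one) and emits fresh ; CHECK/; CHECK-NEXT lines from the resulting MIR.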