[llvm] be0924a - [Tests] Update some tests for D104765. NFC

David Green via llvm-commits llvm-commits at lists.llvm.org
Tue Jul 6 11:24:08 PDT 2021


Author: David Green
Date: 2021-07-06T19:23:52+01:00
New Revision: be0924ad179eb7113fb7539bb2d7fc681ffd9ed9

URL: https://github.com/llvm/llvm-project/commit/be0924ad179eb7113fb7539bb2d7fc681ffd9ed9
DIFF: https://github.com/llvm/llvm-project/commit/be0924ad179eb7113fb7539bb2d7fc681ffd9ed9.diff

LOG: [Tests] Update some tests for D104765. NFC

Added: 
    

Modified: 
    llvm/test/CodeGen/ARM/add-like-or.ll
    llvm/test/CodeGen/Hexagon/isel-global-offset-alignment.ll
    llvm/test/CodeGen/SystemZ/addr-01.ll
    llvm/test/CodeGen/SystemZ/addr-02.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/ARM/add-like-or.ll b/llvm/test/CodeGen/ARM/add-like-or.ll
index d958478e10846..943917960662f 100644
--- a/llvm/test/CodeGen/ARM/add-like-or.ll
+++ b/llvm/test/CodeGen/ARM/add-like-or.ll
@@ -1,9 +1,54 @@
-; RUN: llc -mtriple=thumbv6m-apple-macho %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-T1
-; RUN: llc -mtriple=thumbv7m-apple-macho %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-T2
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=thumbv6m-none-eabi %s -o - | FileCheck %s --check-prefixes=CHECK-T1
+; RUN: llc -mtriple=thumbv7m-none-eabi %s -o - | FileCheck %s --check-prefixes=CHECK-T2
+; RUN: llc -mtriple=armv7a-none-eabi %s -o - | FileCheck %s --check-prefixes=CHECK-A
 
 define i32 @test_add_i3(i1 %tst, i32 %a, i32 %b) {
-; CHECK-LABEL: test_add_i3:
-; CHECK: adds r0, {{r[0-9]+}}, #2
+; CHECK-T1-LABEL: test_add_i3:
+; CHECK-T1:       @ %bb.0:
+; CHECK-T1-NEXT:    .save {r4, lr}
+; CHECK-T1-NEXT:    push {r4, lr}
+; CHECK-T1-NEXT:    lsls r0, r0, #31
+; CHECK-T1-NEXT:    bne .LBB0_2
+; CHECK-T1-NEXT:  @ %bb.1:
+; CHECK-T1-NEXT:    movs r0, #3
+; CHECK-T1-NEXT:    bics r2, r0
+; CHECK-T1-NEXT:    mov r4, r2
+; CHECK-T1-NEXT:    b .LBB0_3
+; CHECK-T1-NEXT:  .LBB0_2:
+; CHECK-T1-NEXT:    mov r4, r1
+; CHECK-T1-NEXT:    movs r0, #6
+; CHECK-T1-NEXT:    bics r4, r0
+; CHECK-T1-NEXT:  .LBB0_3:
+; CHECK-T1-NEXT:    mov r0, r4
+; CHECK-T1-NEXT:    bl foo
+; CHECK-T1-NEXT:    adds r0, r4, #2
+; CHECK-T1-NEXT:    pop {r4, pc}
+;
+; CHECK-T2-LABEL: test_add_i3:
+; CHECK-T2:       @ %bb.0:
+; CHECK-T2-NEXT:    .save {r4, lr}
+; CHECK-T2-NEXT:    push {r4, lr}
+; CHECK-T2-NEXT:    lsls r0, r0, #31
+; CHECK-T2-NEXT:    bic r4, r2, #3
+; CHECK-T2-NEXT:    it ne
+; CHECK-T2-NEXT:    bicne r4, r1, #6
+; CHECK-T2-NEXT:    mov r0, r4
+; CHECK-T2-NEXT:    bl foo
+; CHECK-T2-NEXT:    adds r0, r4, #2
+; CHECK-T2-NEXT:    pop {r4, pc}
+;
+; CHECK-A-LABEL: test_add_i3:
+; CHECK-A:       @ %bb.0:
+; CHECK-A-NEXT:    .save {r4, lr}
+; CHECK-A-NEXT:    push {r4, lr}
+; CHECK-A-NEXT:    bic r4, r2, #3
+; CHECK-A-NEXT:    tst r0, #1
+; CHECK-A-NEXT:    bicne r4, r1, #6
+; CHECK-A-NEXT:    mov r0, r4
+; CHECK-A-NEXT:    bl foo
+; CHECK-A-NEXT:    orr r0, r4, #2
+; CHECK-A-NEXT:    pop {r4, pc}
   %tmp = and i32 %a, -7
   %tmp1 = and i32 %b, -4
   %int = select i1 %tst, i32 %tmp, i32 %tmp1
@@ -16,10 +61,43 @@ define i32 @test_add_i3(i1 %tst, i32 %a, i32 %b) {
 }
 
 define i32 @test_add_i8(i32 %a, i32 %b, i1 %tst) {
-; CHECK-LABEL: test_add_i8:
-; CHECK-T1: adds r0, #12
-; CHECK-T2: add.w r0, {{r[0-9]+}}, #12
-
+; CHECK-T1-LABEL: test_add_i8:
+; CHECK-T1:       @ %bb.0:
+; CHECK-T1-NEXT:    lsls r2, r2, #31
+; CHECK-T1-NEXT:    bne .LBB1_2
+; CHECK-T1-NEXT:  @ %bb.1:
+; CHECK-T1-NEXT:    ldr r0, .LCPI1_0
+; CHECK-T1-NEXT:    ands r1, r0
+; CHECK-T1-NEXT:    mov r0, r1
+; CHECK-T1-NEXT:    adds r0, #12
+; CHECK-T1-NEXT:    bx lr
+; CHECK-T1-NEXT:  .LBB1_2:
+; CHECK-T1-NEXT:    movs r1, #255
+; CHECK-T1-NEXT:    bics r0, r1
+; CHECK-T1-NEXT:    adds r0, #12
+; CHECK-T1-NEXT:    bx lr
+; CHECK-T1-NEXT:    .p2align 2
+; CHECK-T1-NEXT:  @ %bb.3:
+; CHECK-T1-NEXT:  .LCPI1_0:
+; CHECK-T1-NEXT:    .long 4294966784 @ 0xfffffe00
+;
+; CHECK-T2-LABEL: test_add_i8:
+; CHECK-T2:       @ %bb.0:
+; CHECK-T2-NEXT:    movw r3, #511
+; CHECK-T2-NEXT:    bics r1, r3
+; CHECK-T2-NEXT:    lsls r2, r2, #31
+; CHECK-T2-NEXT:    it ne
+; CHECK-T2-NEXT:    bicne r1, r0, #255
+; CHECK-T2-NEXT:    add.w r0, r1, #12
+; CHECK-T2-NEXT:    bx lr
+;
+; CHECK-A-LABEL: test_add_i8:
+; CHECK-A:       @ %bb.0:
+; CHECK-A-NEXT:    bfc r1, #0, #9
+; CHECK-A-NEXT:    tst r2, #1
+; CHECK-A-NEXT:    bicne r1, r0, #255
+; CHECK-A-NEXT:    orr r0, r1, #12
+; CHECK-A-NEXT:    bx lr
   %tmp = and i32 %a, -256
   %tmp1 = and i32 %b, -512
   %int = select i1 %tst, i32 %tmp, i32 %tmp1
@@ -28,9 +106,51 @@ define i32 @test_add_i8(i32 %a, i32 %b, i1 %tst) {
 }
 
 define i32 @test_add_i12(i32 %a, i32 %b, i1 %tst) {
-; CHECK-LABEL: test_add_i12:
-; CHECK-T2: addw r0, {{r[0-9]+}}, #854
-
+; CHECK-T1-LABEL: test_add_i12:
+; CHECK-T1:       @ %bb.0:
+; CHECK-T1-NEXT:    lsls r2, r2, #31
+; CHECK-T1-NEXT:    bne .LBB2_2
+; CHECK-T1-NEXT:  @ %bb.1:
+; CHECK-T1-NEXT:    ldr r0, .LCPI2_1
+; CHECK-T1-NEXT:    ands r1, r0
+; CHECK-T1-NEXT:    mov r0, r1
+; CHECK-T1-NEXT:    b .LBB2_3
+; CHECK-T1-NEXT:  .LBB2_2:
+; CHECK-T1-NEXT:    ldr r1, .LCPI2_0
+; CHECK-T1-NEXT:    ands r0, r1
+; CHECK-T1-NEXT:  .LBB2_3:
+; CHECK-T1-NEXT:    ldr r1, .LCPI2_2
+; CHECK-T1-NEXT:    adds r0, r0, r1
+; CHECK-T1-NEXT:    bx lr
+; CHECK-T1-NEXT:    .p2align 2
+; CHECK-T1-NEXT:  @ %bb.4:
+; CHECK-T1-NEXT:  .LCPI2_0:
+; CHECK-T1-NEXT:    .long 4294963200 @ 0xfffff000
+; CHECK-T1-NEXT:  .LCPI2_1:
+; CHECK-T1-NEXT:    .long 4294959104 @ 0xffffe000
+; CHECK-T1-NEXT:  .LCPI2_2:
+; CHECK-T1-NEXT:    .long 854 @ 0x356
+;
+; CHECK-T2-LABEL: test_add_i12:
+; CHECK-T2:       @ %bb.0:
+; CHECK-T2-NEXT:    movw r3, #8191
+; CHECK-T2-NEXT:    bics r1, r3
+; CHECK-T2-NEXT:    movw r12, #4095
+; CHECK-T2-NEXT:    lsls r2, r2, #31
+; CHECK-T2-NEXT:    it ne
+; CHECK-T2-NEXT:    bicne.w r1, r0, r12
+; CHECK-T2-NEXT:    addw r0, r1, #854
+; CHECK-T2-NEXT:    bx lr
+;
+; CHECK-A-LABEL: test_add_i12:
+; CHECK-A:       @ %bb.0:
+; CHECK-A-NEXT:    bfc r1, #0, #13
+; CHECK-A-NEXT:    bfc r0, #0, #12
+; CHECK-A-NEXT:    tst r2, #1
+; CHECK-A-NEXT:    moveq r0, r1
+; CHECK-A-NEXT:    movw r1, #854
+; CHECK-A-NEXT:    orr r0, r0, r1
+; CHECK-A-NEXT:    bx lr
   %tmp = and i32 %a, -4096
   %tmp1 = and i32 %b, -8192
   %int = select i1 %tst, i32 %tmp, i32 %tmp1
@@ -38,4 +158,137 @@ define i32 @test_add_i12(i32 %a, i32 %b, i1 %tst) {
   ret i32 %res
 }
 
+define i32 @oradd(i32 %i, i32 %y) {
+; CHECK-T1-LABEL: oradd:
+; CHECK-T1:       @ %bb.0: @ %entry
+; CHECK-T1-NEXT:    lsls r0, r0, #1
+; CHECK-T1-NEXT:    adds r0, r0, #1
+; CHECK-T1-NEXT:    adds r0, r0, r1
+; CHECK-T1-NEXT:    bx lr
+;
+; CHECK-T2-LABEL: oradd:
+; CHECK-T2:       @ %bb.0: @ %entry
+; CHECK-T2-NEXT:    lsls r0, r0, #1
+; CHECK-T2-NEXT:    adds r0, #1
+; CHECK-T2-NEXT:    add r0, r1
+; CHECK-T2-NEXT:    bx lr
+;
+; CHECK-A-LABEL: oradd:
+; CHECK-A:       @ %bb.0: @ %entry
+; CHECK-A-NEXT:    mov r2, #1
+; CHECK-A-NEXT:    orr r0, r2, r0, lsl #1
+; CHECK-A-NEXT:    add r0, r0, r1
+; CHECK-A-NEXT:    bx lr
+entry:
+  %mul = shl i32 %i, 1
+  %or = or i32 %mul, 1
+  %add = add i32 %or, %y
+  ret i32 %add
+}
+
+define i32 @orgep(i32 %i, i32* %x, i32* %y) {
+; CHECK-T1-LABEL: orgep:
+; CHECK-T1:       @ %bb.0: @ %entry
+; CHECK-T1-NEXT:    lsls r0, r0, #3
+; CHECK-T1-NEXT:    adds r0, r0, #4
+; CHECK-T1-NEXT:    ldr r0, [r1, r0]
+; CHECK-T1-NEXT:    bx lr
+;
+; CHECK-T2-LABEL: orgep:
+; CHECK-T2:       @ %bb.0: @ %entry
+; CHECK-T2-NEXT:    lsls r0, r0, #3
+; CHECK-T2-NEXT:    adds r0, #4
+; CHECK-T2-NEXT:    ldr r0, [r1, r0]
+; CHECK-T2-NEXT:    bx lr
+;
+; CHECK-A-LABEL: orgep:
+; CHECK-A:       @ %bb.0: @ %entry
+; CHECK-A-NEXT:    mov r2, #4
+; CHECK-A-NEXT:    orr r0, r2, r0, lsl #3
+; CHECK-A-NEXT:    ldr r0, [r1, r0]
+; CHECK-A-NEXT:    bx lr
+entry:
+  %mul = shl i32 %i, 1
+  %add = or i32 %mul, 1
+  %arrayidx = getelementptr inbounds i32, i32* %x, i32 %add
+  %0 = load i32, i32* %arrayidx, align 8
+  ret i32 %0
+}
+
+define i32 @orgeps(i32 %i, i32* %x, i32* %y) {
+; CHECK-T1-LABEL: orgeps:
+; CHECK-T1:       @ %bb.0: @ %entry
+; CHECK-T1-NEXT:    lsls r0, r0, #3
+; CHECK-T1-NEXT:    adds r2, r0, #4
+; CHECK-T1-NEXT:    ldr r2, [r1, r2]
+; CHECK-T1-NEXT:    adds r0, r0, r1
+; CHECK-T1-NEXT:    ldr r0, [r0, #8]
+; CHECK-T1-NEXT:    adds r0, r0, r2
+; CHECK-T1-NEXT:    bx lr
+;
+; CHECK-T2-LABEL: orgeps:
+; CHECK-T2:       @ %bb.0: @ %entry
+; CHECK-T2-NEXT:    lsls r2, r0, #3
+; CHECK-T2-NEXT:    add.w r0, r1, r0, lsl #3
+; CHECK-T2-NEXT:    adds r2, #4
+; CHECK-T2-NEXT:    ldr r0, [r0, #8]
+; CHECK-T2-NEXT:    ldr r2, [r1, r2]
+; CHECK-T2-NEXT:    add r0, r2
+; CHECK-T2-NEXT:    bx lr
+;
+; CHECK-A-LABEL: orgeps:
+; CHECK-A:       @ %bb.0: @ %entry
+; CHECK-A-NEXT:    mov r2, #4
+; CHECK-A-NEXT:    orr r2, r2, r0, lsl #3
+; CHECK-A-NEXT:    add r0, r1, r0, lsl #3
+; CHECK-A-NEXT:    ldr r2, [r1, r2]
+; CHECK-A-NEXT:    ldr r0, [r0, #8]
+; CHECK-A-NEXT:    add r0, r0, r2
+; CHECK-A-NEXT:    bx lr
+entry:
+  %mul = shl i32 %i, 1
+  %add = or i32 %mul, 1
+  %arrayidx = getelementptr inbounds i32, i32* %x, i32 %add
+  %0 = load i32, i32* %arrayidx, align 8
+  %add2 = add i32 %mul, 2
+  %arrayidx3 = getelementptr inbounds i32, i32* %x, i32 %add2
+  %1 = load i32, i32* %arrayidx3, align 8
+  %add4 = add i32 %1, %0
+  ret i32 %add4
+}
+
+define i32 @multiuse(i32 %i, i32* %x, i32* %y) {
+; CHECK-T1-LABEL: multiuse:
+; CHECK-T1:       @ %bb.0: @ %entry
+; CHECK-T1-NEXT:    lsls r0, r0, #1
+; CHECK-T1-NEXT:    adds r0, r0, #1
+; CHECK-T1-NEXT:    lsls r2, r0, #2
+; CHECK-T1-NEXT:    ldr r1, [r1, r2]
+; CHECK-T1-NEXT:    adds r0, r0, r1
+; CHECK-T1-NEXT:    bx lr
+;
+; CHECK-T2-LABEL: multiuse:
+; CHECK-T2:       @ %bb.0: @ %entry
+; CHECK-T2-NEXT:    lsls r0, r0, #1
+; CHECK-T2-NEXT:    adds r0, #1
+; CHECK-T2-NEXT:    ldr.w r1, [r1, r0, lsl #2]
+; CHECK-T2-NEXT:    add r0, r1
+; CHECK-T2-NEXT:    bx lr
+;
+; CHECK-A-LABEL: multiuse:
+; CHECK-A:       @ %bb.0: @ %entry
+; CHECK-A-NEXT:    mov r2, #1
+; CHECK-A-NEXT:    orr r0, r2, r0, lsl #1
+; CHECK-A-NEXT:    ldr r1, [r1, r0, lsl #2]
+; CHECK-A-NEXT:    add r0, r0, r1
+; CHECK-A-NEXT:    bx lr
+entry:
+  %mul = shl i32 %i, 1
+  %add = or i32 %mul, 1
+  %arrayidx = getelementptr inbounds i32, i32* %x, i32 %add
+  %0 = load i32, i32* %arrayidx, align 8
+  %r = add i32 %add, %0
+  ret i32 %r
+}
+
 declare void @foo(i32)

diff --git a/llvm/test/CodeGen/Hexagon/isel-global-offset-alignment.ll b/llvm/test/CodeGen/Hexagon/isel-global-offset-alignment.ll
index ed37687a15949..4ee18c4a2358f 100644
--- a/llvm/test/CodeGen/Hexagon/isel-global-offset-alignment.ll
+++ b/llvm/test/CodeGen/Hexagon/isel-global-offset-alignment.ll
@@ -1,10 +1,8 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -march=hexagon < %s | FileCheck %s
 
 ; This should compile without errors, and the offsets with respect to the
 ; beginning of the global "array" don't need to be multiples of 8.
-;
-; CHECK-DAG: memd(r2+##array+174)
-; CHECK-DAG: memd(r2+##array+182)
 
 target datalayout = "e-m:e-p:32:32:32-a:0-n16:32-i64:64:64-i32:32:32-i16:16:16-i1:8:8-f32:32:32-f64:64:64-v32:32:32-v64:64:64-v512:512:512-v1024:1024:1024-v2048:2048:2048"
 target triple = "hexagon"
@@ -12,6 +10,21 @@ target triple = "hexagon"
 @array = external global [1000000 x i16], align 8
 
 define void @fred(i1 %x) #0 {
+; CHECK-LABEL: fred:
+; CHECK:       // %bb.0: // %b0
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = tstbit(r0,#0)
+; CHECK-NEXT:     r5:4 = combine(#0,#0)
+; CHECK-NEXT:     if (p0.new) r2 = #2
+; CHECK-NEXT:     if (!p0.new) r2 = #1026
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     memd(r2+##array+182) = r5:4
+; CHECK-NEXT:     memd(r2+##array+174) = r5:4
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     jumpr r31
+; CHECK-NEXT:    }
 b0:
   br i1 %x, label %b3, label %b1
 

diff --git a/llvm/test/CodeGen/SystemZ/addr-01.ll b/llvm/test/CodeGen/SystemZ/addr-01.ll
index 5a0f9fe56c8d1..373fc578db831 100644
--- a/llvm/test/CodeGen/SystemZ/addr-01.ll
+++ b/llvm/test/CodeGen/SystemZ/addr-01.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; Test selection of addresses with indices in cases where the address
 ; is used once.
 ;
@@ -6,8 +7,9 @@
 ; A simple index address.
 define void @f1(i64 %addr, i64 %index) {
 ; CHECK-LABEL: f1:
-; CHECK: lb %r0, 0(%r3,%r2)
-; CHECK: br %r14
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lb %r0, 0(%r3,%r2)
+; CHECK-NEXT:    br %r14
   %add = add i64 %addr, %index
   %ptr = inttoptr i64 %add to i8 *
   %a = load volatile i8, i8 *%ptr
@@ -17,8 +19,9 @@ define void @f1(i64 %addr, i64 %index) {
 ; An address with an index and a displacement (order 1).
 define void @f2(i64 %addr, i64 %index) {
 ; CHECK-LABEL: f2:
-; CHECK: lb %r0, 100(%r3,%r2)
-; CHECK: br %r14
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lb %r0, 100(%r3,%r2)
+; CHECK-NEXT:    br %r14
   %add1 = add i64 %addr, %index
   %add2 = add i64 %add1, 100
   %ptr = inttoptr i64 %add2 to i8 *
@@ -29,8 +32,9 @@ define void @f2(i64 %addr, i64 %index) {
 ; An address with an index and a displacement (order 2).
 define void @f3(i64 %addr, i64 %index) {
 ; CHECK-LABEL: f3:
-; CHECK: lb %r0, 100(%r3,%r2)
-; CHECK: br %r14
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lb %r0, 100(%r3,%r2)
+; CHECK-NEXT:    br %r14
   %add1 = add i64 %addr, 100
   %add2 = add i64 %add1, %index
   %ptr = inttoptr i64 %add2 to i8 *
@@ -41,8 +45,9 @@ define void @f3(i64 %addr, i64 %index) {
 ; An address with an index and a subtracted displacement (order 1).
 define void @f4(i64 %addr, i64 %index) {
 ; CHECK-LABEL: f4:
-; CHECK: lb %r0, -100(%r3,%r2)
-; CHECK: br %r14
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lb %r0, -100(%r3,%r2)
+; CHECK-NEXT:    br %r14
   %add1 = add i64 %addr, %index
   %add2 = sub i64 %add1, 100
   %ptr = inttoptr i64 %add2 to i8 *
@@ -53,8 +58,9 @@ define void @f4(i64 %addr, i64 %index) {
 ; An address with an index and a subtracted displacement (order 2).
 define void @f5(i64 %addr, i64 %index) {
 ; CHECK-LABEL: f5:
-; CHECK: lb %r0, -100(%r3,%r2)
-; CHECK: br %r14
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lb %r0, -100(%r3,%r2)
+; CHECK-NEXT:    br %r14
   %add1 = sub i64 %addr, 100
   %add2 = add i64 %add1, %index
   %ptr = inttoptr i64 %add2 to i8 *
@@ -65,9 +71,10 @@ define void @f5(i64 %addr, i64 %index) {
 ; An address with an index and a displacement added using OR.
 define void @f6(i64 %addr, i64 %index) {
 ; CHECK-LABEL: f6:
-; CHECK: nill %r2, 65528
-; CHECK: lb %r0, 6(%r3,%r2)
-; CHECK: br %r14
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    nill %r2, 65528
+; CHECK-NEXT:    lb %r0, 6(%r3,%r2)
+; CHECK-NEXT:    br %r14
   %aligned = and i64 %addr, -8
   %or = or i64 %aligned, 6
   %add = add i64 %or, %index
@@ -79,9 +86,10 @@ define void @f6(i64 %addr, i64 %index) {
 ; Like f6, but without the masking.  This OR doesn't count as a displacement.
 define void @f7(i64 %addr, i64 %index) {
 ; CHECK-LABEL: f7:
-; CHECK: oill %r2, 6
-; CHECK: lb %r0, 0(%r3,%r2)
-; CHECK: br %r14
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    oill %r2, 6
+; CHECK-NEXT:    lb %r0, 0(%r3,%r2)
+; CHECK-NEXT:    br %r14
   %or = or i64 %addr, 6
   %add = add i64 %or, %index
   %ptr = inttoptr i64 %add to i8 *
@@ -93,11 +101,12 @@ define void @f7(i64 %addr, i64 %index) {
 ; about the alignment of %add here.
 define void @f8(i64 %addr, i64 %index) {
 ; CHECK-LABEL: f8:
-; CHECK: nill %r2, 65528
-; CHECK: agr %r2, %r3
-; CHECK: oill %r2, 6
-; CHECK: lb %r0, 0(%r2)
-; CHECK: br %r14
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    nill %r2, 65528
+; CHECK-NEXT:    agr %r2, %r3
+; CHECK-NEXT:    oill %r2, 6
+; CHECK-NEXT:    lb %r0, 0(%r2)
+; CHECK-NEXT:    br %r14
   %aligned = and i64 %addr, -8
   %add = add i64 %aligned, %index
   %or = or i64 %add, 6

diff --git a/llvm/test/CodeGen/SystemZ/addr-02.ll b/llvm/test/CodeGen/SystemZ/addr-02.ll
index 680d9c97b29de..615d35c8b9407 100644
--- a/llvm/test/CodeGen/SystemZ/addr-02.ll
+++ b/llvm/test/CodeGen/SystemZ/addr-02.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; addr-01.ll in which the address is also used in a non-address context.
 ; The assumption here is that we should match complex addresses where
 ; possible, but this might well need to change in future.
@@ -7,8 +8,11 @@
 ; A simple index address.
 define void @f1(i64 %addr, i64 %index, i8 **%dst) {
 ; CHECK-LABEL: f1:
-; CHECK: lb %r0, 0(%r3,%r2)
-; CHECK: br %r14
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lb %r0, 0(%r3,%r2)
+; CHECK-NEXT:    la %r0, 0(%r3,%r2)
+; CHECK-NEXT:    stg %r0, 0(%r4)
+; CHECK-NEXT:    br %r14
   %add = add i64 %addr, %index
   %ptr = inttoptr i64 %add to i8 *
   %a = load volatile i8, i8 *%ptr
@@ -19,8 +23,11 @@ define void @f1(i64 %addr, i64 %index, i8 **%dst) {
 ; An address with an index and a displacement (order 1).
 define void @f2(i64 %addr, i64 %index, i8 **%dst) {
 ; CHECK-LABEL: f2:
-; CHECK: lb %r0, 100(%r3,%r2)
-; CHECK: br %r14
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lb %r0, 100(%r3,%r2)
+; CHECK-NEXT:    la %r0, 100(%r3,%r2)
+; CHECK-NEXT:    stg %r0, 0(%r4)
+; CHECK-NEXT:    br %r14
   %add1 = add i64 %addr, %index
   %add2 = add i64 %add1, 100
   %ptr = inttoptr i64 %add2 to i8 *
@@ -32,8 +39,11 @@ define void @f2(i64 %addr, i64 %index, i8 **%dst) {
 ; An address with an index and a displacement (order 2).
 define void @f3(i64 %addr, i64 %index, i8 **%dst) {
 ; CHECK-LABEL: f3:
-; CHECK: lb %r0, 100(%r3,%r2)
-; CHECK: br %r14
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lb %r0, 100(%r3,%r2)
+; CHECK-NEXT:    la %r0, 100(%r3,%r2)
+; CHECK-NEXT:    stg %r0, 0(%r4)
+; CHECK-NEXT:    br %r14
   %add1 = add i64 %addr, 100
   %add2 = add i64 %add1, %index
   %ptr = inttoptr i64 %add2 to i8 *
@@ -45,8 +55,11 @@ define void @f3(i64 %addr, i64 %index, i8 **%dst) {
 ; An address with an index and a subtracted displacement (order 1).
 define void @f4(i64 %addr, i64 %index, i8 **%dst) {
 ; CHECK-LABEL: f4:
-; CHECK: lb %r0, -100(%r3,%r2)
-; CHECK: br %r14
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lb %r0, -100(%r3,%r2)
+; CHECK-NEXT:    lay %r0, -100(%r3,%r2)
+; CHECK-NEXT:    stg %r0, 0(%r4)
+; CHECK-NEXT:    br %r14
   %add1 = add i64 %addr, %index
   %add2 = sub i64 %add1, 100
   %ptr = inttoptr i64 %add2 to i8 *
@@ -58,8 +71,11 @@ define void @f4(i64 %addr, i64 %index, i8 **%dst) {
 ; An address with an index and a subtracted displacement (order 2).
 define void @f5(i64 %addr, i64 %index, i8 **%dst) {
 ; CHECK-LABEL: f5:
-; CHECK: lb %r0, -100(%r3,%r2)
-; CHECK: br %r14
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lb %r0, -100(%r3,%r2)
+; CHECK-NEXT:    lay %r0, -100(%r3,%r2)
+; CHECK-NEXT:    stg %r0, 0(%r4)
+; CHECK-NEXT:    br %r14
   %add1 = sub i64 %addr, 100
   %add2 = add i64 %add1, %index
   %ptr = inttoptr i64 %add2 to i8 *
@@ -71,9 +87,12 @@ define void @f5(i64 %addr, i64 %index, i8 **%dst) {
 ; An address with an index and a displacement added using OR.
 define void @f6(i64 %addr, i64 %index, i8 **%dst) {
 ; CHECK-LABEL: f6:
-; CHECK: nill %r2, 65528
-; CHECK: lb %r0, 6(%r3,%r2)
-; CHECK: br %r14
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    nill %r2, 65528
+; CHECK-NEXT:    lb %r0, 6(%r3,%r2)
+; CHECK-NEXT:    la %r0, 6(%r3,%r2)
+; CHECK-NEXT:    stg %r0, 0(%r4)
+; CHECK-NEXT:    br %r14
   %aligned = and i64 %addr, -8
   %or = or i64 %aligned, 6
   %add = add i64 %or, %index
@@ -86,9 +105,12 @@ define void @f6(i64 %addr, i64 %index, i8 **%dst) {
 ; Like f6, but without the masking.  This OR doesn't count as a displacement.
 define void @f7(i64 %addr, i64 %index, i8 **%dst) {
 ; CHECK-LABEL: f7:
-; CHECK: oill %r2, 6
-; CHECK: lb %r0, 0(%r3,%r2)
-; CHECK: br %r14
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    oill %r2, 6
+; CHECK-NEXT:    lb %r0, 0(%r3,%r2)
+; CHECK-NEXT:    la %r0, 0(%r3,%r2)
+; CHECK-NEXT:    stg %r0, 0(%r4)
+; CHECK-NEXT:    br %r14
   %or = or i64 %addr, 6
   %add = add i64 %or, %index
   %ptr = inttoptr i64 %add to i8 *
@@ -101,11 +123,13 @@ define void @f7(i64 %addr, i64 %index, i8 **%dst) {
 ; about the alignment of %add here.
 define void @f8(i64 %addr, i64 %index, i8 **%dst) {
 ; CHECK-LABEL: f8:
-; CHECK: nill %r2, 65528
-; CHECK: agr %r2, %r3
-; CHECK: oill %r2, 6
-; CHECK: lb %r0, 0(%r2)
-; CHECK: br %r14
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    nill %r2, 65528
+; CHECK-NEXT:    agr %r2, %r3
+; CHECK-NEXT:    oill %r2, 6
+; CHECK-NEXT:    lb %r0, 0(%r2)
+; CHECK-NEXT:    stg %r2, 0(%r4)
+; CHECK-NEXT:    br %r14
   %aligned = and i64 %addr, -8
   %add = add i64 %aligned, %index
   %or = or i64 %add, 6

