[llvm-branch-commits] [llvm] 08280c4 - [NFC][Test] Format the PowerPC test for incoming patch

QingShan Zhang via llvm-branch-commits llvm-branch-commits at lists.llvm.org
Fri Dec 11 01:57:43 PST 2020


Author: QingShan Zhang
Date: 2020-12-11T09:53:20Z
New Revision: 08280c4b73439e5f99000c89a818f66343e87aa6

URL: https://github.com/llvm/llvm-project/commit/08280c4b73439e5f99000c89a818f66343e87aa6
DIFF: https://github.com/llvm/llvm-project/commit/08280c4b73439e5f99000c89a818f66343e87aa6.diff

LOG: [NFC][Test] Format the PowerPC test for incoming patch
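
The CHECK lines in this patch were autogenerated by utils/update_llc_test_checks.py,
which runs the tests' llc invocations and rewrites the assertions to match the
complete output. A minimal sketch of the regeneration step (the build/bin/llc
path is an assumption; substitute the llc from your own build tree):

    # Regenerate assertions for the three modified tests; --llc-binary
    # points the script at a locally built llc.
    python llvm/utils/update_llc_test_checks.py --llc-binary build/bin/llc \
        llvm/test/CodeGen/PowerPC/p9-dform-load-alignment.ll \
        llvm/test/CodeGen/PowerPC/unal4-std.ll \
        llvm/test/CodeGen/PowerPC/unaligned.ll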

Added: 
    

Modified: 
    llvm/test/CodeGen/PowerPC/p9-dform-load-alignment.ll
    llvm/test/CodeGen/PowerPC/unal4-std.ll
    llvm/test/CodeGen/PowerPC/unaligned.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/PowerPC/p9-dform-load-alignment.ll b/llvm/test/CodeGen/PowerPC/p9-dform-load-alignment.ll
index b672eef8740af..d6ed3dcf41b0a 100644
--- a/llvm/test/CodeGen/PowerPC/p9-dform-load-alignment.ll
+++ b/llvm/test/CodeGen/PowerPC/p9-dform-load-alignment.ll
@@ -1,16 +1,19 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mcpu=pwr9 -mtriple=powerpc64le-unknown-unknown \
 ; RUN:   -verify-machineinstrs -ppc-asm-full-reg-names \
 ; RUN:   -ppc-vsr-nums-as-vr < %s | FileCheck %s
 
 @best8x8mode = external dso_local local_unnamed_addr global [4 x i16], align 2
 define dso_local void @AlignDSForm() local_unnamed_addr {
+; CHECK-LABEL: AlignDSForm:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addis r3, r2, best8x8mode@toc@ha
+; CHECK-NEXT:    addi r3, r3, best8x8mode@toc@l
+; CHECK-NEXT:    ldx r3, 0, r3
+; CHECK-NEXT:    std r3, 0(r3)
 entry:
   %0 = load <4 x i16>, <4 x i16>* bitcast ([4 x i16]* @best8x8mode to <4 x i16>*), align 2
   store <4 x i16> %0, <4 x i16>* undef, align 4
   unreachable
-; CHECK-LABEL: AlignDSForm
-; CHECK: addis r{{[0-9]+}}, r{{[0-9]+}}, best8x8mode@toc@ha
-; CHECK: addi r[[REG:[0-9]+]], r{{[0-9]+}}, best8x8mode@toc@l
-; CHECK: ldx r{{[0-9]+}}, 0, r[[REG]]
 }
 

diff --git a/llvm/test/CodeGen/PowerPC/unal4-std.ll b/llvm/test/CodeGen/PowerPC/unal4-std.ll
index f843b6b58c1ee..038ede0ba92ee 100644
--- a/llvm/test/CodeGen/PowerPC/unal4-std.ll
+++ b/llvm/test/CodeGen/PowerPC/unal4-std.ll
@@ -1,9 +1,26 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -verify-machineinstrs < %s -mcpu=pwr7 -mattr=-vsx| FileCheck %s
 ; RUN: llc -verify-machineinstrs < %s -mcpu=pwr7 -mattr=+vsx | FileCheck -check-prefix=CHECK-VSX %s
 target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-v128:128:128-n32:64"
 target triple = "powerpc64-unknown-linux-gnu"
 
 define void @copy_to_conceal(<8 x i16>* %inp) #0 {
+; CHECK-LABEL: copy_to_conceal:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vxor 2, 2, 2
+; CHECK-NEXT:    addi 4, 1, -16
+; CHECK-NEXT:    stvx 2, 0, 4
+; CHECK-NEXT:    ld 4, -8(1)
+; CHECK-NEXT:    std 4, 8(3)
+; CHECK-NEXT:    ld 4, -16(1)
+; CHECK-NEXT:    stdx 4, 0, 3
+; CHECK-NEXT:    blr
+;
+; CHECK-VSX-LABEL: copy_to_conceal:
+; CHECK-VSX:       # %bb.0: # %entry
+; CHECK-VSX-NEXT:    xxlxor 0, 0, 0
+; CHECK-VSX-NEXT:    stxvw4x 0, 0, 3
+; CHECK-VSX-NEXT:    blr
 entry:
   store <8 x i16> zeroinitializer, <8 x i16>* %inp, align 2
   br label %if.end210
@@ -14,11 +31,7 @@ if.end210:                                        ; preds = %entry
 ; This will generate two align-1 i64 stores. Make sure that they are
 ; indexed stores and not in r+i form (which require the offset to be
 ; a multiple of 4).
-; CHECK: @copy_to_conceal
-; CHECK: stdx {{[0-9]+}}, 0,
 
-; CHECK-VSX: @copy_to_conceal
-; CHECK-VSX: stxvw4x {{[0-9]+}}, 0,
 }
 
 attributes #0 = { nounwind "less-precise-fpmad"="false" "frame-pointer"="non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }

diff --git a/llvm/test/CodeGen/PowerPC/unaligned.ll b/llvm/test/CodeGen/PowerPC/unaligned.ll
index bd518342f3ec9..977c470e668e2 100644
--- a/llvm/test/CodeGen/PowerPC/unaligned.ll
+++ b/llvm/test/CodeGen/PowerPC/unaligned.ll
@@ -1,105 +1,142 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 -mattr=-vsx | FileCheck %s
 target datalayout = "E-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f128:64:128-n32"
 ; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 -mattr=+vsx | FileCheck -check-prefix=CHECK-VSX %s
 target datalayout = "E-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f128:64:128-n32"
 
 define void @foo1(i16* %p, i16* %r) nounwind {
+; CHECK-LABEL: foo1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lhz 3, 0(3)
+; CHECK-NEXT:    sth 3, 0(4)
+; CHECK-NEXT:    blr
+;
+; CHECK-VSX-LABEL: foo1:
+; CHECK-VSX:       # %bb.0: # %entry
+; CHECK-VSX-NEXT:    lhz 3, 0(3)
+; CHECK-VSX-NEXT:    sth 3, 0(4)
+; CHECK-VSX-NEXT:    blr
 entry:
   %v = load i16, i16* %p, align 1
   store i16 %v, i16* %r, align 1
   ret void
 
-; CHECK: @foo1
-; CHECK: lhz
-; CHECK: sth
 
-; CHECK-VSX: @foo1
-; CHECK-VSX: lhz
-; CHECK-VSX: sth
 }
 
 define void @foo2(i32* %p, i32* %r) nounwind {
+; CHECK-LABEL: foo2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lwz 3, 0(3)
+; CHECK-NEXT:    stw 3, 0(4)
+; CHECK-NEXT:    blr
+;
+; CHECK-VSX-LABEL: foo2:
+; CHECK-VSX:       # %bb.0: # %entry
+; CHECK-VSX-NEXT:    lwz 3, 0(3)
+; CHECK-VSX-NEXT:    stw 3, 0(4)
+; CHECK-VSX-NEXT:    blr
 entry:
   %v = load i32, i32* %p, align 1
   store i32 %v, i32* %r, align 1
   ret void
 
-; CHECK: @foo2
-; CHECK: lwz
-; CHECK: stw
 
-; CHECK-VSX: @foo2
-; CHECK-VSX: lwz
-; CHECK-VSX: stw
 }
 
 define void @foo3(i64* %p, i64* %r) nounwind {
+; CHECK-LABEL: foo3:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    ldx 3, 0, 3
+; CHECK-NEXT:    stdx 3, 0, 4
+; CHECK-NEXT:    blr
+;
+; CHECK-VSX-LABEL: foo3:
+; CHECK-VSX:       # %bb.0: # %entry
+; CHECK-VSX-NEXT:    ldx 3, 0, 3
+; CHECK-VSX-NEXT:    stdx 3, 0, 4
+; CHECK-VSX-NEXT:    blr
 entry:
   %v = load i64, i64* %p, align 1
   store i64 %v, i64* %r, align 1
   ret void
 
-; CHECK: @foo3
-; CHECK: ld
-; CHECK: std
 
-; CHECK-VSX: @foo3
-; CHECK-VSX: ld
-; CHECK-VSX: std
 }
 
 define void @foo4(float* %p, float* %r) nounwind {
+; CHECK-LABEL: foo4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lfs 0, 0(3)
+; CHECK-NEXT:    stfs 0, 0(4)
+; CHECK-NEXT:    blr
+;
+; CHECK-VSX-LABEL: foo4:
+; CHECK-VSX:       # %bb.0: # %entry
+; CHECK-VSX-NEXT:    lfs 0, 0(3)
+; CHECK-VSX-NEXT:    stfs 0, 0(4)
+; CHECK-VSX-NEXT:    blr
 entry:
   %v = load float, float* %p, align 1
   store float %v, float* %r, align 1
   ret void
 
-; CHECK: @foo4
-; CHECK: lfs
-; CHECK: stfs
 
-; CHECK-VSX: @foo4
-; CHECK-VSX: lfs
-; CHECK-VSX: stfs
 }
 
 define void @foo5(double* %p, double* %r) nounwind {
+; CHECK-LABEL: foo5:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lfd 0, 0(3)
+; CHECK-NEXT:    stfd 0, 0(4)
+; CHECK-NEXT:    blr
+;
+; CHECK-VSX-LABEL: foo5:
+; CHECK-VSX:       # %bb.0: # %entry
+; CHECK-VSX-NEXT:    lfdx 0, 0, 3
+; CHECK-VSX-NEXT:    stfdx 0, 0, 4
+; CHECK-VSX-NEXT:    blr
 entry:
   %v = load double, double* %p, align 1
   store double %v, double* %r, align 1
   ret void
 
-; CHECK: @foo5
-; CHECK: lfd
-; CHECK: stfd
 
-; CHECK-VSX: @foo5
-; CHECK-VSX: lfdx
-; CHECK-VSX: stfdx
 }
 
 define void @foo6(<4 x float>* %p, <4 x float>* %r) nounwind {
-entry:
-  %v = load <4 x float>, <4 x float>* %p, align 1
-  store <4 x float> %v, <4 x float>* %r, align 1
-  ret void
-
 ; These loads and stores are legalized into aligned loads and stores
 ; using aligned stack slots.
-; CHECK: @foo6
-; CHECK-DAG: ld
-; CHECK-DAG: ld
-; CHECK-DAG: std
-; CHECK: stdx
-
+; CHECK-LABEL: foo6:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    li 5, 15
+; CHECK-NEXT:    lvsl 3, 0, 3
+; CHECK-NEXT:    lvx 2, 3, 5
+; CHECK-NEXT:    lvx 4, 0, 3
+; CHECK-NEXT:    addi 3, 1, -16
+; CHECK-NEXT:    vperm 2, 4, 2, 3
+; CHECK-NEXT:    stvx 2, 0, 3
+; CHECK-NEXT:    ld 3, -8(1)
+; CHECK-NEXT:    std 3, 8(4)
+; CHECK-NEXT:    ld 3, -16(1)
+; CHECK-NEXT:    stdx 3, 0, 4
+; CHECK-NEXT:    blr
+;
+; CHECK-VSX-LABEL: foo6:
+; CHECK-VSX:       # %bb.0: # %entry
+; CHECK-VSX-NEXT:    li 5, 15
+; CHECK-VSX-NEXT:    lvsl 3, 0, 3
+; CHECK-VSX-NEXT:    lvx 2, 3, 5
+; CHECK-VSX-NEXT:    lvx 4, 0, 3
+; CHECK-VSX-NEXT:    vperm 2, 4, 2, 3
+; CHECK-VSX-NEXT:    stxvw4x 34, 0, 4
+; CHECK-VSX-NEXT:    blr
 ; For VSX on P7, unaligned loads and stores are preferable to aligned
 ; stack slots, but lvsl/vperm is better still.  (On P8 lxvw4x is preferable.)
 ; Using unaligned stxvw4x is preferable on both machines.
-; CHECK-VSX: @foo6
-; CHECK-VSX-DAG: lvsl
-; CHECK-VSX-DAG: lvx
-; CHECK-VSX-DAG: lvx
-; CHECK-VSX: vperm
-; CHECK-VSX: stxvw4x
+entry:
+  %v = load <4 x float>, <4 x float>* %p, align 1
+  store <4 x float> %v, <4 x float>* %r, align 1
+  ret void
 }
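
For review purposes, note the practical difference between the removed and the
autogenerated assertions: a plain CHECK directive may match anywhere later in
the llc output, while CHECK-NEXT must match the immediately following line, so
the new checks pin the entire instruction sequence. A hypothetical FileCheck
illustration (not taken from these tests):

    ; With plain CHECK, unrelated instructions may sit between the matches:
    ; CHECK: lhz 3, 0(3)
    ; CHECK: sth 3, 0(4)

    ; With CHECK-NEXT, the store must be the very next line after the load:
    ; CHECK:      lhz 3, 0(3)
    ; CHECK-NEXT: sth 3, 0(4)

The tighter form is what makes this NFC change useful for the incoming patch:
any instruction-level difference the patch introduces will surface as an exact
test diff rather than slipping through a loose match.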
 

