[llvm] cb28768 - [PowerPC][AIX] Enable Shrinkwrapping on 32 and 64 bit AIX.

Sidharth Baveja via llvm-commits llvm-commits at lists.llvm.org
Wed Feb 17 06:55:57 PST 2021


Author: Sidharth Baveja
Date: 2021-02-17T14:54:57Z
New Revision: cb2876800cc827431f4143c4fc5595c6c1191269

URL: https://github.com/llvm/llvm-project/commit/cb2876800cc827431f4143c4fc5595c6c1191269
DIFF: https://github.com/llvm/llvm-project/commit/cb2876800cc827431f4143c4fc5595c6c1191269.diff

LOG: [PowerPC][AIX] Enable Shrinkwrapping on 32 and 64 bit AIX.

Summary:
Shrink wrapping is currently not enabled on AIX.
This patch enables shrink wrapping on 32-bit and 64-bit AIX; it remains enabled on 64-bit ELF.

Reviewed By: sfertile, nemanjai

Differential Revision: https://reviews.llvm.org/D95094
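
For quick reference, the net effect on the target hook is sketched below. This simply
mirrors the PPCFrameLowering.cpp hunk in the diff and adds no behaviour beyond it:

  // Shrink wrapping is now allowed everywhere except the 32-bit ELF ABI,
  // which brings in 32-bit and 64-bit AIX (XCOFF) alongside 64-bit ELF.
  bool PPCFrameLowering::enableShrinkWrapping(const MachineFunction &MF) const {
    if (MF.getInfo<PPCFunctionInfo>()->shrinkWrapDisabled())
      return false;
    // Previously: isSVR4ABI() && isPPC64(), i.e. 64-bit ELF only.
    return !MF.getSubtarget<PPCSubtarget>().is32BitELFABI();
  }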

Added: 
    

Modified: 
    llvm/lib/Target/PowerPC/PPCFrameLowering.cpp
    llvm/test/CodeGen/PowerPC/ppc-shrink-wrapping.ll
    llvm/test/CodeGen/PowerPC/ppc64-sibcall-shrinkwrap.ll
    llvm/test/CodeGen/PowerPC/shrink-wrap.ll
    llvm/test/CodeGen/PowerPC/shrink-wrap.mir

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/PowerPC/PPCFrameLowering.cpp b/llvm/lib/Target/PowerPC/PPCFrameLowering.cpp
index 50ce11b8374f..36bdd547923d 100644
--- a/llvm/lib/Target/PowerPC/PPCFrameLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCFrameLowering.cpp
@@ -2545,6 +2545,5 @@ unsigned PPCFrameLowering::getBasePointerSaveOffset() const {
 bool PPCFrameLowering::enableShrinkWrapping(const MachineFunction &MF) const {
   if (MF.getInfo<PPCFunctionInfo>()->shrinkWrapDisabled())
     return false;
-  return (MF.getSubtarget<PPCSubtarget>().isSVR4ABI() &&
-          MF.getSubtarget<PPCSubtarget>().isPPC64());
+  return !MF.getSubtarget<PPCSubtarget>().is32BitELFABI();
 }
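
To illustrate the shape of code the updated tests exercise, here is a rough C rendering
of the first test case in the ppc-shrink-wrapping.ll diff that follows (the "simple
diamond" example). The C source is illustrative only and is not part of the patch; the
names mirror the IR in the test:

  extern int doSomething(int, int *);

  // With shrink wrapping enabled, the early-return path needs no prologue at
  // all: the link-register save and frame setup are sunk into the block that
  // actually makes the call to doSomething.
  int foo(int a, int b) {
    if (a < b) {
      int tmp = a;
      return doSomething(0, &tmp);
    }
    return a;
  }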

diff --git a/llvm/test/CodeGen/PowerPC/ppc-shrink-wrapping.ll b/llvm/test/CodeGen/PowerPC/ppc-shrink-wrapping.ll
index 004bf64e6953..fccc2f684dba 100644
--- a/llvm/test/CodeGen/PowerPC/ppc-shrink-wrapping.ll
+++ b/llvm/test/CodeGen/PowerPC/ppc-shrink-wrapping.ll
@@ -1,5 +1,10 @@
-; RUN: llc -mtriple=powerpc64le-unknown-linux-gnu -mcpu=pwr8 %s -o - -verify-machineinstrs | FileCheck %s --check-prefix=CHECK --check-prefix=ENABLE
-; RUN: llc -mtriple=powerpc64le-unknown-linux-gnu %s -o - -enable-shrink-wrap=false -verify-machineinstrs |  FileCheck %s --check-prefix=CHECK --check-prefix=DISABLE
+; RUN: llc -mtriple=powerpc64le-unknown-linux-gnu -mcpu=pwr8 %s -o - -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,ENABLE,CHECK-64,ENABLE-64
+; RUN: llc -mtriple=powerpc64le-unknown-linux-gnu %s -o - -enable-shrink-wrap=false -verify-machineinstrs |  FileCheck %s --check-prefixes=CHECK,DISABLE,CHECK-64,DISABLE-64
+; RUN: llc -mtriple=powerpc-ibm-aix-xcoff -mattr=-altivec -mcpu=pwr8 %s -o - -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,ENABLE,CHECK-32,ENABLE-32
+; RUN: llc -mtriple=powerpc-ibm-aix-xcoff %s -o - -enable-shrink-wrap=false -verify-machineinstrs |  FileCheck %s --check-prefixes=CHECK,DISABLE,CHECK-32,DISABLE-32
+; RUN: llc -mtriple=powerpc64-ibm-aix-xcoff -mattr=-altivec -mcpu=pwr8 %s -o - -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,ENABLE,CHECK-64,ENABLE-64
+; RUN: llc -mtriple=powerpc64-ibm-aix-xcoff %s -o - -enable-shrink-wrap=false -verify-machineinstrs |  FileCheck %s --check-prefixes=CHECK,DISABLE,CHECK-64,DISABLE-64
+;
 ;
 ; Note: Lots of tests use inline asm instead of regular calls.
 ; This allows to have a better control on what the allocation will do.
@@ -10,7 +15,7 @@
 
 
 ; Initial motivating example: Simple diamond with a call just on one side.
-; CHECK-LABEL: foo:
+; CHECK-LABEL: {{.*}}foo:
 ;
 ; Compare the arguments and return
 ; No prologue needed.
@@ -19,13 +24,13 @@
 ;
 ; Prologue code.
 ;  At a minimum, we save/restore the link register. Other registers may be saved
-;  as well. 
-; CHECK: mflr 
+;  as well.
+; CHECK: mflr
 ;
 ; Compare the arguments and jump to exit.
 ; After the prologue is set.
 ; DISABLE: cmpw 3, 4
-; DISABLE-NEXT: bge 0, .[[EXIT_LABEL:LBB[0-9_]+]]
+; DISABLE-NEXT: bge 0, {{.*}}[[EXIT_LABEL:BB[0-9_]+]]
 ;
 ; Store %a on the stack
 ; CHECK: stw 3, {{[0-9]+([0-9]+)}}
@@ -33,11 +38,11 @@
 ; CHECK-NEXT: addi 4, 1, {{[0-9]+}}
 ; Set the first argument to zero.
 ; CHECK-NEXT: li 3, 0
-; CHECK-NEXT: bl doSomething
+; CHECK-NEXT: bl {{.*}}doSomething
 ;
 ; With shrink-wrapping, epilogue is just after the call.
 ; Restore the link register and return.
-; Note that there could be other epilog code before the link register is 
+; Note that there could be other epilog code before the link register is
 ; restored but we will not check for it here.
 ; ENABLE: mtlr
 ; ENABLE-NEXT: blr
@@ -69,50 +74,50 @@ false:
 declare i32 @doSomething(i32, i32*)
 
 
-
 ; Check that we do not perform the restore inside the loop whereas the save
 ; is outside.
-; CHECK-LABEL: freqSaveAndRestoreOutsideLoop:
+; CHECK-LABEL: {{.*}}freqSaveAndRestoreOutsideLoop:
 ;
 ; Shrink-wrapping allows to skip the prologue in the else case.
 ; ENABLE: cmplwi 3, 0
-; ENABLE: beq 0, .[[ELSE_LABEL:LBB[0-9_]+]]
+; ENABLE: beq 0, {{.*}}[[ELSE_LABEL:BB[0-9_]+]]
 ;
 ; Prologue code.
 ; Make sure we save the link register
 ; CHECK: mflr {{[0-9]+}}
 ;
 ; DISABLE: cmplwi 3, 0
-; DISABLE: beq 0, .[[ELSE_LABEL:LBB[0-9_]+]]
+; DISABLE: beq 0, {{.*}}[[ELSE_LABEL:BB[0-9_]+]]
 ;
 ; Loop preheader
 ; CHECK-DAG: li [[SUM:[0-9]+]], 0
 ; CHECK-DAG: li [[IV:[0-9]+]], 10
-; 
+;
 ; Loop body
-; CHECK: .[[LOOP:LBB[0-9_]+]]: # %for.body
-; CHECK: bl something
+; CHECK: {{.*}}[[LOOP:BB[0-9_]+]]: # %for.body
+; CHECK: bl {{.*}}something
+;
 ; CHECK-DAG: addi [[IV]], [[IV]], -1
-; CHECK-DAG: add [[SUM]], 3, [[SUM]] 
-; CHECK-NEXT: cmplwi [[IV]], 0
-; CHECK-NEXT: bne 0, .[[LOOP]]
+; CHECK-DAG: add [[SUM]], 3, [[SUM]]
+; CHECK-DAG: cmplwi [[IV]], 0
+; CHECK-NEXT: bne 0, {{.*}}[[LOOP]]
 ;
 ; Next BB.
 ; CHECK: slwi 3, [[SUM]], 3
 ;
 ; Jump to epilogue.
-; DISABLE: b .[[EPILOG_BB:LBB[0-9_]+]]
+; DISABLE: b {{.*}}[[EPILOG_BB:BB[0-9_]+]]
 ;
-; DISABLE: .[[ELSE_LABEL]]: # %if.else
+; DISABLE: {{.*}}[[ELSE_LABEL]]: # %if.else
 ; Shift second argument by one and store into returned register.
 ; DISABLE: slwi 3, 4, 1
-; DISABLE: .[[EPILOG_BB]]: # %if.end
+; DISABLE: {{.*}}[[EPILOG_BB]]: # %if.end
 ;
 ; Epilogue code.
 ; CHECK: mtlr {{[0-9]+}}
 ; CHECK: blr
 ;
-; ENABLE: .[[ELSE_LABEL]]: # %if.else
+; ENABLE: {{.*}}[[ELSE_LABEL]]: # %if.else
 ; Shift second argument by one and store into returned register.
 ; ENABLE: slwi 3, 4, 1
 ; ENABLE-NEXT: blr
@@ -151,7 +156,7 @@ declare i32 @something(...)
 
 ; Check that we do not perform the shrink-wrapping inside the loop even
 ; though that would be legal. The cost model must prevent that.
-; CHECK-LABEL: freqSaveAndRestoreOutsideLoop2:
+; CHECK-LABEL: {{.*}}freqSaveAndRestoreOutsideLoop2:
 ; Prologue code.
 ; Make sure we save the link register before the call
 ; CHECK: mflr {{[0-9]+}}
@@ -159,14 +164,16 @@ declare i32 @something(...)
 ; Loop preheader
 ; CHECK-DAG: li [[SUM:[0-9]+]], 0
 ; CHECK-DAG: li [[IV:[0-9]+]], 10
-; 
+;
 ; Loop body
-; CHECK: .[[LOOP:LBB[0-9_]+]]: # %for.body
-; CHECK: bl something
+; CHECK: {{.*}}[[LOOP:BB[0-9_]+]]: # %for.body
+; CHECK: bl {{.*}}something
+;
 ; CHECK-DAG: addi [[IV]], [[IV]], -1
-; CHECK-DAG: add [[SUM]], 3, [[SUM]] 
-; CHECK-NEXT: cmplwi [[IV]], 0
-; CHECK-NEXT: bne 0, .[[LOOP]]
+; CHECK-DAG: add [[SUM]], 3, [[SUM]]
+; CHECK-DAG: cmplwi [[IV]], 0
+;
+; CHECK-NEXT: bne 0, {{.*}}[[LOOP]]
 ;
 ; Next BB
 ; CHECK: %for.exit
@@ -200,49 +207,61 @@ for.end:                                          ; preds = %for.body
 
 ; Check with a more complex case that we do not have save within the loop and
 ; restore outside.
-; CHECK-LABEL: loopInfoSaveOutsideLoop:
+; CHECK-LABEL: {{.*}}loopInfoSaveOutsideLoop:
 ;
 ; ENABLE: cmplwi 3, 0
-; ENABLE-NEXT: beq 0, .[[ELSE_LABEL:LBB[0-9_]+]]
+; ENABLE-NEXT: beq 0, {{.*}}[[ELSE_LABEL:BB[0-9_]+]]
 ;
 ; Prologue code.
-; Make sure we save the link register 
+; Make sure we save the link register
 ; CHECK: mflr {{[0-9]+}}
 ;
-; DISABLE: std
-; DISABLE-NEXT: std
-; DISABLE: cmplwi 3, 0
-; DISABLE-NEXT: beq 0, .[[ELSE_LABEL:LBB[0-9_]+]]
+; DISABLE-64-DAG: std {{[0-9]+}}
+; DISABLE-64-DAG: std {{[0-9]+}}
+; DISABLE-64-DAG: std {{[0-9]+}}
+; DISABLE-64-DAG: stdu 1,
+; DISABLE-64-DAG: cmplwi 3, 0
+;
+; DISABLE-32-DAG: stw {{[0-9]+}}
+; DISABLE-32-DAG: stw {{[0-9]+}}
+; DISABLE-32-DAG: stw {{[0-9]+}}
+; DISABLE-32-DAG: stwu 1,
+; DISABLE-32-DAG: cmplwi 3, 0
+;
+; DISABLE-NEXT: beq 0, {{.*}}[[ELSE_LABEL:BB[0-9_]+]]
 ;
 ; Loop preheader
 ; CHECK-DAG: li [[SUM:[0-9]+]], 0
 ; CHECK-DAG: li [[IV:[0-9]+]], 10
-; 
+;
 ; Loop body
-; CHECK: .[[LOOP:LBB[0-9_]+]]: # %for.body
-; CHECK: bl something
+; CHECK: {{.*}}[[LOOP:BB[0-9_]+]]: # %for.body
+; CHECK: bl {{.*}}something
+;
 ; CHECK-DAG: addi [[IV]], [[IV]], -1
-; CHECK-DAG: add [[SUM]], 3, [[SUM]] 
-; CHECK-NEXT: cmplwi [[IV]], 0
-; CHECK-NEXT: bne 0, .[[LOOP]]
-; 
+; CHECK-DAG: add [[SUM]], 3, [[SUM]]
+; CHECK-DAG: cmplwi [[IV]], 0
+;
+; CHECK-NEXT: bne 0, {{.*}}[[LOOP]]
+;
 ; Next BB
-; CHECK: bl somethingElse 
+; CHECK: bl {{.*}}somethingElse
 ; CHECK: slwi 3, [[SUM]], 3
 ;
 ; Jump to epilogue
-; DISABLE: b .[[EPILOG_BB:LBB[0-9_]+]]
+; DISABLE: b {{.*}}[[EPILOG_BB:BB[0-9_]+]]
 ;
-; DISABLE: .[[ELSE_LABEL]]: # %if.else
+; DISABLE: {{.*}}[[ELSE_LABEL]]: # %if.else
 ; Shift second argument by one and store into returned register.
 ; DISABLE: slwi 3, 4, 1
 ;
-; DISABLE: .[[EPILOG_BB]]: # %if.end
+; DISABLE: {{.*}}[[EPILOG_BB]]: # %if.end
+;
 ; Epilog code
 ; CHECK: mtlr {{[0-9]+}}
 ; CHECK: blr
-; 
-; ENABLE: .[[ELSE_LABEL]]: # %if.else
+;
+; ENABLE: {{.*}}[[ELSE_LABEL]]: # %if.else
 ; Shift second argument by one and store into returned register.
 ; ENABLE: slwi 3, 4, 1
 ; ENABLE-NEXT: blr
@@ -282,49 +301,60 @@ declare void @somethingElse(...)
 
 ; Check with a more complex case that we do not have restore within the loop and
 ; save outside.
-; CHECK-LABEL: loopInfoRestoreOutsideLoop:
+; CHECK-LABEL: {{.*}}loopInfoRestoreOutsideLoop:
 ;
 ; ENABLE: cmplwi 3, 0
-; ENABLE-NEXT: beq 0, .[[ELSE_LABEL:LBB[0-9_]+]]
+; ENABLE-NEXT: beq 0, {{.*}}[[ELSE_LABEL:BB[0-9_]+]]
 ;
 ; Prologue code.
 ; Make sure we save the link register
 ; CHECK: mflr {{[0-9]+}}
 ;
-; DISABLE: std
-; DISABLE-NEXT: std
-; DISABLE: cmplwi 3, 0
-; DISABLE-NEXT: beq 0, .[[ELSE_LABEL:LBB[0-9_]+]]
+; DISABLE-64-DAG: std {{[0-9]+}}
+; DISABLE-64-DAG: std {{[0-9]+}}
+; DISABLE-64-DAG: std {{[0-9]+}}
+; DISABLE-64-DAG: stdu 1,
+; DISABLE-64-DAG: cmplwi 3, 0
+;
+; DISABLE-32-DAG: stw {{[0-9]+}}
+; DISABLE-32-DAG: stw {{[0-9]+}}
+; DISABLE-32-DAG: stw {{[0-9]+}}
+; DISABLE-32-DAG: stwu 1,
+; DISABLE-32-DAG: cmplwi 3, 0
+;
+; DISABLE-NEXT: beq 0, {{.*}}[[ELSE_LABEL:BB[0-9_]+]]
 ;
-; CHECK: bl somethingElse
+; CHECK: bl {{.*}}somethingElse
 ;
 ; Loop preheader
 ; CHECK-DAG: li [[SUM:[0-9]+]], 0
 ; CHECK-DAG: li [[IV:[0-9]+]], 10
-; 
+;
 ; Loop body
-; CHECK: .[[LOOP:LBB[0-9_]+]]: # %for.body
-; CHECK: bl something
+; CHECK: {{.*}}[[LOOP:BB[0-9_]+]]: # %for.body
+; CHECK: bl {{.*}}something
+;
 ; CHECK-DAG: addi [[IV]], [[IV]], -1
-; CHECK-DAG: add [[SUM]], 3, [[SUM]] 
-; CHECK-NEXT: cmplwi [[IV]], 0
-; CHECK-NEXT: bne 0, .[[LOOP]]
+; CHECK-DAG: add [[SUM]], 3, [[SUM]]
+; CHECK-DAG: cmplwi [[IV]], 0
 ;
-; Next BB. 
-; slwi 3, [[SUM]], 3
+; CHECK-NEXT: bne 0, {{.*}}[[LOOP]]
 ;
-; DISABLE: b .[[EPILOG_BB:LBB[0-9_]+]]
+; Next BB.
+; CHECK: slwi 3, [[SUM]], 3
 ;
-; DISABLE: .[[ELSE_LABEL]]: # %if.else
+; DISABLE: b {{.*}}[[EPILOG_BB:BB[0-9_]+]]
+;
+; DISABLE: {{.*}}[[ELSE_LABEL]]: # %if.else
 ; Shift second argument by one and store into returned register.
 ; DISABLE: slwi 3, 4, 1
-; DISABLE: .[[EPILOG_BB]]: # %if.end
+; DISABLE: {{.*}}[[EPILOG_BB]]: # %if.end
 ;
 ; Epilogue code.
 ; CHECK: mtlr {{[0-9]+}}
 ; CHECK: blr
 ;
-; ENABLE: .[[ELSE_LABEL]]: # %if.else
+; ENABLE: {{.*}}[[ELSE_LABEL]]: # %if.else
 ; Shift second argument by one and store into returned register.
 ; ENABLE: slwi 3, 4, 1
 ; ENABLE-NEXT: blr
@@ -360,7 +390,7 @@ if.end:                                           ; preds = %if.else, %for.end
 }
 
 ; Check that we handle function with no frame information correctly.
-; CHECK-LABEL: emptyFrame:
+; CHECK-LABEL: {{.*}}emptyFrame:
 ; CHECK: # %entry
 ; CHECK-NEXT: li 3, 0
 ; CHECK-NEXT: blr
@@ -371,40 +401,43 @@ entry:
 
 
 ; Check that we handle inline asm correctly.
-; CHECK-LABEL: inlineAsm:
+; CHECK-LABEL: {{.*}}inlineAsm:
 ;
 ; ENABLE: cmplwi 3, 0
-; ENABLE-NEXT: beq 0, .[[ELSE_LABEL:LBB[0-9_]+]]
+; ENABLE-NEXT: beq 0, {{.*}}[[ELSE_LABEL:BB[0-9_]+]]
 ;
 ; Prologue code.
 ; Make sure we save the CSR used in the inline asm: r14
 ; ENABLE-DAG: li [[IV:[0-9]+]], 10
-; ENABLE-DAG: std 14, -[[STACK_OFFSET:[0-9]+]](1) # 8-byte Folded Spill
+; ENABLE-64-DAG: std 14, -[[STACK_OFFSET:[0-9]+]](1) # 8-byte Folded Spill
+; ENABLE-32-DAG: stw 14, -[[STACK_OFFSET:[0-9]+]](1) # 4-byte Folded Spill
 ;
 ; DISABLE: cmplwi 3, 0
-; DISABLE-NEXT: std 14, -[[STACK_OFFSET:[0-9]+]](1) # 8-byte Folded Spill
-; DISABLE-NEXT: beq 0, .[[ELSE_LABEL:LBB[0-9_]+]]
+; DISABLE-64-NEXT: std 14, -[[STACK_OFFSET:[0-9]+]](1) # 8-byte Folded Spill
+; DISABLE-32-NEXT: stw 14, -[[STACK_OFFSET:[0-9]+]](1) # 4-byte Folded Spill
+; DISABLE-NEXT: beq 0, {{.*}}[[ELSE_LABEL:BB[0-9_]+]]
 ; DISABLE: li [[IV:[0-9]+]], 10
 ;
 ; CHECK: nop
 ; CHECK: mtctr [[IV]]
 ;
-; CHECK: .[[LOOP_LABEL:LBB[0-9_]+]]: # %for.body
+; CHECK: {{.*}}[[LOOP_LABEL:BB[0-9_]+]]: # %for.body
 ; Inline asm statement.
 ; CHECK: addi 14, 14, 1
-; CHECK: bdnz .[[LOOP_LABEL]]
+; CHECK: bdnz {{.*}}[[LOOP_LABEL]]
 ;
 ; Epilogue code.
 ; CHECK: li 3, 0
-; CHECK-DAG: ld 14, -[[STACK_OFFSET]](1) # 8-byte Folded Reload
-; CHECK: nop
+; CHECK-64-DAG: ld 14, -[[STACK_OFFSET]](1) # 8-byte Folded Reload
+; CHECK-32-DAG: lwz 14, -[[STACK_OFFSET]](1) # 4-byte Folded Reload
+; CHECK-DAG: nop
 ; CHECK: blr
 ;
 ; CHECK: [[ELSE_LABEL]]
 ; CHECK-NEXT: slwi 3, 4, 1
-; DISABLE: ld 14, -[[STACK_OFFSET]](1) # 8-byte Folded Reload
+; DISABLE-64-NEXT: ld 14, -[[STACK_OFFSET]](1) # 8-byte Folded Reload
+; DISABLE-32-NEXT: lwz 14, -[[STACK_OFFSET]](1) # 4-byte Folded Reload
 ; CHECK-NEXT: blr
-; 
 define i32 @inlineAsm(i32 %cond, i32 %N) {
 entry:
   %tobool = icmp eq i32 %cond, 0
@@ -436,35 +469,43 @@ if.end:                                           ; preds = %for.body, %if.else
 
 
 ; Check that we handle calls to variadic functions correctly.
-; CHECK-LABEL: callVariadicFunc:
+; CHECK-LABEL: {{.*}}callVariadicFunc:
 ;
 ; ENABLE: cmplwi 3, 0
-; ENABLE-NEXT: beq 0, .[[ELSE_LABEL:LBB[0-9_]+]]
+; ENABLE-NEXT: beq 0, {{.*}}[[ELSE_LABEL:BB[0-9_]+]]
 ;
 ; Prologue code.
 ; CHECK: mflr {{[0-9]+}}
-; 
+;
 ; DISABLE: cmplwi 3, 0
-; DISABLE-NEXT: beq 0, .[[ELSE_LABEL:LBB[0-9_]+]]
+; DISABLE-NEXT: beq 0, {{.*}}[[ELSE_LABEL:BB[0-9_]+]]
 ;
 ; Setup of the varags.
-; CHECK: mr 4, 3
-; CHECK-NEXT: mr 5, 3
-; CHECK-NEXT: mr 6, 3
-; CHECK-NEXT: mr 7, 3
-; CHECK-NEXT: mr 8, 3
-; CHECK-NEXT: mr 9, 3
-; CHECK-NEXT: bl someVariadicFunc
+; CHECK-64: mr 4, 3
+; CHECK-64-NEXT: mr 5, 3
+; CHECK-64-NEXT: mr 6, 3
+; CHECK-64-NEXT: mr 7, 3
+; CHECK-64-NEXT: mr 8, 3
+; CHECK-64-NEXT: mr 9, 3
+;
+; CHECK-32: mr 3, 4
+; CHECK-32-NEXT: mr 5, 4
+; CHECK-32-NEXT: mr 6, 4
+; CHECK-32-NEXT: mr 7, 4
+; CHECK-32-NEXT: mr 8, 4
+; CHECK-32-NEXT: mr 9, 4
+;
+; CHECK-NEXT: bl {{.*}}someVariadicFunc
 ; CHECK: slwi 3, 3, 3
-; DISABLE: b .[[EPILOGUE_BB:LBB[0-9_]+]]
+; DISABLE: b {{.*}}[[EPILOGUE_BB:BB[0-9_]+]]
 ;
 ; ENABLE: mtlr {{[0-9]+}}
 ; ENABLE-NEXT: blr
 ;
-; CHECK: .[[ELSE_LABEL]]: # %if.else
+; CHECK: {{.*}}[[ELSE_LABEL]]: # %if.else
 ; CHECK-NEXT: slwi 3, 4, 1
-; 
-; DISABLE: .[[EPILOGUE_BB]]: # %if.end
+;
+; DISABLE: {{.*}}[[EPILOGUE_BB]]: # %if.end
 ; DISABLE: mtlr
 ; CHECK: blr
 define i32 @callVariadicFunc(i32 %cond, i32 %N) {
@@ -494,11 +535,11 @@ declare i32 @someVariadicFunc(i32, ...)
 ; Although this is not incorrect to insert such code, it is useless
 ; and it hurts the binary size.
 ;
-; CHECK-LABEL: noreturn:
+; CHECK-LABEL: {{.*}}noreturn:
 ; DISABLE: mflr {{[0-9]+}}
 ;
 ; CHECK: cmplwi 3, 0
-; CHECK-NEXT: bne{{[-]?}} 0, .[[ABORT:LBB[0-9_]+]]
+; CHECK-NEXT: bne{{[-]?}} 0, {{.*}}[[ABORT:BB[0-9_]+]]
 ;
 ; CHECK: li 3, 42
 ;
@@ -506,11 +547,9 @@ declare i32 @someVariadicFunc(i32, ...)
 ;
 ; CHECK-NEXT: blr
 ;
-; CHECK: .[[ABORT]]: # %if.abort
-;
+; CHECK: {{.*}}[[ABORT]]: # %if.abort
 ; ENABLE: mflr {{[0-9]+}}
-;
-; CHECK: bl abort
+; CHECK: bl {{.*}}abort
 ; ENABLE-NOT: mtlr {{[0-9]+}}
 define i32 @noreturn(i8 signext %bad_thing) {
 entry:
@@ -537,8 +576,8 @@ attributes #0 = { noreturn nounwind }
 ; dominator is itself. In this case, we cannot perform shrink wrapping, but we
 ; should return gracefully and continue compilation.
 ; The only condition for this test is the compilation finishes correctly.
-; 
-; CHECK-LABEL: infiniteloop
+;
+; CHECK-LABEL: {{.*}}infiniteloop
 ; CHECK: blr
 define void @infiniteloop() {
 entry:
@@ -560,7 +599,7 @@ if.end:
 }
 
 ; Another infinite loop test this time with a body bigger than just one block.
-; CHECK-LABEL: infiniteloop2
+; CHECK-LABEL: {{.*}}infiniteloop2
 ; CHECK: blr
 define void @infiniteloop2() {
 entry:
@@ -590,10 +629,8 @@ if.end:
 }
 
 ; Another infinite loop test this time with two nested infinite loop.
-; CHECK-LABEL: infiniteloop3
-; CHECK: Lfunc_begin[[FUNCNUM:[0-9]+]]
+; CHECK-LABEL: {{.*}}infiniteloop3
 ; CHECK: bclr
-; CHECK: Lfunc_end[[FUNCNUM]]
 define void @infiniteloop3() {
 entry:
   br i1 undef, label %loop2a, label %body
@@ -632,7 +669,7 @@ end:
 
 ; Test for a bug that was caused when save point was equal to restore point.
 ; Function Attrs: nounwind
-; CHECK-LABEL: transpose
+; CHECK-LABEL: {{.*}}transpose
 ;
 ; Store of callee-save register saved by shrink wrapping
 ; FIXME: Test disabled: Improved scheduling needs no spills/reloads any longer!

diff --git a/llvm/test/CodeGen/PowerPC/ppc64-sibcall-shrinkwrap.ll b/llvm/test/CodeGen/PowerPC/ppc64-sibcall-shrinkwrap.ll
index 82d2ec6b96ff..856b66d4e7a1 100644
--- a/llvm/test/CodeGen/PowerPC/ppc64-sibcall-shrinkwrap.ll
+++ b/llvm/test/CodeGen/PowerPC/ppc64-sibcall-shrinkwrap.ll
@@ -2,7 +2,9 @@
 ; RUN: llc -relocation-model=static -verify-machineinstrs < %s -mtriple=powerpc64-unknown-linux-gnu -disable-ppc-sco=false --enable-shrink-wrap=true | FileCheck %s -check-prefix=CHECK-SCO-ONLY
 ; RUN: llc -relocation-model=static -verify-machineinstrs < %s -mtriple=powerpc64le-unknown-linux-gnu -disable-ppc-sco=false --enable-shrink-wrap=false | FileCheck %s -check-prefix=CHECK-SCO-ONLY
 ; RUN: llc -relocation-model=static -verify-machineinstrs < %s -mtriple=powerpc64le-unknown-linux-gnu -disable-ppc-sco=false --enable-shrink-wrap=true | FileCheck %s -check-prefix=CHECK-SCO-ONLY
-; Edit: D63152 prevents stack popping before loads and stores, so shrink-wrap does nothing here
+; RUN: not --crash llc -relocation-model=pic -verify-machineinstrs < %s -mtriple=powerpc64-ibm-aix-xcoff -tailcallopt -disable-ppc-sco=false --enable-shrink-wrap=true 2>&1 | FileCheck %s -check-prefix=CHECK-AIX
+;; The above RUN command is expected to fail on AIX since tail calling is not implemented ATM
+;; Edit: D63152 prevents stack popping before loads and stores, so shrink-wrap does nothing here
 %"class.clang::NamedDecl" = type { i32 }
 declare void @__assert_fail();
 
@@ -37,6 +39,8 @@ exit:
 ; CHECK-SCO-ONLY: b LVComputationKind
 ; CHECK-SCO-ONLY: #TC_RETURNd8
 ; CHECK-SCO-ONLY: bl __assert_fail
+;
+; CHECK-AIX: LLVM ERROR: Tail call support is unimplemented on AIX.
 }
 
 define dso_local fastcc i8 @LVComputationKind(
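
The new CHECK-AIX run line above depends on llc aborting because tail-call support is
not implemented for AIX. A hedged sketch of the kind of guard that produces this error
is shown below; the helper name and condition are assumptions for illustration, and only
the error string comes from the test:

  #include "llvm/Support/ErrorHandling.h"

  // Hypothetical stand-in for the backend guard that CHECK-AIX exercises.
  static void rejectAIXTailCall(bool IsTailCall, bool IsAIXABI) {
    if (IsTailCall && IsAIXABI)
      llvm::report_fatal_error("Tail call support is unimplemented on AIX.");
  }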

diff --git a/llvm/test/CodeGen/PowerPC/shrink-wrap.ll b/llvm/test/CodeGen/PowerPC/shrink-wrap.ll
index 8bdba1e3ea3e..3d4a187c3fbb 100644
--- a/llvm/test/CodeGen/PowerPC/shrink-wrap.ll
+++ b/llvm/test/CodeGen/PowerPC/shrink-wrap.ll
@@ -1,4 +1,7 @@
-; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc64le-unknown-unknown -mcpu=pwr9 | FileCheck  %s
+; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc64le-unknown-unknown -mcpu=pwr9 | FileCheck  %s --check-prefixes=CHECK,CHECK64
+; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc-ibm-aix-xcoff -mcpu=pwr9 -mattr=-altivec | FileCheck  %s --check-prefixes=CHECK,CHECK32
+; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc64-ibm-aix-xcoff -mcpu=pwr9 -mattr=-altivec | FileCheck  %s --check-prefixes=CHECK,CHECK64
+
 define signext i32 @shrinkwrapme(i32 signext %a, i32 signext %lim) {
 entry:
   %cmp5 = icmp sgt i32 %lim, 0
@@ -22,31 +25,29 @@ entry:
   %exitcond = icmp eq i32 %inc, %lim
   br i1 %exitcond, label %for.cond.cleanup.loopexit, label %for.body
 
-; CHECK-LABEL: shrinkwrapme
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    cmpwi
+; CHECK-LABEL: {{[\.]?}}shrinkwrapme:
+; CHECK:            # %bb.0:
+; CHECK-NEXT:         cmpwi
 ; Prolog code
-; CHECK:         std 
-; CHECK:         std 
-; CHECK:         std 
-; CHECK:         std 
-; CHECK:         blt 0, .LBB0_3
-; CHECK:       # %bb.1:
-; CHECK-NEXT:    clrldi
-; CHECK-NEXT:    mtctr
-; CHECK-NEXT:    li 
-; CHECK:       .LBB0_2: 
-; CHECK:         add
-; CHECK:         bdnz .LBB0_2 
-; CHECK-NEXT:    b .LBB0_4
-; CHECK:       .LBB0_3: 
-; CHECK-NEXT:    li 
-; CHECK:       .LBB0_4: 
+; CHECK64-COUNT-18:   std
+
+; CHECK32-COUNT-18:   stw
+
+; CHECK:              blt 0, {{.*}}BB0_3
+; CHECK:            # %bb.1:
+; CHECK:              li
+; CHECK:            {{.*}}BB0_2:
+; CHECK:              add
+; CHECK:              bdnz {{.*}}BB0_2
+; CHECK-NEXT:         b {{.*}}BB0_4
+; CHECK:            {{.*}}BB0_3:
+; CHECK-NEXT:         li
+; CHECK:            {{.*}}BB0_4:
+
 ; Epilog code
-; CHECK:         ld 
-; CHECK:         ld 
-; CHECK:         extsw
-; CHECK:         ld 
-; CHECK:         ld 
-; CHECK:         blr
+; CHECK64-COUNT-18:   ld
+;
+; CHECK32-COUNT-18:   lwz
+
+; CHECK:              blr
 }

diff --git a/llvm/test/CodeGen/PowerPC/shrink-wrap.mir b/llvm/test/CodeGen/PowerPC/shrink-wrap.mir
index cb125abde53e..5c3b6ad347ca 100644
--- a/llvm/test/CodeGen/PowerPC/shrink-wrap.mir
+++ b/llvm/test/CodeGen/PowerPC/shrink-wrap.mir
@@ -1,42 +1,46 @@
 # RUN: llc -verify-machineinstrs -mcpu=pwr9 -mtriple powerpc64le-unknown-linux-gnu \
 # RUN:   -run-pass=shrink-wrap -o - %s | FileCheck %s
+# RUN: llc -verify-machineinstrs -mcpu=pwr9 -mtriple powerpc-ibm-aix-xcoff \
+# RUN:   -run-pass=shrink-wrap -mattr=-altivec -o - %s | FileCheck %s
+# RUN: llc -verify-machineinstrs -mcpu=pwr9 -mtriple powerpc64-ibm-aix-xcoff \
+# RUN:   -run-pass=shrink-wrap -mattr=-altivec -o - %s | FileCheck %s
 --- |
   ; ModuleID = 'test.ll'
   source_filename = "test.ll"
   target datalayout = "e-m:e-i64:64-n32:64"
-  
+
   define signext i32 @shrinkwrapme(i32 signext %a, i32 signext %lim) {
   entry:
     %cmp5 = icmp sgt i32 %lim, 0
     br i1 %cmp5, label %for.body.preheader, label %for.cond.cleanup
-  
+
   for.body.preheader:                               ; preds = %entry
     %0 = add i32 %lim, -1
     %1 = zext i32 %0 to i64
     %2 = add nuw nsw i64 %1, 1
     call void @llvm.set.loop.iterations.i64(i64 %2)
     br label %for.body
-  
+
   for.cond.cleanup:                                 ; preds = %for.body, %entry
     %Ret.0.lcssa = phi i32 [ 0, %entry ], [ %3, %for.body ]
     ret i32 %Ret.0.lcssa
-  
+
   for.body:                                         ; preds = %for.body, %for.body.preheader
     %Ret.06 = phi i32 [ %3, %for.body ], [ 0, %for.body.preheader ]
     %3 = tail call i32 asm "add $0, $1, $2", "=r,r,r,~{r14},~{r15},~{r16},~{r17},~{r18},~{r19},~{r20},~{r21},~{r22},~{r23},~{r24},~{r25},~{r26},~{r27},~{r28},~{r29},~{r30},~{r31}"(i32 %a, i32 %Ret.06)
     %4 = call i1 @llvm.loop.decrement.i64(i64 1)
     br i1 %4, label %for.body, label %for.cond.cleanup
   }
-  
+
   ; Function Attrs: noduplicate nounwind
   declare void @llvm.set.loop.iterations.i64(i64) #0
-  
+
   ; Function Attrs: noduplicate nounwind
   declare i1 @llvm.loop.decrement.i64(i64) #0
-  
+
   ; Function Attrs: nounwind
   declare void @llvm.stackprotector(i8*, i8**) #1
-  
+
   attributes #0 = { noduplicate nounwind }
   attributes #1 = { nounwind }
 
@@ -83,37 +87,37 @@ body:             |
   bb.0.entry:
     successors: %bb.2(0x50000000), %bb.1(0x30000000)
     liveins: $x3, $x4
-  
+
     renamable $cr0 = CMPWI renamable $r4, 1
     BCC 4, killed renamable $cr0, %bb.2
-  
+
   bb.1:
     successors: %bb.3(0x80000000)
-  
+
     renamable $r4 = LI 0
     B %bb.3
-  
+
   bb.2.for.body.preheader:
     successors: %bb.4(0x80000000)
     liveins: $x3, $x4
-  
+
     renamable $r4 = ADDI renamable $r4, -1, implicit killed $x4, implicit-def $x4
     renamable $x4 = RLDICL killed renamable $x4, 0, 32
     renamable $x4 = nuw nsw ADDI8 killed renamable $x4, 1
     MTCTR8loop killed renamable $x4, implicit-def dead $ctr8
     renamable $r4 = LI 0
     B %bb.4
-  
+
   bb.3.for.cond.cleanup:
     liveins: $r4
-  
+
     renamable $x3 = EXTSW_32_64 killed renamable $r4
     BLR8 implicit $lr8, implicit $rm, implicit $x3
-  
+
   bb.4.for.body:
     successors: %bb.4(0x7c000000), %bb.3(0x04000000)
     liveins: $r4, $x3
-  
+
     INLINEASM &"add $0, $1, $2", 0, 131082, def renamable $r4, 131081, renamable $r3, 131081, killed renamable $r4, 12, implicit-def dead early-clobber $r14, 12, implicit-def dead early-clobber $r15, 12, implicit-def dead early-clobber $r16, 12, implicit-def dead early-clobber $r17, 12, implicit-def dead early-clobber $r18, 12, implicit-def dead early-clobber $r19, 12, implicit-def dead early-clobber $r20, 12, implicit-def dead early-clobber $r21, 12, implicit-def dead early-clobber $r22, 12, implicit-def dead early-clobber $r23, 12, implicit-def dead early-clobber $r24, 12, implicit-def dead early-clobber $r25, 12, implicit-def dead early-clobber $r26, 12, implicit-def dead early-clobber $r27, 12, implicit-def dead early-clobber $r28, 12, implicit-def dead early-clobber $r29, 12, implicit-def dead early-clobber $r30, 12, implicit-def dead early-clobber $r31
     BDNZ8 %bb.4, implicit-def dead $ctr8, implicit $ctr8
     B %bb.3

