[llvm] fd9f42f - [PowerPC] Convert some tests to opaque pointers (NFC)

Sergei Barannikov via llvm-commits llvm-commits at lists.llvm.org
Sun Jan 29 13:42:42 PST 2023


Author: Sergei Barannikov
Date: 2023-01-30T00:40:12+03:00
New Revision: fd9f42fad22ca6ca7cb5041f9fc3c89d22f12d92

URL: https://github.com/llvm/llvm-project/commit/fd9f42fad22ca6ca7cb5041f9fc3c89d22f12d92
DIFF: https://github.com/llvm/llvm-project/commit/fd9f42fad22ca6ca7cb5041f9fc3c89d22f12d92.diff

LOG: [PowerPC] Convert some tests to opaque pointers (NFC)

Added: 
    

Modified: 
    llvm/test/CodeGen/PowerPC/aix32-cc-abi-vaarg.ll
    llvm/test/CodeGen/PowerPC/aix32-vector-vararg-callee-split.ll
    llvm/test/CodeGen/PowerPC/aix32-vector-vararg-callee.ll
    llvm/test/CodeGen/PowerPC/aix32-vector-vararg-fixed-callee.ll
    llvm/test/CodeGen/PowerPC/aix64-vector-vararg-callee.ll
    llvm/test/CodeGen/PowerPC/aix64-vector-vararg-fixed-callee.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/PowerPC/aix32-cc-abi-vaarg.ll b/llvm/test/CodeGen/PowerPC/aix32-cc-abi-vaarg.ll
index def788f878e2f..e9f884c66888e 100644
--- a/llvm/test/CodeGen/PowerPC/aix32-cc-abi-vaarg.ll
+++ b/llvm/test/CodeGen/PowerPC/aix32-cc-abi-vaarg.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -opaque-pointers=0 -O2 -verify-machineinstrs -mcpu=pwr4 -mattr=-altivec -mtriple powerpc-ibm-aix-xcoff < %s | FileCheck --check-prefix=ASM32 %s
-; RUN: llc -opaque-pointers=0 -O2 -mtriple powerpc-ibm-aix-xcoff -stop-after=machine-cp -verify-machineinstrs < %s | FileCheck --check-prefix=32BIT %s
+; RUN: llc -O2 -verify-machineinstrs -mcpu=pwr4 -mattr=-altivec -mtriple powerpc-ibm-aix-xcoff < %s | FileCheck --check-prefix=ASM32 %s
+; RUN: llc -O2 -mtriple powerpc-ibm-aix-xcoff -stop-after=machine-cp -verify-machineinstrs < %s | FileCheck --check-prefix=32BIT %s
 
 define i32 @int_va_arg(i32 %a, ...) local_unnamed_addr  {
 ; ASM32-LABEL: int_va_arg:
@@ -25,31 +25,27 @@ define i32 @int_va_arg(i32 %a, ...) local_unnamed_addr  {
 ; ASM32-NEXT:    stw 5, 32(1)
 ; ASM32-NEXT:    blr
 entry:
-  %arg1 = alloca i8*, align 4
-  %arg2 = alloca i8*, align 4
-  %0 = bitcast i8** %arg1 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %0)
-  %1 = bitcast i8** %arg2 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %1)
-  call void @llvm.va_start(i8* nonnull %0)
-  call void @llvm.va_copy(i8* nonnull %1, i8* nonnull %0)
-  %argp.cur = load i8*, i8** %arg1, align 4
-  %argp.next = getelementptr inbounds i8, i8* %argp.cur, i32 4
-  store i8* %argp.next, i8** %arg1, align 4
-  %2 = bitcast i8* %argp.cur to i32*
-  %3 = load i32, i32* %2, align 4
-  %add = add nsw i32 %3, %a
-  %argp.cur2 = load i8*, i8** %arg2, align 4
-  %argp.next3 = getelementptr inbounds i8, i8* %argp.cur2, i32 4
-  store i8* %argp.next3, i8** %arg2, align 4
-  %4 = bitcast i8* %argp.cur2 to i32*
-  %5 = load i32, i32* %4, align 4
-  %mul = shl i32 %5, 1
+  %arg1 = alloca ptr, align 4
+  %arg2 = alloca ptr, align 4
+  call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %arg1)
+  call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %arg2)
+  call void @llvm.va_start(ptr nonnull %arg1)
+  call void @llvm.va_copy(ptr nonnull %arg2, ptr nonnull %arg1)
+  %argp.cur = load ptr, ptr %arg1, align 4
+  %argp.next = getelementptr inbounds i8, ptr %argp.cur, i32 4
+  store ptr %argp.next, ptr %arg1, align 4
+  %0 = load i32, ptr %argp.cur, align 4
+  %add = add nsw i32 %0, %a
+  %argp.cur2 = load ptr, ptr %arg2, align 4
+  %argp.next3 = getelementptr inbounds i8, ptr %argp.cur2, i32 4
+  store ptr %argp.next3, ptr %arg2, align 4
+  %1 = load i32, ptr %argp.cur2, align 4
+  %mul = shl i32 %1, 1
   %add4 = add nsw i32 %add, %mul
-  call void @llvm.va_end(i8* nonnull %0)
-  call void @llvm.va_end(i8* nonnull %1)
-  call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %1)
-  call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %0)
+  call void @llvm.va_end(ptr nonnull %arg1)
+  call void @llvm.va_end(ptr nonnull %arg2)
+  call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %arg2)
+  call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %arg1)
   ret i32 %add4
 }
 
@@ -82,21 +78,21 @@ entry:
 ; 32BIT-DAG:     STW killed renamable $r10, 24, %fixed-stack.0 :: (store (s32))
 ; 32BIT-DAG:     STW killed renamable $r4, 0, %stack.1.arg2 :: (store (s32) into %ir.arg2)
 ; 32BIT-DAG:     renamable $r4 = ADDI %fixed-stack.0, 4
-; 32BIT-DAG:     STW killed renamable $r11, 0, %stack.1.arg2 :: (store (s32) into %ir.1)
+; 32BIT-DAG:     STW killed renamable $r11, 0, %stack.1.arg2 :: (store (s32) into %ir.arg2)
 ; 32BIT-DAG:     renamable $r11 = ADDI %fixed-stack.0, 0
-; 32BIT-DAG:     STW renamable $r11, 0, %stack.0.arg1 :: (store (s32) into %ir.0)
+; 32BIT-DAG:     STW renamable $r11, 0, %stack.0.arg1 :: (store (s32) into %ir.arg1)
 ; 32BIT-DAG:     STW renamable $r4, 0, %stack.0.arg1 :: (store (s32) into %ir.arg1)
-; 32BIT-DAG:     renamable $r6 = LWZ 0, %fixed-stack.0 :: (load (s32) from %ir.2)
-; 32BIT-DAG:     renamable $r4 = LWZ 0, %fixed-stack.0 :: (load (s32) from %ir.4)
+; 32BIT-DAG:     renamable $r6 = LWZ 0, %fixed-stack.0 :: (load (s32) from %ir.argp.cur)
+; 32BIT-DAG:     renamable $r4 = LWZ 0, %fixed-stack.0 :: (load (s32) from %ir.argp.cur2)
 ; 32BIT-DAG:     renamable $r3 = nsw ADD4 killed renamable $r6, killed renamable $r3
 ; 32BIT-DAG:     renamable $r3 = nsw ADD4 killed renamable $r3, killed renamable $r4
 ; 32BIT-DAG:     BLR implicit $lr, implicit $rm, implicit $r3
 
-declare void @llvm.lifetime.start.p0i8(i64 immarg, i8* nocapture)
-declare void @llvm.va_start(i8*)
-declare void @llvm.va_copy(i8*, i8*)
-declare void @llvm.va_end(i8*)
-declare void @llvm.lifetime.end.p0i8(i64 immarg, i8* nocapture)
+declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
+declare void @llvm.va_start(ptr)
+declare void @llvm.va_copy(ptr, ptr)
+declare void @llvm.va_end(ptr)
+declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
 
 define i32 @int_stack_va_arg(i32 %one, i32 %two, i32 %three, i32 %four, i32 %five, i32 %six, i32 %seven, i32 %eight, ...) local_unnamed_addr {
 ; ASM32-LABEL: int_stack_va_arg:
@@ -118,14 +114,12 @@ define i32 @int_stack_va_arg(i32 %one, i32 %two, i32 %three, i32 %four, i32 %fiv
 ; ASM32-NEXT:    add 3, 3, 4
 ; ASM32-NEXT:    blr
 entry:
-  %arg1 = alloca i8*, align 4
-  %arg2 = alloca i8*, align 4
-  %0 = bitcast i8** %arg1 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %0)
-  %1 = bitcast i8** %arg2 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %1)
-  call void @llvm.va_start(i8* nonnull %0)
-  call void @llvm.va_copy(i8* nonnull %1, i8* nonnull %0)
+  %arg1 = alloca ptr, align 4
+  %arg2 = alloca ptr, align 4
+  call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %arg1)
+  call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %arg2)
+  call void @llvm.va_start(ptr nonnull %arg1)
+  call void @llvm.va_copy(ptr nonnull %arg2, ptr nonnull %arg1)
   %add = add nsw i32 %two, %one
   %add2 = add nsw i32 %add, %three
   %add3 = add nsw i32 %add2, %four
@@ -133,23 +127,21 @@ entry:
   %add5 = add nsw i32 %add4, %six
   %add6 = add nsw i32 %add5, %seven
   %add7 = add nsw i32 %add6, %eight
-  %argp.cur = load i8*, i8** %arg1, align 4
-  %argp.next = getelementptr inbounds i8, i8* %argp.cur, i32 4
-  store i8* %argp.next, i8** %arg1, align 4
-  %2 = bitcast i8* %argp.cur to i32*
-  %3 = load i32, i32* %2, align 4
-  %add8 = add nsw i32 %add7, %3
-  %argp.cur9 = load i8*, i8** %arg2, align 4
-  %argp.next10 = getelementptr inbounds i8, i8* %argp.cur9, i32 4
-  store i8* %argp.next10, i8** %arg2, align 4
-  %4 = bitcast i8* %argp.cur9 to i32*
-  %5 = load i32, i32* %4, align 4
-  %mul = shl i32 %5, 1
+  %argp.cur = load ptr, ptr %arg1, align 4
+  %argp.next = getelementptr inbounds i8, ptr %argp.cur, i32 4
+  store ptr %argp.next, ptr %arg1, align 4
+  %0 = load i32, ptr %argp.cur, align 4
+  %add8 = add nsw i32 %add7, %0
+  %argp.cur9 = load ptr, ptr %arg2, align 4
+  %argp.next10 = getelementptr inbounds i8, ptr %argp.cur9, i32 4
+  store ptr %argp.next10, ptr %arg2, align 4
+  %1 = load i32, ptr %argp.cur9, align 4
+  %mul = shl i32 %1, 1
   %add11 = add nsw i32 %add8, %mul
-  call void @llvm.va_end(i8* nonnull %0)
-  call void @llvm.va_end(i8* nonnull %1)
-  call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %1)
-  call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %0)
+  call void @llvm.va_end(ptr nonnull %arg1)
+  call void @llvm.va_end(ptr nonnull %arg2)
+  call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %arg2)
+  call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %arg1)
   ret i32 %add11
 }
 
@@ -184,7 +176,7 @@ entry:
 ; 32BIT-DAG:     renamable $r4 = ADDI %fixed-stack.0, 0
 ; 32BIT-DAG:     STW killed renamable $r4, 0, %stack.0.arg1 :: (store (s32) into %ir.arg1)
 ; 32BIT-DAG:     renamable $r3 = nsw ADD4 killed renamable $r3, renamable $r4
-; 32BIT-DAG:     renamable $r4 = LWZ 0, %fixed-stack.0 :: (load (s32) from %ir.4, align 8)
+; 32BIT-DAG:     renamable $r4 = LWZ 0, %fixed-stack.0 :: (load (s32) from %ir.argp.cur9, align 8)
 ; 32BIT-DAG:     renamable $r11 = LI 4
 ; 32BIT-DAG:     BLR implicit $lr, implicit $rm, implicit $r3
 
@@ -211,31 +203,27 @@ define double @double_va_arg(double %a, ...) local_unnamed_addr  {
 ; ASM32-NEXT:    stw 3, -8(1)
 ; ASM32-NEXT:    blr
 entry:
-  %arg1 = alloca i8*, align 4
-  %arg2 = alloca i8*, align 4
-  %0 = bitcast i8** %arg1 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %0)
-  %1 = bitcast i8** %arg2 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %1)
-  call void @llvm.va_start(i8* nonnull %0)
-  call void @llvm.va_copy(i8* nonnull %1, i8* nonnull %0)
-  %argp.cur = load i8*, i8** %arg1, align 4
-  %argp.next = getelementptr inbounds i8, i8* %argp.cur, i32 8
-  store i8* %argp.next, i8** %arg1, align 4
-  %2 = bitcast i8* %argp.cur to double*
-  %3 = load double, double* %2, align 4
-  %add = fadd double %3, %a
-  %argp.cur2 = load i8*, i8** %arg2, align 4
-  %argp.next3 = getelementptr inbounds i8, i8* %argp.cur2, i32 8
-  store i8* %argp.next3, i8** %arg2, align 4
-  %4 = bitcast i8* %argp.cur2 to double*
-  %5 = load double, double* %4, align 4
-  %mul = fmul double %5, 2.000000e+00
+  %arg1 = alloca ptr, align 4
+  %arg2 = alloca ptr, align 4
+  call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %arg1)
+  call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %arg2)
+  call void @llvm.va_start(ptr nonnull %arg1)
+  call void @llvm.va_copy(ptr nonnull %arg2, ptr nonnull %arg1)
+  %argp.cur = load ptr, ptr %arg1, align 4
+  %argp.next = getelementptr inbounds i8, ptr %argp.cur, i32 8
+  store ptr %argp.next, ptr %arg1, align 4
+  %0 = load double, ptr %argp.cur, align 4
+  %add = fadd double %0, %a
+  %argp.cur2 = load ptr, ptr %arg2, align 4
+  %argp.next3 = getelementptr inbounds i8, ptr %argp.cur2, i32 8
+  store ptr %argp.next3, ptr %arg2, align 4
+  %1 = load double, ptr %argp.cur2, align 4
+  %mul = fmul double %1, 2.000000e+00
   %add4 = fadd double %add, %mul
-  call void @llvm.va_end(i8* nonnull %0)
-  call void @llvm.va_end(i8* nonnull %1)
-  call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %1)
-  call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %0)
+  call void @llvm.va_end(ptr nonnull %arg1)
+  call void @llvm.va_end(ptr nonnull %arg2)
+  call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %arg2)
+  call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %arg1)
   ret double %add4
 }
 
@@ -265,8 +253,8 @@ entry:
 ; 32BIT-DAG:     STW killed renamable $r8, 12, %fixed-stack.0 :: (store (s32))
 ; 32BIT-DAG:     STW killed renamable $r9, 16, %fixed-stack.0 :: (store (s32) into %fixed-stack.0 + 16, align 16)
 ; 32BIT-DAG:     STW killed renamable $r10, 20, %fixed-stack.0 :: (store (s32))
-; 32BIT-DAG:     STW renamable $r3, 0, %stack.0.arg1 :: (store (s32) into %ir.0)
-; 32BIT-DAG:     STW killed renamable $r3, 0, %stack.1.arg2 :: (store (s32) into %ir.1)
+; 32BIT-DAG:     STW renamable $r3, 0, %stack.0.arg1 :: (store (s32) into %ir.arg1)
+; 32BIT-DAG:     STW killed renamable $r3, 0, %stack.1.arg2 :: (store (s32) into %ir.arg2)
 ; 32BIT-DAG:     BLR implicit $lr, implicit $rm, implicit $f1
 
 define double @double_stack_va_arg(double %one, double %two, double %three, double %four, double %five, double %six, double %seven, double %eight, double %nine, double %ten, double %eleven, double %twelve, double %thirteen, ...) local_unnamed_addr  {
@@ -299,14 +287,12 @@ define double @double_stack_va_arg(double %one, double %two, double %three, doub
 ; ASM32-NEXT:    fadd 1, 0, 1
 ; ASM32-NEXT:    blr
 entry:
-  %arg1 = alloca i8*, align 4
-  %arg2 = alloca i8*, align 4
-  %0 = bitcast i8** %arg1 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %0)
-  %1 = bitcast i8** %arg2 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %1)
-  call void @llvm.va_start(i8* nonnull %0)
-  call void @llvm.va_copy(i8* nonnull %1, i8* nonnull %0)
+  %arg1 = alloca ptr, align 4
+  %arg2 = alloca ptr, align 4
+  call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %arg1)
+  call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %arg2)
+  call void @llvm.va_start(ptr nonnull %arg1)
+  call void @llvm.va_copy(ptr nonnull %arg2, ptr nonnull %arg1)
   %add = fadd double %one, %two
   %add2 = fadd double %add, %three
   %add3 = fadd double %add2, %four
@@ -319,17 +305,15 @@ entry:
   %add10 = fadd double %add9, %eleven
   %add11 = fadd double %add10, %twelve
   %add12 = fadd double %add11, %thirteen
-  %2 = bitcast i8** %arg1 to double**
-  %argp.cur1 = load double*, double** %2, align 4
-  %3 = load double, double* %argp.cur1, align 4
-  %add13 = fadd double %add12, %3
-  %4 = bitcast i8** %arg2 to double**
-  %argp.cur142 = load double*, double** %4, align 4
-  %5 = load double, double* %argp.cur142, align 4
-  %mul = fmul double %5, 2.000000e+00
+  %argp.cur1 = load ptr, ptr %arg1, align 4
+  %0 = load double, ptr %argp.cur1, align 4
+  %add13 = fadd double %add12, %0
+  %argp.cur142 = load ptr, ptr %arg2, align 4
+  %1 = load double, ptr %argp.cur142, align 4
+  %mul = fmul double %1, 2.000000e+00
   %add16 = fadd double %add13, %mul
-  call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %1)
-  call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %0)
+  call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %arg2)
+  call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %arg1)
   ret double %add16
 }
 
@@ -361,7 +345,7 @@ entry:
 ; 32BIT-LABEL:   body:             |
 ; 32BIT-DAG:     liveins: $f1, $f2, $f3, $f4, $f5, $f6, $f7, $f8, $f9, $f10, $f11, $f12, $f13
 ; 32BIT-DAG:     renamable $r4 = ADDI %fixed-stack.0, 0
-; 32BIT-DAG:     STW killed renamable $r4, 0, %stack.0.arg1 :: (store (s32) into %ir.0)
+; 32BIT-DAG:     STW killed renamable $r4, 0, %stack.0.arg1 :: (store (s32) into %ir.arg1)
 ; 32BIT-DAG:     renamable $r4 = LWZ 0, %fixed-stack.0 :: (load (s32) from %ir.argp.cur142, align 16)
 ; 32BIT-DAG:     renamable $f1 = nofpexcept FADD killed renamable $f0, killed renamable $f1, implicit $rm
 ; 32BIT-DAG:     renamable $f0 = nofpexcept FADD killed renamable $f1, killed renamable $f2, implicit $rm

diff --git a/llvm/test/CodeGen/PowerPC/aix32-vector-vararg-callee-split.ll b/llvm/test/CodeGen/PowerPC/aix32-vector-vararg-callee-split.ll
index e4b134cf87b02..3e50d61a5d2fb 100644
--- a/llvm/test/CodeGen/PowerPC/aix32-vector-vararg-callee-split.ll
+++ b/llvm/test/CodeGen/PowerPC/aix32-vector-vararg-callee-split.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-; RUN: llc -opaque-pointers=0 -verify-machineinstrs -stop-before=ppc-vsx-copy -vec-extabi \
+; RUN: llc -verify-machineinstrs -stop-before=ppc-vsx-copy -vec-extabi \
 ; RUN:     -mcpu=pwr7  -mtriple powerpc-ibm-aix-xcoff < %s | \
 ; RUN: FileCheck %s
 
@@ -17,33 +17,32 @@ define <4 x i32> @split_spill(double %d1, double %d2, double %d3, ...) {
   ; CHECK:   STW [[COPY]], 4, %fixed-stack.0 :: (store (s32) into %fixed-stack.0 + 4)
   ; CHECK:   LIFETIME_START %stack.0.arg_list
   ; CHECK:   [[ADDI:%[0-9]+]]:gprc = ADDI %fixed-stack.0, 0
-  ; CHECK:   [[LXVW4X:%[0-9]+]]:vsrc = LXVW4X $zero, killed [[ADDI]] :: (load (s128) from %ir.4)
+  ; CHECK:   [[LXVW4X:%[0-9]+]]:vsrc = LXVW4X $zero, killed [[ADDI]] :: (load (s128) from %ir.argp.cur.aligned)
   ; CHECK:   LIFETIME_END %stack.0.arg_list
   ; CHECK:   $v2 = COPY [[LXVW4X]]
   ; CHECK:   BLR implicit $lr, implicit $rm, implicit $v2
 entry:
-  %arg_list = alloca i8*, align 4
-  %0 = bitcast i8** %arg_list to i8*
-  call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %0)
-  call void @llvm.va_start(i8* nonnull %0)
-  %argp.cur = load i8*, i8** %arg_list, align 4
-  %1 = ptrtoint i8* %argp.cur to i32
-  %2 = add i32 %1, 15
-  %3 = and i32 %2, -16
-  %argp.cur.aligned = inttoptr i32 %3 to i8*
-  %argp.next = getelementptr inbounds i8, i8* %argp.cur.aligned, i32 16
-  store i8* %argp.next, i8** %arg_list, align 4
-  %4 = inttoptr i32 %3 to <4 x i32>*
-  %5 = load <4 x i32>, <4 x i32>* %4, align 16
-  call void @llvm.va_end(i8* nonnull %0)
-  call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %0)
-  ret <4 x i32> %5
+  %arg_list = alloca ptr, align 4
+  call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %arg_list)
+  call void @llvm.va_start(ptr nonnull %arg_list)
+  %argp.cur = load ptr, ptr %arg_list, align 4
+  %0 = ptrtoint ptr %argp.cur to i32
+  %1 = add i32 %0, 15
+  %2 = and i32 %1, -16
+  %argp.cur.aligned = inttoptr i32 %2 to ptr
+  %argp.next = getelementptr inbounds i8, ptr %argp.cur.aligned, i32 16
+  store ptr %argp.next, ptr %arg_list, align 4
+  %3 = inttoptr i32 %2 to ptr
+  %4 = load <4 x i32>, ptr %3, align 16
+  call void @llvm.va_end(ptr nonnull %arg_list)
+  call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %arg_list)
+  ret <4 x i32> %4
 }
 
-declare void @llvm.lifetime.start.p0i8(i64 immarg, i8* nocapture)
+declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
 
-declare void @llvm.va_start(i8*)
+declare void @llvm.va_start(ptr)
 
-declare void @llvm.va_end(i8*)
+declare void @llvm.va_end(ptr)
 
-declare void @llvm.lifetime.end.p0i8(i64 immarg, i8* nocapture)
+declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)

diff --git a/llvm/test/CodeGen/PowerPC/aix32-vector-vararg-callee.ll b/llvm/test/CodeGen/PowerPC/aix32-vector-vararg-callee.ll
index de31e8ea31f50..e3eb864735277 100644
--- a/llvm/test/CodeGen/PowerPC/aix32-vector-vararg-callee.ll
+++ b/llvm/test/CodeGen/PowerPC/aix32-vector-vararg-callee.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-; RUN: llc -opaque-pointers=0 -verify-machineinstrs -stop-before=ppc-vsx-copy -vec-extabi \
+; RUN: llc -verify-machineinstrs -stop-before=ppc-vsx-copy -vec-extabi \
 ; RUN:     -mcpu=pwr7  -mtriple powerpc-ibm-aix-xcoff < %s | \
 ; RUN: FileCheck %s
 
@@ -24,37 +24,36 @@ define <4 x i32> @callee(i32 %count, ...) {
   ; CHECK:   STW [[COPY]], 24, %fixed-stack.0 :: (store (s32))
   ; CHECK:   LIFETIME_START %stack.0.arg_list
   ; CHECK:   [[ADDI:%[0-9]+]]:gprc = ADDI %fixed-stack.0, 0
-  ; CHECK:   STW killed [[ADDI]], 0, %stack.0.arg_list :: (store (s32) into %ir.0)
+  ; CHECK:   STW killed [[ADDI]], 0, %stack.0.arg_list :: (store (s32) into %ir.arg_list)
   ; CHECK:   [[ADDI1:%[0-9]+]]:gprc = ADDI %fixed-stack.0, 15
   ; CHECK:   [[RLWINM:%[0-9]+]]:gprc = RLWINM killed [[ADDI1]], 0, 0, 27
-  ; CHECK:   [[LXVW4X:%[0-9]+]]:vsrc = LXVW4X $zero, killed [[RLWINM]] :: (load (s128) from %ir.4)
+  ; CHECK:   [[LXVW4X:%[0-9]+]]:vsrc = LXVW4X $zero, killed [[RLWINM]] :: (load (s128) from %ir.argp.cur.aligned)
   ; CHECK:   LIFETIME_END %stack.0.arg_list
   ; CHECK:   $v2 = COPY [[LXVW4X]]
   ; CHECK:   BLR implicit $lr, implicit $rm, implicit $v2
 entry:
-  %arg_list = alloca i8*, align 4
-  %0 = bitcast i8** %arg_list to i8*
-  call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %0)
-  call void @llvm.va_start(i8* nonnull %0)
-  %argp.cur = load i8*, i8** %arg_list, align 4
-  %1 = ptrtoint i8* %argp.cur to i32
-  %2 = add i32 %1, 15
-  %3 = and i32 %2, -16
-  %argp.cur.aligned = inttoptr i32 %3 to i8*
-  %argp.next = getelementptr inbounds i8, i8* %argp.cur.aligned, i32 16
-  store i8* %argp.next, i8** %arg_list, align 4
-  %4 = inttoptr i32 %3 to <4 x i32>*
-  %5 = load <4 x i32>, <4 x i32>* %4, align 16
-  call void @llvm.va_end(i8* nonnull %0)
-  call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %0)
-  ret <4 x i32> %5
+  %arg_list = alloca ptr, align 4
+  call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %arg_list)
+  call void @llvm.va_start(ptr nonnull %arg_list)
+  %argp.cur = load ptr, ptr %arg_list, align 4
+  %0 = ptrtoint ptr %argp.cur to i32
+  %1 = add i32 %0, 15
+  %2 = and i32 %1, -16
+  %argp.cur.aligned = inttoptr i32 %2 to ptr
+  %argp.next = getelementptr inbounds i8, ptr %argp.cur.aligned, i32 16
+  store ptr %argp.next, ptr %arg_list, align 4
+  %3 = inttoptr i32 %2 to ptr
+  %4 = load <4 x i32>, ptr %3, align 16
+  call void @llvm.va_end(ptr nonnull %arg_list)
+  call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %arg_list)
+  ret <4 x i32> %4
 }
 
-declare void @llvm.lifetime.start.p0i8(i64 immarg, i8* nocapture)
+declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
 
-declare void @llvm.va_start(i8*)
+declare void @llvm.va_start(ptr)
 
-declare void @llvm.va_end(i8*)
+declare void @llvm.va_end(ptr)
 
-declare void @llvm.lifetime.end.p0i8(i64 immarg, i8* nocapture)
+declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
 

diff --git a/llvm/test/CodeGen/PowerPC/aix32-vector-vararg-fixed-callee.ll b/llvm/test/CodeGen/PowerPC/aix32-vector-vararg-fixed-callee.ll
index 19485a1411f95..bd3076d64678a 100644
--- a/llvm/test/CodeGen/PowerPC/aix32-vector-vararg-fixed-callee.ll
+++ b/llvm/test/CodeGen/PowerPC/aix32-vector-vararg-fixed-callee.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-; RUN: llc -opaque-pointers=0 -verify-machineinstrs -stop-before=ppc-vsx-copy -vec-extabi \
+; RUN: llc -verify-machineinstrs -stop-before=ppc-vsx-copy -vec-extabi \
 ; RUN:     -mcpu=pwr7  -mtriple powerpc-ibm-aix-xcoff < %s | \
 ; RUN: FileCheck %s
 
@@ -11,38 +11,36 @@ define double @callee(i32 %count, <4 x i32> %vsi, double %next, ...) {
   ; CHECK: bb.0.entry:
   ; CHECK:   LIFETIME_START %stack.0.arg_list
   ; CHECK:   [[ADDI:%[0-9]+]]:gprc = ADDI %fixed-stack.0, 0
-  ; CHECK:   STW killed [[ADDI]], 0, %stack.0.arg_list :: (store (s32) into %ir.0)
+  ; CHECK:   STW killed [[ADDI]], 0, %stack.0.arg_list :: (store (s32) into %ir.arg_list)
   ; CHECK:   [[ADDI1:%[0-9]+]]:gprc = ADDI %fixed-stack.0, 15
   ; CHECK:   [[RLWINM:%[0-9]+]]:gprc_and_gprc_nor0 = RLWINM killed [[ADDI1]], 0, 0, 27
-  ; CHECK:   [[LFD:%[0-9]+]]:f8rc = LFD 16, killed [[RLWINM]] :: (load (s64) from %ir.4, align 16)
+  ; CHECK:   [[LFD:%[0-9]+]]:f8rc = LFD 16, killed [[RLWINM]] :: (load (s64) from %ir.argp.next, align 16)
   ; CHECK:   LIFETIME_END %stack.0.arg_list
   ; CHECK:   $f1 = COPY [[LFD]]
   ; CHECK:   BLR implicit $lr, implicit $rm, implicit $f1
 entry:
-  %arg_list = alloca i8*, align 4
-  %0 = bitcast i8** %arg_list to i8*
-  call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %0)
-  call void @llvm.va_start(i8* nonnull %0)
-  %argp.cur = load i8*, i8** %arg_list, align 4
-  %1 = ptrtoint i8* %argp.cur to i32
-  %2 = add i32 %1, 15
-  %3 = and i32 %2, -16
-  %argp.cur.aligned = inttoptr i32 %3 to i8*
-  %argp.next = getelementptr inbounds i8, i8* %argp.cur.aligned, i32 16
-  %argp.next3 = getelementptr inbounds i8, i8* %argp.cur.aligned, i32 24
-  store i8* %argp.next3, i8** %arg_list, align 4
-  %4 = bitcast i8* %argp.next to double*
-  %5 = load double, double* %4, align 16
-  call void @llvm.va_end(i8* nonnull %0)
-  call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %0)
-  ret double %5
+  %arg_list = alloca ptr, align 4
+  call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %arg_list)
+  call void @llvm.va_start(ptr nonnull %arg_list)
+  %argp.cur = load ptr, ptr %arg_list, align 4
+  %0 = ptrtoint ptr %argp.cur to i32
+  %1 = add i32 %0, 15
+  %2 = and i32 %1, -16
+  %argp.cur.aligned = inttoptr i32 %2 to ptr
+  %argp.next = getelementptr inbounds i8, ptr %argp.cur.aligned, i32 16
+  %argp.next3 = getelementptr inbounds i8, ptr %argp.cur.aligned, i32 24
+  store ptr %argp.next3, ptr %arg_list, align 4
+  %3 = load double, ptr %argp.next, align 16
+  call void @llvm.va_end(ptr nonnull %arg_list)
+  call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %arg_list)
+  ret double %3
 }
 
-declare void @llvm.lifetime.start.p0i8(i64 immarg, i8* nocapture)
+declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
 
-declare void @llvm.va_start(i8*)
+declare void @llvm.va_start(ptr)
 
-declare void @llvm.va_end(i8*)
+declare void @llvm.va_end(ptr)
 
-declare void @llvm.lifetime.end.p0i8(i64 immarg, i8* nocapture)
+declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
 

diff --git a/llvm/test/CodeGen/PowerPC/aix64-vector-vararg-callee.ll b/llvm/test/CodeGen/PowerPC/aix64-vector-vararg-callee.ll
index 08403f00e984c..3349709dbc02d 100644
--- a/llvm/test/CodeGen/PowerPC/aix64-vector-vararg-callee.ll
+++ b/llvm/test/CodeGen/PowerPC/aix64-vector-vararg-callee.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-; RUN: llc -opaque-pointers=0 -verify-machineinstrs -stop-before=ppc-vsx-copy -vec-extabi \
+; RUN: llc -verify-machineinstrs -stop-before=ppc-vsx-copy -vec-extabi \
 ; RUN:     -mcpu=pwr7  -mtriple powerpc64-ibm-aix-xcoff < %s | \
 ; RUN: FileCheck %s
 
@@ -24,37 +24,36 @@ define <4 x i32> @callee(i32 signext %count, ...) {
   ; CHECK:   STD [[COPY]], 48, %fixed-stack.0 :: (store (s64))
   ; CHECK:   LIFETIME_START %stack.0.arg_list
   ; CHECK:   [[ADDI8_:%[0-9]+]]:g8rc = ADDI8 %fixed-stack.0, 0
-  ; CHECK:   STD killed [[ADDI8_]], 0, %stack.0.arg_list :: (store (s64) into %ir.0)
+  ; CHECK:   STD killed [[ADDI8_]], 0, %stack.0.arg_list :: (store (s64) into %ir.arg_list)
   ; CHECK:   [[ADDI8_1:%[0-9]+]]:g8rc = ADDI8 %fixed-stack.0, 15
   ; CHECK:   [[RLDICR:%[0-9]+]]:g8rc = RLDICR killed [[ADDI8_1]], 0, 59
-  ; CHECK:   [[LXVW4X:%[0-9]+]]:vsrc = LXVW4X $zero8, killed [[RLDICR]] :: (load (s128) from %ir.4)
+  ; CHECK:   [[LXVW4X:%[0-9]+]]:vsrc = LXVW4X $zero8, killed [[RLDICR]] :: (load (s128) from %ir.argp.cur.aligned)
   ; CHECK:   LIFETIME_END %stack.0.arg_list
   ; CHECK:   $v2 = COPY [[LXVW4X]]
   ; CHECK:   BLR8 implicit $lr8, implicit $rm, implicit $v2
 entry:
-  %arg_list = alloca i8*, align 8
-  %0 = bitcast i8** %arg_list to i8*
-  call void @llvm.lifetime.start.p0i8(i64 8, i8* nonnull %0)
-  call void @llvm.va_start(i8* nonnull %0)
-  %argp.cur = load i8*, i8** %arg_list, align 8
-  %1 = ptrtoint i8* %argp.cur to i64
-  %2 = add i64 %1, 15
-  %3 = and i64 %2, -16
-  %argp.cur.aligned = inttoptr i64 %3 to i8*
-  %argp.next = getelementptr inbounds i8, i8* %argp.cur.aligned, i64 16
-  store i8* %argp.next, i8** %arg_list, align 8
-  %4 = inttoptr i64 %3 to <4 x i32>*
-  %5 = load <4 x i32>, <4 x i32>* %4, align 16
-  call void @llvm.va_end(i8* nonnull %0)
-  call void @llvm.lifetime.end.p0i8(i64 8, i8* nonnull %0)
-  ret <4 x i32> %5
+  %arg_list = alloca ptr, align 8
+  call void @llvm.lifetime.start.p0(i64 8, ptr nonnull %arg_list)
+  call void @llvm.va_start(ptr nonnull %arg_list)
+  %argp.cur = load ptr, ptr %arg_list, align 8
+  %0 = ptrtoint ptr %argp.cur to i64
+  %1 = add i64 %0, 15
+  %2 = and i64 %1, -16
+  %argp.cur.aligned = inttoptr i64 %2 to ptr
+  %argp.next = getelementptr inbounds i8, ptr %argp.cur.aligned, i64 16
+  store ptr %argp.next, ptr %arg_list, align 8
+  %3 = inttoptr i64 %2 to ptr
+  %4 = load <4 x i32>, ptr %3, align 16
+  call void @llvm.va_end(ptr nonnull %arg_list)
+  call void @llvm.lifetime.end.p0(i64 8, ptr nonnull %arg_list)
+  ret <4 x i32> %4
 }
 
-declare void @llvm.lifetime.start.p0i8(i64 immarg, i8* nocapture)
+declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
 
-declare void @llvm.va_start(i8*)
+declare void @llvm.va_start(ptr)
 
-declare void @llvm.va_end(i8*)
+declare void @llvm.va_end(ptr)
 
-declare void @llvm.lifetime.end.p0i8(i64 immarg, i8* nocapture)
+declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
 

diff --git a/llvm/test/CodeGen/PowerPC/aix64-vector-vararg-fixed-callee.ll b/llvm/test/CodeGen/PowerPC/aix64-vector-vararg-fixed-callee.ll
index 3f58a9f2c1ec7..0024acf49bce4 100644
--- a/llvm/test/CodeGen/PowerPC/aix64-vector-vararg-fixed-callee.ll
+++ b/llvm/test/CodeGen/PowerPC/aix64-vector-vararg-fixed-callee.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-; RUN: llc -opaque-pointers=0 -verify-machineinstrs -stop-before=ppc-vsx-copy -vec-extabi \
+; RUN: llc -verify-machineinstrs -stop-before=ppc-vsx-copy -vec-extabi \
 ; RUN:     -mcpu=pwr7  -mtriple powerpc64-ibm-aix-xcoff < %s | \
 ; RUN: FileCheck %s
 
@@ -18,37 +18,35 @@ define double @callee(i32 signext %count, <4 x i32> %vsi, double %next, ...) {
   ; CHECK:   STD [[COPY]], 16, %fixed-stack.0 :: (store (s64))
   ; CHECK:   LIFETIME_START %stack.0.arg_list
   ; CHECK:   [[ADDI8_:%[0-9]+]]:g8rc = ADDI8 %fixed-stack.0, 0
-  ; CHECK:   STD killed [[ADDI8_]], 0, %stack.0.arg_list :: (store (s64) into %ir.0)
+  ; CHECK:   STD killed [[ADDI8_]], 0, %stack.0.arg_list :: (store (s64) into %ir.arg_list)
   ; CHECK:   [[ADDI8_1:%[0-9]+]]:g8rc = ADDI8 %fixed-stack.0, 15
   ; CHECK:   [[RLDICR:%[0-9]+]]:g8rc_and_g8rc_nox0 = RLDICR killed [[ADDI8_1]], 0, 59
-  ; CHECK:   [[LFD:%[0-9]+]]:f8rc = LFD 16, killed [[RLDICR]] :: (load (s64) from %ir.4, align 16)
+  ; CHECK:   [[LFD:%[0-9]+]]:f8rc = LFD 16, killed [[RLDICR]] :: (load (s64) from %ir.argp.next, align 16)
   ; CHECK:   LIFETIME_END %stack.0.arg_list
   ; CHECK:   $f1 = COPY [[LFD]]
   ; CHECK:   BLR8 implicit $lr8, implicit $rm, implicit $f1
 entry:
-  %arg_list = alloca i8*, align 8
-  %0 = bitcast i8** %arg_list to i8*
-  call void @llvm.lifetime.start.p0i8(i64 8, i8* nonnull %0)
-  call void @llvm.va_start(i8* nonnull %0)
-  %argp.cur = load i8*, i8** %arg_list, align 8
-  %1 = ptrtoint i8* %argp.cur to i64
-  %2 = add i64 %1, 15
-  %3 = and i64 %2, -16
-  %argp.cur.aligned = inttoptr i64 %3 to i8*
-  %argp.next = getelementptr inbounds i8, i8* %argp.cur.aligned, i64 16
-  %argp.next3 = getelementptr inbounds i8, i8* %argp.cur.aligned, i64 24
-  store i8* %argp.next3, i8** %arg_list, align 8
-  %4 = bitcast i8* %argp.next to double*
-  %5 = load double, double* %4, align 16
-  call void @llvm.va_end(i8* nonnull %0)
-  call void @llvm.lifetime.end.p0i8(i64 8, i8* nonnull %0)
-  ret double %5
+  %arg_list = alloca ptr, align 8
+  call void @llvm.lifetime.start.p0(i64 8, ptr nonnull %arg_list)
+  call void @llvm.va_start(ptr nonnull %arg_list)
+  %argp.cur = load ptr, ptr %arg_list, align 8
+  %0 = ptrtoint ptr %argp.cur to i64
+  %1 = add i64 %0, 15
+  %2 = and i64 %1, -16
+  %argp.cur.aligned = inttoptr i64 %2 to ptr
+  %argp.next = getelementptr inbounds i8, ptr %argp.cur.aligned, i64 16
+  %argp.next3 = getelementptr inbounds i8, ptr %argp.cur.aligned, i64 24
+  store ptr %argp.next3, ptr %arg_list, align 8
+  %3 = load double, ptr %argp.next, align 16
+  call void @llvm.va_end(ptr nonnull %arg_list)
+  call void @llvm.lifetime.end.p0(i64 8, ptr nonnull %arg_list)
+  ret double %3
 }
 
-declare void @llvm.lifetime.start.p0i8(i64 immarg, i8* nocapture)
+declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
 
-declare void @llvm.va_start(i8*)
+declare void @llvm.va_start(ptr)
 
-declare void @llvm.va_end(i8*)
+declare void @llvm.va_end(ptr)
 
-declare void @llvm.lifetime.end.p0i8(i64 immarg, i8* nocapture)
+declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)


        


More information about the llvm-commits mailing list