[llvm] 3419026 - Revert "[TargetLowering] Only inspect attributes in the arguments for ArgListEntry"

Arthur Eubanks via llvm-commits llvm-commits at lists.llvm.org
Sun May 16 22:22:06 PDT 2021


Author: Arthur Eubanks
Date: 2021-05-16T22:02:10-07:00
New Revision: 341902672c3e6068f95837652072b10b92685bfc

URL: https://github.com/llvm/llvm-project/commit/341902672c3e6068f95837652072b10b92685bfc
DIFF: https://github.com/llvm/llvm-project/commit/341902672c3e6068f95837652072b10b92685bfc.diff

LOG: Revert "[TargetLowering] Only inspect attributes in the arguments for ArgListEntry"

This reverts commit 16748bd2fb1fe10d7d097961f1988327338f3f9f.

Causes https://crbug.com/1209013

Added: 
    

Modified: 
    llvm/docs/ReleaseNotes.rst
    llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
    llvm/test/CodeGen/AArch64/arm64-this-return.ll
    llvm/test/CodeGen/AArch64/bitfield-extract.ll
    llvm/test/CodeGen/AArch64/tailcall-explicit-sret.ll
    llvm/test/CodeGen/AMDGPU/call-argument-types.ll
    llvm/test/CodeGen/AMDGPU/callee-special-input-vgprs-packed.ll
    llvm/test/CodeGen/AMDGPU/callee-special-input-vgprs.ll
    llvm/test/CodeGen/AMDGPU/gfx-callable-argument-types.ll
    llvm/test/CodeGen/AMDGPU/tail-call-amdgpu-gfx.ll
    llvm/test/CodeGen/ARM/ipra-r0-returned.ll
    llvm/test/CodeGen/ARM/returned-ext.ll
    llvm/test/CodeGen/ARM/this-return.ll
    llvm/test/CodeGen/SPARC/64abi.ll
    llvm/test/CodeGen/SystemZ/args-02.ll
    llvm/test/CodeGen/SystemZ/args-03.ll
    llvm/test/CodeGen/X86/fast-cc-merge-stack-adj.ll
    llvm/test/CodeGen/X86/fast-cc-pass-in-regs.ll
    llvm/test/CodeGen/X86/movtopush.ll
    llvm/test/CodeGen/X86/pop-stack-cleanup.ll
    llvm/test/CodeGen/X86/preallocated.ll
    llvm/test/CodeGen/X86/tailcall-msvc-conventions.ll

Removed: 
    llvm/test/CodeGen/X86/mismatched-byval.ll


################################################################################
diff  --git a/llvm/docs/ReleaseNotes.rst b/llvm/docs/ReleaseNotes.rst
index d96c2080ffa04..98117a24fcee9 100644
--- a/llvm/docs/ReleaseNotes.rst
+++ b/llvm/docs/ReleaseNotes.rst
@@ -74,13 +74,6 @@ Changes to building LLVM
 Changes to TableGen
 -------------------
 
-Changes to Backend Code Generation
-----------------------------------
-
-* When lowering calls, only ABI attributes on the call itself are checked, not
-  the caller. Frontends need to make sure to properly set ABI attributes on
-  calls (and always should have).
-
 Changes to the ARM Backend
 --------------------------
 

diff  --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index 16668bd478703..2df2877d37c55 100644
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -102,32 +102,29 @@ bool TargetLowering::parametersInCSRMatch(const MachineRegisterInfo &MRI,
   return true;
 }
 
-/// Set CallLoweringInfo attribute flags based on the call instruction's
-/// argument attributes.
+/// Set CallLoweringInfo attribute flags based on a call instruction
+/// and called function attributes.
 void TargetLoweringBase::ArgListEntry::setAttributes(const CallBase *Call,
                                                      unsigned ArgIdx) {
-  auto Attrs = Call->getAttributes();
-
-  IsSExt = Attrs.hasParamAttribute(ArgIdx, Attribute::SExt);
-  IsZExt = Attrs.hasParamAttribute(ArgIdx, Attribute::ZExt);
-  IsInReg = Attrs.hasParamAttribute(ArgIdx, Attribute::InReg);
-  IsSRet = Attrs.hasParamAttribute(ArgIdx, Attribute::StructRet);
-  IsNest = Attrs.hasParamAttribute(ArgIdx, Attribute::Nest);
-  IsReturned = Attrs.hasParamAttribute(ArgIdx, Attribute::Returned);
-  IsSwiftSelf = Attrs.hasParamAttribute(ArgIdx, Attribute::SwiftSelf);
-  IsSwiftAsync = Attrs.hasParamAttr(ArgIdx, Attribute::SwiftAsync);
-  IsSwiftError = Attrs.hasParamAttribute(ArgIdx, Attribute::SwiftError);
-  Alignment = Attrs.getParamStackAlignment(ArgIdx);
-
-  IsByVal = Attrs.hasParamAttribute(ArgIdx, Attribute::ByVal);
+  IsSExt = Call->paramHasAttr(ArgIdx, Attribute::SExt);
+  IsZExt = Call->paramHasAttr(ArgIdx, Attribute::ZExt);
+  IsInReg = Call->paramHasAttr(ArgIdx, Attribute::InReg);
+  IsSRet = Call->paramHasAttr(ArgIdx, Attribute::StructRet);
+  IsNest = Call->paramHasAttr(ArgIdx, Attribute::Nest);
+  IsByVal = Call->paramHasAttr(ArgIdx, Attribute::ByVal);
+  IsPreallocated = Call->paramHasAttr(ArgIdx, Attribute::Preallocated);
+  IsInAlloca = Call->paramHasAttr(ArgIdx, Attribute::InAlloca);
+  IsReturned = Call->paramHasAttr(ArgIdx, Attribute::Returned);
+  IsSwiftSelf = Call->paramHasAttr(ArgIdx, Attribute::SwiftSelf);
+  IsSwiftAsync = Call->paramHasAttr(ArgIdx, Attribute::SwiftAsync);
+  IsSwiftError = Call->paramHasAttr(ArgIdx, Attribute::SwiftError);
+  Alignment = Call->getParamStackAlign(ArgIdx);
   ByValType = nullptr;
   if (IsByVal) {
     ByValType = Call->getParamByValType(ArgIdx);
     if (!Alignment)
       Alignment = Call->getParamAlign(ArgIdx);
   }
-  IsInAlloca = Attrs.hasParamAttribute(ArgIdx, Attribute::InAlloca);
-  IsPreallocated = Attrs.hasParamAttribute(ArgIdx, Attribute::Preallocated);
   PreallocatedType = nullptr;
   if (IsPreallocated)
     PreallocatedType = Call->getParamPreallocatedType(ArgIdx);

diff  --git a/llvm/test/CodeGen/AArch64/arm64-this-return.ll b/llvm/test/CodeGen/AArch64/arm64-this-return.ll
index 0aa5dc9edd411..a10b410b8e8e6 100644
--- a/llvm/test/CodeGen/AArch64/arm64-this-return.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-this-return.ll
@@ -38,9 +38,9 @@ entry:
 ; CHECK-NOT: mov x0, {{x[0-9]+}}
 ; CHECK: b {{_?B_ctor_base}}
   %0 = bitcast %struct.C* %this to %struct.A*
-  %call = tail call %struct.A* @A_ctor_base(%struct.A* returned %0)
+  %call = tail call %struct.A* @A_ctor_base(%struct.A* %0)
   %1 = getelementptr inbounds %struct.C, %struct.C* %this, i32 0, i32 0
-  %call2 = tail call %struct.B* @B_ctor_base(%struct.B* returned %1, i32 %x)
+  %call2 = tail call %struct.B* @B_ctor_base(%struct.B* %1, i32 %x)
   ret %struct.C* %this
 }
 
@@ -88,7 +88,7 @@ define %struct.C* @C_ctor_complete(%struct.C* %this, i32 %x) {
 entry:
 ; CHECK-LABEL: C_ctor_complete:
 ; CHECK: b {{_?C_ctor_base}}
-  %call = tail call %struct.C* @C_ctor_base(%struct.C* returned %this, i32 %x)
+  %call = tail call %struct.C* @C_ctor_base(%struct.C* %this, i32 %x)
   ret %struct.C* %this
 }
 
@@ -135,8 +135,8 @@ entry:
 ; CHECK-NOT: mov x0, {{x[0-9]+}}
 ; CHECK: b {{_?B_ctor_complete}}
   %b = getelementptr inbounds %struct.D, %struct.D* %this, i32 0, i32 0
-  %call = tail call %struct.B* @B_ctor_complete(%struct.B* returned %b, i32 %x)
-  %call2 = tail call %struct.B* @B_ctor_complete(%struct.B* returned %b, i32 %x)
+  %call = tail call %struct.B* @B_ctor_complete(%struct.B* %b, i32 %x)
+  %call2 = tail call %struct.B* @B_ctor_complete(%struct.B* %b, i32 %x)
   ret %struct.D* %this
 }
 
@@ -166,8 +166,8 @@ entry:
 ; CHECK-LABEL: E_ctor_base:
 ; CHECK-NOT: b {{_?B_ctor_complete}}
   %b = getelementptr inbounds %struct.E, %struct.E* %this, i32 0, i32 0
-  %call = tail call %struct.B* @B_ctor_complete(%struct.B* returned %b, i32 %x)
+  %call = tail call %struct.B* @B_ctor_complete(%struct.B* %b, i32 %x)
   %b2 = getelementptr inbounds %struct.E, %struct.E* %this, i32 0, i32 1
-  %call2 = tail call %struct.B* @B_ctor_complete(%struct.B* returned %b2, i32 %x)
+  %call2 = tail call %struct.B* @B_ctor_complete(%struct.B* %b2, i32 %x)
   ret %struct.E* %this
 }

diff  --git a/llvm/test/CodeGen/AArch64/bitfield-extract.ll b/llvm/test/CodeGen/AArch64/bitfield-extract.ll
index 5462dfd771712..69faf467d0789 100644
--- a/llvm/test/CodeGen/AArch64/bitfield-extract.ll
+++ b/llvm/test/CodeGen/AArch64/bitfield-extract.ll
@@ -91,7 +91,7 @@ define signext i16 @test10(i64 %a) {
 define void @test11(i64 %a) {
   %tmp = lshr i64 %a, 23
   %res = trunc i64 %tmp to i16
-  call void @use(i16 signext %res, i64 %tmp)
+  call void @use(i16 %res, i64 %tmp)
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/tailcall-explicit-sret.ll b/llvm/test/CodeGen/AArch64/tailcall-explicit-sret.ll
index acbf3756141ba..f1eb8d82b54f6 100644
--- a/llvm/test/CodeGen/AArch64/tailcall-explicit-sret.ll
+++ b/llvm/test/CodeGen/AArch64/tailcall-explicit-sret.ll
@@ -11,7 +11,7 @@ declare void @test_explicit_sret(i1024* sret(i1024)) #0
 ; CHECK-LABEL: _test_tailcall_explicit_sret:
 ; CHECK-NEXT: b _test_explicit_sret
 define void @test_tailcall_explicit_sret(i1024* sret(i1024) %arg) #0 {
-  tail call void @test_explicit_sret(i1024* sret(i1024) %arg)
+  tail call void @test_explicit_sret(i1024* %arg)
   ret void
 }
 
@@ -20,7 +20,7 @@ define void @test_tailcall_explicit_sret(i1024* sret(i1024) %arg) #0 {
 ; CHECK: bl _test_explicit_sret
 ; CHECK: ret
 define void @test_call_explicit_sret(i1024* sret(i1024) %arg) #0 {
-  call void @test_explicit_sret(i1024* sret(i1024) %arg)
+  call void @test_explicit_sret(i1024* %arg)
   ret void
 }
 
@@ -30,7 +30,7 @@ define void @test_call_explicit_sret(i1024* sret(i1024) %arg) #0 {
 ; CHECK: ret
 define void @test_tailcall_explicit_sret_alloca_unused() #0 {
   %l = alloca i1024, align 8
-  tail call void @test_explicit_sret(i1024* sret(i1024) %l)
+  tail call void @test_explicit_sret(i1024* %l)
   ret void
 }
 
@@ -44,7 +44,7 @@ define void @test_tailcall_explicit_sret_alloca_dummyusers(i1024* %ptr) #0 {
   %l = alloca i1024, align 8
   %r = load i1024, i1024* %ptr, align 8
   store i1024 %r, i1024* %l, align 8
-  tail call void @test_explicit_sret(i1024* sret(i1024) %l)
+  tail call void @test_explicit_sret(i1024* %l)
   ret void
 }
 
@@ -56,7 +56,7 @@ define void @test_tailcall_explicit_sret_alloca_dummyusers(i1024* %ptr) #0 {
 ; CHECK: ret
 define void @test_tailcall_explicit_sret_gep(i1024* %ptr) #0 {
   %ptr2 = getelementptr i1024, i1024* %ptr, i32 1
-  tail call void @test_explicit_sret(i1024* sret(i1024) %ptr2)
+  tail call void @test_explicit_sret(i1024* %ptr2)
   ret void
 }
 
@@ -69,7 +69,7 @@ define void @test_tailcall_explicit_sret_gep(i1024* %ptr) #0 {
 ; CHECK: ret
 define i1024 @test_tailcall_explicit_sret_alloca_returned() #0 {
   %l = alloca i1024, align 8
-  tail call void @test_explicit_sret(i1024* sret(i1024) %l)
+  tail call void @test_explicit_sret(i1024* %l)
   %r = load i1024, i1024* %l, align 8
   ret i1024 %r
 }

diff  --git a/llvm/test/CodeGen/AMDGPU/call-argument-types.ll b/llvm/test/CodeGen/AMDGPU/call-argument-types.ll
index 503a18dd36f7b..7f85f055082e7 100644
--- a/llvm/test/CodeGen/AMDGPU/call-argument-types.ll
+++ b/llvm/test/CodeGen/AMDGPU/call-argument-types.ll
@@ -92,7 +92,7 @@ define amdgpu_kernel void @test_call_external_void_func_i1_imm() #0 {
 ; GCN-NEXT: s_endpgm
 define amdgpu_kernel void @test_call_external_void_func_i1_signext(i32) #0 {
   %var = load volatile i1, i1 addrspace(1)* undef
-  call void @external_void_func_i1_signext(i1 signext %var)
+  call void @external_void_func_i1_signext(i1 %var)
   ret void
 }
 
@@ -113,7 +113,7 @@ define amdgpu_kernel void @test_call_external_void_func_i1_signext(i32) #0 {
 ; GCN-NEXT: s_endpgm
 define amdgpu_kernel void @test_call_external_void_func_i1_zeroext(i32) #0 {
   %var = load volatile i1, i1 addrspace(1)* undef
-  call void @external_void_func_i1_zeroext(i1 zeroext %var)
+  call void @external_void_func_i1_zeroext(i1 %var)
   ret void
 }
 
@@ -148,7 +148,7 @@ define amdgpu_kernel void @test_call_external_void_func_i8_imm(i32) #0 {
 ; GCN-NEXT: s_endpgm
 define amdgpu_kernel void @test_call_external_void_func_i8_signext(i32) #0 {
   %var = load volatile i8, i8 addrspace(1)* undef
-  call void @external_void_func_i8_signext(i8 signext %var)
+  call void @external_void_func_i8_signext(i8 %var)
   ret void
 }
 
@@ -166,7 +166,7 @@ define amdgpu_kernel void @test_call_external_void_func_i8_signext(i32) #0 {
 ; GCN-NEXT: s_endpgm
 define amdgpu_kernel void @test_call_external_void_func_i8_zeroext(i32) #0 {
   %var = load volatile i8, i8 addrspace(1)* undef
-  call void @external_void_func_i8_zeroext(i8 zeroext %var)
+  call void @external_void_func_i8_zeroext(i8 %var)
   ret void
 }
 
@@ -195,7 +195,7 @@ define amdgpu_kernel void @test_call_external_void_func_i16_imm() #0 {
 ; GCN-NEXT: s_endpgm
 define amdgpu_kernel void @test_call_external_void_func_i16_signext(i32) #0 {
   %var = load volatile i16, i16 addrspace(1)* undef
-  call void @external_void_func_i16_signext(i16 signext %var)
+  call void @external_void_func_i16_signext(i16 %var)
   ret void
 }
 
@@ -212,7 +212,7 @@ define amdgpu_kernel void @test_call_external_void_func_i16_signext(i32) #0 {
 ; GCN-NEXT: s_endpgm
 define amdgpu_kernel void @test_call_external_void_func_i16_zeroext(i32) #0 {
   %var = load volatile i16, i16 addrspace(1)* undef
-  call void @external_void_func_i16_zeroext(i16 zeroext %var)
+  call void @external_void_func_i16_zeroext(i16 %var)
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/AMDGPU/callee-special-input-vgprs-packed.ll b/llvm/test/CodeGen/AMDGPU/callee-special-input-vgprs-packed.ll
index 807124bf6d6ba..b02c6c9cb532c 100644
--- a/llvm/test/CodeGen/AMDGPU/callee-special-input-vgprs-packed.ll
+++ b/llvm/test/CodeGen/AMDGPU/callee-special-input-vgprs-packed.ll
@@ -517,7 +517,7 @@ define amdgpu_kernel void @kern_call_too_many_args_use_workitem_id_x_byval() #1
     i32 210, i32 220, i32 230, i32 240,
     i32 250, i32 260, i32 270, i32 280,
     i32 290, i32 300, i32 310, i32 320,
-    i32 addrspace(5)* byval(i32) %alloca)
+    i32 addrspace(5)* %alloca)
   ret void
 }
 
@@ -541,7 +541,7 @@ define void @func_call_too_many_args_use_workitem_id_x_byval() #1 {
     i32 210, i32 220, i32 230, i32 240,
     i32 250, i32 260, i32 270, i32 280,
     i32 290, i32 300, i32 310, i32 320,
-    i32 addrspace(5)* byval(i32) %alloca)
+    i32 addrspace(5)* %alloca)
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/AMDGPU/callee-special-input-vgprs.ll b/llvm/test/CodeGen/AMDGPU/callee-special-input-vgprs.ll
index e44bf8f57cf8e..1a502d5e655e8 100644
--- a/llvm/test/CodeGen/AMDGPU/callee-special-input-vgprs.ll
+++ b/llvm/test/CodeGen/AMDGPU/callee-special-input-vgprs.ll
@@ -649,7 +649,7 @@ define amdgpu_kernel void @kern_call_too_many_args_use_workitem_id_x_byval() #1
     i32 210, i32 220, i32 230, i32 240,
     i32 250, i32 260, i32 270, i32 280,
     i32 290, i32 300, i32 310, i32 320,
-    i32 addrspace(5)* byval(i32) %alloca)
+    i32 addrspace(5)* %alloca)
   ret void
 }
 
@@ -686,7 +686,7 @@ define void @func_call_too_many_args_use_workitem_id_x_byval() #1 {
     i32 210, i32 220, i32 230, i32 240,
     i32 250, i32 260, i32 270, i32 280,
     i32 290, i32 300, i32 310, i32 320,
-    i32 addrspace(5)* byval(i32) %alloca)
+    i32 addrspace(5)* %alloca)
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/AMDGPU/gfx-callable-argument-types.ll b/llvm/test/CodeGen/AMDGPU/gfx-callable-argument-types.ll
index bbfc9813c7eaa..e175d532b34e6 100644
--- a/llvm/test/CodeGen/AMDGPU/gfx-callable-argument-types.ll
+++ b/llvm/test/CodeGen/AMDGPU/gfx-callable-argument-types.ll
@@ -214,7 +214,7 @@ define amdgpu_gfx void @test_call_external_void_func_i1_signext(i32) #0 {
 ; GFX10-NEXT:    s_waitcnt vmcnt(0)
 ; GFX10-NEXT:    s_setpc_b64 s[4:5]
   %var = load volatile i1, i1 addrspace(1)* undef
-  call amdgpu_gfx void @external_void_func_i1_signext(i1 signext%var)
+  call amdgpu_gfx void @external_void_func_i1_signext(i1 %var)
   ret void
 }
 
@@ -280,7 +280,7 @@ define amdgpu_gfx void @test_call_external_void_func_i1_zeroext(i32) #0 {
 ; GFX10-NEXT:    s_waitcnt vmcnt(0)
 ; GFX10-NEXT:    s_setpc_b64 s[4:5]
   %var = load volatile i1, i1 addrspace(1)* undef
-  call amdgpu_gfx void @external_void_func_i1_zeroext(i1 zeroext %var)
+  call amdgpu_gfx void @external_void_func_i1_zeroext(i1 %var)
   ret void
 }
 
@@ -401,7 +401,7 @@ define amdgpu_gfx void @test_call_external_void_func_i8_signext(i32) #0 {
 ; GFX10-NEXT:    s_waitcnt vmcnt(0)
 ; GFX10-NEXT:    s_setpc_b64 s[4:5]
   %var = load volatile i8, i8 addrspace(1)* undef
-  call amdgpu_gfx void @external_void_func_i8_signext(i8 signext %var)
+  call amdgpu_gfx void @external_void_func_i8_signext(i8 %var)
   ret void
 }
 
@@ -463,7 +463,7 @@ define amdgpu_gfx void @test_call_external_void_func_i8_zeroext(i32) #0 {
 ; GFX10-NEXT:    s_waitcnt vmcnt(0)
 ; GFX10-NEXT:    s_setpc_b64 s[4:5]
   %var = load volatile i8, i8 addrspace(1)* undef
-  call amdgpu_gfx void @external_void_func_i8_zeroext(i8 zeroext %var)
+  call amdgpu_gfx void @external_void_func_i8_zeroext(i8 %var)
   ret void
 }
 
@@ -584,7 +584,7 @@ define amdgpu_gfx void @test_call_external_void_func_i16_signext(i32) #0 {
 ; GFX10-NEXT:    s_waitcnt vmcnt(0)
 ; GFX10-NEXT:    s_setpc_b64 s[4:5]
   %var = load volatile i16, i16 addrspace(1)* undef
-  call amdgpu_gfx void @external_void_func_i16_signext(i16 signext %var)
+  call amdgpu_gfx void @external_void_func_i16_signext(i16 %var)
   ret void
 }
 
@@ -646,7 +646,7 @@ define amdgpu_gfx void @test_call_external_void_func_i16_zeroext(i32) #0 {
 ; GFX10-NEXT:    s_waitcnt vmcnt(0)
 ; GFX10-NEXT:    s_setpc_b64 s[4:5]
   %var = load volatile i16, i16 addrspace(1)* undef
-  call amdgpu_gfx void @external_void_func_i16_zeroext(i16 zeroext %var)
+  call amdgpu_gfx void @external_void_func_i16_zeroext(i16 %var)
   ret void
 }
 
@@ -3081,7 +3081,7 @@ define amdgpu_gfx void @test_call_external_void_func_byval_struct_i8_i32() #0 {
   %gep1 = getelementptr inbounds { i8, i32 }, { i8, i32 } addrspace(5)* %val, i32 0, i32 1
   store i8 3, i8 addrspace(5)* %gep0
   store i32 8, i32 addrspace(5)* %gep1
-  call amdgpu_gfx void @external_void_func_byval_struct_i8_i32({ i8, i32 } addrspace(5)* byval({ i8, i32 }) %val)
+  call amdgpu_gfx void @external_void_func_byval_struct_i8_i32({ i8, i32 } addrspace(5)* %val)
   ret void
 }
 
@@ -3173,7 +3173,7 @@ define amdgpu_gfx void @test_call_external_void_func_sret_struct_i8_i32_byval_st
   %in.gep1 = getelementptr inbounds { i8, i32 }, { i8, i32 } addrspace(5)* %in.val, i32 0, i32 1
   store i8 3, i8 addrspace(5)* %in.gep0
   store i32 8, i32 addrspace(5)* %in.gep1
-  call amdgpu_gfx void @external_void_func_sret_struct_i8_i32_byval_struct_i8_i32({ i8, i32 } addrspace(5)* sret({ i8, i32 }) %out.val, { i8, i32 } addrspace(5)* byval({ i8, i32 }) %in.val)
+  call amdgpu_gfx void @external_void_func_sret_struct_i8_i32_byval_struct_i8_i32({ i8, i32 } addrspace(5)* %out.val, { i8, i32 } addrspace(5)* %in.val)
   %out.gep0 = getelementptr inbounds { i8, i32 }, { i8, i32 } addrspace(5)* %out.val, i32 0, i32 0
   %out.gep1 = getelementptr inbounds { i8, i32 }, { i8, i32 } addrspace(5)* %out.val, i32 0, i32 1
   %out.val0 = load i8, i8 addrspace(5)* %out.gep0
@@ -3384,7 +3384,7 @@ define amdgpu_gfx void @test_call_external_void_func_i1_imm_inreg() #0 {
 ; GFX10-NEXT:    s_mov_b32 exec_lo, s6
 ; GFX10-NEXT:    s_waitcnt vmcnt(0)
 ; GFX10-NEXT:    s_setpc_b64 s[4:5]
-  call amdgpu_gfx void @external_void_func_i1_inreg(i1 inreg true)
+  call amdgpu_gfx void @external_void_func_i1_inreg(i1 true)
   ret void
 }
 
@@ -3443,7 +3443,7 @@ define amdgpu_gfx void @test_call_external_void_func_i8_imm_inreg(i32) #0 {
 ; GFX10-NEXT:    s_mov_b32 exec_lo, s6
 ; GFX10-NEXT:    s_waitcnt vmcnt(0)
 ; GFX10-NEXT:    s_setpc_b64 s[4:5]
-  call amdgpu_gfx void @external_void_func_i8_inreg(i8 inreg 123)
+  call amdgpu_gfx void @external_void_func_i8_inreg(i8 123)
   ret void
 }
 
@@ -3502,7 +3502,7 @@ define amdgpu_gfx void @test_call_external_void_func_i16_imm_inreg() #0 {
 ; GFX10-NEXT:    s_mov_b32 exec_lo, s6
 ; GFX10-NEXT:    s_waitcnt vmcnt(0)
 ; GFX10-NEXT:    s_setpc_b64 s[4:5]
-  call amdgpu_gfx void @external_void_func_i16_inreg(i16 inreg 123)
+  call amdgpu_gfx void @external_void_func_i16_inreg(i16 123)
   ret void
 }
 
@@ -3561,7 +3561,7 @@ define amdgpu_gfx void @test_call_external_void_func_i32_imm_inreg(i32) #0 {
 ; GFX10-NEXT:    s_mov_b32 exec_lo, s6
 ; GFX10-NEXT:    s_waitcnt vmcnt(0)
 ; GFX10-NEXT:    s_setpc_b64 s[4:5]
-  call amdgpu_gfx void @external_void_func_i32_inreg(i32 inreg 42)
+  call amdgpu_gfx void @external_void_func_i32_inreg(i32 42)
   ret void
 }
 
@@ -3622,7 +3622,7 @@ define amdgpu_gfx void @test_call_external_void_func_i64_imm_inreg() #0 {
 ; GFX10-NEXT:    s_mov_b32 exec_lo, s6
 ; GFX10-NEXT:    s_waitcnt vmcnt(0)
 ; GFX10-NEXT:    s_setpc_b64 s[4:5]
-  call amdgpu_gfx void @external_void_func_i64_inreg(i64 inreg 123)
+  call amdgpu_gfx void @external_void_func_i64_inreg(i64 123)
   ret void
 }
 
@@ -3684,7 +3684,7 @@ define amdgpu_gfx void @test_call_external_void_func_v2i64_inreg() #0 {
 ; GFX10-NEXT:    s_waitcnt vmcnt(0)
 ; GFX10-NEXT:    s_setpc_b64 s[4:5]
   %val = load <2 x i64>, <2 x i64> addrspace(4)* null
-  call amdgpu_gfx void @external_void_func_v2i64_inreg(<2 x i64> inreg %val)
+  call amdgpu_gfx void @external_void_func_v2i64_inreg(<2 x i64> %val)
   ret void
 }
 
@@ -3749,7 +3749,7 @@ define amdgpu_gfx void @test_call_external_void_func_v2i64_imm_inreg() #0 {
 ; GFX10-NEXT:    s_mov_b32 exec_lo, s6
 ; GFX10-NEXT:    s_waitcnt vmcnt(0)
 ; GFX10-NEXT:    s_setpc_b64 s[4:5]
-  call amdgpu_gfx void @external_void_func_v2i64_inreg(<2 x i64> inreg <i64 8589934593, i64 17179869187>)
+  call amdgpu_gfx void @external_void_func_v2i64_inreg(<2 x i64> <i64 8589934593, i64 17179869187>)
   ret void
 }
 
@@ -3817,7 +3817,7 @@ define amdgpu_gfx void @test_call_external_void_func_v3i64_inreg() #0 {
   %load = load <2 x i64>, <2 x i64> addrspace(4)* null
   %val = shufflevector <2 x i64> %load, <2 x i64> <i64 8589934593, i64 undef>, <3 x i32> <i32 0, i32 1, i32 2>
 
-  call amdgpu_gfx void @external_void_func_v3i64_inreg(<3 x i64> inreg %val)
+  call amdgpu_gfx void @external_void_func_v3i64_inreg(<3 x i64> %val)
   ret void
 }
 
@@ -3888,7 +3888,7 @@ define amdgpu_gfx void @test_call_external_void_func_v4i64_inreg() #0 {
 ; GFX10-NEXT:    s_setpc_b64 s[4:5]
   %load = load <2 x i64>, <2 x i64> addrspace(4)* null
   %val = shufflevector <2 x i64> %load, <2 x i64> <i64 8589934593, i64 17179869187>, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-  call amdgpu_gfx void @external_void_func_v4i64_inreg(<4 x i64> inreg %val)
+  call amdgpu_gfx void @external_void_func_v4i64_inreg(<4 x i64> %val)
   ret void
 }
 
@@ -3947,7 +3947,7 @@ define amdgpu_gfx void @test_call_external_void_func_f16_imm_inreg() #0 {
 ; GFX10-NEXT:    s_mov_b32 exec_lo, s6
 ; GFX10-NEXT:    s_waitcnt vmcnt(0)
 ; GFX10-NEXT:    s_setpc_b64 s[4:5]
-  call amdgpu_gfx void @external_void_func_f16_inreg(half inreg 4.0)
+  call amdgpu_gfx void @external_void_func_f16_inreg(half 4.0)
   ret void
 }
 
@@ -4006,7 +4006,7 @@ define amdgpu_gfx void @test_call_external_void_func_f32_imm_inreg() #0 {
 ; GFX10-NEXT:    s_mov_b32 exec_lo, s6
 ; GFX10-NEXT:    s_waitcnt vmcnt(0)
 ; GFX10-NEXT:    s_setpc_b64 s[4:5]
-  call amdgpu_gfx void @external_void_func_f32_inreg(float inreg 4.0)
+  call amdgpu_gfx void @external_void_func_f32_inreg(float 4.0)
   ret void
 }
 
@@ -4067,7 +4067,7 @@ define amdgpu_gfx void @test_call_external_void_func_v2f32_imm_inreg() #0 {
 ; GFX10-NEXT:    s_mov_b32 exec_lo, s6
 ; GFX10-NEXT:    s_waitcnt vmcnt(0)
 ; GFX10-NEXT:    s_setpc_b64 s[4:5]
-  call amdgpu_gfx void @external_void_func_v2f32_inreg(<2 x float> inreg <float 1.0, float 2.0>)
+  call amdgpu_gfx void @external_void_func_v2f32_inreg(<2 x float> <float 1.0, float 2.0>)
   ret void
 }
 
@@ -4130,7 +4130,7 @@ define amdgpu_gfx void @test_call_external_void_func_v3f32_imm_inreg() #0 {
 ; GFX10-NEXT:    s_mov_b32 exec_lo, s6
 ; GFX10-NEXT:    s_waitcnt vmcnt(0)
 ; GFX10-NEXT:    s_setpc_b64 s[4:5]
-  call amdgpu_gfx void @external_void_func_v3f32_inreg(<3 x float> inreg <float 1.0, float 2.0, float 4.0>)
+  call amdgpu_gfx void @external_void_func_v3f32_inreg(<3 x float> <float 1.0, float 2.0, float 4.0>)
   ret void
 }
 
@@ -4197,7 +4197,7 @@ define amdgpu_gfx void @test_call_external_void_func_v5f32_imm_inreg() #0 {
 ; GFX10-NEXT:    s_mov_b32 exec_lo, s6
 ; GFX10-NEXT:    s_waitcnt vmcnt(0)
 ; GFX10-NEXT:    s_setpc_b64 s[4:5]
-  call amdgpu_gfx void @external_void_func_v5f32_inreg(<5 x float> inreg <float 1.0, float 2.0, float 4.0, float -1.0, float 0.5>)
+  call amdgpu_gfx void @external_void_func_v5f32_inreg(<5 x float> <float 1.0, float 2.0, float 4.0, float -1.0, float 0.5>)
   ret void
 }
 
@@ -4258,7 +4258,7 @@ define amdgpu_gfx void @test_call_external_void_func_f64_imm_inreg() #0 {
 ; GFX10-NEXT:    s_mov_b32 exec_lo, s6
 ; GFX10-NEXT:    s_waitcnt vmcnt(0)
 ; GFX10-NEXT:    s_setpc_b64 s[4:5]
-  call amdgpu_gfx void @external_void_func_f64_inreg(double inreg 4.0)
+  call amdgpu_gfx void @external_void_func_f64_inreg(double 4.0)
   ret void
 }
 
@@ -4323,7 +4323,7 @@ define amdgpu_gfx void @test_call_external_void_func_v2f64_imm_inreg() #0 {
 ; GFX10-NEXT:    s_mov_b32 exec_lo, s6
 ; GFX10-NEXT:    s_waitcnt vmcnt(0)
 ; GFX10-NEXT:    s_setpc_b64 s[4:5]
-  call amdgpu_gfx void @external_void_func_v2f64_inreg(<2 x double> inreg <double 2.0, double 4.0>)
+  call amdgpu_gfx void @external_void_func_v2f64_inreg(<2 x double> <double 2.0, double 4.0>)
   ret void
 }
 
@@ -4392,7 +4392,7 @@ define amdgpu_gfx void @test_call_external_void_func_v3f64_imm_inreg() #0 {
 ; GFX10-NEXT:    s_mov_b32 exec_lo, s6
 ; GFX10-NEXT:    s_waitcnt vmcnt(0)
 ; GFX10-NEXT:    s_setpc_b64 s[4:5]
-  call amdgpu_gfx void @external_void_func_v3f64_inreg(<3 x double> inreg <double 2.0, double 4.0, double 8.0>)
+  call amdgpu_gfx void @external_void_func_v3f64_inreg(<3 x double> <double 2.0, double 4.0, double 8.0>)
   ret void
 }
 
@@ -4452,7 +4452,7 @@ define amdgpu_gfx void @test_call_external_void_func_v2i16_inreg() #0 {
 ; GFX10-NEXT:    s_waitcnt vmcnt(0)
 ; GFX10-NEXT:    s_setpc_b64 s[4:5]
   %val = load <2 x i16>, <2 x i16> addrspace(4)* undef
-  call amdgpu_gfx void @external_void_func_v2i16_inreg(<2 x i16> inreg %val)
+  call amdgpu_gfx void @external_void_func_v2i16_inreg(<2 x i16> %val)
   ret void
 }
 
@@ -4512,7 +4512,7 @@ define amdgpu_gfx void @test_call_external_void_func_v3i16_inreg() #0 {
 ; GFX10-NEXT:    s_waitcnt vmcnt(0)
 ; GFX10-NEXT:    s_setpc_b64 s[4:5]
   %val = load <3 x i16>, <3 x i16> addrspace(4)* undef
-  call amdgpu_gfx void @external_void_func_v3i16_inreg(<3 x i16> inreg %val)
+  call amdgpu_gfx void @external_void_func_v3i16_inreg(<3 x i16> %val)
   ret void
 }
 
@@ -4572,7 +4572,7 @@ define amdgpu_gfx void @test_call_external_void_func_v3f16_inreg() #0 {
 ; GFX10-NEXT:    s_waitcnt vmcnt(0)
 ; GFX10-NEXT:    s_setpc_b64 s[4:5]
   %val = load <3 x half>, <3 x half> addrspace(4)* undef
-  call amdgpu_gfx void @external_void_func_v3f16_inreg(<3 x half> inreg %val)
+  call amdgpu_gfx void @external_void_func_v3f16_inreg(<3 x half> %val)
   ret void
 }
 
@@ -4633,7 +4633,7 @@ define amdgpu_gfx void @test_call_external_void_func_v3i16_imm_inreg() #0 {
 ; GFX10-NEXT:    s_mov_b32 exec_lo, s6
 ; GFX10-NEXT:    s_waitcnt vmcnt(0)
 ; GFX10-NEXT:    s_setpc_b64 s[4:5]
-  call amdgpu_gfx void @external_void_func_v3i16_inreg(<3 x i16> inreg <i16 1, i16 2, i16 3>)
+  call amdgpu_gfx void @external_void_func_v3i16_inreg(<3 x i16> <i16 1, i16 2, i16 3>)
   ret void
 }
 
@@ -4694,7 +4694,7 @@ define amdgpu_gfx void @test_call_external_void_func_v3f16_imm_inreg() #0 {
 ; GFX10-NEXT:    s_mov_b32 exec_lo, s6
 ; GFX10-NEXT:    s_waitcnt vmcnt(0)
 ; GFX10-NEXT:    s_setpc_b64 s[4:5]
-  call amdgpu_gfx void @external_void_func_v3f16_inreg(<3 x half> inreg <half 1.0, half 2.0, half 4.0>)
+  call amdgpu_gfx void @external_void_func_v3f16_inreg(<3 x half> <half 1.0, half 2.0, half 4.0>)
   ret void
 }
 
@@ -4754,7 +4754,7 @@ define amdgpu_gfx void @test_call_external_void_func_v4i16_inreg() #0 {
 ; GFX10-NEXT:    s_waitcnt vmcnt(0)
 ; GFX10-NEXT:    s_setpc_b64 s[4:5]
   %val = load <4 x i16>, <4 x i16> addrspace(4)* undef
-  call amdgpu_gfx void @external_void_func_v4i16_inreg(<4 x i16> inreg %val)
+  call amdgpu_gfx void @external_void_func_v4i16_inreg(<4 x i16> %val)
   ret void
 }
 
@@ -4815,7 +4815,7 @@ define amdgpu_gfx void @test_call_external_void_func_v4i16_imm_inreg() #0 {
 ; GFX10-NEXT:    s_mov_b32 exec_lo, s6
 ; GFX10-NEXT:    s_waitcnt vmcnt(0)
 ; GFX10-NEXT:    s_setpc_b64 s[4:5]
-  call amdgpu_gfx void @external_void_func_v4i16_inreg(<4 x i16> inreg <i16 1, i16 2, i16 3, i16 4>)
+  call amdgpu_gfx void @external_void_func_v4i16_inreg(<4 x i16> <i16 1, i16 2, i16 3, i16 4>)
   ret void
 }
 
@@ -4875,7 +4875,7 @@ define amdgpu_gfx void @test_call_external_void_func_v2f16_inreg() #0 {
 ; GFX10-NEXT:    s_waitcnt vmcnt(0)
 ; GFX10-NEXT:    s_setpc_b64 s[4:5]
   %val = load <2 x half>, <2 x half> addrspace(4)* undef
-  call amdgpu_gfx void @external_void_func_v2f16_inreg(<2 x half> inreg %val)
+  call amdgpu_gfx void @external_void_func_v2f16_inreg(<2 x half> %val)
   ret void
 }
 
@@ -4935,7 +4935,7 @@ define amdgpu_gfx void @test_call_external_void_func_v2i32_inreg() #0 {
 ; GFX10-NEXT:    s_waitcnt vmcnt(0)
 ; GFX10-NEXT:    s_setpc_b64 s[4:5]
   %val = load <2 x i32>, <2 x i32> addrspace(4)* undef
-  call amdgpu_gfx void @external_void_func_v2i32_inreg(<2 x i32> inreg %val)
+  call amdgpu_gfx void @external_void_func_v2i32_inreg(<2 x i32> %val)
   ret void
 }
 
@@ -4996,7 +4996,7 @@ define amdgpu_gfx void @test_call_external_void_func_v2i32_imm_inreg() #0 {
 ; GFX10-NEXT:    s_mov_b32 exec_lo, s6
 ; GFX10-NEXT:    s_waitcnt vmcnt(0)
 ; GFX10-NEXT:    s_setpc_b64 s[4:5]
-  call amdgpu_gfx void @external_void_func_v2i32_inreg(<2 x i32> inreg <i32 1, i32 2>)
+  call amdgpu_gfx void @external_void_func_v2i32_inreg(<2 x i32> <i32 1, i32 2>)
   ret void
 }
 
@@ -5059,7 +5059,7 @@ define amdgpu_gfx void @test_call_external_void_func_v3i32_imm_inreg(i32) #0 {
 ; GFX10-NEXT:    s_mov_b32 exec_lo, s6
 ; GFX10-NEXT:    s_waitcnt vmcnt(0)
 ; GFX10-NEXT:    s_setpc_b64 s[4:5]
-  call amdgpu_gfx void @external_void_func_v3i32_inreg(<3 x i32> inreg <i32 3, i32 4, i32 5>)
+  call amdgpu_gfx void @external_void_func_v3i32_inreg(<3 x i32> <i32 3, i32 4, i32 5>)
   ret void
 }
 
@@ -5124,7 +5124,7 @@ define amdgpu_gfx void @test_call_external_void_func_v3i32_i32_inreg(i32) #0 {
 ; GFX10-NEXT:    s_mov_b32 exec_lo, s6
 ; GFX10-NEXT:    s_waitcnt vmcnt(0)
 ; GFX10-NEXT:    s_setpc_b64 s[4:5]
-  call amdgpu_gfx void @external_void_func_v3i32_i32_inreg(<3 x i32> inreg <i32 3, i32 4, i32 5>, i32 inreg 6)
+  call amdgpu_gfx void @external_void_func_v3i32_i32_inreg(<3 x i32> <i32 3, i32 4, i32 5>, i32 6)
   ret void
 }
 
@@ -5184,7 +5184,7 @@ define amdgpu_gfx void @test_call_external_void_func_v4i32_inreg() #0 {
 ; GFX10-NEXT:    s_waitcnt vmcnt(0)
 ; GFX10-NEXT:    s_setpc_b64 s[4:5]
   %val = load <4 x i32>, <4 x i32> addrspace(4)* undef
-  call amdgpu_gfx void @external_void_func_v4i32_inreg(<4 x i32> inreg %val)
+  call amdgpu_gfx void @external_void_func_v4i32_inreg(<4 x i32> %val)
   ret void
 }
 
@@ -5249,7 +5249,7 @@ define amdgpu_gfx void @test_call_external_void_func_v4i32_imm_inreg() #0 {
 ; GFX10-NEXT:    s_mov_b32 exec_lo, s6
 ; GFX10-NEXT:    s_waitcnt vmcnt(0)
 ; GFX10-NEXT:    s_setpc_b64 s[4:5]
-  call amdgpu_gfx void @external_void_func_v4i32_inreg(<4 x i32> inreg <i32 1, i32 2, i32 3, i32 4>)
+  call amdgpu_gfx void @external_void_func_v4i32_inreg(<4 x i32> <i32 1, i32 2, i32 3, i32 4>)
   ret void
 }
 
@@ -5316,7 +5316,7 @@ define amdgpu_gfx void @test_call_external_void_func_v5i32_imm_inreg() #0 {
 ; GFX10-NEXT:    s_mov_b32 exec_lo, s6
 ; GFX10-NEXT:    s_waitcnt vmcnt(0)
 ; GFX10-NEXT:    s_setpc_b64 s[4:5]
-  call amdgpu_gfx void @external_void_func_v5i32_inreg(<5 x i32> inreg <i32 1, i32 2, i32 3, i32 4, i32 5>)
+  call amdgpu_gfx void @external_void_func_v5i32_inreg(<5 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5>)
   ret void
 }
 
@@ -5381,7 +5381,7 @@ define amdgpu_gfx void @test_call_external_void_func_v8i32_inreg() #0 {
 ; GFX10-NEXT:    s_setpc_b64 s[4:5]
   %ptr = load <8 x i32> addrspace(4)*, <8 x i32> addrspace(4)* addrspace(4)* undef
   %val = load <8 x i32>, <8 x i32> addrspace(4)* %ptr
-  call amdgpu_gfx void @external_void_func_v8i32_inreg(<8 x i32> inreg %val)
+  call amdgpu_gfx void @external_void_func_v8i32_inreg(<8 x i32> %val)
   ret void
 }
 
@@ -5454,7 +5454,7 @@ define amdgpu_gfx void @test_call_external_void_func_v8i32_imm_inreg() #0 {
 ; GFX10-NEXT:    s_mov_b32 exec_lo, s6
 ; GFX10-NEXT:    s_waitcnt vmcnt(0)
 ; GFX10-NEXT:    s_setpc_b64 s[4:5]
-  call amdgpu_gfx void @external_void_func_v8i32_inreg(<8 x i32> inreg <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8>)
+  call amdgpu_gfx void @external_void_func_v8i32_inreg(<8 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8>)
   ret void
 }
 
@@ -5519,7 +5519,7 @@ define amdgpu_gfx void @test_call_external_void_func_v16i32_inreg() #0 {
 ; GFX10-NEXT:    s_setpc_b64 s[4:5]
   %ptr = load <16 x i32> addrspace(4)*, <16 x i32> addrspace(4)* addrspace(4)* undef
   %val = load <16 x i32>, <16 x i32> addrspace(4)* %ptr
-  call amdgpu_gfx void @external_void_func_v16i32_inreg(<16 x i32> inreg %val)
+  call amdgpu_gfx void @external_void_func_v16i32_inreg(<16 x i32> %val)
   ret void
 }
 
@@ -5697,7 +5697,7 @@ define amdgpu_gfx void @test_call_external_void_func_v32i32_inreg() #0 {
 ; GFX10-NEXT:    s_setpc_b64 s[4:5]
   %ptr = load <32 x i32> addrspace(4)*, <32 x i32> addrspace(4)* addrspace(4)* undef
   %val = load <32 x i32>, <32 x i32> addrspace(4)* %ptr
-  call amdgpu_gfx void @external_void_func_v32i32_inreg(<32 x i32> inreg %val)
+  call amdgpu_gfx void @external_void_func_v32i32_inreg(<32 x i32> %val)
   ret void
 }
 
@@ -5883,7 +5883,7 @@ define amdgpu_gfx void @test_call_external_void_func_v32i32_i32_inreg(i32) #0 {
   %ptr0 = load <32 x i32> addrspace(4)*, <32 x i32> addrspace(4)* addrspace(4)* undef
   %val0 = load <32 x i32>, <32 x i32> addrspace(4)* %ptr0
   %val1 = load i32, i32 addrspace(4)* undef
-  call amdgpu_gfx void @external_void_func_v32i32_i32_inreg(<32 x i32> inreg %val0, i32 inreg %val1)
+  call amdgpu_gfx void @external_void_func_v32i32_i32_inreg(<32 x i32> %val0, i32 %val1)
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/AMDGPU/tail-call-amdgpu-gfx.ll b/llvm/test/CodeGen/AMDGPU/tail-call-amdgpu-gfx.ll
index e6de73faefbe5..246f06a84bfa4 100644
--- a/llvm/test/CodeGen/AMDGPU/tail-call-amdgpu-gfx.ll
+++ b/llvm/test/CodeGen/AMDGPU/tail-call-amdgpu-gfx.ll
@@ -24,6 +24,6 @@ define amdgpu_gfx float @caller(float %arg0) {
 ; GCN-NEXT:    s_addc_u32 s7, s7, callee at rel32@hi+12
 ; GCN-NEXT:    s_setpc_b64 s[6:7]
   %add = fadd float %arg0, 1.0
-  %call = tail call amdgpu_gfx float @callee(float %add, float inreg 2.0)
+  %call = tail call amdgpu_gfx float @callee(float %add, float 2.0)
   ret float %call
 }

diff  --git a/llvm/test/CodeGen/ARM/ipra-r0-returned.ll b/llvm/test/CodeGen/ARM/ipra-r0-returned.ll
index 212ff9f74261b..cd3069c0c5819 100644
--- a/llvm/test/CodeGen/ARM/ipra-r0-returned.ll
+++ b/llvm/test/CodeGen/ARM/ipra-r0-returned.ll
@@ -13,6 +13,6 @@ entry:
 ; CHECK-NOT: r0
 ; CHECK: bl      returns_r0
 ; CHECK-NOT: r0
-  %b = call i32 @returns_r0(i32 returned %a)
+  %b = call i32 @returns_r0(i32 %a)
   ret i32 %a
 }

diff  --git a/llvm/test/CodeGen/ARM/returned-ext.ll b/llvm/test/CodeGen/ARM/returned-ext.ll
index 8832b3f371914..da3511b9c7817 100644
--- a/llvm/test/CodeGen/ARM/returned-ext.ll
+++ b/llvm/test/CodeGen/ARM/returned-ext.ll
@@ -22,9 +22,9 @@ entry:
 ; CHECKT2D: uxth r0, r0
 ; CHECKT2D: bl _identity32
 ; CHECKT2D: mov r0, [[SAVEX]]
-  %call = tail call i16 @identity16(i16 returned %x)
+  %call = tail call i16 @identity16(i16 %x)
   %b = zext i16 %call to i32
-  %call2 = tail call i32 @identity32(i32 returned %b)
+  %call2 = tail call i32 @identity32(i32 %b)
   ret i16 %x
 }
 
@@ -56,9 +56,9 @@ entry:
 ; This shouldn't be required
 ; CHECKT2D: mov r0, [[SAVEX]]
 
-  %call = tail call i16 @retzext16(i16 returned %x)
+  %call = tail call i16 @retzext16(i16 %x)
   %b = zext i16 %call to i32
-  %call2 = tail call i32 @identity32(i32 returned %b)
+  %call2 = tail call i32 @identity32(i32 %b)
   ret i16 %x
 }
 
@@ -76,9 +76,9 @@ entry:
 ; CHECKT2D: sxth r0, {{r[0-9]+}}
 ; CHECKT2D: bl _identity32
 ; CHECKT2D: mov r0, [[SAVEX]]
-  %call = tail call i16 @retzext16(i16 returned %x)
+  %call = tail call i16 @retzext16(i16 %x)
   %b = sext i16 %call to i32
-  %call2 = tail call i32 @identity32(i32 returned %b)
+  %call2 = tail call i32 @identity32(i32 %b)
   ret i16 %x
 }
 
@@ -96,10 +96,10 @@ entry:
 ; CHECKT2D: uxth r0, r0
 ; CHECKT2D: bl _identity32
 ; CHECKT2D: b.w _paramzext16
-  %call = tail call i16 @paramzext16(i16 zeroext returned %x)
+  %call = tail call i16 @paramzext16(i16 %x)
   %b = zext i16 %call to i32
-  %call2 = tail call i32 @identity32(i32 returned %b)
-  %call3 = tail call i16 @paramzext16(i16 zeroext returned %call)
+  %call2 = tail call i32 @identity32(i32 %b)
+  %call3 = tail call i16 @paramzext16(i16 %call)
   ret i16 %call3
 }
 
@@ -121,13 +121,13 @@ entry:
 ; CHECKT2D: bl _paramzext16
 ; CHECKT2D: bl _identity32
 ; CHECKT2D: b.w _paramzext16
-  %call = tail call i16 @paramzext16(i16 zeroext returned %x)
+  %call = tail call i16 @paramzext16(i16 %x)
 
; Should make no difference if %x is used below rather than %call, but it does
   %b = zext i16 %x to i32
 
   %call2 = tail call i32 @identity32(i32 %b)
-  %call3 = tail call i16 @paramzext16(i16 zeroext returned %call)
+  %call3 = tail call i16 @paramzext16(i16 %call)
   ret i16 %call3
 }
 
@@ -149,9 +149,9 @@ entry:
 ; FIXME: Tail call should be OK here
 ; CHECKT2D: bl _identity32
 
-  %call = tail call i16 @bothzext16(i16 zeroext returned %x)
+  %call = tail call i16 @bothzext16(i16 %x)
   %b = zext i16 %x to i32
-  %call2 = tail call i32 @identity32(i32 returned %b)
+  %call2 = tail call i32 @identity32(i32 %b)
   ret i16 %call
 }
 
@@ -171,8 +171,8 @@ entry:
 ; CHECKT2D: sxth r0, [[SAVEX]]
 ; CHECKT2D: bl _identity32
 ; CHECKT2D: mov r0, [[SAVEX]]
-  %call = tail call i16 @bothzext16(i16 zeroext returned %x)
+  %call = tail call i16 @bothzext16(i16 %x)
   %b = sext i16 %x to i32
-  %call2 = tail call i32 @identity32(i32 returned %b)
+  %call2 = tail call i32 @identity32(i32 %b)
   ret i16 %x
 }

diff  --git a/llvm/test/CodeGen/ARM/this-return.ll b/llvm/test/CodeGen/ARM/this-return.ll
index 4c9c94e932617..bccb4e5c7c713 100644
--- a/llvm/test/CodeGen/ARM/this-return.ll
+++ b/llvm/test/CodeGen/ARM/this-return.ll
@@ -28,9 +28,9 @@ entry:
 ; CHECKT2D-NOT: mov r0, {{r[0-9]+}}
 ; CHECKT2D: b.w _B_ctor_base
   %0 = bitcast %struct.C* %this to %struct.A*
-  %call = tail call %struct.A* @A_ctor_base(%struct.A* returned %0)
+  %call = tail call %struct.A* @A_ctor_base(%struct.A* %0)
   %1 = getelementptr inbounds %struct.C, %struct.C* %this, i32 0, i32 0
-  %call2 = tail call %struct.B* @B_ctor_base(%struct.B* returned %1, i32 %x)
+  %call2 = tail call %struct.B* @B_ctor_base(%struct.B* %1, i32 %x)
   ret %struct.C* %this
 }
 
@@ -59,7 +59,7 @@ entry:
 ; CHECKELF: b C_ctor_base
 ; CHECKT2D-LABEL: C_ctor_complete:
 ; CHECKT2D: b.w _C_ctor_base
-  %call = tail call %struct.C* @C_ctor_base(%struct.C* returned %this, i32 %x)
+  %call = tail call %struct.C* @C_ctor_base(%struct.C* %this, i32 %x)
   ret %struct.C* %this
 }
 
@@ -86,8 +86,8 @@ entry:
 ; CHECKT2D-NOT: mov r0, {{r[0-9]+}}
 ; CHECKT2D: b.w _B_ctor_complete
   %b = getelementptr inbounds %struct.D, %struct.D* %this, i32 0, i32 0
-  %call = tail call %struct.B* @B_ctor_complete(%struct.B* returned %b, i32 %x)
-  %call2 = tail call %struct.B* @B_ctor_complete(%struct.B* returned %b, i32 %x)
+  %call = tail call %struct.B* @B_ctor_complete(%struct.B* %b, i32 %x)
+  %call2 = tail call %struct.B* @B_ctor_complete(%struct.B* %b, i32 %x)
   ret %struct.D* %this
 }
 
@@ -98,8 +98,8 @@ entry:
 ; CHECKT2D-LABEL: E_ctor_base:
 ; CHECKT2D-NOT: b.w _B_ctor_complete
   %b = getelementptr inbounds %struct.E, %struct.E* %this, i32 0, i32 0
-  %call = tail call %struct.B* @B_ctor_complete(%struct.B* returned %b, i32 %x)
+  %call = tail call %struct.B* @B_ctor_complete(%struct.B* %b, i32 %x)
   %b2 = getelementptr inbounds %struct.E, %struct.E* %this, i32 0, i32 1
-  %call2 = tail call %struct.B* @B_ctor_complete(%struct.B* returned %b2, i32 %x)
+  %call2 = tail call %struct.B* @B_ctor_complete(%struct.B* %b2, i32 %x)
   ret %struct.E* %this
 }

diff  --git a/llvm/test/CodeGen/SPARC/64abi.ll b/llvm/test/CodeGen/SPARC/64abi.ll
index 6b181d8b34329..c167d0647ecb4 100644
--- a/llvm/test/CodeGen/SPARC/64abi.ll
+++ b/llvm/test/CodeGen/SPARC/64abi.ll
@@ -50,7 +50,7 @@ define void @intarg(i8  %a0,   ; %i0
 ; CHECK-NOT: add %sp
 ; CHECK: restore
 define void @call_intarg(i32 %i0, i8* %i1) {
-  call void @intarg(i8 0, i8 1, i16 2, i32 3, i8* undef, i32 5, i32 signext %i0, i8* %i1)
+  call void @intarg(i8 0, i8 1, i16 2, i32 3, i8* undef, i32 5, i32 %i0, i8* %i1)
   ret void
 }
 
@@ -222,7 +222,7 @@ define i32 @inreg_fi(i32 inreg %a0,     ; high bits of %i0
 ; SOFT:  or %i1, %i0, %o0
 ; CHECK: call inreg_fi
 define void @call_inreg_fi(i32* %p, i32 %i1, float %f5) {
-  %x = call i32 @inreg_fi(i32 inreg %i1, float inreg %f5)
+  %x = call i32 @inreg_fi(i32 %i1, float %f5)
   ret void
 }
 
@@ -245,7 +245,7 @@ define float @inreg_ff(float inreg %a0,   ; %f0
 ; SOFT: or %i1, %i0, %o0
 ; CHECK: call inreg_ff
 define void @call_inreg_ff(i32* %p, float %f3, float %f5) {
-  %x = call float @inreg_ff(float inreg %f3, float inreg %f5)
+  %x = call float @inreg_ff(float %f3, float %f5)
   ret void
 }
 
@@ -269,7 +269,7 @@ define i32 @inreg_if(float inreg %a0, ; %f0
 ; SOFT: or %i1, %i0, %o0
 ; CHECK: call inreg_if
 define void @call_inreg_if(i32* %p, float %f3, i32 %i2) {
-  %x = call i32 @inreg_if(float inreg %f3, i32 inreg %i2)
+  %x = call i32 @inreg_if(float %f3, i32 %i2)
   ret void
 }
 
@@ -289,7 +289,7 @@ define i32 @inreg_ii(i32 inreg %a0,   ; high bits of %i0
 ; CHECK: or [[R1]], [[R2]], %o0
 ; CHECK: call inreg_ii
 define void @call_inreg_ii(i32* %p, i32 %i1, i32 %i2) {
-  %x = call i32 @inreg_ii(i32 inreg %i1, i32 inreg %i2)
+  %x = call i32 @inreg_ii(i32 %i1, i32 %i2)
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/SystemZ/args-02.ll b/llvm/test/CodeGen/SystemZ/args-02.ll
index cd07b2c91700b..89b080e821bf4 100644
--- a/llvm/test/CodeGen/SystemZ/args-02.ll
+++ b/llvm/test/CodeGen/SystemZ/args-02.ll
@@ -66,9 +66,9 @@ define void @foo() {
 ; CHECK-STACK: mvghi 160(%r15), -5
 ; CHECK-STACK: brasl %r14, bar at PLT
 
-  call void @bar (i8 signext -1, i16 signext -2, i32 signext -3, i64 -4, float 0.0, double 0.0,
+  call void @bar (i8 -1, i16 -2, i32 -3, i64 -4, float 0.0, double 0.0,
                   fp128 0xL00000000000000000000000000000000, i64 -5,
-                  float -0.0, double -0.0, i8 signext -6, i16 signext -7, i32 signext -8, i64 -9,
+                  float -0.0, double -0.0, i8 -6, i16 -7, i32 -8, i64 -9,
                   float 0.0, double 0.0,
                   fp128 0xL00000000000000000000000000000000)
   ret void

diff  --git a/llvm/test/CodeGen/SystemZ/args-03.ll b/llvm/test/CodeGen/SystemZ/args-03.ll
index 97d5bcde34b26..a52782f4c1836 100644
--- a/llvm/test/CodeGen/SystemZ/args-03.ll
+++ b/llvm/test/CodeGen/SystemZ/args-03.ll
@@ -68,9 +68,9 @@ define void @foo() {
 ; CHECK-STACK: mvghi 160(%r15), -5
 ; CHECK-STACK: brasl %r14, bar at PLT
 
-  call void @bar (i8 zeroext -1, i16 zeroext -2, i32 zeroext -3, i64 -4, float 0.0, double 0.0,
+  call void @bar (i8 -1, i16 -2, i32 -3, i64 -4, float 0.0, double 0.0,
                   fp128 0xL00000000000000000000000000000000, i64 -5,
-                  float -0.0, double -0.0, i8 zeroext -6, i16 zeroext -7, i32 zeroext -8, i64 -9,
+                  float -0.0, double -0.0, i8 -6, i16 -7, i32 -8, i64 -9,
                   float 0.0, double 0.0,
                   fp128 0xL00000000000000000000000000000000)
   ret void

diff  --git a/llvm/test/CodeGen/X86/fast-cc-merge-stack-adj.ll b/llvm/test/CodeGen/X86/fast-cc-merge-stack-adj.ll
index c518935affb63..436be5b7a736a 100644
--- a/llvm/test/CodeGen/X86/fast-cc-merge-stack-adj.ll
+++ b/llvm/test/CodeGen/X86/fast-cc-merge-stack-adj.ll
@@ -7,7 +7,7 @@ declare x86_fastcallcc void @func(i32*, i64 inreg)
 
 define x86_fastcallcc void @caller(i32, i64) {
         %X = alloca i32         ; <i32*> [#uses=1]
-        call x86_fastcallcc void @func( i32* %X, i64 inreg 0 )
+        call x86_fastcallcc void @func( i32* %X, i64 0 )
         ret void
 }
 

diff  --git a/llvm/test/CodeGen/X86/fast-cc-pass-in-regs.ll b/llvm/test/CodeGen/X86/fast-cc-pass-in-regs.ll
index b3a6d909c53e3..1fe2515bef889 100644
--- a/llvm/test/CodeGen/X86/fast-cc-pass-in-regs.ll
+++ b/llvm/test/CodeGen/X86/fast-cc-pass-in-regs.ll
@@ -4,7 +4,7 @@
 declare x86_fastcallcc i64 @callee(i64 inreg)
 
 define i64 @caller() {
-        %X = call x86_fastcallcc  i64 @callee( i64 inreg 4294967299 )          ; <i64> [#uses=1]
+        %X = call x86_fastcallcc  i64 @callee( i64 4294967299 )          ; <i64> [#uses=1]
 ; CHECK: mov{{.*}}edx, 1
         ret i64 %X
 }

diff  --git a/llvm/test/CodeGen/X86/mismatched-byval.ll b/llvm/test/CodeGen/X86/mismatched-byval.ll
deleted file mode 100644
index f03e347848c6d..0000000000000
--- a/llvm/test/CodeGen/X86/mismatched-byval.ll
+++ /dev/null
@@ -1,45 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown | FileCheck %s
-
-; This tests that we only look at the call site for ABI attributes, so f and f2 should codegen differently
-
-define void @b(i8* byval(i8) %p) {
-; CHECK-LABEL: b:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    retq
-    ret void
-}
-
-define void @f(i8 %p) {
-; CHECK-LABEL: f:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    subq $24, %rsp
-; CHECK-NEXT:    .cfi_def_cfa_offset 32
-; CHECK-NEXT:    movb {{[0-9]+}}(%rsp), %al
-; CHECK-NEXT:    movb %al, (%rsp)
-; CHECK-NEXT:    callq b at PLT
-; CHECK-NEXT:    addq $24, %rsp
-; CHECK-NEXT:    .cfi_def_cfa_offset 8
-; CHECK-NEXT:    retq
-    %a = alloca i8
-    ;store i8 %p, i8* %a
-    call void @b(i8* byval(i8) %a)
-    ret void
-}
-
-define void @f2(i8 %p) {
-; CHECK-LABEL: f2:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    pushq %rax
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-NEXT:    leaq {{[0-9]+}}(%rsp), %rdi
-; CHECK-NEXT:    callq b at PLT
-; CHECK-NEXT:    popq %rax
-; CHECK-NEXT:    .cfi_def_cfa_offset 8
-; CHECK-NEXT:    retq
-    %a = alloca i8
-    ;store i8 %p, i8* %a
-    call void @b(i8* %a)
-    ret void
-}
-

diff  --git a/llvm/test/CodeGen/X86/movtopush.ll b/llvm/test/CodeGen/X86/movtopush.ll
index f693b75f81c4e..33e11235fe83b 100644
--- a/llvm/test/CodeGen/X86/movtopush.ll
+++ b/llvm/test/CodeGen/X86/movtopush.ll
@@ -107,7 +107,7 @@ entry:
 ; NORMAL-NEXT: addl $12, %esp
 define void @test4() optsize {
 entry:
-  call void @inreg(i32 1, i32 inreg 2, i32 3, i32 4)
+  call void @inreg(i32 1, i32 2, i32 3, i32 4)
   ret void
 }
 
@@ -307,9 +307,9 @@ define void @test11() optsize {
 define void @test12() optsize {
 entry:
   %s = alloca %struct.s, align 4
-  call void @struct(%struct.s* byval(%struct.s) %s, i32 2, i32 3, i32 4)
+  call void @struct(%struct.s* %s, i32 2, i32 3, i32 4)
   call void @good(i32 5, i32 6, i32 7, i32 8)
-  call void @struct(%struct.s* byval(%struct.s) %s, i32 10, i32 11, i32 12)
+  call void @struct(%struct.s* %s, i32 10, i32 11, i32 12)
   ret void
 }
 
@@ -340,7 +340,7 @@ define void @test12b() optsize {
 entry:
   %s = alloca %struct.s, align 4
   call void @good(i32 1, i32 2, i32 3, i32 4)
-  call void @struct(%struct.s* byval(%struct.s) %s, i32 6, i32 7, i32 8)
+  call void @struct(%struct.s* %s, i32 6, i32 7, i32 8)
   call void @good(i32 9, i32 10, i32 11, i32 12)
   ret void
 }
@@ -413,7 +413,7 @@ entry:
   %0 = bitcast %struct.A* %a to i64*
   %1 = load i64, i64* %0, align 4
   store i64 %1, i64* %agg.tmp, align 4
-  %call = call x86_thiscallcc %struct.B* @B_ctor(%struct.B* returned %ref.tmp, %struct.A* byval(%struct.A) %tmpcast)
+  %call = call x86_thiscallcc %struct.B* @B_ctor(%struct.B* %ref.tmp, %struct.A* byval(%struct.A) %tmpcast)
   %2 = getelementptr inbounds %struct.B, %struct.B* %tmp, i32 0, i32 0
   call void @B_func(%struct.B* sret(%struct.B) %tmp, %struct.B* %ref.tmp, i32 1)
   ret void

diff  --git a/llvm/test/CodeGen/X86/pop-stack-cleanup.ll b/llvm/test/CodeGen/X86/pop-stack-cleanup.ll
index 050fb41ed218f..f81d911ea31b1 100644
--- a/llvm/test/CodeGen/X86/pop-stack-cleanup.ll
+++ b/llvm/test/CodeGen/X86/pop-stack-cleanup.ll
@@ -60,7 +60,7 @@ define void @spill(i32 inreg %a, i32 inreg %b, i32 inreg %c) minsize nounwind {
 ; CHECK-DAG: movl {{.*}}, %edx
 ; CHECK: calll _spill
   %i = call i32 @param2_ret(i32 1, i32 2)
-  call void @spill(i32 inreg %a, i32 inreg %b, i32 inreg %c)
+  call void @spill(i32 %a, i32 %b, i32 %c)
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/X86/preallocated.ll b/llvm/test/CodeGen/X86/preallocated.ll
index 505be974aa86d..4b6bdbb6fb7dd 100644
--- a/llvm/test/CodeGen/X86/preallocated.ll
+++ b/llvm/test/CodeGen/X86/preallocated.ll
@@ -129,11 +129,11 @@ define void @nested_with_init() {
 ; CHECK: pushl [[REGISTER2]]
 ; CHECK: calll _init
 
-  call void @foo_ret_p(%Foo* sret(%Foo) %b1, %Foo* preallocated(%Foo) %b2) ["preallocated"(token %t2)]
+  call void @foo_ret_p(%Foo* %b1, %Foo* preallocated(%Foo) %b2) ["preallocated"(token %t2)]
 ; CHECK-NOT: subl {{\$[0-9]+}}, %esp
 ; CHECK-NOT: pushl
 ; CHECK: calll _foo_ret_p
-  call void @foo_ret_p(%Foo* sret(%Foo) %tmp, %Foo* preallocated(%Foo) %b1) ["preallocated"(token %t1)]
+  call void @foo_ret_p(%Foo* %tmp, %Foo* preallocated(%Foo) %b1) ["preallocated"(token %t1)]
 ; CHECK-NOT: subl {{\$[0-9]+}}, %esp
 ; CHECK-NOT: pushl
 ; CHECK: calll _foo_ret_p
@@ -150,7 +150,7 @@ define void @inreg() {
 ; CHECK: subl $8, %esp
 ; CHECK: movl $9, %eax
 ; CHECK: calll _foo_inreg_p
-  call void @foo_inreg_p(i32 inreg 9, %Foo* preallocated(%Foo) %b) ["preallocated"(token %t)]
+  call void @foo_inreg_p(i32 9, %Foo* preallocated(%Foo) %b) ["preallocated"(token %t)]
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/X86/tailcall-msvc-conventions.ll b/llvm/test/CodeGen/X86/tailcall-msvc-conventions.ll
index 77fe468e6f33d..98b02c9c07e82 100644
--- a/llvm/test/CodeGen/X86/tailcall-msvc-conventions.ll
+++ b/llvm/test/CodeGen/X86/tailcall-msvc-conventions.ll
@@ -181,7 +181,7 @@ define x86_stdcallcc void @stdcall_thiscall_tail(i32 %a, i32 %b) {
 
 declare x86_fastcallcc void @fastcall2(i32 inreg %a, i32 inreg %b)
 define void @cdecl_fastcall_tail(i32 %a, i32 %b) {
-  tail call x86_fastcallcc void @fastcall2(i32 inreg %a, i32 inreg %b)
+  tail call x86_fastcallcc void @fastcall2(i32 %a, i32 %b)
   ret void
 }
 ; fastcall2 won't pop anything.


        


More information about the llvm-commits mailing list