[llvm] r311279 - [GlobalISel][X86] Support call ABI.
Igor Breger via llvm-commits
llvm-commits at lists.llvm.org
Sun Aug 20 02:25:22 PDT 2017
Author: ibreger
Date: Sun Aug 20 02:25:22 2017
New Revision: 311279
URL: http://llvm.org/viewvc/llvm-project?rev=311279&view=rev
Log:
[GlobalISel][X86] Support call ABI.
Summary: Support call ABI. For now, only the Linux C and X86_64_SysV calling conventions are supported; variadic functions are not supported.
Reviewers: zvi, guyblank, oren_ben_simhon
Reviewed By: oren_ben_simhon
Subscribers: rovka, kristof.beyls, llvm-commits
Differential Revision: https://reviews.llvm.org/D34602
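
For reference, a minimal example of the kind of call this patch lets GlobalISel lower on Linux x86/x86-64 (variants of it appear in the tests below):

  declare i32 @simple_return_callee(i32 %in0)

  define i32 @caller() {
    %call = call i32 @simple_return_callee(i32 5)
    ret i32 %call
  }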
Modified:
llvm/trunk/lib/Target/X86/X86CallLowering.cpp
llvm/trunk/lib/Target/X86/X86CallLowering.h
llvm/trunk/test/CodeGen/X86/GlobalISel/callingconv.ll
llvm/trunk/test/CodeGen/X86/GlobalISel/irtranslator-callingconv.ll
Modified: llvm/trunk/lib/Target/X86/X86CallLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86CallLowering.cpp?rev=311279&r1=311278&r2=311279&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86CallLowering.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86CallLowering.cpp Sun Aug 20 02:25:22 2017
@@ -21,6 +21,7 @@
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
+#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineValueType.h"
#include "llvm/Target/TargetSubtargetInfo.h"
@@ -78,14 +79,29 @@ bool X86CallLowering::splitToValueTypes(
}
namespace {
-struct FuncReturnHandler : public CallLowering::ValueHandler {
- FuncReturnHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
- MachineInstrBuilder &MIB, CCAssignFn *AssignFn)
- : ValueHandler(MIRBuilder, MRI, AssignFn), MIB(MIB) {}
+struct OutgoingValueHandler : public CallLowering::ValueHandler {
+ OutgoingValueHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
+ MachineInstrBuilder &MIB, CCAssignFn *AssignFn)
+ : ValueHandler(MIRBuilder, MRI, AssignFn), MIB(MIB), StackSize(0),
+ DL(MIRBuilder.getMF().getDataLayout()),
+ STI(MIRBuilder.getMF().getSubtarget<X86Subtarget>()) {}
unsigned getStackAddress(uint64_t Size, int64_t Offset,
MachinePointerInfo &MPO) override {
- llvm_unreachable("Don't know how to get a stack address yet");
+
+ LLT p0 = LLT::pointer(0, DL.getPointerSizeInBits(0));
+ LLT SType = LLT::scalar(DL.getPointerSizeInBits(0));
+ unsigned SPReg = MRI.createGenericVirtualRegister(p0);
+ MIRBuilder.buildCopy(SPReg, STI.getRegisterInfo()->getStackRegister());
+
+ unsigned OffsetReg = MRI.createGenericVirtualRegister(SType);
+ MIRBuilder.buildConstant(OffsetReg, Offset);
+
+ unsigned AddrReg = MRI.createGenericVirtualRegister(p0);
+ MIRBuilder.buildGEP(AddrReg, SPReg, OffsetReg);
+
+ MPO = MachinePointerInfo::getStack(MIRBuilder.getMF(), Offset);
+ return AddrReg;
}
void assignValueToReg(unsigned ValVReg, unsigned PhysReg,
@@ -97,10 +113,33 @@ struct FuncReturnHandler : public CallLo
void assignValueToAddress(unsigned ValVReg, unsigned Addr, uint64_t Size,
MachinePointerInfo &MPO, CCValAssign &VA) override {
- llvm_unreachable("Don't know how to assign a value to an address yet");
+
+ unsigned ExtReg = extendRegister(ValVReg, VA);
+ auto MMO = MIRBuilder.getMF().getMachineMemOperand(
+ MPO, MachineMemOperand::MOStore, VA.getLocVT().getStoreSize(),
+ /* Alignment */ 0);
+ MIRBuilder.buildStore(ExtReg, Addr, *MMO);
}
+ bool assignArg(unsigned ValNo, MVT ValVT, MVT LocVT,
+ CCValAssign::LocInfo LocInfo,
+ const CallLowering::ArgInfo &Info, CCState &State) override {
+
+ if (!Info.IsFixed)
+ return true; // TODO: handle variadic function
+
+ bool Res = AssignFn(ValNo, ValVT, LocVT, LocInfo, Info.Flags, State);
+ StackSize = State.getNextStackOffset();
+ return Res;
+ }
+
+ uint64_t getStackSize() { return StackSize; }
+
+protected:
MachineInstrBuilder &MIB;
+ uint64_t StackSize;
+ const DataLayout &DL;
+ const X86Subtarget &STI;
};
} // End anonymous namespace.
@@ -127,7 +166,7 @@ bool X86CallLowering::lowerReturn(Machin
}))
return false;
- FuncReturnHandler Handler(MIRBuilder, MRI, MIB, RetCC_X86);
+ OutgoingValueHandler Handler(MIRBuilder, MRI, MIB, RetCC_X86);
if (!handleAssignments(MIRBuilder, SplitArgs, Handler))
return false;
}
@@ -137,10 +176,11 @@ bool X86CallLowering::lowerReturn(Machin
}
namespace {
-struct FormalArgHandler : public CallLowering::ValueHandler {
- FormalArgHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
- CCAssignFn *AssignFn, const DataLayout &DL)
- : ValueHandler(MIRBuilder, MRI, AssignFn), DL(DL) {}
+struct IncomingValueHandler : public CallLowering::ValueHandler {
+ IncomingValueHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
+ CCAssignFn *AssignFn)
+ : ValueHandler(MIRBuilder, MRI, AssignFn),
+ DL(MIRBuilder.getMF().getDataLayout()) {}
unsigned getStackAddress(uint64_t Size, int64_t Offset,
MachinePointerInfo &MPO) override {
@@ -164,14 +204,37 @@ struct FormalArgHandler : public CallLow
MIRBuilder.buildLoad(ValVReg, Addr, *MMO);
}
+protected:
+ const DataLayout &DL;
+};
+
+struct FormalArgHandler : public IncomingValueHandler {
+ FormalArgHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
+ CCAssignFn *AssignFn)
+ : IncomingValueHandler(MIRBuilder, MRI, AssignFn) {}
+
void assignValueToReg(unsigned ValVReg, unsigned PhysReg,
CCValAssign &VA) override {
MIRBuilder.getMBB().addLiveIn(PhysReg);
MIRBuilder.buildCopy(ValVReg, PhysReg);
}
+};
- const DataLayout &DL;
+struct CallReturnHandler : public IncomingValueHandler {
+ CallReturnHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
+ CCAssignFn *AssignFn, MachineInstrBuilder &MIB)
+ : IncomingValueHandler(MIRBuilder, MRI, AssignFn), MIB(MIB) {}
+
+ void assignValueToReg(unsigned ValVReg, unsigned PhysReg,
+ CCValAssign &VA) override {
+ MIB.addDef(PhysReg, RegState::Implicit);
+ MIRBuilder.buildCopy(ValVReg, PhysReg);
+ }
+
+protected:
+ MachineInstrBuilder &MIB;
};
+
} // namespace
bool X86CallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder,
@@ -215,7 +278,7 @@ bool X86CallLowering::lowerFormalArgumen
if (!MBB.empty())
MIRBuilder.setInstr(*MBB.begin());
- FormalArgHandler Handler(MIRBuilder, MRI, CC_X86, DL);
+ FormalArgHandler Handler(MIRBuilder, MRI, CC_X86);
if (!handleAssignments(MIRBuilder, SplitArgs, Handler))
return false;
@@ -224,3 +287,94 @@ bool X86CallLowering::lowerFormalArgumen
return true;
}
+
+bool X86CallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
+ CallingConv::ID CallConv,
+ const MachineOperand &Callee,
+ const ArgInfo &OrigRet,
+ ArrayRef<ArgInfo> OrigArgs) const {
+
+ MachineFunction &MF = MIRBuilder.getMF();
+ const Function &F = *MF.getFunction();
+ MachineRegisterInfo &MRI = MF.getRegInfo();
+ auto &DL = F.getParent()->getDataLayout();
+ const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
+ const TargetInstrInfo &TII = *STI.getInstrInfo();
+ auto TRI = STI.getRegisterInfo();
+
+ // Handle only Linux C, X86_64_SysV calling conventions for now.
+ if (!STI.isTargetLinux() ||
+ !(CallConv == CallingConv::C || CallConv == CallingConv::X86_64_SysV))
+ return false;
+
+ unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
+ auto CallSeqStart = MIRBuilder.buildInstr(AdjStackDown);
+
+ // Create a temporarily-floating call instruction so we can add the implicit
+ // uses of arg registers.
+ bool Is64Bit = STI.is64Bit();
+ unsigned CallOpc = Callee.isReg()
+ ? (Is64Bit ? X86::CALL64r : X86::CALL32r)
+ : (Is64Bit ? X86::CALL64pcrel32 : X86::CALLpcrel32);
+
+ auto MIB = MIRBuilder.buildInstrNoInsert(CallOpc).add(Callee).addRegMask(
+ TRI->getCallPreservedMask(MF, CallConv));
+
+ SmallVector<ArgInfo, 8> SplitArgs;
+ for (const auto &OrigArg : OrigArgs) {
+ if (!splitToValueTypes(OrigArg, SplitArgs, DL, MRI,
+ [&](ArrayRef<unsigned> Regs) {
+ MIRBuilder.buildUnmerge(Regs, OrigArg.Reg);
+ }))
+ return false;
+ }
+ // Do the actual argument marshalling.
+ OutgoingValueHandler Handler(MIRBuilder, MRI, MIB, CC_X86);
+ if (!handleAssignments(MIRBuilder, SplitArgs, Handler))
+ return false;
+
+ // Now we can add the actual call instruction to the correct basic block.
+ MIRBuilder.insertInstr(MIB);
+
+ // If Callee is a reg, since it is used by a target specific
+ // instruction, it must have a register class matching the
+ // constraint of that instruction.
+ if (Callee.isReg())
+ MIB->getOperand(0).setReg(constrainOperandRegClass(
+ MF, *TRI, MRI, *MF.getSubtarget().getInstrInfo(),
+ *MF.getSubtarget().getRegBankInfo(), *MIB, MIB->getDesc(),
+ Callee.getReg(), 0));
+
+ // Finally we can copy the returned value back into its virtual-register. In
+ // symmetry with the arguments, the physical register must be an
+ // implicit-define of the call instruction.
+
+ if (OrigRet.Reg) {
+ SplitArgs.clear();
+ SmallVector<unsigned, 8> NewRegs;
+
+ if (!splitToValueTypes(OrigRet, SplitArgs, DL, MRI,
+ [&](ArrayRef<unsigned> Regs) {
+ NewRegs.assign(Regs.begin(), Regs.end());
+ }))
+ return false;
+
+ CallReturnHandler Handler(MIRBuilder, MRI, RetCC_X86, MIB);
+ if (!handleAssignments(MIRBuilder, SplitArgs, Handler))
+ return false;
+
+ if (!NewRegs.empty())
+ MIRBuilder.buildMerge(OrigRet.Reg, NewRegs);
+ }
+
+ CallSeqStart.addImm(Handler.getStackSize())
+ .addImm(0 /* see getFrameTotalSize */)
+ .addImm(0 /* see getFrameAdjustment */);
+
+ unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
+ MIRBuilder.buildInstr(AdjStackUp)
+ .addImm(Handler.getStackSize())
+ .addImm(0 /* NumBytesForCalleeToPop */);
+
+ return true;
+}
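
For a trivial direct call, the net effect of lowerCall is the following MIR skeleton: a call-frame-setup pseudo, the call itself carrying the register mask plus any implicit argument uses and return defs, and a call-frame-destroy pseudo with the stack size computed by the handler. This is exactly what the irtranslator test below checks for the X64 run:

  ADJCALLSTACKDOWN64 0, 0, 0, implicit-def %rsp, implicit-def %eflags, implicit %rsp
  CALL64pcrel32 @trivial_callee, csr_64, implicit %rsp
  ADJCALLSTACKUP64 0, 0, implicit-def %rsp, implicit-def %eflags, implicit %rsp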
Modified: llvm/trunk/lib/Target/X86/X86CallLowering.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86CallLowering.h?rev=311279&r1=311278&r2=311279&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86CallLowering.h (original)
+++ llvm/trunk/lib/Target/X86/X86CallLowering.h Sun Aug 20 02:25:22 2017
@@ -35,6 +35,10 @@ public:
bool lowerFormalArguments(MachineIRBuilder &MIRBuilder, const Function &F,
ArrayRef<unsigned> VRegs) const override;
+ bool lowerCall(MachineIRBuilder &MIRBuilder, CallingConv::ID CallConv,
+ const MachineOperand &Callee, const ArgInfo &OrigRet,
+ ArrayRef<ArgInfo> OrigArgs) const override;
+
private:
/// A function of this type is used to perform value split action.
typedef std::function<void(ArrayRef<unsigned>)> SplitArgTy;
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/callingconv.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/callingconv.ll?rev=311279&r1=311278&r2=311279&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/callingconv.ll (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/callingconv.ll Sun Aug 20 02:25:22 2017
@@ -1,8 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=i386-linux-gnu -mattr=+sse2 -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X32 --check-prefix=X32_GISEL
-; RUN: llc -mtriple=i386-linux-gnu -mattr=+sse2 -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X32 --check-prefix=X32_ISEL
-; RUN: llc -mtriple=x86_64-linux-gnu -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X64 --check-prefix=X64_GISEL
-; RUN: llc -mtriple=x86_64-linux-gnu -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X64 --check-prefix=X64_ISEL
+; RUN: llc -mtriple=i386-linux-gnu -mattr=+sse2 -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X32
+; RUN: llc -mtriple=x86_64-linux-gnu -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X64
define i32 @test_ret_i32() {
; X32-LABEL: test_ret_i32:
@@ -18,17 +16,11 @@ define i32 @test_ret_i32() {
}
define i64 @test_ret_i64() {
-; X32_GISEL-LABEL: test_ret_i64:
-; X32_GISEL: # BB#0:
-; X32_GISEL-NEXT: movl $4294967295, %eax # imm = 0xFFFFFFFF
-; X32_GISEL-NEXT: movl $15, %edx
-; X32_GISEL-NEXT: retl
-;
-; X32_ISEL-LABEL: test_ret_i64:
-; X32_ISEL: # BB#0:
-; X32_ISEL-NEXT: movl $-1, %eax
-; X32_ISEL-NEXT: movl $15, %edx
-; X32_ISEL-NEXT: retl
+; X32-LABEL: test_ret_i64:
+; X32: # BB#0:
+; X32-NEXT: movl $4294967295, %eax # imm = 0xFFFFFFFF
+; X32-NEXT: movl $15, %edx
+; X32-NEXT: retl
;
; X64-LABEL: test_ret_i64:
; X64: # BB#0:
@@ -101,7 +93,6 @@ define i64 @test_i64_args_8(i64 %arg1, i
; X64: # BB#0:
; X64-NEXT: movq 16(%rsp), %rax
; X64-NEXT: retq
-
ret i64 %arg8
}
@@ -118,14 +109,250 @@ define <4 x i32> @test_v4i32_args(<4 x i
ret <4 x i32> %arg2
}
-define <8 x i32> @test_v8i32_args(<8 x i32> %arg1) {
+define <8 x i32> @test_v8i32_args(<8 x i32> %arg1, <8 x i32> %arg2) {
; X32-LABEL: test_v8i32_args:
; X32: # BB#0:
+; X32-NEXT: subl $12, %esp
+; X32-NEXT: .Lcfi0:
+; X32-NEXT: .cfi_def_cfa_offset 16
+; X32-NEXT: movups 16(%esp), %xmm1
+; X32-NEXT: movaps %xmm2, %xmm0
+; X32-NEXT: addl $12, %esp
; X32-NEXT: retl
;
; X64-LABEL: test_v8i32_args:
; X64: # BB#0:
+; X64-NEXT: movaps %xmm2, %xmm0
+; X64-NEXT: movaps %xmm3, %xmm1
+; X64-NEXT: retq
+ ret <8 x i32> %arg2
+}
+
+declare void @trivial_callee()
+define void @test_trivial_call() {
+; X32-LABEL: test_trivial_call:
+; X32: # BB#0:
+; X32-NEXT: subl $12, %esp
+; X32-NEXT: .Lcfi1:
+; X32-NEXT: .cfi_def_cfa_offset 16
+; X32-NEXT: calll trivial_callee
+; X32-NEXT: addl $12, %esp
+; X32-NEXT: retl
+;
+; X64-LABEL: test_trivial_call:
+; X64: # BB#0:
+; X64-NEXT: pushq %rax
+; X64-NEXT: .Lcfi0:
+; X64-NEXT: .cfi_def_cfa_offset 16
+; X64-NEXT: callq trivial_callee
+; X64-NEXT: popq %rax
+; X64-NEXT: retq
+ call void @trivial_callee()
+ ret void
+}
+
+declare void @simple_arg_callee(i32 %in0, i32 %in1)
+define void @test_simple_arg_call(i32 %in0, i32 %in1) {
+; X32-LABEL: test_simple_arg_call:
+; X32: # BB#0:
+; X32-NEXT: subl $12, %esp
+; X32-NEXT: .Lcfi2:
+; X32-NEXT: .cfi_def_cfa_offset 16
+; X32-NEXT: movl 16(%esp), %eax
+; X32-NEXT: movl 20(%esp), %ecx
+; X32-NEXT: movl %ecx, (%esp)
+; X32-NEXT: movl %eax, 4(%esp)
+; X32-NEXT: calll simple_arg_callee
+; X32-NEXT: addl $12, %esp
+; X32-NEXT: retl
+;
+; X64-LABEL: test_simple_arg_call:
+; X64: # BB#0:
+; X64-NEXT: pushq %rax
+; X64-NEXT: .Lcfi1:
+; X64-NEXT: .cfi_def_cfa_offset 16
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: movl %esi, %edi
+; X64-NEXT: movl %eax, %esi
+; X64-NEXT: callq simple_arg_callee
+; X64-NEXT: popq %rax
+; X64-NEXT: retq
+ call void @simple_arg_callee(i32 %in1, i32 %in0)
+ ret void
+}
+
+declare void @simple_arg8_callee(i32 %arg1, i32 %arg2, i32 %arg3, i32 %arg4, i32 %arg5, i32 %arg6, i32 %arg7, i32 %arg8)
+define void @test_simple_arg8_call(i32 %in0) {
+; X32-LABEL: test_simple_arg8_call:
+; X32: # BB#0:
+; X32-NEXT: subl $44, %esp
+; X32-NEXT: .Lcfi3:
+; X32-NEXT: .cfi_def_cfa_offset 48
+; X32-NEXT: movl 48(%esp), %eax
+; X32-NEXT: movl %eax, (%esp)
+; X32-NEXT: movl %eax, 4(%esp)
+; X32-NEXT: movl %eax, 8(%esp)
+; X32-NEXT: movl %eax, 12(%esp)
+; X32-NEXT: movl %eax, 16(%esp)
+; X32-NEXT: movl %eax, 20(%esp)
+; X32-NEXT: movl %eax, 24(%esp)
+; X32-NEXT: movl %eax, 28(%esp)
+; X32-NEXT: calll simple_arg8_callee
+; X32-NEXT: addl $44, %esp
+; X32-NEXT: retl
+;
+; X64-LABEL: test_simple_arg8_call:
+; X64: # BB#0:
+; X64-NEXT: subq $24, %rsp
+; X64-NEXT: .Lcfi2:
+; X64-NEXT: .cfi_def_cfa_offset 32
+; X64-NEXT: movl %edi, (%rsp)
+; X64-NEXT: movl %edi, 8(%rsp)
+; X64-NEXT: movl %edi, %esi
+; X64-NEXT: movl %edi, %edx
+; X64-NEXT: movl %edi, %ecx
+; X64-NEXT: movl %edi, %r8d
+; X64-NEXT: movl %edi, %r9d
+; X64-NEXT: callq simple_arg8_callee
+; X64-NEXT: addq $24, %rsp
+; X64-NEXT: retq
+ call void @simple_arg8_callee(i32 %in0, i32 %in0, i32 %in0, i32 %in0,i32 %in0, i32 %in0, i32 %in0, i32 %in0)
+ ret void
+}
+
+declare i32 @simple_return_callee(i32 %in0)
+define i32 @test_simple_return_callee() {
+; X32-LABEL: test_simple_return_callee:
+; X32: # BB#0:
+; X32-NEXT: subl $12, %esp
+; X32-NEXT: .Lcfi4:
+; X32-NEXT: .cfi_def_cfa_offset 16
+; X32-NEXT: movl $5, %eax
+; X32-NEXT: movl %eax, (%esp)
+; X32-NEXT: calll simple_return_callee
+; X32-NEXT: addl %eax, %eax
+; X32-NEXT: addl $12, %esp
+; X32-NEXT: retl
+;
+; X64-LABEL: test_simple_return_callee:
+; X64: # BB#0:
+; X64-NEXT: pushq %rax
+; X64-NEXT: .Lcfi3:
+; X64-NEXT: .cfi_def_cfa_offset 16
+; X64-NEXT: movl $5, %edi
+; X64-NEXT: callq simple_return_callee
+; X64-NEXT: addl %eax, %eax
+; X64-NEXT: popq %rcx
+; X64-NEXT: retq
+ %call = call i32 @simple_return_callee(i32 5)
+ %r = add i32 %call, %call
+ ret i32 %r
+}
+
+declare <8 x i32> @split_return_callee(<8 x i32> %in0)
+define <8 x i32> @test_split_return_callee(<8 x i32> %arg1, <8 x i32> %arg2) {
+; X32-LABEL: test_split_return_callee:
+; X32: # BB#0:
+; X32-NEXT: subl $44, %esp
+; X32-NEXT: .Lcfi5:
+; X32-NEXT: .cfi_def_cfa_offset 48
+; X32-NEXT: movaps %xmm0, (%esp) # 16-byte Spill
+; X32-NEXT: movaps %xmm1, 16(%esp) # 16-byte Spill
+; X32-NEXT: movdqu 48(%esp), %xmm1
+; X32-NEXT: movdqa %xmm2, %xmm0
+; X32-NEXT: calll split_return_callee
+; X32-NEXT: paddd (%esp), %xmm0 # 16-byte Folded Reload
+; X32-NEXT: paddd 16(%esp), %xmm1 # 16-byte Folded Reload
+; X32-NEXT: addl $44, %esp
+; X32-NEXT: retl
+;
+; X64-LABEL: test_split_return_callee:
+; X64: # BB#0:
+; X64-NEXT: subq $40, %rsp
+; X64-NEXT: .Lcfi4:
+; X64-NEXT: .cfi_def_cfa_offset 48
+; X64-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
+; X64-NEXT: movaps %xmm1, 16(%rsp) # 16-byte Spill
+; X64-NEXT: movdqa %xmm2, %xmm0
+; X64-NEXT: movdqa %xmm3, %xmm1
+; X64-NEXT: callq split_return_callee
+; X64-NEXT: paddd (%rsp), %xmm0 # 16-byte Folded Reload
+; X64-NEXT: paddd 16(%rsp), %xmm1 # 16-byte Folded Reload
+; X64-NEXT: addq $40, %rsp
; X64-NEXT: retq
+ %call = call <8 x i32> @split_return_callee(<8 x i32> %arg2)
+ %r = add <8 x i32> %arg1, %call
+ ret <8 x i32> %r
+}
- ret <8 x i32> %arg1
+define void @test_indirect_call(void()* %func) {
+; X32-LABEL: test_indirect_call:
+; X32: # BB#0:
+; X32-NEXT: subl $12, %esp
+; X32-NEXT: .Lcfi6:
+; X32-NEXT: .cfi_def_cfa_offset 16
+; X32-NEXT: calll *16(%esp)
+; X32-NEXT: addl $12, %esp
+; X32-NEXT: retl
+;
+; X64-LABEL: test_indirect_call:
+; X64: # BB#0:
+; X64-NEXT: pushq %rax
+; X64-NEXT: .Lcfi5:
+; X64-NEXT: .cfi_def_cfa_offset 16
+; X64-NEXT: callq *%rdi
+; X64-NEXT: popq %rax
+; X64-NEXT: retq
+ call void %func()
+ ret void
+}
+
+declare void @take_char(i8)
+define void @test_abi_exts_call(i8* %addr) {
+; X32-LABEL: test_abi_exts_call:
+; X32: # BB#0:
+; X32-NEXT: pushl %ebx
+; X32-NEXT: .Lcfi7:
+; X32-NEXT: .cfi_def_cfa_offset 8
+; X32-NEXT: subl $8, %esp
+; X32-NEXT: .Lcfi8:
+; X32-NEXT: .cfi_def_cfa_offset 16
+; X32-NEXT: .Lcfi9:
+; X32-NEXT: .cfi_offset %ebx, -8
+; X32-NEXT: movl 16(%esp), %eax
+; X32-NEXT: movb (%eax), %bl
+; X32-NEXT: movb %bl, (%esp)
+; X32-NEXT: calll take_char
+; X32-NEXT: movsbl %bl, %eax
+; X32-NEXT: movl %eax, (%esp)
+; X32-NEXT: calll take_char
+; X32-NEXT: movzbl %bl, %eax
+; X32-NEXT: movl %eax, (%esp)
+; X32-NEXT: calll take_char
+; X32-NEXT: addl $8, %esp
+; X32-NEXT: popl %ebx
+; X32-NEXT: retl
+;
+; X64-LABEL: test_abi_exts_call:
+; X64: # BB#0:
+; X64-NEXT: pushq %rbx
+; X64-NEXT: .Lcfi6:
+; X64-NEXT: .cfi_def_cfa_offset 16
+; X64-NEXT: .Lcfi7:
+; X64-NEXT: .cfi_offset %rbx, -16
+; X64-NEXT: movb (%rdi), %bl
+; X64-NEXT: movl %ebx, %edi
+; X64-NEXT: callq take_char
+; X64-NEXT: movsbl %bl, %ebx
+; X64-NEXT: movl %ebx, %edi
+; X64-NEXT: callq take_char
+; X64-NEXT: movzbl %bl, %edi
+; X64-NEXT: callq take_char
+; X64-NEXT: popq %rbx
+; X64-NEXT: retq
+ %val = load i8, i8* %addr
+ call void @take_char(i8 %val)
+ call void @take_char(i8 signext %val)
+ call void @take_char(i8 zeroext %val)
+ ret void
}
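
On 32-bit targets all arguments are passed on the stack, so for each one OutgoingValueHandler::getStackAddress materializes an %esp-relative address and assignValueToAddress stores through it. The resulting per-argument MIR pattern (taken from the X32 run of the irtranslator test that follows) is:

  %4(p0) = COPY %esp
  %5(s32) = G_CONSTANT i32 0
  %6(p0) = G_GEP %4, %5(s32)
  G_STORE %1(s32), %6(p0) :: (store 4 into stack, align 0)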
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/irtranslator-callingconv.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/irtranslator-callingconv.ll?rev=311279&r1=311278&r2=311279&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/irtranslator-callingconv.ll (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/irtranslator-callingconv.ll Sun Aug 20 02:25:22 2017
@@ -392,3 +392,308 @@ define i32 * @test_memop_i32(i32 * %p1)
ret i32 * %p1;
}
+
+declare void @trivial_callee()
+define void @test_trivial_call() {
+; ALL-LABEL: name: test_trivial_call
+
+; X32: ADJCALLSTACKDOWN32 0, 0, 0, implicit-def %esp, implicit-def %eflags, implicit %esp
+; X32-NEXT: CALLpcrel32 @trivial_callee, csr_32, implicit %esp
+; X32-NEXT: ADJCALLSTACKUP32 0, 0, implicit-def %esp, implicit-def %eflags, implicit %esp
+; X32-NEXT: RET 0
+
+; X64: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def %rsp, implicit-def %eflags, implicit %rsp
+; X64-NEXT: CALL64pcrel32 @trivial_callee, csr_64, implicit %rsp
+; X64-NEXT: ADJCALLSTACKUP64 0, 0, implicit-def %rsp, implicit-def %eflags, implicit %rsp
+; X64-NEXT: RET 0
+
+ call void @trivial_callee()
+ ret void
+}
+
+declare void @simple_arg_callee(i32 %in0, i32 %in1)
+define void @test_simple_arg(i32 %in0, i32 %in1) {
+; ALL-LABEL: name: test_simple_arg
+
+; X32: fixedStack:
+; X32: - { id: 0, type: default, offset: 4, size: 4, alignment: 4,
+; X32-NEXT: isImmutable: true,
+; X32: - { id: 1, type: default, offset: 0, size: 4, alignment: 16,
+; X32-NEXT: isImmutable: true,
+; X32: body: |
+; X32-NEXT: bb.1 (%ir-block.0):
+; X32-NEXT: %2(p0) = G_FRAME_INDEX %fixed-stack.1
+; X32-NEXT: %0(s32) = G_LOAD %2(p0) :: (invariant load 4 from %fixed-stack.1, align 0)
+; X32-NEXT: %3(p0) = G_FRAME_INDEX %fixed-stack.0
+; X32-NEXT: %1(s32) = G_LOAD %3(p0) :: (invariant load 4 from %fixed-stack.0, align 0)
+; X32-NEXT: ADJCALLSTACKDOWN32 8, 0, 0, implicit-def %esp, implicit-def %eflags, implicit %esp
+; X32-NEXT: %4(p0) = COPY %esp
+; X32-NEXT: %5(s32) = G_CONSTANT i32 0
+; X32-NEXT: %6(p0) = G_GEP %4, %5(s32)
+; X32-NEXT: G_STORE %1(s32), %6(p0) :: (store 4 into stack, align 0)
+; X32-NEXT: %7(p0) = COPY %esp
+; X32-NEXT: %8(s32) = G_CONSTANT i32 4
+; X32-NEXT: %9(p0) = G_GEP %7, %8(s32)
+; X32-NEXT: G_STORE %0(s32), %9(p0) :: (store 4 into stack + 4, align 0)
+; X32-NEXT: CALLpcrel32 @simple_arg_callee, csr_32, implicit %esp
+; X32-NEXT: ADJCALLSTACKUP32 8, 0, implicit-def %esp, implicit-def %eflags, implicit %esp
+; X32-NEXT: RET 0
+
+; X64: %0(s32) = COPY %edi
+; X64-NEXT: %1(s32) = COPY %esi
+; X64-NEXT: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def %rsp, implicit-def %eflags, implicit %rsp
+; X64-NEXT: %edi = COPY %1(s32)
+; X64-NEXT: %esi = COPY %0(s32)
+; X64-NEXT: CALL64pcrel32 @simple_arg_callee, csr_64, implicit %rsp, implicit %edi, implicit %esi
+; X64-NEXT: ADJCALLSTACKUP64 0, 0, implicit-def %rsp, implicit-def %eflags, implicit %rsp
+; X64-NEXT: RET 0
+
+ call void @simple_arg_callee(i32 %in1, i32 %in0)
+ ret void
+}
+
+declare void @simple_arg8_callee(i32 %arg1, i32 %arg2, i32 %arg3, i32 %arg4, i32 %arg5, i32 %arg6, i32 %arg7, i32 %arg8)
+define void @test_simple_arg8_call(i32 %in0) {
+; ALL-LABEL: name: test_simple_arg8_call
+
+; X32: fixedStack:
+; X32: - { id: 0, type: default, offset: 0, size: 4, alignment: 16,
+; X32-NEXT: isImmutable: true,
+; X32: body: |
+; X32-NEXT: bb.1 (%ir-block.0):
+; X32-NEXT: %1(p0) = G_FRAME_INDEX %fixed-stack.0
+; X32-NEXT: %0(s32) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 0)
+; X32-NEXT: ADJCALLSTACKDOWN32 32, 0, 0, implicit-def %esp, implicit-def %eflags, implicit %esp
+; X32-NEXT: %2(p0) = COPY %esp
+; X32-NEXT: %3(s32) = G_CONSTANT i32 0
+; X32-NEXT: %4(p0) = G_GEP %2, %3(s32)
+; X32-NEXT: G_STORE %0(s32), %4(p0) :: (store 4 into stack, align 0)
+; X32-NEXT: %5(p0) = COPY %esp
+; X32-NEXT: %6(s32) = G_CONSTANT i32 4
+; X32-NEXT: %7(p0) = G_GEP %5, %6(s32)
+; X32-NEXT: G_STORE %0(s32), %7(p0) :: (store 4 into stack + 4, align 0)
+; X32-NEXT: %8(p0) = COPY %esp
+; X32-NEXT: %9(s32) = G_CONSTANT i32 8
+; X32-NEXT: %10(p0) = G_GEP %8, %9(s32)
+; X32-NEXT: G_STORE %0(s32), %10(p0) :: (store 4 into stack + 8, align 0)
+; X32-NEXT: %11(p0) = COPY %esp
+; X32-NEXT: %12(s32) = G_CONSTANT i32 12
+; X32-NEXT: %13(p0) = G_GEP %11, %12(s32)
+; X32-NEXT: G_STORE %0(s32), %13(p0) :: (store 4 into stack + 12, align 0)
+; X32-NEXT: %14(p0) = COPY %esp
+; X32-NEXT: %15(s32) = G_CONSTANT i32 16
+; X32-NEXT: %16(p0) = G_GEP %14, %15(s32)
+; X32-NEXT: G_STORE %0(s32), %16(p0) :: (store 4 into stack + 16, align 0)
+; X32-NEXT: %17(p0) = COPY %esp
+; X32-NEXT: %18(s32) = G_CONSTANT i32 20
+; X32-NEXT: %19(p0) = G_GEP %17, %18(s32)
+; X32-NEXT: G_STORE %0(s32), %19(p0) :: (store 4 into stack + 20, align 0)
+; X32-NEXT: %20(p0) = COPY %esp
+; X32-NEXT: %21(s32) = G_CONSTANT i32 24
+; X32-NEXT: %22(p0) = G_GEP %20, %21(s32)
+; X32-NEXT: G_STORE %0(s32), %22(p0) :: (store 4 into stack + 24, align 0)
+; X32-NEXT: %23(p0) = COPY %esp
+; X32-NEXT: %24(s32) = G_CONSTANT i32 28
+; X32-NEXT: %25(p0) = G_GEP %23, %24(s32)
+; X32-NEXT: G_STORE %0(s32), %25(p0) :: (store 4 into stack + 28, align 0)
+; X32-NEXT: CALLpcrel32 @simple_arg8_callee, csr_32, implicit %esp
+; X32-NEXT: ADJCALLSTACKUP32 32, 0, implicit-def %esp, implicit-def %eflags, implicit %esp
+; X32-NEXT: RET 0
+
+; X64: %0(s32) = COPY %edi
+; X64-NEXT: ADJCALLSTACKDOWN64 16, 0, 0, implicit-def %rsp, implicit-def %eflags, implicit %rsp
+; X64-NEXT: %edi = COPY %0(s32)
+; X64-NEXT: %esi = COPY %0(s32)
+; X64-NEXT: %edx = COPY %0(s32)
+; X64-NEXT: %ecx = COPY %0(s32)
+; X64-NEXT: %r8d = COPY %0(s32)
+; X64-NEXT: %r9d = COPY %0(s32)
+; X64-NEXT: %1(p0) = COPY %rsp
+; X64-NEXT: %2(s64) = G_CONSTANT i64 0
+; X64-NEXT: %3(p0) = G_GEP %1, %2(s64)
+; X64-NEXT: G_STORE %0(s32), %3(p0) :: (store 4 into stack, align 0)
+; X64-NEXT: %4(p0) = COPY %rsp
+; X64-NEXT: %5(s64) = G_CONSTANT i64 8
+; X64-NEXT: %6(p0) = G_GEP %4, %5(s64)
+; X64-NEXT: G_STORE %0(s32), %6(p0) :: (store 4 into stack + 8, align 0)
+; X64-NEXT: CALL64pcrel32 @simple_arg8_callee, csr_64, implicit %rsp, implicit %edi, implicit %esi, implicit %edx, implicit %ecx, implicit %r8d, implicit %r9d
+; X64-NEXT: ADJCALLSTACKUP64 16, 0, implicit-def %rsp, implicit-def %eflags, implicit %rsp
+; X64-NEXT: RET 0
+
+ call void @simple_arg8_callee(i32 %in0, i32 %in0, i32 %in0, i32 %in0,i32 %in0, i32 %in0, i32 %in0, i32 %in0)
+ ret void
+}
+
+declare i32 @simple_return_callee(i32 %in0)
+define i32 @test_simple_return_callee() {
+; ALL-LABEL: name: test_simple_return_callee
+
+; X32: %1(s32) = G_CONSTANT i32 5
+; X32-NEXT: ADJCALLSTACKDOWN32 4, 0, 0, implicit-def %esp, implicit-def %eflags, implicit %esp
+; X32-NEXT: %2(p0) = COPY %esp
+; X32-NEXT: %3(s32) = G_CONSTANT i32 0
+; X32-NEXT: %4(p0) = G_GEP %2, %3(s32)
+; X32-NEXT: G_STORE %1(s32), %4(p0) :: (store 4 into stack, align 0)
+; X32-NEXT: CALLpcrel32 @simple_return_callee, csr_32, implicit %esp, implicit-def %eax
+; X32-NEXT: %0(s32) = COPY %eax
+; X32-NEXT: ADJCALLSTACKUP32 4, 0, implicit-def %esp, implicit-def %eflags, implicit %esp
+; X32-NEXT: %5(s32) = G_ADD %0, %0
+; X32-NEXT: %eax = COPY %5(s32)
+; X32-NEXT: RET 0, implicit %eax
+
+; X64: %1(s32) = G_CONSTANT i32 5
+; X64-NEXT: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def %rsp, implicit-def %eflags, implicit %rsp
+; X64-NEXT: %edi = COPY %1(s32)
+; X64-NEXT: CALL64pcrel32 @simple_return_callee, csr_64, implicit %rsp, implicit %edi, implicit-def %eax
+; X64-NEXT: %0(s32) = COPY %eax
+; X64-NEXT: ADJCALLSTACKUP64 0, 0, implicit-def %rsp, implicit-def %eflags, implicit %rsp
+; X64-NEXT: %2(s32) = G_ADD %0, %0
+; X64-NEXT: %eax = COPY %2(s32)
+; X64-NEXT: RET 0, implicit %eax
+
+ %call = call i32 @simple_return_callee(i32 5)
+ %r = add i32 %call, %call
+ ret i32 %r
+}
+
+declare <8 x i32> @split_return_callee(<8 x i32> %in0)
+define <8 x i32> @test_split_return_callee(<8 x i32> %arg1, <8 x i32> %arg2) {
+; ALL-LABEL: name: test_split_return_callee
+
+; X32: fixedStack:
+; X32-NEXT: - { id: 0, type: default, offset: 0, size: 16, alignment: 16,
+; X32-NEXT: isImmutable: true,
+; X32: %2(<4 x s32>) = COPY %xmm0
+; X32-NEXT: %3(<4 x s32>) = COPY %xmm1
+; X32-NEXT: %4(<4 x s32>) = COPY %xmm2
+; X32-NEXT: %6(p0) = G_FRAME_INDEX %fixed-stack.0
+; X32-NEXT: %5(<4 x s32>) = G_LOAD %6(p0) :: (invariant load 16 from %fixed-stack.0, align 0)
+; X32-NEXT: %0(<8 x s32>) = G_MERGE_VALUES %2(<4 x s32>), %3(<4 x s32>)
+; X32-NEXT: %1(<8 x s32>) = G_MERGE_VALUES %4(<4 x s32>), %5(<4 x s32>)
+; X32-NEXT: ADJCALLSTACKDOWN32 0, 0, 0, implicit-def %esp, implicit-def %eflags, implicit %esp
+; X32-NEXT: %8(<4 x s32>), %9(<4 x s32>) = G_UNMERGE_VALUES %1(<8 x s32>)
+; X32-NEXT: %xmm0 = COPY %8(<4 x s32>)
+; X32-NEXT: %xmm1 = COPY %9(<4 x s32>)
+; X32-NEXT: CALLpcrel32 @split_return_callee, csr_32, implicit %esp, implicit %xmm0, implicit %xmm1, implicit-def %xmm0, implicit-def %xmm1
+; X32-NEXT: %10(<4 x s32>) = COPY %xmm0
+; X32-NEXT: %11(<4 x s32>) = COPY %xmm1
+; X32-NEXT: %7(<8 x s32>) = G_MERGE_VALUES %10(<4 x s32>), %11(<4 x s32>)
+; X32-NEXT: ADJCALLSTACKUP32 0, 0, implicit-def %esp, implicit-def %eflags, implicit %esp
+; X32-NEXT: %12(<8 x s32>) = G_ADD %0, %7
+; X32-NEXT: %13(<4 x s32>), %14(<4 x s32>) = G_UNMERGE_VALUES %12(<8 x s32>)
+; X32-NEXT: %xmm0 = COPY %13(<4 x s32>)
+; X32-NEXT: %xmm1 = COPY %14(<4 x s32>)
+; X32-NEXT: RET 0, implicit %xmm0, implicit %xmm1
+
+; X64: %2(<4 x s32>) = COPY %xmm0
+; X64-NEXT: %3(<4 x s32>) = COPY %xmm1
+; X64-NEXT: %4(<4 x s32>) = COPY %xmm2
+; X64-NEXT: %5(<4 x s32>) = COPY %xmm3
+; X64-NEXT: %0(<8 x s32>) = G_MERGE_VALUES %2(<4 x s32>), %3(<4 x s32>)
+; X64-NEXT: %1(<8 x s32>) = G_MERGE_VALUES %4(<4 x s32>), %5(<4 x s32>)
+; X64-NEXT: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def %rsp, implicit-def %eflags, implicit %rsp
+; X64-NEXT: %7(<4 x s32>), %8(<4 x s32>) = G_UNMERGE_VALUES %1(<8 x s32>)
+; X64-NEXT: %xmm0 = COPY %7(<4 x s32>)
+; X64-NEXT: %xmm1 = COPY %8(<4 x s32>)
+; X64-NEXT: CALL64pcrel32 @split_return_callee, csr_64, implicit %rsp, implicit %xmm0, implicit %xmm1, implicit-def %xmm0, implicit-def %xmm1
+; X64-NEXT: %9(<4 x s32>) = COPY %xmm0
+; X64-NEXT: %10(<4 x s32>) = COPY %xmm1
+; X64-NEXT: %6(<8 x s32>) = G_MERGE_VALUES %9(<4 x s32>), %10(<4 x s32>)
+; X64-NEXT: ADJCALLSTACKUP64 0, 0, implicit-def %rsp, implicit-def %eflags, implicit %rsp
+; X64-NEXT: %11(<8 x s32>) = G_ADD %0, %6
+; X64-NEXT: %12(<4 x s32>), %13(<4 x s32>) = G_UNMERGE_VALUES %11(<8 x s32>)
+; X64-NEXT: %xmm0 = COPY %12(<4 x s32>)
+; X64-NEXT: %xmm1 = COPY %13(<4 x s32>)
+; X64-NEXT: RET 0, implicit %xmm0, implicit %xmm1
+
+ %call = call <8 x i32> @split_return_callee(<8 x i32> %arg2)
+ %r = add <8 x i32> %arg1, %call
+ ret <8 x i32> %r
+}
+
+define void @test_indirect_call(void()* %func) {
+; ALL-LABEL: name: test_indirect_call
+
+; X32: registers:
+; X32-NEXT: - { id: 0, class: gr32, preferred-register: '' }
+; X32-NEXT: - { id: 1, class: _, preferred-register: '' }
+; X32: %1(p0) = G_FRAME_INDEX %fixed-stack.0
+; X32-NEXT: %0(p0) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 0)
+; X32-NEXT: ADJCALLSTACKDOWN32 0, 0, 0, implicit-def %esp, implicit-def %eflags, implicit %esp
+; X32-NEXT: CALL32r %0(p0), csr_32, implicit %esp
+; X32-NEXT: ADJCALLSTACKUP32 0, 0, implicit-def %esp, implicit-def %eflags, implicit %esp
+; X32-NEXT: RET 0
+
+; X64: registers:
+; X64-NEXT: - { id: 0, class: gr64, preferred-register: '' }
+; X64: %0(p0) = COPY %rdi
+; X64-NEXT: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def %rsp, implicit-def %eflags, implicit %rsp
+; X64-NEXT: CALL64r %0(p0), csr_64, implicit %rsp
+; X64-NEXT: ADJCALLSTACKUP64 0, 0, implicit-def %rsp, implicit-def %eflags, implicit %rsp
+; X64-NEXT: RET 0
+
+ call void %func()
+ ret void
+}
+
+
+declare void @take_char(i8)
+define void @test_abi_exts_call(i8* %addr) {
+; ALL-LABEL: name: test_abi_exts_call
+
+; X32: fixedStack:
+; X32-NEXT: - { id: 0, type: default, offset: 0, size: 4, alignment: 16,
+; X32-NEXT: isImmutable: true,
+; X32: %1(p0) = G_FRAME_INDEX %fixed-stack.0
+; X32-NEXT: %0(p0) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 0)
+; X32-NEXT: %2(s8) = G_LOAD %0(p0) :: (load 1 from %ir.addr)
+; X32-NEXT: ADJCALLSTACKDOWN32 4, 0, 0, implicit-def %esp, implicit-def %eflags, implicit %esp
+; X32-NEXT: %3(p0) = COPY %esp
+; X32-NEXT: %4(s32) = G_CONSTANT i32 0
+; X32-NEXT: %5(p0) = G_GEP %3, %4(s32)
+; X32-NEXT: G_STORE %2(s8), %5(p0) :: (store 4 into stack, align 0)
+; X32-NEXT: CALLpcrel32 @take_char, csr_32, implicit %esp
+; X32-NEXT: ADJCALLSTACKUP32 4, 0, implicit-def %esp, implicit-def %eflags, implicit %esp
+; X32-NEXT: ADJCALLSTACKDOWN32 4, 0, 0, implicit-def %esp, implicit-def %eflags, implicit %esp
+; X32-NEXT: %6(p0) = COPY %esp
+; X32-NEXT: %7(s32) = G_CONSTANT i32 0
+; X32-NEXT: %8(p0) = G_GEP %6, %7(s32)
+; X32-NEXT: %9(s32) = G_SEXT %2(s8)
+; X32-NEXT: G_STORE %9(s32), %8(p0) :: (store 4 into stack, align 0)
+; X32-NEXT: CALLpcrel32 @take_char, csr_32, implicit %esp
+; X32-NEXT: ADJCALLSTACKUP32 4, 0, implicit-def %esp, implicit-def %eflags, implicit %esp
+; X32-NEXT: ADJCALLSTACKDOWN32 4, 0, 0, implicit-def %esp, implicit-def %eflags, implicit %esp
+; X32-NEXT: %10(p0) = COPY %esp
+; X32-NEXT: %11(s32) = G_CONSTANT i32 0
+; X32-NEXT: %12(p0) = G_GEP %10, %11(s32)
+; X32-NEXT: %13(s32) = G_ZEXT %2(s8)
+; X32-NEXT: G_STORE %13(s32), %12(p0) :: (store 4 into stack, align 0)
+; X32-NEXT: CALLpcrel32 @take_char, csr_32, implicit %esp
+; X32-NEXT: ADJCALLSTACKUP32 4, 0, implicit-def %esp, implicit-def %eflags, implicit %esp
+; X32-NEXT: RET 0
+
+; X64: %0(p0) = COPY %rdi
+; X64-NEXT: %1(s8) = G_LOAD %0(p0) :: (load 1 from %ir.addr)
+; X64-NEXT: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def %rsp, implicit-def %eflags, implicit %rsp
+; X64-NEXT: %edi = COPY %1(s8)
+; X64-NEXT: CALL64pcrel32 @take_char, csr_64, implicit %rsp, implicit %edi
+; X64-NEXT: ADJCALLSTACKUP64 0, 0, implicit-def %rsp, implicit-def %eflags, implicit %rsp
+; X64-NEXT: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def %rsp, implicit-def %eflags, implicit %rsp
+; X64-NEXT: %2(s32) = G_SEXT %1(s8)
+; X64-NEXT: %edi = COPY %2(s32)
+; X64-NEXT: CALL64pcrel32 @take_char, csr_64, implicit %rsp, implicit %edi
+; X64-NEXT: ADJCALLSTACKUP64 0, 0, implicit-def %rsp, implicit-def %eflags, implicit %rsp
+; X64-NEXT: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def %rsp, implicit-def %eflags, implicit %rsp
+; X64-NEXT: %3(s32) = G_ZEXT %1(s8)
+; X64-NEXT: %edi = COPY %3(s32)
+; X64-NEXT: CALL64pcrel32 @take_char, csr_64, implicit %rsp, implicit %edi
+; X64-NEXT: ADJCALLSTACKUP64 0, 0, implicit-def %rsp, implicit-def %eflags, implicit %rsp
+; X64-NEXT: RET 0
+
+ %val = load i8, i8* %addr
+ call void @take_char(i8 %val)
+ call void @take_char(i8 signext %val)
+ call void @take_char(i8 zeroext %val)
+ ret void
+}
+