[llvm] [WebAssembly] Implement GlobalISel (PR #157161)
via llvm-commits
llvm-commits at lists.llvm.org
Fri Sep 5 11:58:12 PDT 2025
https://github.com/QuantumSegfault created https://github.com/llvm/llvm-project/pull/157161
A WIP attempt to implement the new GlobalISel system for the WebAssembly backend.
It may not end up being worth the effort in the short term, but hopefully if it works out, it will open the doors to improved code maintainability and future GlobalISel-exclusive optimizations.
>From ebe8ba6a212102faf94ba62c377136afb0791cd7 Mon Sep 17 00:00:00 2001
From: QuantumSegfault <fungi-turbos-7l at icloud.com>
Date: Fri, 5 Sep 2025 11:48:19 -0700
Subject: [PATCH] Prepare basic GlobalISel setup and implement
CallLowering::lowerFormalArguments and CallLowering::lowerReturn
---
llvm/lib/Target/WebAssembly/CMakeLists.txt | 4 +
.../GISel/WebAssemblyCallLowering.cpp | 687 ++++++++++++++++++
.../GISel/WebAssemblyCallLowering.h | 43 ++
.../GISel/WebAssemblyInstructionSelector.cpp | 0
.../GISel/WebAssemblyInstructionSelector.h | 0
.../GISel/WebAssemblyLegalizerInfo.cpp | 23 +
.../GISel/WebAssemblyLegalizerInfo.h | 29 +
.../GISel/WebAssemblyRegisterBankInfo.cpp | 0
.../GISel/WebAssemblyRegisterBankInfo.h | 0
.../WebAssembly/WebAssemblySubtarget.cpp | 30 +-
.../Target/WebAssembly/WebAssemblySubtarget.h | 14 +
.../WebAssembly/WebAssemblyTargetMachine.cpp | 30 +
12 files changed, 859 insertions(+), 1 deletion(-)
create mode 100644 llvm/lib/Target/WebAssembly/GISel/WebAssemblyCallLowering.cpp
create mode 100644 llvm/lib/Target/WebAssembly/GISel/WebAssemblyCallLowering.h
create mode 100644 llvm/lib/Target/WebAssembly/GISel/WebAssemblyInstructionSelector.cpp
create mode 100644 llvm/lib/Target/WebAssembly/GISel/WebAssemblyInstructionSelector.h
create mode 100644 llvm/lib/Target/WebAssembly/GISel/WebAssemblyLegalizerInfo.cpp
create mode 100644 llvm/lib/Target/WebAssembly/GISel/WebAssemblyLegalizerInfo.h
create mode 100644 llvm/lib/Target/WebAssembly/GISel/WebAssemblyRegisterBankInfo.cpp
create mode 100644 llvm/lib/Target/WebAssembly/GISel/WebAssemblyRegisterBankInfo.h
diff --git a/llvm/lib/Target/WebAssembly/CMakeLists.txt b/llvm/lib/Target/WebAssembly/CMakeLists.txt
index 1e83cbeac50d6..371d224efc1c5 100644
--- a/llvm/lib/Target/WebAssembly/CMakeLists.txt
+++ b/llvm/lib/Target/WebAssembly/CMakeLists.txt
@@ -15,6 +15,10 @@ tablegen(LLVM WebAssemblyGenSubtargetInfo.inc -gen-subtarget)
add_public_tablegen_target(WebAssemblyCommonTableGen)
add_llvm_target(WebAssemblyCodeGen
+ GISel/WebAssemblyCallLowering.cpp
+ GISel/WebAssemblyInstructionSelector.cpp
+ GISel/WebAssemblyRegisterBankInfo.cpp
+ GISel/WebAssemblyLegalizerInfo.cpp
WebAssemblyAddMissingPrototypes.cpp
WebAssemblyArgumentMove.cpp
WebAssemblyAsmPrinter.cpp
diff --git a/llvm/lib/Target/WebAssembly/GISel/WebAssemblyCallLowering.cpp b/llvm/lib/Target/WebAssembly/GISel/WebAssemblyCallLowering.cpp
new file mode 100644
index 0000000000000..5949d26a83840
--- /dev/null
+++ b/llvm/lib/Target/WebAssembly/GISel/WebAssemblyCallLowering.cpp
@@ -0,0 +1,687 @@
+//===-- WebAssemblyCallLowering.cpp - Call lowering for GlobalISel -*- C++ -*-//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file implements the lowering of LLVM calls to machine code calls for
+/// GlobalISel.
+///
+//===----------------------------------------------------------------------===//
+
+#include "WebAssemblyCallLowering.h"
+#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
+#include "WebAssemblyISelLowering.h"
+#include "WebAssemblyMachineFunctionInfo.h"
+#include "WebAssemblySubtarget.h"
+#include "WebAssemblyUtilities.h"
+#include "llvm/CodeGen/Analysis.h"
+#include "llvm/CodeGen/FunctionLoweringInfo.h"
+#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
+#include "llvm/CodeGen/LowLevelTypeUtils.h"
+#include "llvm/CodeGenTypes/LowLevelType.h"
+#include "llvm/IR/Argument.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DebugLoc.h"
+
+#include "llvm/IR/DiagnosticInfo.h"
+#include "llvm/IR/DiagnosticPrinter.h"
+#include "llvm/Support/ErrorHandling.h"
+
+#define DEBUG_TYPE "wasm-call-lowering"
+
+using namespace llvm;
+
+// Several of the following methods are internal utilities defined in
+// CodeGen/GlobalISel/CallLowering.cpp.
+// TODO: Find a better solution than duplicating them here?
+
+// Internal utility from CallLowering.cpp
+/// Map the extension kind recorded in \p Flags to the matching generic
+/// extension opcode: sext -> G_SEXT, zext -> G_ZEXT, otherwise G_ANYEXT.
+static unsigned extendOpFromFlags(ISD::ArgFlagsTy Flags) {
+  return Flags.isSExt()   ? TargetOpcode::G_SEXT
+         : Flags.isZExt() ? TargetOpcode::G_ZEXT
+                          : TargetOpcode::G_ANYEXT;
+}
+
+// Internal utility from CallLowering.cpp
+/// Pack values \p SrcRegs to cover the vector type result \p DstRegs.
+///
+/// Used when reassembling a vector IR value from the legalized register
+/// pieces it was split into for the calling convention.
+static MachineInstrBuilder
+mergeVectorRegsToResultRegs(MachineIRBuilder &B, ArrayRef<Register> DstRegs,
+                            ArrayRef<Register> SrcRegs) {
+  MachineRegisterInfo &MRI = *B.getMRI();
+  LLT LLTy = MRI.getType(DstRegs[0]);
+  LLT PartLLT = MRI.getType(SrcRegs[0]);
+
+  // Deal with v3s16 split into v2s16
+  LLT LCMTy = getCoverTy(LLTy, PartLLT);
+  if (LCMTy == LLTy) {
+    // Common case where no padding is needed.
+    assert(DstRegs.size() == 1);
+    return B.buildConcatVectors(DstRegs[0], SrcRegs);
+  }
+
+  // We need to create an unmerge to the result registers, which may require
+  // widening the original value.
+  Register UnmergeSrcReg;
+  if (LCMTy != PartLLT) {
+    // Merge the parts up to the covering type, then drop the padding elements
+    // to land on the destination type.
+    assert(DstRegs.size() == 1);
+    return B.buildDeleteTrailingVectorElements(
+        DstRegs[0], B.buildMergeLikeInstr(LCMTy, SrcRegs));
+  } else {
+    // We don't need to widen anything if we're extracting a scalar which was
+    // promoted to a vector e.g. s8 -> v4s8 -> s8
+    assert(SrcRegs.size() == 1);
+    UnmergeSrcReg = SrcRegs[0];
+  }
+
+  // LCMTy covers LLTy but is larger, so unmerging yields more registers than
+  // the caller asked for; the extras become dead defs below.
+  int NumDst = LCMTy.getSizeInBits() / LLTy.getSizeInBits();
+
+  SmallVector<Register, 8> PadDstRegs(NumDst);
+  llvm::copy(DstRegs, PadDstRegs.begin());
+
+  // Create the excess dead defs for the unmerge.
+  for (int I = DstRegs.size(); I != NumDst; ++I)
+    PadDstRegs[I] = MRI.createGenericVirtualRegister(LLTy);
+
+  if (PadDstRegs.size() == 1)
+    return B.buildDeleteTrailingVectorElements(DstRegs[0], UnmergeSrcReg);
+  return B.buildUnmerge(PadDstRegs, UnmergeSrcReg);
+}
+
+// Internal utility from CallLowering.cpp
+/// Create a sequence of instructions to combine pieces split into register
+/// typed values to the original IR value. \p OrigRegs contains the destination
+/// value registers of type \p LLTy, and \p Regs contains the legalized pieces
+/// with type \p PartLLT. This is used for incoming values (physregs to vregs).
+static void buildCopyFromRegs(MachineIRBuilder &B, ArrayRef<Register> OrigRegs,
+                              ArrayRef<Register> Regs, LLT LLTy, LLT PartLLT,
+                              const ISD::ArgFlagsTy Flags) {
+  MachineRegisterInfo &MRI = *B.getMRI();
+
+  if (PartLLT == LLTy) {
+    // We should have avoided introducing a new virtual register, and just
+    // directly assigned here.
+    assert(OrigRegs[0] == Regs[0]);
+    return;
+  }
+
+  // Same size but different type (e.g. pointer vs integer): a bitcast
+  // suffices.
+  if (PartLLT.getSizeInBits() == LLTy.getSizeInBits() && OrigRegs.size() == 1 &&
+      Regs.size() == 1) {
+    B.buildBitcast(OrigRegs[0], Regs[0]);
+    return;
+  }
+
+  // A vector PartLLT needs extending to LLTy's element size.
+  // E.g. <2 x s64> = G_SEXT <2 x s32>.
+  if (PartLLT.isVector() == LLTy.isVector() &&
+      PartLLT.getScalarSizeInBits() > LLTy.getScalarSizeInBits() &&
+      (!PartLLT.isVector() ||
+       PartLLT.getElementCount() == LLTy.getElementCount()) &&
+      OrigRegs.size() == 1 && Regs.size() == 1) {
+    Register SrcReg = Regs[0];
+
+    LLT LocTy = MRI.getType(SrcReg);
+
+    // Record the known extension of the incoming value so later passes can
+    // exploit it, without actually changing the bits.
+    if (Flags.isSExt()) {
+      SrcReg = B.buildAssertSExt(LocTy, SrcReg, LLTy.getScalarSizeInBits())
+                   .getReg(0);
+    } else if (Flags.isZExt()) {
+      SrcReg = B.buildAssertZExt(LocTy, SrcReg, LLTy.getScalarSizeInBits())
+                   .getReg(0);
+    }
+
+    // Sometimes pointers are passed zero extended.
+    LLT OrigTy = MRI.getType(OrigRegs[0]);
+    if (OrigTy.isPointer()) {
+      LLT IntPtrTy = LLT::scalar(OrigTy.getSizeInBits());
+      B.buildIntToPtr(OrigRegs[0], B.buildTrunc(IntPtrTy, SrcReg));
+      return;
+    }
+
+    B.buildTrunc(OrigRegs[0], SrcReg);
+    return;
+  }
+
+  // Scalar assembled from multiple scalar parts.
+  if (!LLTy.isVector() && !PartLLT.isVector()) {
+    assert(OrigRegs.size() == 1);
+    LLT OrigTy = MRI.getType(OrigRegs[0]);
+
+    unsigned SrcSize = PartLLT.getSizeInBits().getFixedValue() * Regs.size();
+    if (SrcSize == OrigTy.getSizeInBits())
+      B.buildMergeValues(OrigRegs[0], Regs);
+    else {
+      // Parts over-cover the value; merge into a wider scalar and truncate.
+      auto Widened = B.buildMergeLikeInstr(LLT::scalar(SrcSize), Regs);
+      B.buildTrunc(OrigRegs[0], Widened);
+    }
+
+    return;
+  }
+
+  if (PartLLT.isVector()) {
+    assert(OrigRegs.size() == 1);
+    SmallVector<Register> CastRegs(Regs);
+
+    // If PartLLT is a mismatched vector in both number of elements and element
+    // size, e.g. PartLLT == v2s64 and LLTy is v3s32, then first coerce it to
+    // have the same elt type, i.e. v4s32.
+    // TODO: Extend this coercion to element multiples other than just 2.
+    if (TypeSize::isKnownGT(PartLLT.getSizeInBits(), LLTy.getSizeInBits()) &&
+        PartLLT.getScalarSizeInBits() == LLTy.getScalarSizeInBits() * 2 &&
+        Regs.size() == 1) {
+      LLT NewTy = PartLLT.changeElementType(LLTy.getElementType())
+                      .changeElementCount(PartLLT.getElementCount() * 2);
+      CastRegs[0] = B.buildBitcast(NewTy, Regs[0]).getReg(0);
+      PartLLT = NewTy;
+    }
+
+    if (LLTy.getScalarType() == PartLLT.getElementType()) {
+      mergeVectorRegsToResultRegs(B, OrigRegs, CastRegs);
+    } else {
+      unsigned I = 0;
+      LLT GCDTy = getGCDType(LLTy, PartLLT);
+
+      // We are both splitting a vector, and bitcasting its element types. Cast
+      // the source pieces into the appropriate number of pieces with the result
+      // element type.
+      // Note: reading CastRegs by value while writing CastRegs[I] is safe here
+      // because each element is copied into SrcReg before being overwritten.
+      for (Register SrcReg : CastRegs)
+        CastRegs[I++] = B.buildBitcast(GCDTy, SrcReg).getReg(0);
+      mergeVectorRegsToResultRegs(B, OrigRegs, CastRegs);
+    }
+
+    return;
+  }
+
+  // Remaining case: vector destination built from scalar parts.
+  assert(LLTy.isVector() && !PartLLT.isVector());
+
+  LLT DstEltTy = LLTy.getElementType();
+
+  // Pointer information was discarded. We'll need to coerce some register types
+  // to avoid violating type constraints.
+  LLT RealDstEltTy = MRI.getType(OrigRegs[0]).getElementType();
+
+  assert(DstEltTy.getSizeInBits() == RealDstEltTy.getSizeInBits());
+
+  if (DstEltTy == PartLLT) {
+    // Vector was trivially scalarized.
+
+    if (RealDstEltTy.isPointer()) {
+      for (Register Reg : Regs)
+        MRI.setType(Reg, RealDstEltTy);
+    }
+
+    B.buildBuildVector(OrigRegs[0], Regs);
+  } else if (DstEltTy.getSizeInBits() > PartLLT.getSizeInBits()) {
+    // Deal with vector with 64-bit elements decomposed to 32-bit
+    // registers. Need to create intermediate 64-bit elements.
+    SmallVector<Register, 8> EltMerges;
+    int PartsPerElt =
+        divideCeil(DstEltTy.getSizeInBits(), PartLLT.getSizeInBits());
+    LLT ExtendedPartTy = LLT::scalar(PartLLT.getSizeInBits() * PartsPerElt);
+
+    for (int I = 0, NumElts = LLTy.getNumElements(); I != NumElts; ++I) {
+      auto Merge =
+          B.buildMergeLikeInstr(ExtendedPartTy, Regs.take_front(PartsPerElt));
+      if (ExtendedPartTy.getSizeInBits() > RealDstEltTy.getSizeInBits())
+        Merge = B.buildTrunc(RealDstEltTy, Merge);
+      // Fix the type in case this is really a vector of pointers.
+      MRI.setType(Merge.getReg(0), RealDstEltTy);
+      EltMerges.push_back(Merge.getReg(0));
+      Regs = Regs.drop_front(PartsPerElt);
+    }
+
+    B.buildBuildVector(OrigRegs[0], EltMerges);
+  } else {
+    // Vector was split, and elements promoted to a wider type.
+    // FIXME: Should handle floating point promotions.
+    unsigned NumElts = LLTy.getNumElements();
+    LLT BVType = LLT::fixed_vector(NumElts, PartLLT);
+
+    Register BuildVec;
+    if (NumElts == Regs.size())
+      BuildVec = B.buildBuildVector(BVType, Regs).getReg(0);
+    else {
+      // Vector elements are packed in the inputs.
+      // e.g. we have a <4 x s16> but 2 x s32 in regs.
+      assert(NumElts > Regs.size());
+      LLT SrcEltTy = MRI.getType(Regs[0]);
+
+      LLT OriginalEltTy = MRI.getType(OrigRegs[0]).getElementType();
+
+      // Input registers contain packed elements.
+      // Determine how many elements per reg.
+      assert((SrcEltTy.getSizeInBits() % OriginalEltTy.getSizeInBits()) == 0);
+      unsigned EltPerReg =
+          (SrcEltTy.getSizeInBits() / OriginalEltTy.getSizeInBits());
+
+      SmallVector<Register, 0> BVRegs;
+      BVRegs.reserve(Regs.size() * EltPerReg);
+      for (Register R : Regs) {
+        auto Unmerge = B.buildUnmerge(OriginalEltTy, R);
+        for (unsigned K = 0; K < EltPerReg; ++K)
+          BVRegs.push_back(B.buildAnyExt(PartLLT, Unmerge.getReg(K)).getReg(0));
+      }
+
+      // We may have some more elements in BVRegs, e.g. if we have 2 s32 pieces
+      // for a <3 x s16> vector. We should have less than EltPerReg extra items.
+      if (BVRegs.size() > NumElts) {
+        assert((BVRegs.size() - NumElts) < EltPerReg);
+        BVRegs.truncate(NumElts);
+      }
+      BuildVec = B.buildBuildVector(BVType, BVRegs).getReg(0);
+    }
+    B.buildTrunc(OrigRegs[0], BuildVec);
+  }
+}
+
+// Internal utility from CallLowering.cpp
+/// Create a sequence of instructions to expand the value in \p SrcReg (of type
+/// \p SrcTy) to the types in \p DstRegs (of type \p PartTy). \p ExtendOp should
+/// contain the type of scalar value extension if necessary.
+///
+/// This is used for outgoing values (vregs to physregs)
+static void buildCopyToRegs(MachineIRBuilder &B, ArrayRef<Register> DstRegs,
+                            Register SrcReg, LLT SrcTy, LLT PartTy,
+                            unsigned ExtendOp = TargetOpcode::G_ANYEXT) {
+  // We could just insert a regular copy, but this is unreachable at the moment.
+  assert(SrcTy != PartTy && "identical part types shouldn't reach here");
+
+  const TypeSize PartSize = PartTy.getSizeInBits();
+
+  // Single destination that is a widening of the source: one extension.
+  if (PartTy.isVector() == SrcTy.isVector() &&
+      PartTy.getScalarSizeInBits() > SrcTy.getScalarSizeInBits()) {
+    assert(DstRegs.size() == 1);
+    B.buildInstr(ExtendOp, {DstRegs[0]}, {SrcReg});
+    return;
+  }
+
+  if (SrcTy.isVector() && !PartTy.isVector() &&
+      TypeSize::isKnownGT(PartSize, SrcTy.getElementType().getSizeInBits())) {
+    // Vector was scalarized, and the elements extended.
+    auto UnmergeToEltTy = B.buildUnmerge(SrcTy.getElementType(), SrcReg);
+    for (int i = 0, e = DstRegs.size(); i != e; ++i)
+      B.buildAnyExt(DstRegs[i], UnmergeToEltTy.getReg(i));
+    return;
+  }
+
+  if (SrcTy.isVector() && PartTy.isVector() &&
+      PartTy.getSizeInBits() == SrcTy.getSizeInBits() &&
+      ElementCount::isKnownLT(SrcTy.getElementCount(),
+                              PartTy.getElementCount())) {
+    // A coercion like: v2f32 -> v4f32 or nxv2f32 -> nxv4f32
+    Register DstReg = DstRegs.front();
+    B.buildPadVectorWithUndefElements(DstReg, SrcReg);
+    return;
+  }
+
+  LLT GCDTy = getGCDType(SrcTy, PartTy);
+  if (GCDTy == PartTy) {
+    // If this already evenly divisible, we can create a simple unmerge.
+    B.buildUnmerge(DstRegs, SrcReg);
+    return;
+  }
+
+  if (SrcTy.isVector() && !PartTy.isVector() &&
+      SrcTy.getScalarSizeInBits() > PartTy.getSizeInBits()) {
+    // Elements are wider than the parts: widen each element so the split
+    // becomes even, then unmerge.
+    LLT ExtTy =
+        LLT::vector(SrcTy.getElementCount(),
+                    LLT::scalar(PartTy.getScalarSizeInBits() * DstRegs.size() /
+                                SrcTy.getNumElements()));
+    auto Ext = B.buildAnyExt(ExtTy, SrcReg);
+    B.buildUnmerge(DstRegs, Ext);
+    return;
+  }
+
+  MachineRegisterInfo &MRI = *B.getMRI();
+  LLT DstTy = MRI.getType(DstRegs[0]);
+  LLT LCMTy = getCoverTy(SrcTy, PartTy);
+
+  if (PartTy.isVector() && LCMTy == PartTy) {
+    assert(DstRegs.size() == 1);
+    B.buildPadVectorWithUndefElements(DstRegs[0], SrcReg);
+    return;
+  }
+
+  const unsigned DstSize = DstTy.getSizeInBits();
+  const unsigned SrcSize = SrcTy.getSizeInBits();
+  unsigned CoveringSize = LCMTy.getSizeInBits();
+
+  Register UnmergeSrc = SrcReg;
+
+  if (!LCMTy.isVector() && CoveringSize != SrcSize) {
+    // For scalars, it's common to be able to use a simple extension.
+    if (SrcTy.isScalar() && DstTy.isScalar()) {
+      CoveringSize = alignTo(SrcSize, DstSize);
+      LLT CoverTy = LLT::scalar(CoveringSize);
+      UnmergeSrc = B.buildInstr(ExtendOp, {CoverTy}, {SrcReg}).getReg(0);
+    } else {
+      // Widen to the common type.
+      // FIXME: This should respect the extend type
+      Register Undef = B.buildUndef(SrcTy).getReg(0);
+      SmallVector<Register, 8> MergeParts(1, SrcReg);
+      for (unsigned Size = SrcSize; Size != CoveringSize; Size += SrcSize)
+        MergeParts.push_back(Undef);
+      UnmergeSrc = B.buildMergeLikeInstr(LCMTy, MergeParts).getReg(0);
+    }
+  }
+
+  if (LCMTy.isVector() && CoveringSize != SrcSize)
+    UnmergeSrc = B.buildPadVectorWithUndefElements(LCMTy, SrcReg).getReg(0);
+
+  B.buildUnmerge(DstRegs, UnmergeSrc);
+}
+
+// Test whether the given calling convention is supported.
+static bool callingConvSupported(CallingConv::ID CallConv) {
+  // We currently support the language-independent target-independent
+  // conventions. We don't yet have a way to annotate calls with properties like
+  // "cold", and we don't have any call-clobbered registers, so these are mostly
+  // all handled the same.
+  switch (CallConv) {
+  case CallingConv::C:
+  case CallingConv::Fast:
+  case CallingConv::Cold:
+  case CallingConv::PreserveMost:
+  case CallingConv::PreserveAll:
+  case CallingConv::CXX_FAST_TLS:
+  case CallingConv::WASM_EmscriptenInvoke:
+  case CallingConv::Swift:
+    return true;
+  default:
+    return false;
+  }
+}
+
+/// Report an unsupported-lowering diagnostic for the function currently being
+/// lowered, attributed to the builder's current debug location.
+static void fail(MachineIRBuilder &MIRBuilder, const char *Msg) {
+  const MachineFunction &MF = MIRBuilder.getMF();
+  DiagnosticInfoUnsupported Diag(MF.getFunction(), Msg, MIRBuilder.getDL());
+  MIRBuilder.getContext().diagnose(Diag);
+}
+
+// Construct call lowering; the base class keeps the TargetLowering pointer for
+// type-legalization and calling-convention queries.
+WebAssemblyCallLowering::WebAssemblyCallLowering(
+    const WebAssemblyTargetLowering &TLI)
+    : CallLowering(&TLI) {}
+
+/// Decide whether the return values can be lowered directly, i.e. without
+/// demoting the return to an sret pointer argument.
+bool WebAssemblyCallLowering::canLowerReturn(MachineFunction &MF,
+                                             CallingConv::ID CallConv,
+                                             SmallVectorImpl<BaseArgInfo> &Outs,
+                                             bool IsVarArg) const {
+  // Delegate to the shared helper; the answer depends on the number of
+  // results and subtarget features (multivalue).
+  const auto &Subtarget = MF.getSubtarget<WebAssemblySubtarget>();
+  return WebAssembly::canLowerReturn(Outs.size(), &Subtarget);
+}
+
+/// Lower an IR `ret` into a WebAssembly RETURN instruction, splitting and
+/// extending the return value into the register types required by the calling
+/// convention.
+bool WebAssemblyCallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
+                                          const Value *Val,
+                                          ArrayRef<Register> VRegs,
+                                          FunctionLoweringInfo &FLI,
+                                          Register SwiftErrorVReg) const {
+  // WASM does not `supportSwiftError`, so the IRTranslator should never hand
+  // us a swifterror vreg. A runtime `if` guarding llvm_unreachable would be
+  // compiled out in release builds and fall through into UB; assert the
+  // invariant instead, per LLVM convention.
+  assert(!SwiftErrorVReg && "WASM does not `supportSwiftError`, yet "
+                            "SwiftErrorVReg is improperly valid.");
+
+  // Build the RETURN up front; it is only inserted once all operands have
+  // been attached below.
+  auto MIB = MIRBuilder.buildInstrNoInsert(WebAssembly::RETURN);
+
+  assert(((Val && !VRegs.empty()) || (!Val && VRegs.empty())) &&
+         "Return value without a vreg");
+
+  if (Val && !FLI.CanLowerReturn) {
+    // The return value cannot be lowered in return slots (see canLowerReturn);
+    // store it through the demoted sret pointer instead.
+    insertSRetStores(MIRBuilder, Val->getType(), VRegs, FLI.DemoteRegister);
+  } else if (!VRegs.empty()) {
+    MachineFunction &MF = MIRBuilder.getMF();
+    const Function &F = MF.getFunction();
+    MachineRegisterInfo &MRI = MF.getRegInfo();
+    const WebAssemblyTargetLowering &TLI = *getTLI<WebAssemblyTargetLowering>();
+    auto &DL = F.getDataLayout();
+    LLVMContext &Ctx = Val->getType()->getContext();
+
+    // Split the IR return type into legal value types; each split type maps
+    // 1:1 onto an incoming vreg.
+    SmallVector<EVT, 4> SplitEVTs;
+    ComputeValueVTs(TLI, DL, Val->getType(), SplitEVTs);
+    assert(VRegs.size() == SplitEVTs.size() &&
+           "For each split Type there should be exactly one VReg.");
+
+    SmallVector<ArgInfo, 8> SplitArgs;
+    CallingConv::ID CallConv = F.getCallingConv();
+
+    for (unsigned i = 0, e = SplitEVTs.size(); i != e; ++i) {
+      ArgInfo CurArgInfo{VRegs[i], SplitEVTs[i].getTypeForEVT(Ctx), 0};
+      setArgFlags(CurArgInfo, AttributeList::ReturnIndex, DL, F);
+      splitToValueTypes(CurArgInfo, SplitArgs, DL, CallConv);
+    }
+
+    for (auto &Arg : SplitArgs) {
+      EVT OrigVT = TLI.getValueType(DL, Arg.Ty);
+      MVT NewVT = TLI.getRegisterTypeForCallingConv(Ctx, CallConv, OrigVT);
+      LLT OrigLLT = getLLTForType(*Arg.Ty, DL);
+      LLT NewLLT = getLLTForMVT(NewVT);
+
+      // If we need to split the type over multiple regs, check it's a scenario
+      // we currently support.
+      unsigned NumParts =
+          TLI.getNumRegistersForCallingConv(Ctx, CallConv, OrigVT);
+
+      // Rebuild the per-part flags: the first part carries the split marker,
+      // later parts lose the original alignment, and the last part is marked
+      // as the split end.
+      ISD::ArgFlagsTy OrigFlags = Arg.Flags[0];
+      Arg.Flags.clear();
+
+      for (unsigned Part = 0; Part < NumParts; ++Part) {
+        ISD::ArgFlagsTy Flags = OrigFlags;
+        if (Part == 0) {
+          Flags.setSplit();
+        } else {
+          Flags.setOrigAlign(Align(1));
+          if (Part == NumParts - 1)
+            Flags.setSplitEnd();
+        }
+
+        Arg.Flags.push_back(Flags);
+      }
+
+      Arg.OrigRegs.assign(Arg.Regs.begin(), Arg.Regs.end());
+      if (NumParts != 1 || OrigVT != NewVT) {
+        // If we can't directly assign the register, we need one or more
+        // intermediate values.
+        Arg.Regs.resize(NumParts);
+
+        // For each split register, create and assign a vreg that will store
+        // the outgoing component of the larger value. These are filled by
+        // splitting/extending the original value.
+        for (unsigned Part = 0; Part < NumParts; ++Part) {
+          Arg.Regs[Part] = MRI.createGenericVirtualRegister(NewLLT);
+        }
+        buildCopyToRegs(MIRBuilder, Arg.Regs, Arg.OrigRegs[0], OrigLLT, NewLLT,
+                        extendOpFromFlags(Arg.Flags[0]));
+      }
+
+      // Attach each part as an operand of the RETURN.
+      for (unsigned Part = 0; Part < NumParts; ++Part) {
+        MIB.addUse(Arg.Regs[Part]);
+      }
+    }
+  }
+
+  MIRBuilder.insertInstr(MIB);
+  return true;
+}
+
+/// Return the WebAssembly ARGUMENT_* pseudo-instruction opcode used to
+/// materialize an incoming argument of type \p ArgType.
+static unsigned getWASMArgOpcode(MVT ArgType) {
+  switch (ArgType.SimpleTy) {
+  case MVT::i32:
+    return WebAssembly::ARGUMENT_i32;
+  case MVT::i64:
+    return WebAssembly::ARGUMENT_i64;
+  case MVT::f32:
+    return WebAssembly::ARGUMENT_f32;
+  case MVT::f64:
+    return WebAssembly::ARGUMENT_f64;
+  case MVT::funcref:
+    return WebAssembly::ARGUMENT_funcref;
+  case MVT::externref:
+    return WebAssembly::ARGUMENT_externref;
+  case MVT::exnref:
+    return WebAssembly::ARGUMENT_exnref;
+  case MVT::v16i8:
+    return WebAssembly::ARGUMENT_v16i8;
+  case MVT::v8i16:
+    return WebAssembly::ARGUMENT_v8i16;
+  case MVT::v4i32:
+    return WebAssembly::ARGUMENT_v4i32;
+  case MVT::v2i64:
+    return WebAssembly::ARGUMENT_v2i64;
+  case MVT::v4f32:
+    return WebAssembly::ARGUMENT_v4f32;
+  case MVT::v2f64:
+    return WebAssembly::ARGUMENT_v2f64;
+  case MVT::v8f16:
+    return WebAssembly::ARGUMENT_v8f16;
+  default:
+    llvm_unreachable("Found unexpected type for WASM argument");
+  }
+}
+
+/// Lower the incoming formal arguments of \p F: each calling-convention part
+/// becomes an ARGUMENT_* pseudo-instruction, and the parts are merged back
+/// into the IR-level vregs.
+bool WebAssemblyCallLowering::lowerFormalArguments(
+    MachineIRBuilder &MIRBuilder, const Function &F,
+    ArrayRef<ArrayRef<Register>> VRegs, FunctionLoweringInfo &FLI) const {
+
+  MachineFunction &MF = MIRBuilder.getMF();
+  MachineRegisterInfo &MRI = MF.getRegInfo();
+  WebAssemblyFunctionInfo *MFI = MF.getInfo<WebAssemblyFunctionInfo>();
+  const DataLayout &DL = F.getDataLayout();
+  auto &TLI = *getTLI<WebAssemblyTargetLowering>();
+  LLVMContext &Ctx = MIRBuilder.getContext();
+  const CallingConv::ID CallConv = F.getCallingConv();
+
+  if (!callingConvSupported(CallConv)) {
+    fail(MIRBuilder, "WebAssembly doesn't support non-C calling conventions");
+    return false;
+  }
+
+  // Set up the live-in for the incoming ARGUMENTS.
+  MRI.addLiveIn(WebAssembly::ARGUMENTS);
+
+  SmallVector<ArgInfo, 8> SplitArgs;
+
+  // If the return was demoted (canLowerReturn returned false), an implicit
+  // sret pointer is prepended to the argument list.
+  if (!FLI.CanLowerReturn)
+    insertSRetIncomingArgument(F, SplitArgs, FLI.DemoteRegister, MRI, DL);
+
+  unsigned i = 0;
+  bool HasSwiftErrorArg = false;
+  bool HasSwiftSelfArg = false;
+  for (const auto &Arg : F.args()) {
+    ArgInfo OrigArg{VRegs[i], Arg.getType(), i};
+    setArgFlags(OrigArg, i + AttributeList::FirstArgIndex, DL, F);
+
+    HasSwiftSelfArg |= Arg.hasSwiftSelfAttr();
+    HasSwiftErrorArg |= Arg.hasSwiftErrorAttr();
+    if (Arg.hasInAllocaAttr()) {
+      fail(MIRBuilder, "WebAssembly hasn't implemented inalloca arguments");
+      return false;
+    }
+    if (Arg.hasNestAttr()) {
+      fail(MIRBuilder, "WebAssembly hasn't implemented nest arguments");
+      return false;
+    }
+    splitToValueTypes(OrigArg, SplitArgs, DL, CallConv);
+    ++i;
+  }
+
+  unsigned FinalArgIdx = 0;
+  for (auto &Arg : SplitArgs) {
+    EVT OrigVT = TLI.getValueType(DL, Arg.Ty);
+    MVT NewVT = TLI.getRegisterTypeForCallingConv(Ctx, CallConv, OrigVT);
+    LLT OrigLLT = getLLTForType(*Arg.Ty, DL);
+    LLT NewLLT = getLLTForMVT(NewVT);
+
+    // If we need to split the type over multiple regs, check it's a scenario
+    // we currently support.
+    unsigned NumParts =
+        TLI.getNumRegistersForCallingConv(Ctx, CallConv, OrigVT);
+
+    // Rebuild the per-part flags: first part marks the split, later parts
+    // drop the original alignment, and the last part marks the split end.
+    ISD::ArgFlagsTy OrigFlags = Arg.Flags[0];
+    Arg.Flags.clear();
+
+    for (unsigned Part = 0; Part < NumParts; ++Part) {
+      ISD::ArgFlagsTy Flags = OrigFlags;
+      if (Part == 0) {
+        Flags.setSplit();
+      } else {
+        Flags.setOrigAlign(Align(1));
+        if (Part == NumParts - 1)
+          Flags.setSplitEnd();
+      }
+
+      Arg.Flags.push_back(Flags);
+    }
+
+    Arg.OrigRegs.assign(Arg.Regs.begin(), Arg.Regs.end());
+    if (NumParts != 1 || OrigVT != NewVT) {
+      // If we can't directly assign the register, we need one or more
+      // intermediate values.
+      Arg.Regs.resize(NumParts);
+
+      // For each split register, create and assign a vreg that will store
+      // the incoming component of the larger value. These will later be
+      // merged to form the final vreg.
+      for (unsigned Part = 0; Part < NumParts; ++Part) {
+        Arg.Regs[Part] = MRI.createGenericVirtualRegister(NewLLT);
+      }
+      buildCopyFromRegs(MIRBuilder, Arg.OrigRegs, Arg.Regs, OrigLLT, NewLLT,
+                        Arg.Flags[0]);
+    }
+
+    // Emit one ARGUMENT pseudo per part and record its type as a function
+    // parameter.
+    for (unsigned Part = 0; Part < NumParts; ++Part) {
+      MIRBuilder.buildInstr(getWASMArgOpcode(NewVT))
+          .addDef(Arg.Regs[Part])
+          .addImm(FinalArgIdx);
+      MFI->addParam(NewVT);
+      ++FinalArgIdx;
+    }
+  }
+
+  // For swiftcc, emit additional swiftself and swifterror arguments
+  // if there aren't. These additional arguments are also added for callee
+  // signature They are necessary to match callee and caller signature for
+  // indirect call.
+  auto PtrVT = TLI.getPointerTy(DL);
+  if (CallConv == CallingConv::Swift) {
+    if (!HasSwiftSelfArg) {
+      MFI->addParam(PtrVT);
+    }
+    if (!HasSwiftErrorArg) {
+      MFI->addParam(PtrVT);
+    }
+  }
+
+  // Varargs are copied into a buffer allocated by the caller, and a pointer to
+  // the buffer is passed as an argument.
+  if (F.isVarArg()) {
+    Register VarargVreg = MRI.createGenericVirtualRegister(
+        getLLTForType(*PointerType::get(Ctx, 0), DL));
+    MFI->setVarargBufferVreg(VarargVreg);
+
+    MIRBuilder.buildInstr(getWASMArgOpcode(PtrVT))
+        .addDef(VarargVreg)
+        .addImm(FinalArgIdx);
+
+    MFI->addParam(PtrVT);
+    ++FinalArgIdx;
+  }
+
+  // Record the number and types of arguments and results.
+  SmallVector<MVT, 4> Params;
+  SmallVector<MVT, 4> Results;
+  computeSignatureVTs(MF.getFunction().getFunctionType(), &MF.getFunction(),
+                      MF.getFunction(), MF.getTarget(), Params, Results);
+  for (MVT VT : Results)
+    MFI->addResult(VT);
+
+  // TODO: Use signatures in WebAssemblyMachineFunctionInfo too and unify
+  // the param logic here with ComputeSignatureVTs
+  assert(MFI->getParams().size() == Params.size() &&
+         std::equal(MFI->getParams().begin(), MFI->getParams().end(),
+                    Params.begin()));
+  return true;
+}
+
+// TODO: Call lowering is not implemented yet. Returning false makes GlobalISel
+// abort / fall back to SelectionDAG for any function containing a call.
+bool WebAssemblyCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
+                                        CallLoweringInfo &Info) const {
+  return false;
+}
diff --git a/llvm/lib/Target/WebAssembly/GISel/WebAssemblyCallLowering.h b/llvm/lib/Target/WebAssembly/GISel/WebAssemblyCallLowering.h
new file mode 100644
index 0000000000000..d22f7cbd17eb3
--- /dev/null
+++ b/llvm/lib/Target/WebAssembly/GISel/WebAssemblyCallLowering.h
@@ -0,0 +1,43 @@
+//===-- WebAssemblyCallLowering.h - Call lowering for GlobalISel -*- C++ -*-==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file describes how to lower LLVM calls to machine code calls.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_WEBASSEMBLY_GISEL_WEBASSEMBLYCALLLOWERING_H
+#define LLVM_LIB_TARGET_WEBASSEMBLY_GISEL_WEBASSEMBLYCALLLOWERING_H
+
+#include "WebAssemblyISelLowering.h"
+#include "llvm/CodeGen/GlobalISel/CallLowering.h"
+#include "llvm/IR/CallingConv.h"
+
+namespace llvm {
+
+class WebAssemblyTargetLowering;
+
+/// GlobalISel call lowering for the WebAssembly target.
+class WebAssemblyCallLowering : public CallLowering {
+public:
+  WebAssemblyCallLowering(const WebAssemblyTargetLowering &TLI);
+
+  /// Return true if the return values described by \p Outs can be lowered
+  /// directly (i.e. without demoting the return to an sret pointer).
+  bool canLowerReturn(MachineFunction &MF, CallingConv::ID CallConv,
+                      SmallVectorImpl<BaseArgInfo> &Outs,
+                      bool IsVarArg) const override;
+  /// Lower an IR `ret` (value already translated into \p VRegs) into a
+  /// WebAssembly RETURN instruction.
+  bool lowerReturn(MachineIRBuilder &MIRBuilder, const Value *Val,
+                   ArrayRef<Register> VRegs, FunctionLoweringInfo &FLI,
+                   Register SwiftErrorVReg) const override;
+  /// Lower incoming formal arguments into ARGUMENT pseudo-instructions.
+  bool lowerFormalArguments(MachineIRBuilder &MIRBuilder, const Function &F,
+                            ArrayRef<ArrayRef<Register>> VRegs,
+                            FunctionLoweringInfo &FLI) const override;
+  /// Not implemented yet; always returns false (GlobalISel fallback).
+  bool lowerCall(MachineIRBuilder &MIRBuilder,
+                 CallLoweringInfo &Info) const override;
+};
+} // namespace llvm
+
+#endif
diff --git a/llvm/lib/Target/WebAssembly/GISel/WebAssemblyInstructionSelector.cpp b/llvm/lib/Target/WebAssembly/GISel/WebAssemblyInstructionSelector.cpp
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/llvm/lib/Target/WebAssembly/GISel/WebAssemblyInstructionSelector.h b/llvm/lib/Target/WebAssembly/GISel/WebAssemblyInstructionSelector.h
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/llvm/lib/Target/WebAssembly/GISel/WebAssemblyLegalizerInfo.cpp b/llvm/lib/Target/WebAssembly/GISel/WebAssemblyLegalizerInfo.cpp
new file mode 100644
index 0000000000000..3acdabb5612cc
--- /dev/null
+++ b/llvm/lib/Target/WebAssembly/GISel/WebAssemblyLegalizerInfo.cpp
@@ -0,0 +1,23 @@
+//===- WebAssemblyLegalizerInfo.cpp ------------------------------*- C++ -*-==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file implements the targeting of the MachineLegalizer class for
+/// WebAssembly.
+//===----------------------------------------------------------------------===//
+
+#include "WebAssemblyLegalizerInfo.h"
+
+#define DEBUG_TYPE "wasm-legalinfo"
+
+using namespace llvm;
+using namespace LegalizeActions;
+
+// No custom legalization rules are declared yet; computeTables() freezes the
+// (empty) legacy rule tables so legality queries are well-defined.
+WebAssemblyLegalizerInfo::WebAssemblyLegalizerInfo(
+    const WebAssemblySubtarget &ST) {
+  getLegacyLegalizerInfo().computeTables();
+}
diff --git a/llvm/lib/Target/WebAssembly/GISel/WebAssemblyLegalizerInfo.h b/llvm/lib/Target/WebAssembly/GISel/WebAssemblyLegalizerInfo.h
new file mode 100644
index 0000000000000..c02205fc7ae0d
--- /dev/null
+++ b/llvm/lib/Target/WebAssembly/GISel/WebAssemblyLegalizerInfo.h
@@ -0,0 +1,29 @@
+//===- WebAssemblyLegalizerInfo.h --------------------------------*- C++ -*-==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file declares the targeting of the MachineLegalizer class for
+/// WebAssembly.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_WEBASSEMBLY_GISEL_WEBASSEMBLYMACHINELEGALIZER_H
+#define LLVM_LIB_TARGET_WEBASSEMBLY_GISEL_WEBASSEMBLYMACHINELEGALIZER_H
+
+#include "llvm/CodeGen/GlobalISel/LegalizerInfo.h"
+
+namespace llvm {
+
+class WebAssemblySubtarget;
+
+/// This class provides the information for the WebAssembly target legalizer
+/// for GlobalISel.
+class WebAssemblyLegalizerInfo : public LegalizerInfo {
+public:
+  WebAssemblyLegalizerInfo(const WebAssemblySubtarget &ST);
+};
+} // namespace llvm
+#endif
diff --git a/llvm/lib/Target/WebAssembly/GISel/WebAssemblyRegisterBankInfo.cpp b/llvm/lib/Target/WebAssembly/GISel/WebAssemblyRegisterBankInfo.cpp
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/llvm/lib/Target/WebAssembly/GISel/WebAssemblyRegisterBankInfo.h b/llvm/lib/Target/WebAssembly/GISel/WebAssemblyRegisterBankInfo.h
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblySubtarget.cpp b/llvm/lib/Target/WebAssembly/WebAssemblySubtarget.cpp
index a3ce40f0297ec..3ea8b9f85819f 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblySubtarget.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblySubtarget.cpp
@@ -13,8 +13,12 @@
//===----------------------------------------------------------------------===//
#include "WebAssemblySubtarget.h"
+#include "GISel/WebAssemblyCallLowering.h"
+#include "GISel/WebAssemblyLegalizerInfo.h"
+#include "GISel/WebAssemblyRegisterBankInfo.h"
#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
#include "WebAssemblyInstrInfo.h"
+#include "WebAssemblyTargetMachine.h"
#include "llvm/MC/TargetRegistry.h"
using namespace llvm;
@@ -66,7 +70,15 @@ WebAssemblySubtarget::WebAssemblySubtarget(const Triple &TT,
const TargetMachine &TM)
: WebAssemblyGenSubtargetInfo(TT, CPU, /*TuneCPU*/ CPU, FS),
TargetTriple(TT), InstrInfo(initializeSubtargetDependencies(CPU, FS)),
- TLInfo(TM, *this) {}
+ TLInfo(TM, *this) {
+ CallLoweringInfo.reset(new WebAssemblyCallLowering(*getTargetLowering()));
+ Legalizer.reset(new WebAssemblyLegalizerInfo(*this));
+ /*auto *RBI = new WebAssemblyRegisterBankInfo(*getRegisterInfo());
+ RegBankInfo.reset(RBI);
+
+ InstSelector.reset(createWebAssemblyInstructionSelector(
+ *static_cast<const WebAssemblyTargetMachine *>(&TM), *this, *RBI));*/
+}
bool WebAssemblySubtarget::enableAtomicExpand() const {
// If atomics are disabled, atomic ops are lowered instead of expanded
@@ -81,3 +93,19 @@ bool WebAssemblySubtarget::enableMachineScheduler() const {
}
bool WebAssemblySubtarget::useAA() const { return true; }
+
+const CallLowering *WebAssemblySubtarget::getCallLowering() const {
+ return CallLoweringInfo.get();
+}
+
+InstructionSelector *WebAssemblySubtarget::getInstructionSelector() const {
+ return InstSelector.get();
+}
+
+const LegalizerInfo *WebAssemblySubtarget::getLegalizerInfo() const {
+ return Legalizer.get();
+}
+
+const RegisterBankInfo *WebAssemblySubtarget::getRegBankInfo() const {
+ return RegBankInfo.get();
+}
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblySubtarget.h b/llvm/lib/Target/WebAssembly/WebAssemblySubtarget.h
index 2f88bbba05d00..c195f995009b1 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblySubtarget.h
+++ b/llvm/lib/Target/WebAssembly/WebAssemblySubtarget.h
@@ -20,6 +20,10 @@
#include "WebAssemblyISelLowering.h"
#include "WebAssemblyInstrInfo.h"
#include "WebAssemblySelectionDAGInfo.h"
+#include "llvm/CodeGen/GlobalISel/CallLowering.h"
+#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
+#include "llvm/CodeGen/GlobalISel/LegalizerInfo.h"
+#include "llvm/CodeGen/RegisterBankInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include <string>
@@ -64,6 +68,11 @@ class WebAssemblySubtarget final : public WebAssemblyGenSubtargetInfo {
WebAssemblySelectionDAGInfo TSInfo;
WebAssemblyTargetLowering TLInfo;
+ std::unique_ptr<CallLowering> CallLoweringInfo;
+ std::unique_ptr<InstructionSelector> InstSelector;
+ std::unique_ptr<LegalizerInfo> Legalizer;
+ std::unique_ptr<RegisterBankInfo> RegBankInfo;
+
WebAssemblySubtarget &initializeSubtargetDependencies(StringRef CPU,
StringRef FS);
@@ -118,6 +127,11 @@ class WebAssemblySubtarget final : public WebAssemblyGenSubtargetInfo {
/// Parses features string setting specified subtarget options. Definition of
/// function is auto generated by tblgen.
void ParseSubtargetFeatures(StringRef CPU, StringRef TuneCPU, StringRef FS);
+
+ const CallLowering *getCallLowering() const override;
+ InstructionSelector *getInstructionSelector() const override;
+ const LegalizerInfo *getLegalizerInfo() const override;
+ const RegisterBankInfo *getRegBankInfo() const override;
};
} // end namespace llvm
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyTargetMachine.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyTargetMachine.cpp
index 6827ee6527947..84d4315ca9fc0 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyTargetMachine.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyTargetMachine.cpp
@@ -20,6 +20,10 @@
#include "WebAssemblyTargetObjectFile.h"
#include "WebAssemblyTargetTransformInfo.h"
#include "WebAssemblyUtilities.h"
+#include "llvm/CodeGen/GlobalISel/IRTranslator.h"
+#include "llvm/CodeGen/GlobalISel/InstructionSelect.h"
+#include "llvm/CodeGen/GlobalISel/Legalizer.h"
+#include "llvm/CodeGen/GlobalISel/RegBankSelect.h"
#include "llvm/CodeGen/MIRParser/MIParser.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/RegAllocRegistry.h"
@@ -92,6 +96,7 @@ LLVMInitializeWebAssemblyTarget() {
// Register backend passes
auto &PR = *PassRegistry::getPassRegistry();
+ initializeGlobalISel(PR);
initializeWebAssemblyAddMissingPrototypesPass(PR);
initializeWebAssemblyLowerEmscriptenEHSjLjPass(PR);
initializeLowerGlobalDtorsLegacyPassPass(PR);
@@ -455,6 +460,11 @@ class WebAssemblyPassConfig final : public TargetPassConfig {
// No reg alloc
bool addRegAssignAndRewriteOptimized() override { return false; }
+
+ bool addIRTranslator() override;
+ bool addLegalizeMachineIR() override;
+ bool addRegBankSelect() override;
+ bool addGlobalInstructionSelect() override;
};
} // end anonymous namespace
@@ -675,6 +685,26 @@ bool WebAssemblyPassConfig::addPreISel() {
return false;
}
+bool WebAssemblyPassConfig::addIRTranslator() {
+ addPass(new IRTranslator());
+ return false;
+}
+
+bool WebAssemblyPassConfig::addLegalizeMachineIR() {
+ addPass(new Legalizer());
+ return false;
+}
+
+bool WebAssemblyPassConfig::addRegBankSelect() {
+ addPass(new RegBankSelect());
+ return false;
+}
+
+bool WebAssemblyPassConfig::addGlobalInstructionSelect() {
+ addPass(new InstructionSelect(getOptLevel()));
+ return false;
+}
+
yaml::MachineFunctionInfo *
WebAssemblyTargetMachine::createDefaultFuncInfoYAML() const {
return new yaml::WebAssemblyFunctionInfo();
More information about the llvm-commits
mailing list