diff options
author | Dimitry Andric <dim@FreeBSD.org> | 2019-08-20 20:50:12 +0000 |
---|---|---|
committer | Dimitry Andric <dim@FreeBSD.org> | 2019-08-20 20:50:12 +0000 |
commit | e6d1592492a3a379186bfb02bd0f4eda0669c0d5 (patch) | |
tree | 599ab169a01f1c86eda9adc774edaedde2f2db5b /lib/Target/ARM/ARMCallLowering.cpp | |
parent | 1a56a5ead7a2e84bee8240f5f6b033b5f1707154 (diff) | |
download | src-e6d1592492a3a379186bfb02bd0f4eda0669c0d5.tar.gz src-e6d1592492a3a379186bfb02bd0f4eda0669c0d5.zip |
Vendor import of stripped llvm trunk r366426 (just before the release_90vendor/llvm/llvm-trunk-r366426
branch point):
https://llvm.org/svn/llvm-project/llvm/trunk@366426
Notes:
svn path=/vendor/llvm/dist/; revision=351278
svn path=/vendor/llvm/llvm-trunk-r366426/; revision=351279; tag=vendor/llvm/llvm-trunk-r366426
Diffstat (limited to 'lib/Target/ARM/ARMCallLowering.cpp')
-rw-r--r-- | lib/Target/ARM/ARMCallLowering.cpp | 176 |
1 files changed, 70 insertions, 106 deletions
diff --git a/lib/Target/ARM/ARMCallLowering.cpp b/lib/Target/ARM/ARMCallLowering.cpp index 8e80c32bcf89..0cbe6e1871e4 100644 --- a/lib/Target/ARM/ARMCallLowering.cpp +++ b/lib/Target/ARM/ARMCallLowering.cpp @@ -1,9 +1,8 @@ //===- llvm/lib/Target/ARM/ARMCallLowering.cpp - Call lowering ------------===// // -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // @@ -56,7 +55,7 @@ ARMCallLowering::ARMCallLowering(const ARMTargetLowering &TLI) static bool isSupportedType(const DataLayout &DL, const ARMTargetLowering &TLI, Type *T) { if (T->isArrayTy()) - return true; + return isSupportedType(DL, TLI, T->getArrayElementType()); if (T->isStructTy()) { // For now we only allow homogeneous structs that we can manipulate with @@ -65,7 +64,7 @@ static bool isSupportedType(const DataLayout &DL, const ARMTargetLowering &TLI, for (unsigned i = 1, e = StructT->getNumElements(); i != e; ++i) if (StructT->getElementType(i) != StructT->getElementType(0)) return false; - return true; + return isSupportedType(DL, TLI, StructT->getElementType(0)); } EVT VT = TLI.getValueType(DL, T, true); @@ -91,27 +90,27 @@ struct OutgoingValueHandler : public CallLowering::ValueHandler { MachineInstrBuilder &MIB, CCAssignFn *AssignFn) : ValueHandler(MIRBuilder, MRI, AssignFn), MIB(MIB) {} - unsigned getStackAddress(uint64_t Size, int64_t Offset, + Register getStackAddress(uint64_t Size, int64_t Offset, MachinePointerInfo &MPO) override { assert((Size == 1 || Size == 2 || Size == 4 || Size == 8) && "Unsupported size"); LLT p0 = LLT::pointer(0, 32); LLT s32 = LLT::scalar(32); - unsigned SPReg = 
MRI.createGenericVirtualRegister(p0); - MIRBuilder.buildCopy(SPReg, ARM::SP); + Register SPReg = MRI.createGenericVirtualRegister(p0); + MIRBuilder.buildCopy(SPReg, Register(ARM::SP)); - unsigned OffsetReg = MRI.createGenericVirtualRegister(s32); + Register OffsetReg = MRI.createGenericVirtualRegister(s32); MIRBuilder.buildConstant(OffsetReg, Offset); - unsigned AddrReg = MRI.createGenericVirtualRegister(p0); + Register AddrReg = MRI.createGenericVirtualRegister(p0); MIRBuilder.buildGEP(AddrReg, SPReg, OffsetReg); MPO = MachinePointerInfo::getStack(MIRBuilder.getMF(), Offset); return AddrReg; } - void assignValueToReg(unsigned ValVReg, unsigned PhysReg, + void assignValueToReg(Register ValVReg, Register PhysReg, CCValAssign &VA) override { assert(VA.isRegLoc() && "Value shouldn't be assigned to reg"); assert(VA.getLocReg() == PhysReg && "Assigning to the wrong reg?"); @@ -119,25 +118,27 @@ struct OutgoingValueHandler : public CallLowering::ValueHandler { assert(VA.getValVT().getSizeInBits() <= 64 && "Unsupported value size"); assert(VA.getLocVT().getSizeInBits() <= 64 && "Unsupported location size"); - unsigned ExtReg = extendRegister(ValVReg, VA); + Register ExtReg = extendRegister(ValVReg, VA); MIRBuilder.buildCopy(PhysReg, ExtReg); MIB.addUse(PhysReg, RegState::Implicit); } - void assignValueToAddress(unsigned ValVReg, unsigned Addr, uint64_t Size, + void assignValueToAddress(Register ValVReg, Register Addr, uint64_t Size, MachinePointerInfo &MPO, CCValAssign &VA) override { assert((Size == 1 || Size == 2 || Size == 4 || Size == 8) && "Unsupported size"); - unsigned ExtReg = extendRegister(ValVReg, VA); + Register ExtReg = extendRegister(ValVReg, VA); auto MMO = MIRBuilder.getMF().getMachineMemOperand( MPO, MachineMemOperand::MOStore, VA.getLocVT().getStoreSize(), - /* Alignment */ 0); + /* Alignment */ 1); MIRBuilder.buildStore(ExtReg, Addr, *MMO); } unsigned assignCustomValue(const CallLowering::ArgInfo &Arg, ArrayRef<CCValAssign> VAs) override { + 
assert(Arg.Regs.size() == 1 && "Can't handle multple regs yet"); + CCValAssign VA = VAs[0]; assert(VA.needsCustom() && "Value doesn't need custom handling"); assert(VA.getValVT() == MVT::f64 && "Unsupported type"); @@ -152,9 +153,9 @@ struct OutgoingValueHandler : public CallLowering::ValueHandler { assert(VA.isRegLoc() && "Value should be in reg"); assert(NextVA.isRegLoc() && "Value should be in reg"); - unsigned NewRegs[] = {MRI.createGenericVirtualRegister(LLT::scalar(32)), + Register NewRegs[] = {MRI.createGenericVirtualRegister(LLT::scalar(32)), MRI.createGenericVirtualRegister(LLT::scalar(32))}; - MIRBuilder.buildUnmerge(NewRegs, Arg.Reg); + MIRBuilder.buildUnmerge(NewRegs, Arg.Regs[0]); bool IsLittle = MIRBuilder.getMF().getSubtarget<ARMSubtarget>().isLittle(); if (!IsLittle) @@ -183,18 +184,17 @@ struct OutgoingValueHandler : public CallLowering::ValueHandler { } // end anonymous namespace -void ARMCallLowering::splitToValueTypes( - const ArgInfo &OrigArg, SmallVectorImpl<ArgInfo> &SplitArgs, - MachineFunction &MF, const SplitArgTy &PerformArgSplit) const { +void ARMCallLowering::splitToValueTypes(const ArgInfo &OrigArg, + SmallVectorImpl<ArgInfo> &SplitArgs, + MachineFunction &MF) const { const ARMTargetLowering &TLI = *getTLI<ARMTargetLowering>(); LLVMContext &Ctx = OrigArg.Ty->getContext(); const DataLayout &DL = MF.getDataLayout(); - MachineRegisterInfo &MRI = MF.getRegInfo(); const Function &F = MF.getFunction(); SmallVector<EVT, 4> SplitVTs; - SmallVector<uint64_t, 4> Offsets; - ComputeValueVTs(TLI, DL, OrigArg.Ty, SplitVTs, &Offsets, 0); + ComputeValueVTs(TLI, DL, OrigArg.Ty, SplitVTs, nullptr, nullptr, 0); + assert(OrigArg.Regs.size() == SplitVTs.size() && "Regs / types mismatch"); if (SplitVTs.size() == 1) { // Even if there is no splitting to do, we still want to replace the @@ -202,12 +202,12 @@ void ARMCallLowering::splitToValueTypes( auto Flags = OrigArg.Flags; unsigned OriginalAlignment = DL.getABITypeAlignment(OrigArg.Ty); 
Flags.setOrigAlign(OriginalAlignment); - SplitArgs.emplace_back(OrigArg.Reg, SplitVTs[0].getTypeForEVT(Ctx), Flags, - OrigArg.IsFixed); + SplitArgs.emplace_back(OrigArg.Regs[0], SplitVTs[0].getTypeForEVT(Ctx), + Flags, OrigArg.IsFixed); return; } - unsigned FirstRegIdx = SplitArgs.size(); + // Create one ArgInfo for each virtual register. for (unsigned i = 0, e = SplitVTs.size(); i != e; ++i) { EVT SplitVT = SplitVTs[i]; Type *SplitTy = SplitVT.getTypeForEVT(Ctx); @@ -225,19 +225,16 @@ void ARMCallLowering::splitToValueTypes( Flags.setInConsecutiveRegsLast(); } - SplitArgs.push_back( - ArgInfo{MRI.createGenericVirtualRegister(getLLTForType(*SplitTy, DL)), - SplitTy, Flags, OrigArg.IsFixed}); + // FIXME: We also want to split SplitTy further. + Register PartReg = OrigArg.Regs[i]; + SplitArgs.emplace_back(PartReg, SplitTy, Flags, OrigArg.IsFixed); } - - for (unsigned i = 0; i < Offsets.size(); ++i) - PerformArgSplit(SplitArgs[FirstRegIdx + i].Reg, Offsets[i] * 8); } /// Lower the return value for the already existing \p Ret. This assumes that /// \p MIRBuilder's insertion point is correct. bool ARMCallLowering::lowerReturnVal(MachineIRBuilder &MIRBuilder, - const Value *Val, ArrayRef<unsigned> VRegs, + const Value *Val, ArrayRef<Register> VRegs, MachineInstrBuilder &Ret) const { if (!Val) // Nothing to do here. 
@@ -251,35 +248,22 @@ bool ARMCallLowering::lowerReturnVal(MachineIRBuilder &MIRBuilder, if (!isSupportedType(DL, TLI, Val->getType())) return false; - SmallVector<EVT, 4> SplitEVTs; - ComputeValueVTs(TLI, DL, Val->getType(), SplitEVTs); - assert(VRegs.size() == SplitEVTs.size() && - "For each split Type there should be exactly one VReg."); - - SmallVector<ArgInfo, 4> SplitVTs; - LLVMContext &Ctx = Val->getType()->getContext(); - for (unsigned i = 0; i < SplitEVTs.size(); ++i) { - ArgInfo CurArgInfo(VRegs[i], SplitEVTs[i].getTypeForEVT(Ctx)); - setArgFlags(CurArgInfo, AttributeList::ReturnIndex, DL, F); - - SmallVector<unsigned, 4> Regs; - splitToValueTypes( - CurArgInfo, SplitVTs, MF, - [&](unsigned Reg, uint64_t Offset) { Regs.push_back(Reg); }); - if (Regs.size() > 1) - MIRBuilder.buildUnmerge(Regs, VRegs[i]); - } + ArgInfo OrigRetInfo(VRegs, Val->getType()); + setArgFlags(OrigRetInfo, AttributeList::ReturnIndex, DL, F); + + SmallVector<ArgInfo, 4> SplitRetInfos; + splitToValueTypes(OrigRetInfo, SplitRetInfos, MF); CCAssignFn *AssignFn = TLI.CCAssignFnForReturn(F.getCallingConv(), F.isVarArg()); OutgoingValueHandler RetHandler(MIRBuilder, MF.getRegInfo(), Ret, AssignFn); - return handleAssignments(MIRBuilder, SplitVTs, RetHandler); + return handleAssignments(MIRBuilder, SplitRetInfos, RetHandler); } bool ARMCallLowering::lowerReturn(MachineIRBuilder &MIRBuilder, const Value *Val, - ArrayRef<unsigned> VRegs) const { + ArrayRef<Register> VRegs) const { assert(!Val == VRegs.empty() && "Return value without a vreg"); auto const &ST = MIRBuilder.getMF().getSubtarget<ARMSubtarget>(); @@ -302,7 +286,9 @@ struct IncomingValueHandler : public CallLowering::ValueHandler { CCAssignFn AssignFn) : ValueHandler(MIRBuilder, MRI, AssignFn) {} - unsigned getStackAddress(uint64_t Size, int64_t Offset, + bool isArgumentHandler() const override { return true; } + + Register getStackAddress(uint64_t Size, int64_t Offset, MachinePointerInfo &MPO) override { assert((Size == 1 || Size 
== 2 || Size == 4 || Size == 8) && "Unsupported size"); @@ -319,7 +305,7 @@ struct IncomingValueHandler : public CallLowering::ValueHandler { return AddrReg; } - void assignValueToAddress(unsigned ValVReg, unsigned Addr, uint64_t Size, + void assignValueToAddress(Register ValVReg, Register Addr, uint64_t Size, MachinePointerInfo &MPO, CCValAssign &VA) override { assert((Size == 1 || Size == 2 || Size == 4 || Size == 8) && "Unsupported size"); @@ -332,22 +318,22 @@ struct IncomingValueHandler : public CallLowering::ValueHandler { assert(MRI.getType(ValVReg).isScalar() && "Only scalars supported atm"); auto LoadVReg = MRI.createGenericVirtualRegister(LLT::scalar(32)); - buildLoad(LoadVReg, Addr, Size, /* Alignment */ 0, MPO); + buildLoad(LoadVReg, Addr, Size, /* Alignment */ 1, MPO); MIRBuilder.buildTrunc(ValVReg, LoadVReg); } else { // If the value is not extended, a simple load will suffice. - buildLoad(ValVReg, Addr, Size, /* Alignment */ 0, MPO); + buildLoad(ValVReg, Addr, Size, /* Alignment */ 1, MPO); } } - void buildLoad(unsigned Val, unsigned Addr, uint64_t Size, unsigned Alignment, + void buildLoad(Register Val, Register Addr, uint64_t Size, unsigned Alignment, MachinePointerInfo &MPO) { auto MMO = MIRBuilder.getMF().getMachineMemOperand( MPO, MachineMemOperand::MOLoad, Size, Alignment); MIRBuilder.buildLoad(Val, Addr, *MMO); } - void assignValueToReg(unsigned ValVReg, unsigned PhysReg, + void assignValueToReg(Register ValVReg, Register PhysReg, CCValAssign &VA) override { assert(VA.isRegLoc() && "Value shouldn't be assigned to reg"); assert(VA.getLocReg() == PhysReg && "Assigning to the wrong reg?"); @@ -376,6 +362,8 @@ struct IncomingValueHandler : public CallLowering::ValueHandler { unsigned assignCustomValue(const ARMCallLowering::ArgInfo &Arg, ArrayRef<CCValAssign> VAs) override { + assert(Arg.Regs.size() == 1 && "Can't handle multple regs yet"); + CCValAssign VA = VAs[0]; assert(VA.needsCustom() && "Value doesn't need custom handling"); 
assert(VA.getValVT() == MVT::f64 && "Unsupported type"); @@ -390,7 +378,7 @@ struct IncomingValueHandler : public CallLowering::ValueHandler { assert(VA.isRegLoc() && "Value should be in reg"); assert(NextVA.isRegLoc() && "Value should be in reg"); - unsigned NewRegs[] = {MRI.createGenericVirtualRegister(LLT::scalar(32)), + Register NewRegs[] = {MRI.createGenericVirtualRegister(LLT::scalar(32)), MRI.createGenericVirtualRegister(LLT::scalar(32))}; assignValueToReg(NewRegs[0], VA.getLocReg(), VA); @@ -400,7 +388,7 @@ struct IncomingValueHandler : public CallLowering::ValueHandler { if (!IsLittle) std::swap(NewRegs[0], NewRegs[1]); - MIRBuilder.buildMerge(Arg.Reg, NewRegs); + MIRBuilder.buildMerge(Arg.Regs[0], NewRegs); return 1; } @@ -423,9 +411,9 @@ struct FormalArgHandler : public IncomingValueHandler { } // end anonymous namespace -bool ARMCallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder, - const Function &F, - ArrayRef<unsigned> VRegs) const { +bool ARMCallLowering::lowerFormalArguments( + MachineIRBuilder &MIRBuilder, const Function &F, + ArrayRef<ArrayRef<Register>> VRegs) const { auto &TLI = *getTLI<ARMTargetLowering>(); auto Subtarget = TLI.getSubtarget(); @@ -456,21 +444,13 @@ bool ARMCallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder, FormalArgHandler ArgHandler(MIRBuilder, MIRBuilder.getMF().getRegInfo(), AssignFn); - SmallVector<ArgInfo, 8> ArgInfos; - SmallVector<unsigned, 4> SplitRegs; + SmallVector<ArgInfo, 8> SplitArgInfos; unsigned Idx = 0; for (auto &Arg : F.args()) { - ArgInfo AInfo(VRegs[Idx], Arg.getType()); - setArgFlags(AInfo, Idx + AttributeList::FirstArgIndex, DL, F); - - SplitRegs.clear(); - - splitToValueTypes(AInfo, ArgInfos, MF, [&](unsigned Reg, uint64_t Offset) { - SplitRegs.push_back(Reg); - }); + ArgInfo OrigArgInfo(VRegs[Idx], Arg.getType()); - if (!SplitRegs.empty()) - MIRBuilder.buildMerge(VRegs[Idx], SplitRegs); + setArgFlags(OrigArgInfo, Idx + AttributeList::FirstArgIndex, DL, F); + 
splitToValueTypes(OrigArgInfo, SplitArgInfos, MF); Idx++; } @@ -478,7 +458,7 @@ bool ARMCallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder, if (!MBB.empty()) MIRBuilder.setInstr(*MBB.begin()); - if (!handleAssignments(MIRBuilder, ArgInfos, ArgHandler)) + if (!handleAssignments(MIRBuilder, SplitArgInfos, ArgHandler)) return false; // Move back to the end of the basic block. @@ -540,19 +520,19 @@ bool ARMCallLowering::lowerCall(MachineIRBuilder &MIRBuilder, // Create the call instruction so we can add the implicit uses of arg // registers, but don't insert it yet. - bool isDirect = !Callee.isReg(); - auto CallOpcode = getCallOpcode(STI, isDirect); + bool IsDirect = !Callee.isReg(); + auto CallOpcode = getCallOpcode(STI, IsDirect); auto MIB = MIRBuilder.buildInstrNoInsert(CallOpcode); - bool isThumb = STI.isThumb(); - if (isThumb) + bool IsThumb = STI.isThumb(); + if (IsThumb) MIB.add(predOps(ARMCC::AL)); MIB.add(Callee); - if (!isDirect) { + if (!IsDirect) { auto CalleeReg = Callee.getReg(); if (CalleeReg && !TRI->isPhysicalRegister(CalleeReg)) { - unsigned CalleeIdx = isThumb ? 2 : 0; + unsigned CalleeIdx = IsThumb ? 
2 : 0; MIB->getOperand(CalleeIdx).setReg(constrainOperandRegClass( MF, *TRI, MRI, *STI.getInstrInfo(), *STI.getRegBankInfo(), *MIB.getInstr(), MIB->getDesc(), Callee, CalleeIdx)); @@ -561,27 +541,22 @@ bool ARMCallLowering::lowerCall(MachineIRBuilder &MIRBuilder, MIB.addRegMask(TRI->getCallPreservedMask(MF, CallConv)); + bool IsVarArg = false; SmallVector<ArgInfo, 8> ArgInfos; for (auto Arg : OrigArgs) { if (!isSupportedType(DL, TLI, Arg.Ty)) return false; if (!Arg.IsFixed) - return false; + IsVarArg = true; if (Arg.Flags.isByVal()) return false; - SmallVector<unsigned, 8> Regs; - splitToValueTypes(Arg, ArgInfos, MF, [&](unsigned Reg, uint64_t Offset) { - Regs.push_back(Reg); - }); - - if (Regs.size() > 1) - MIRBuilder.buildUnmerge(Regs, Arg.Reg); + splitToValueTypes(Arg, ArgInfos, MF); } - auto ArgAssignFn = TLI.CCAssignFnForCall(CallConv, /*IsVarArg=*/false); + auto ArgAssignFn = TLI.CCAssignFnForCall(CallConv, IsVarArg); OutgoingValueHandler ArgHandler(MIRBuilder, MRI, MIB, ArgAssignFn); if (!handleAssignments(MIRBuilder, ArgInfos, ArgHandler)) return false; @@ -594,22 +569,11 @@ bool ARMCallLowering::lowerCall(MachineIRBuilder &MIRBuilder, return false; ArgInfos.clear(); - SmallVector<unsigned, 8> SplitRegs; - splitToValueTypes(OrigRet, ArgInfos, MF, - [&](unsigned Reg, uint64_t Offset) { - SplitRegs.push_back(Reg); - }); - - auto RetAssignFn = TLI.CCAssignFnForReturn(CallConv, /*IsVarArg=*/false); + splitToValueTypes(OrigRet, ArgInfos, MF); + auto RetAssignFn = TLI.CCAssignFnForReturn(CallConv, IsVarArg); CallReturnHandler RetHandler(MIRBuilder, MRI, MIB, RetAssignFn); if (!handleAssignments(MIRBuilder, ArgInfos, RetHandler)) return false; - - if (!SplitRegs.empty()) { - // We have split the value and allocated each individual piece, now build - // it up again. - MIRBuilder.buildMerge(OrigRet.Reg, SplitRegs); - } } // We now know the size of the stack - update the ADJCALLSTACKDOWN |