Diffstat (limited to 'contrib/llvm/lib/Target/X86/X86FrameLowering.cpp')
-rw-r--r--  contrib/llvm/lib/Target/X86/X86FrameLowering.cpp | 529
1 file changed, 401 insertions, 128 deletions
diff --git a/contrib/llvm/lib/Target/X86/X86FrameLowering.cpp b/contrib/llvm/lib/Target/X86/X86FrameLowering.cpp
index f5ffe0cf7e88..03d925692adf 100644
--- a/contrib/llvm/lib/Target/X86/X86FrameLowering.cpp
+++ b/contrib/llvm/lib/Target/X86/X86FrameLowering.cpp
@@ -159,6 +159,8 @@ static unsigned findDeadCallerSavedReg(MachineBasicBlock &MBB,
unsigned Opc = MBBI->getOpcode();
switch (Opc) {
default: return 0;
+ case TargetOpcode::PATCHABLE_RET:
+ case X86::RET:
case X86::RETL:
case X86::RETQ:
case X86::RETIL:
@@ -314,8 +316,8 @@ void X86FrameLowering::emitSPUpdate(MachineBasicBlock &MBB,
}
MachineInstrBuilder X86FrameLowering::BuildStackAdjustment(
- MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, DebugLoc DL,
- int64_t Offset, bool InEpilogue) const {
+ MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
+ const DebugLoc &DL, int64_t Offset, bool InEpilogue) const {
assert(Offset != 0 && "zero offset stack adjustment requested");
// On Atom, using LEA to adjust SP is preferred, but using it in the epilogue
@@ -374,16 +376,33 @@ int X86FrameLowering::mergeSPUpdates(MachineBasicBlock &MBB,
unsigned Opc = PI->getOpcode();
int Offset = 0;
+ if (!doMergeWithPrevious && NI != MBB.end() &&
+ NI->getOpcode() == TargetOpcode::CFI_INSTRUCTION) {
+ // Don't merge with the next instruction if it is a CFI instruction.
+ return Offset;
+ }
+
if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
- Opc == X86::ADD32ri || Opc == X86::ADD32ri8 ||
- Opc == X86::LEA32r || Opc == X86::LEA64_32r) &&
+ Opc == X86::ADD32ri || Opc == X86::ADD32ri8) &&
PI->getOperand(0).getReg() == StackPtr){
+ assert(PI->getOperand(1).getReg() == StackPtr);
Offset += PI->getOperand(2).getImm();
MBB.erase(PI);
if (!doMergeWithPrevious) MBBI = NI;
+ } else if ((Opc == X86::LEA32r || Opc == X86::LEA64_32r) &&
+ PI->getOperand(0).getReg() == StackPtr &&
+ PI->getOperand(1).getReg() == StackPtr &&
+ PI->getOperand(2).getImm() == 1 &&
+ PI->getOperand(3).getReg() == X86::NoRegister &&
+ PI->getOperand(5).getReg() == X86::NoRegister) {
+ // LEA operands: def = lea Base, Scale, Index, Disp, Segment. We only fold
+ // the plain SP + Offset form (base == SP, scale 1, no index, no segment).
+ Offset += PI->getOperand(4).getImm();
+ MBB.erase(PI);
+ if (!doMergeWithPrevious) MBBI = NI;
} else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
PI->getOperand(0).getReg() == StackPtr) {
+ assert(PI->getOperand(1).getReg() == StackPtr);
Offset -= PI->getOperand(2).getImm();
MBB.erase(PI);
if (!doMergeWithPrevious) MBBI = NI;
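
In assembly terms, the updates this function can now fold are addq $N,%rsp, subq $N,%rsp, and the scale-1/no-index/no-segment form leaq N(%rsp),%rsp. A toy sketch of that classification (ToyInst is invented for illustration; the real code inspects MachineInstr operands as above):

    // Toy classification of mergeable SP updates; not LLVM API.
    #include <cstdint>
    #include <optional>

    struct ToyInst {
      enum Kind { AddSPImm, SubSPImm, LeaSPDisp, Other } K;
      int64_t Imm; // immediate (ADD/SUB) or displacement (LEA)
    };

    // Returns the SP delta the instruction applies, or nullopt if it is
    // not a foldable SP update.
    std::optional<int64_t> spDelta(const ToyInst &I) {
      switch (I.K) {
      case ToyInst::AddSPImm:  return I.Imm;   // addq $N, %rsp
      case ToyInst::SubSPImm:  return -I.Imm;  // subq $N, %rsp
      case ToyInst::LeaSPDisp: return I.Imm;   // leaq N(%rsp), %rsp
      default:                 return std::nullopt;
      }
    }
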
@@ -393,18 +412,18 @@ int X86FrameLowering::mergeSPUpdates(MachineBasicBlock &MBB,
}
void X86FrameLowering::BuildCFI(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI, DebugLoc DL,
- MCCFIInstruction CFIInst) const {
+ MachineBasicBlock::iterator MBBI,
+ const DebugLoc &DL,
+ const MCCFIInstruction &CFIInst) const {
MachineFunction &MF = *MBB.getParent();
unsigned CFIIndex = MF.getMMI().addFrameInst(CFIInst);
BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
.addCFIIndex(CFIIndex);
}
-void
-X86FrameLowering::emitCalleeSavedFrameMoves(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI,
- DebugLoc DL) const {
+void X86FrameLowering::emitCalleeSavedFrameMoves(
+ MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
+ const DebugLoc &DL) const {
MachineFunction &MF = *MBB.getParent();
MachineFrameInfo *MFI = MF.getFrameInfo();
MachineModuleInfo &MMI = MF.getMMI();
@@ -429,7 +448,7 @@ X86FrameLowering::emitCalleeSavedFrameMoves(MachineBasicBlock &MBB,
MachineInstr *X86FrameLowering::emitStackProbe(MachineFunction &MF,
MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
- DebugLoc DL,
+ const DebugLoc &DL,
bool InProlog) const {
const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
if (STI.isTargetWindowsCoreCLR()) {
@@ -457,6 +476,8 @@ void X86FrameLowering::inlineStackProbe(MachineFunction &MF,
}
if (ChkStkStub != nullptr) {
+ assert(!ChkStkStub->isBundled() &&
+ "Not expecting bundled instructions here");
MachineBasicBlock::iterator MBBI = std::next(ChkStkStub->getIterator());
assert(std::prev(MBBI).operator==(ChkStkStub) &&
"MBBI expected after __chkstk_stub.");
@@ -467,8 +488,8 @@ void X86FrameLowering::inlineStackProbe(MachineFunction &MF,
}
MachineInstr *X86FrameLowering::emitStackProbeInline(
- MachineFunction &MF, MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI, DebugLoc DL, bool InProlog) const {
+ MachineFunction &MF, MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI, const DebugLoc &DL, bool InProlog) const {
const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
assert(STI.is64Bit() && "different expansion needed for 32 bit");
assert(STI.isTargetWindowsCoreCLR() && "custom expansion expects CoreCLR");
@@ -679,12 +700,12 @@ MachineInstr *X86FrameLowering::emitStackProbeInline(
// Possible TODO: physreg liveness for InProlog case.
- return ContinueMBBI;
+ return &*ContinueMBBI;
}
MachineInstr *X86FrameLowering::emitStackProbeCall(
MachineFunction &MF, MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI, DebugLoc DL, bool InProlog) const {
+ MachineBasicBlock::iterator MBBI, const DebugLoc &DL, bool InProlog) const {
bool IsLargeCodeModel = MF.getTarget().getCodeModel() == CodeModel::Large;
unsigned CallOp;
@@ -743,19 +764,19 @@ MachineInstr *X86FrameLowering::emitStackProbeCall(
ExpansionMBBI->setFlag(MachineInstr::FrameSetup);
}
- return MBBI;
+ return &*MBBI;
}
MachineInstr *X86FrameLowering::emitStackProbeInlineStub(
MachineFunction &MF, MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI, DebugLoc DL, bool InProlog) const {
+ MachineBasicBlock::iterator MBBI, const DebugLoc &DL, bool InProlog) const {
assert(InProlog && "ChkStkStub called outside prolog!");
BuildMI(MBB, MBBI, DL, TII.get(X86::CALLpcrel32))
.addExternalSymbol("__chkstk_stub");
- return MBBI;
+ return &*MBBI;
}
static unsigned calculateSetFPREG(uint64_t SPAdjust) {
@@ -786,7 +807,7 @@ uint64_t X86FrameLowering::calculateMaxStackAlign(const MachineFunction &MF) con
void X86FrameLowering::BuildStackAlignAND(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
- DebugLoc DL, unsigned Reg,
+ const DebugLoc &DL, unsigned Reg,
uint64_t MaxAlign) const {
uint64_t Val = -MaxAlign;
unsigned AndOp = getANDriOpcode(Uses64BitFramePtr, Val);
@@ -950,6 +971,7 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF,
!MF.shouldSplitStack()) { // Regular stack
uint64_t MinSize = X86FI->getCalleeSavedFrameSize();
if (HasFP) MinSize += SlotSize;
+ X86FI->setUsesRedZone(MinSize > 0 || StackSize > 0);
StackSize = std::max(MinSize, StackSize > 128 ? StackSize - 128 : 0);
MFI->setStackSize(StackSize);
}
@@ -1009,7 +1031,7 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF,
// Callee-saved registers are pushed on stack before the stack is realigned.
if (TRI->needsStackRealignment(MF) && !IsWin64Prologue)
- NumBytes = RoundUpToAlignment(NumBytes, MaxAlign);
+ NumBytes = alignTo(NumBytes, MaxAlign);
// Get the offset of the stack slot for the EBP register, which is
// guaranteed to be the last slot by processFunctionBeforeFrameFinalized.
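
This diff replaces RoundUpToAlignment with alignTo throughout; both round a value up to the next multiple of the given (here power-of-two) alignment. A minimal sketch of that contract:

    // Minimal sketch of the alignTo contract used here.
    #include <cstdint>

    constexpr uint64_t alignToSketch(uint64_t Value, uint64_t Align) {
      return (Value + Align - 1) / Align * Align;
    }
    static_assert(alignToSketch(13, 8) == 16, "rounds up");
    static_assert(alignToSketch(16, 8) == 16, "already aligned stays put");
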
@@ -1130,7 +1152,7 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF,
// virtual memory manager are allocated in correct sequence.
uint64_t AlignedNumBytes = NumBytes;
if (IsWin64Prologue && !IsFunclet && TRI->needsStackRealignment(MF))
- AlignedNumBytes = RoundUpToAlignment(AlignedNumBytes, MaxAlign);
+ AlignedNumBytes = alignTo(AlignedNumBytes, MaxAlign);
if (AlignedNumBytes >= StackProbeSize && UseStackProbe) {
// Check whether EAX is livein for this block.
bool isEAXAlive = isEAXLiveIn(MBB);
@@ -1260,7 +1282,7 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF,
}
while (MBBI != MBB.end() && MBBI->getFlag(MachineInstr::FrameSetup)) {
- const MachineInstr *FrameInstr = &*MBBI;
+ const MachineInstr &FrameInstr = *MBBI;
++MBBI;
if (NeedsWinCFI) {
@@ -1360,6 +1382,18 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF,
if (PushedRegs)
emitCalleeSavedFrameMoves(MBB, MBBI, DL);
}
+
+ // An x86 interrupt handling function cannot assume anything about the
+ // direction flag (DF in the EFLAGS register). Clear the flag by emitting a
+ // "cld" instruction in the prologue of every interrupt handler.
+ //
+ // FIXME: Create "cld" instruction only in these cases:
+ // 1. The interrupt handling function uses any of the "rep" instructions.
+ // 2. Interrupt handling function calls another function.
+ //
+ if (Fn->getCallingConv() == CallingConv::X86_INTR)
+ BuildMI(MBB, MBBI, DL, TII.get(X86::CLD))
+ .setMIFlag(MachineInstr::FrameSetup);
}
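
X86_INTR is the calling convention clang assigns to functions marked with its x86 interrupt attribute. A hedged sketch of a handler that would now get the cld (the interrupt_frame parameter shape is what the attribute requires; isr_handler is a made-up name):

    // Sketch: lowered with CallingConv::X86_INTR, so the prologue above
    // now begins with a cld before any handler code runs.
    struct interrupt_frame;

    __attribute__((interrupt))
    void isr_handler(struct interrupt_frame *frame) {
      (void)frame; // unused in this sketch
      // Anything here using "rep" string instructions, or calling
      // functions that do, relies on DF being cleared.
    }
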
bool X86FrameLowering::canUseLEAForSPInEpilogue(
@@ -1373,8 +1407,8 @@ bool X86FrameLowering::canUseLEAForSPInEpilogue(
return !MF.getTarget().getMCAsmInfo()->usesWindowsCFI() || hasFP(MF);
}
-static bool isFuncletReturnInstr(MachineInstr *MI) {
- switch (MI->getOpcode()) {
+static bool isFuncletReturnInstr(MachineInstr &MI) {
+ switch (MI.getOpcode()) {
case X86::CATCHRET:
case X86::CLEANUPRET:
return true;
@@ -1400,11 +1434,10 @@ static bool isFuncletReturnInstr(MachineInstr *MI) {
unsigned
X86FrameLowering::getPSPSlotOffsetFromSP(const MachineFunction &MF) const {
const WinEHFuncInfo &Info = *MF.getWinEHFuncInfo();
- // getFrameIndexReferenceFromSP has an out ref parameter for the stack
- // pointer register; pass a dummy that we ignore
unsigned SPReg;
- int Offset = getFrameIndexReferenceFromSP(MF, Info.PSPSymFrameIdx, SPReg);
- assert(Offset >= 0);
+ int Offset = getFrameIndexReferencePreferSP(MF, Info.PSPSymFrameIdx, SPReg,
+ /*IgnoreSPUpdates*/ true);
+ assert(Offset >= 0 && SPReg == TRI->getStackRegister());
return static_cast<unsigned>(Offset);
}
@@ -1429,18 +1462,25 @@ X86FrameLowering::getWinEHFuncletFrameSize(const MachineFunction &MF) const {
// RBP is not included in the callee saved register block. After pushing RBP,
// everything is 16 byte aligned. Everything we allocate before an outgoing
// call must also be 16 byte aligned.
- unsigned FrameSizeMinusRBP =
- RoundUpToAlignment(CSSize + UsedSize, getStackAlignment());
+ unsigned FrameSizeMinusRBP = alignTo(CSSize + UsedSize, getStackAlignment());
// Subtract out the size of the callee saved registers. This is how much stack
// each funclet will allocate.
return FrameSizeMinusRBP - CSSize;
}
+static bool isTailCallOpcode(unsigned Opc) {
+ return Opc == X86::TCRETURNri || Opc == X86::TCRETURNdi ||
+ Opc == X86::TCRETURNmi ||
+ Opc == X86::TCRETURNri64 || Opc == X86::TCRETURNdi64 ||
+ Opc == X86::TCRETURNmi64;
+}
+
void X86FrameLowering::emitEpilogue(MachineFunction &MF,
MachineBasicBlock &MBB) const {
const MachineFrameInfo *MFI = MF.getFrameInfo();
X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
MachineBasicBlock::iterator MBBI = MBB.getFirstTerminator();
+ unsigned RetOpcode = MBBI->getOpcode();
DebugLoc DL;
if (MBBI != MBB.end())
DL = MBBI->getDebugLoc();
@@ -1453,7 +1493,7 @@ void X86FrameLowering::emitEpilogue(MachineFunction &MF,
bool IsWin64Prologue = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
bool NeedsWinCFI =
IsWin64Prologue && MF.getFunction()->needsUnwindTableEntry();
- bool IsFunclet = isFuncletReturnInstr(MBBI);
+ bool IsFunclet = isFuncletReturnInstr(*MBBI);
MachineBasicBlock *TargetMBB = nullptr;
// Get the number of bytes to allocate from the FrameInfo.
@@ -1490,7 +1530,7 @@ void X86FrameLowering::emitEpilogue(MachineFunction &MF,
// Callee-saved registers were pushed on stack before the stack was
// realigned.
if (TRI->needsStackRealignment(MF) && !IsWin64Prologue)
- NumBytes = RoundUpToAlignment(FrameSize, MaxAlign);
+ NumBytes = alignTo(FrameSize, MaxAlign);
// Pop EBP.
BuildMI(MBB, MBBI, DL,
@@ -1589,15 +1629,17 @@ void X86FrameLowering::emitEpilogue(MachineFunction &MF,
if (NeedsWinCFI)
BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_Epilogue));
- // Add the return addr area delta back since we are not tail calling.
- int Offset = -1 * X86FI->getTCReturnAddrDelta();
- assert(Offset >= 0 && "TCDelta should never be positive");
- if (Offset) {
- MBBI = MBB.getFirstTerminator();
+ if (!isTailCallOpcode(RetOpcode)) {
+ // Add the return addr area delta back since we are not tail calling.
+ int Offset = -1 * X86FI->getTCReturnAddrDelta();
+ assert(Offset >= 0 && "TCDelta should never be positive");
+ if (Offset) {
+ MBBI = MBB.getFirstTerminator();
- // Check for possible merge with preceding ADD instruction.
- Offset += mergeSPUpdates(MBB, MBBI, true);
- emitSPUpdate(MBB, MBBI, Offset, /*InEpilogue=*/true);
+ // Check for possible merge with preceding ADD instruction.
+ Offset += mergeSPUpdates(MBB, MBBI, true);
+ emitSPUpdate(MBB, MBBI, Offset, /*InEpilogue=*/true);
+ }
}
}
@@ -1689,58 +1731,61 @@ int X86FrameLowering::getFrameIndexReference(const MachineFunction &MF, int FI,
return Offset + FPDelta;
}
-// Simplified from getFrameIndexReference keeping only StackPointer cases
-int X86FrameLowering::getFrameIndexReferenceFromSP(const MachineFunction &MF,
- int FI,
- unsigned &FrameReg) const {
+int
+X86FrameLowering::getFrameIndexReferencePreferSP(const MachineFunction &MF,
+ int FI, unsigned &FrameReg,
+ bool IgnoreSPUpdates) const {
+
const MachineFrameInfo *MFI = MF.getFrameInfo();
// Does not include any dynamic realign.
const uint64_t StackSize = MFI->getStackSize();
- {
-#ifndef NDEBUG
- // LLVM arranges the stack as follows:
- // ...
- // ARG2
- // ARG1
- // RETADDR
- // PUSH RBP <-- RBP points here
- // PUSH CSRs
- // ~~~~~~~ <-- possible stack realignment (non-win64)
- // ...
- // STACK OBJECTS
- // ... <-- RSP after prologue points here
- // ~~~~~~~ <-- possible stack realignment (win64)
- //
- // if (hasVarSizedObjects()):
- // ... <-- "base pointer" (ESI/RBX) points here
- // DYNAMIC ALLOCAS
- // ... <-- RSP points here
- //
- // Case 1: In the simple case of no stack realignment and no dynamic
- // allocas, both "fixed" stack objects (arguments and CSRs) are addressable
- // with fixed offsets from RSP.
- //
- // Case 2: In the case of stack realignment with no dynamic allocas, fixed
- // stack objects are addressed with RBP and regular stack objects with RSP.
- //
- // Case 3: In the case of dynamic allocas and stack realignment, RSP is used
- // to address stack arguments for outgoing calls and nothing else. The "base
- // pointer" points to local variables, and RBP points to fixed objects.
- //
- // In cases 2 and 3, we can only answer for non-fixed stack objects, and the
- // answer we give is relative to the SP after the prologue, and not the
- // SP in the middle of the function.
-
- assert((!MFI->isFixedObjectIndex(FI) || !TRI->needsStackRealignment(MF) ||
- STI.isTargetWin64()) &&
- "offset from fixed object to SP is not static");
-
- // We don't handle tail calls, and shouldn't be seeing them either.
- int TailCallReturnAddrDelta =
- MF.getInfo<X86MachineFunctionInfo>()->getTCReturnAddrDelta();
- assert(!(TailCallReturnAddrDelta < 0) && "we don't handle this case!");
-#endif
- }
+ // LLVM arranges the stack as follows:
+ // ...
+ // ARG2
+ // ARG1
+ // RETADDR
+ // PUSH RBP <-- RBP points here
+ // PUSH CSRs
+ // ~~~~~~~ <-- possible stack realignment (non-win64)
+ // ...
+ // STACK OBJECTS
+ // ... <-- RSP after prologue points here
+ // ~~~~~~~ <-- possible stack realignment (win64)
+ //
+ // if (hasVarSizedObjects()):
+ // ... <-- "base pointer" (ESI/RBX) points here
+ // DYNAMIC ALLOCAS
+ // ... <-- RSP points here
+ //
+ // Case 1: In the simple case of no stack realignment and no dynamic
+ // allocas, both "fixed" stack objects (arguments and CSRs) are addressable
+ // with fixed offsets from RSP.
+ //
+ // Case 2: In the case of stack realignment with no dynamic allocas, fixed
+ // stack objects are addressed with RBP and regular stack objects with RSP.
+ //
+ // Case 3: In the case of dynamic allocas and stack realignment, RSP is used
+ // to address stack arguments for outgoing calls and nothing else. The "base
+ // pointer" points to local variables, and RBP points to fixed objects.
+ //
+ // In cases 2 and 3, we can only answer for non-fixed stack objects, and the
+ // answer we give is relative to the SP after the prologue, and not the
+ // SP in the middle of the function.
+
+ if (MFI->isFixedObjectIndex(FI) && TRI->needsStackRealignment(MF) &&
+ !STI.isTargetWin64())
+ return getFrameIndexReference(MF, FI, FrameReg);
+
+ // If !hasReservedCallFrame the function might have SP adjustment in the
+ // body. So, even though the offset is statically known, it depends on where
+ // we are in the function.
+ const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
+ if (!IgnoreSPUpdates && !TFI->hasReservedCallFrame(MF))
+ return getFrameIndexReference(MF, FI, FrameReg);
+
+ // We don't handle tail calls, and shouldn't be seeing them either.
+ assert(MF.getInfo<X86MachineFunctionInfo>()->getTCReturnAddrDelta() >= 0 &&
+ "we don't handle this case!");
// Fill in FrameReg output argument.
FrameReg = TRI->getStackRegister();
@@ -1851,16 +1896,37 @@ bool X86FrameLowering::spillCalleeSavedRegisters(
return true;
// Push GPRs. It increases frame size.
+ const MachineFunction &MF = *MBB.getParent();
unsigned Opc = STI.is64Bit() ? X86::PUSH64r : X86::PUSH32r;
for (unsigned i = CSI.size(); i != 0; --i) {
unsigned Reg = CSI[i - 1].getReg();
if (!X86::GR64RegClass.contains(Reg) && !X86::GR32RegClass.contains(Reg))
continue;
- // Add the callee-saved register as live-in. It's killed at the spill.
- MBB.addLiveIn(Reg);
- BuildMI(MBB, MI, DL, TII.get(Opc)).addReg(Reg, RegState::Kill)
+ const MachineRegisterInfo &MRI = MF.getRegInfo();
+ bool isLiveIn = MRI.isLiveIn(Reg);
+ if (!isLiveIn)
+ MBB.addLiveIn(Reg);
+
+ // Decide whether we can add a kill flag to the use.
+ bool CanKill = !isLiveIn;
+ // Check if any subregister is live-in
+ if (CanKill) {
+ for (MCRegAliasIterator AReg(Reg, TRI, false); AReg.isValid(); ++AReg) {
+ if (MRI.isLiveIn(*AReg)) {
+ CanKill = false;
+ break;
+ }
+ }
+ }
+
+ // Do not set a kill flag on values that are also marked as live-in. This
+ // happens with the @llvm.returnaddress intrinsic and with arguments
+ // passed in callee-saved registers.
+ // Omitting the kill flags is conservatively correct even if the live-in
+ // is not used after all.
+ BuildMI(MBB, MI, DL, TII.get(Opc)).addReg(Reg, getKillRegState(CanKill))
.setMIFlag(MachineInstr::FrameSetup);
}
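
The kill-flag decision above boils down to one predicate: a push may kill its source register only if neither the register nor any register aliasing it is a live-in of the function. Restated as a sketch using the same LLVM APIs (canKillOnPush is a hypothetical helper, not part of this patch):

    // Mirrors the CanKill computation in spillCalleeSavedRegisters.
    static bool canKillOnPush(const MachineRegisterInfo &MRI,
                              const TargetRegisterInfo *TRI, unsigned Reg) {
      if (MRI.isLiveIn(Reg))
        return false;
      // A live-in alias (e.g. a subregister) also blocks the kill flag.
      for (MCRegAliasIterator AReg(Reg, TRI, /*IncludeSelf=*/false);
           AReg.isValid(); ++AReg)
        if (MRI.isLiveIn(*AReg))
          return false;
      return true;
    }
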
@@ -1891,7 +1957,7 @@ bool X86FrameLowering::restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
if (CSI.empty())
return false;
- if (isFuncletReturnInstr(MI) && STI.isOSWindows()) {
+ if (isFuncletReturnInstr(*MI) && STI.isOSWindows()) {
// Don't restore CSRs in 32-bit EH funclets. Matches
// spillCalleeSavedRegisters.
if (STI.is32Bit())
@@ -2250,11 +2316,33 @@ void X86FrameLowering::adjustForSegmentedStacks(
checkMBB->addSuccessor(allocMBB);
checkMBB->addSuccessor(&PrologueMBB);
-#ifdef XDEBUG
+#ifdef EXPENSIVE_CHECKS
MF.verify();
#endif
}
+/// Lookup an ERTS parameter in the !hipe.literals named metadata node.
+/// HiPE provides Erlang Runtime System-internal parameters, such as PCB offsets
+/// to fields it needs, through a named metadata node "hipe.literals" containing
+/// name-value pairs.
+static unsigned getHiPELiteral(
+ NamedMDNode *HiPELiteralsMD, const StringRef LiteralName) {
+ for (int i = 0, e = HiPELiteralsMD->getNumOperands(); i != e; ++i) {
+ MDNode *Node = HiPELiteralsMD->getOperand(i);
+ if (Node->getNumOperands() != 2) continue;
+ MDString *NodeName = dyn_cast<MDString>(Node->getOperand(0));
+ ValueAsMetadata *NodeVal = dyn_cast<ValueAsMetadata>(Node->getOperand(1));
+ if (!NodeName || !NodeVal) continue;
+ ConstantInt *ValConst = dyn_cast_or_null<ConstantInt>(NodeVal->getValue());
+ if (ValConst && NodeName->getString() == LiteralName) {
+ return ValConst->getZExtValue();
+ }
+ }
+
+ report_fatal_error("HiPE literal " + LiteralName
+ + " required but not provided");
+}
+
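
For reference, the !hipe.literals node read here is module-level metadata holding (name, constant) pairs. An illustrative shape, embedded as a string for the sketch's sake; the names match the lookups this patch performs, while the values (144 = 0x90 and 24) merely echo the hardcoded constants removed below — real values come from the Erlang runtime:

    // Hypothetical example input for getHiPELiteral.
    static const char *HiPELiteralsExampleIR = R"IR(
    !hipe.literals = !{!0, !1, !2}
    !0 = !{!"P_NSP_LIMIT", i32 144}
    !1 = !{!"X86_LEAF_WORDS", i32 24}
    !2 = !{!"AMD64_LEAF_WORDS", i32 24}
    )IR";
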
/// Erlang programs may need a special prologue to handle the stack size they
/// might need at runtime. That is because Erlang/OTP does not implement a C
/// stack but uses a custom implementation of hybrid stack/heap architecture.
@@ -2280,7 +2368,14 @@ void X86FrameLowering::adjustForHiPEPrologue(
assert(&(*MF.begin()) == &PrologueMBB && "Shrink-wrapping not supported yet");
// HiPE-specific values
- const unsigned HipeLeafWords = 24;
+ NamedMDNode *HiPELiteralsMD = MF.getMMI().getModule()
+ ->getNamedMetadata("hipe.literals");
+ if (!HiPELiteralsMD)
+ report_fatal_error(
+ "Can't generate HiPE prologue without runtime parameters");
+ const unsigned HipeLeafWords
+ = getHiPELiteral(HiPELiteralsMD,
+ Is64Bit ? "AMD64_LEAF_WORDS" : "X86_LEAF_WORDS");
const unsigned CCRegisteredArgs = Is64Bit ? 6 : 5;
const unsigned Guaranteed = HipeLeafWords * SlotSize;
unsigned CallerStkArity = MF.getFunction()->arg_size() > CCRegisteredArgs ?
@@ -2300,15 +2395,13 @@ void X86FrameLowering::adjustForHiPEPrologue(
if (MFI->hasCalls()) {
unsigned MoreStackForCalls = 0;
- for (MachineFunction::iterator MBBI = MF.begin(), MBBE = MF.end();
- MBBI != MBBE; ++MBBI)
- for (MachineBasicBlock::iterator MI = MBBI->begin(), ME = MBBI->end();
- MI != ME; ++MI) {
- if (!MI->isCall())
+ for (auto &MBB : MF) {
+ for (auto &MI : MBB) {
+ if (!MI.isCall())
continue;
// Get callee operand.
- const MachineOperand &MO = MI->getOperand(0);
+ const MachineOperand &MO = MI.getOperand(0);
// Only take account of global function calls (no closures etc.).
if (!MO.isGlobal())
@@ -2334,6 +2427,7 @@ void X86FrameLowering::adjustForHiPEPrologue(
MoreStackForCalls = std::max(MoreStackForCalls,
(HipeLeafWords - 1 - CalleeStkArity) * SlotSize);
}
+ }
MaxStack += MoreStackForCalls;
}
@@ -2353,20 +2447,19 @@ void X86FrameLowering::adjustForHiPEPrologue(
unsigned ScratchReg, SPReg, PReg, SPLimitOffset;
unsigned LEAop, CMPop, CALLop;
+ SPLimitOffset = getHiPELiteral(HiPELiteralsMD, "P_NSP_LIMIT");
if (Is64Bit) {
SPReg = X86::RSP;
PReg = X86::RBP;
LEAop = X86::LEA64r;
CMPop = X86::CMP64rm;
CALLop = X86::CALL64pcrel32;
- SPLimitOffset = 0x90;
} else {
SPReg = X86::ESP;
PReg = X86::EBP;
LEAop = X86::LEA32r;
CMPop = X86::CMP32rm;
CALLop = X86::CALLpcrel32;
- SPLimitOffset = 0x4c;
}
ScratchReg = GetScratchRegister(Is64Bit, IsLP64, MF, true);
@@ -2395,13 +2488,15 @@ void X86FrameLowering::adjustForHiPEPrologue(
incStackMBB->addSuccessor(&PrologueMBB, {99, 100});
incStackMBB->addSuccessor(incStackMBB, {1, 100});
}
-#ifdef XDEBUG
+#ifdef EXPENSIVE_CHECKS
MF.verify();
#endif
}
bool X86FrameLowering::adjustStackWithPops(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI, DebugLoc DL, int Offset) const {
+ MachineBasicBlock::iterator MBBI,
+ const DebugLoc &DL,
+ int Offset) const {
if (Offset <= 0)
return false;
@@ -2440,7 +2535,8 @@ bool X86FrameLowering::adjustStackWithPops(MachineBasicBlock &MBB,
bool IsDef = false;
for (const MachineOperand &MO : Prev->implicit_operands()) {
- if (MO.isReg() && MO.isDef() && MO.getReg() == Candidate) {
+ if (MO.isReg() && MO.isDef() &&
+ TRI->isSuperOrSubRegisterEq(MO.getReg(), Candidate)) {
IsDef = true;
break;
}
@@ -2468,7 +2564,7 @@ bool X86FrameLowering::adjustStackWithPops(MachineBasicBlock &MBB,
return true;
}
-void X86FrameLowering::
+MachineBasicBlock::iterator X86FrameLowering::
eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
MachineBasicBlock::iterator I) const {
bool reserveCallFrame = hasReservedCallFrame(MF);
@@ -2488,7 +2584,7 @@ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
// amount of space needed for the outgoing arguments up to the next
// alignment boundary.
unsigned StackAlign = getStackAlignment();
- Amount = RoundUpToAlignment(Amount, StackAlign);
+ Amount = alignTo(Amount, StackAlign);
MachineModuleInfo &MMI = MF.getMMI();
const Function *Fn = MF.getFunction();
@@ -2512,7 +2608,7 @@ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
MCCFIInstruction::createGnuArgsSize(nullptr, Amount));
if (Amount == 0)
- return;
+ return I;
// Factor out the amount that gets handled inside the sequence
// (Pushes of argument for frame setup, callee pops for frame destroy)
@@ -2525,13 +2621,23 @@ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
BuildCFI(MBB, I, DL,
MCCFIInstruction::createAdjustCfaOffset(nullptr, -InternalAmt));
- if (Amount) {
- // Add Amount to SP to destroy a frame, and subtract to setup.
- int Offset = isDestroy ? Amount : -Amount;
-
- if (!(Fn->optForMinSize() &&
- adjustStackWithPops(MBB, I, DL, Offset)))
- BuildStackAdjustment(MBB, I, DL, Offset, /*InEpilogue=*/false);
+ // Add Amount to SP to destroy a frame, or subtract from SP to set one up.
+ int64_t StackAdjustment = isDestroy ? Amount : -Amount;
+ int64_t CfaAdjustment = -StackAdjustment;
+
+ if (StackAdjustment) {
+ // Merge with any previous or following adjustment instruction. Note: the
+ // instructions we merge with here carry no CFI, so their stack
+ // adjustments do not feed into CfaAdjustment.
+ StackAdjustment += mergeSPUpdates(MBB, I, true);
+ StackAdjustment += mergeSPUpdates(MBB, I, false);
+
+ if (StackAdjustment) {
+ if (!(Fn->optForMinSize() &&
+ adjustStackWithPops(MBB, I, DL, StackAdjustment)))
+ BuildStackAdjustment(MBB, I, DL, StackAdjustment,
+ /*InEpilogue=*/false);
+ }
}
if (DwarfCFI && !hasFP(MF)) {
@@ -2541,18 +2647,16 @@ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
// CFI only for EH purposes or for debugging. EH only requires the CFA
// offset to be correct at each call site, while for debugging we want
// it to be more precise.
- int CFAOffset = Amount;
+
// TODO: When not using precise CFA, we also need to adjust for the
// InternalAmt here.
-
- if (CFAOffset) {
- CFAOffset = isDestroy ? -CFAOffset : CFAOffset;
- BuildCFI(MBB, I, DL,
- MCCFIInstruction::createAdjustCfaOffset(nullptr, CFAOffset));
+ if (CfaAdjustment) {
+ BuildCFI(MBB, I, DL, MCCFIInstruction::createAdjustCfaOffset(
+ nullptr, CfaAdjustment));
}
}
- return;
+ return I;
}
if (isDestroy && InternalAmt) {
@@ -2562,11 +2666,20 @@ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
// We are not tracking the stack pointer adjustment by the callee, so make
// sure we restore the stack pointer immediately after the call; there may
// be spill code inserted between the CALL and ADJCALLSTACKUP instructions.
+ MachineBasicBlock::iterator CI = I;
MachineBasicBlock::iterator B = MBB.begin();
- while (I != B && !std::prev(I)->isCall())
- --I;
- BuildStackAdjustment(MBB, I, DL, -InternalAmt, /*InEpilogue=*/false);
+ while (CI != B && !std::prev(CI)->isCall())
+ --CI;
+ BuildStackAdjustment(MBB, CI, DL, -InternalAmt, /*InEpilogue=*/false);
}
+
+ return I;
+}
+
+bool X86FrameLowering::canUseAsPrologue(const MachineBasicBlock &MBB) const {
+ assert(MBB.getParent() && "Block is not attached to a function!");
+ const MachineFunction &MF = *MBB.getParent();
+ return !TRI->needsStackRealignment(MF) || !MBB.isLiveIn(X86::EFLAGS);
}
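
The EFLAGS check exists because stack realignment is emitted as an "and $-Align, %rsp"-style instruction (see BuildStackAlignAND above), which clobbers EFLAGS; a block that expects EFLAGS live on entry therefore cannot host a realigning prologue.
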
bool X86FrameLowering::canUseAsEpilogue(const MachineBasicBlock &MBB) const {
@@ -2604,7 +2717,7 @@ bool X86FrameLowering::enableShrinkWrapping(const MachineFunction &MF) const {
MachineBasicBlock::iterator X86FrameLowering::restoreWin32EHStackPointers(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
- DebugLoc DL, bool RestoreSP) const {
+ const DebugLoc &DL, bool RestoreSP) const {
assert(STI.isTargetWindowsMSVC() && "funclets only supported in MSVC env");
assert(STI.isTargetWin32() && "EBP/ESI restoration only required on win32");
assert(STI.is32Bit() && !Uses64BitFramePtr &&
@@ -2664,6 +2777,150 @@ MachineBasicBlock::iterator X86FrameLowering::restoreWin32EHStackPointers(
return MBBI;
}
+namespace {
+// Struct used by orderFrameObjects to help sort the stack objects.
+struct X86FrameSortingObject {
+ bool IsValid = false; // true if we care about this Object.
+ unsigned ObjectIndex = 0; // Index of Object into MFI list.
+ unsigned ObjectSize = 0; // Size of Object in bytes.
+ unsigned ObjectAlignment = 1; // Alignment of Object in bytes.
+ unsigned ObjectNumUses = 0; // Object static number of uses.
+};
+
+// The comparison function we use for std::sort to order our local
+// stack symbols. The current algorithm is to use an estimated
+// "density". This takes into consideration the size and number of
+// uses each object has in order to roughly minimize code size.
+// So, for example, an object of size 16B that is referenced 5 times
+// will get higher priority than four 4B objects referenced once each.
+// It's not perfect, and we may be able to squeeze a few more bytes out of
+// it (for example, 0(esp) requires fewer bytes; symbols allocated at the
+// far end could get special consideration, since their size matters less;
+// etc.), but the algorithmic complexity grows too much to be worth the
+// extra gains we get. This gets us pretty close.
+// The final order leaves the highest-priority objects at the end of the
+// list.
+struct X86FrameSortingComparator {
+ inline bool operator()(const X86FrameSortingObject &A,
+ const X86FrameSortingObject &B) {
+ uint64_t DensityAScaled, DensityBScaled;
+
+ // For consistency in our comparison, all invalid objects are placed
+ // at the end. This also allows us to stop walking when we hit the
+ // first invalid item after it's all sorted.
+ if (!A.IsValid)
+ return false;
+ if (!B.IsValid)
+ return true;
+
+ // The density is calculated by doing :
+ // (double)DensityA = A.ObjectNumUses / A.ObjectSize
+ // (double)DensityB = B.ObjectNumUses / B.ObjectSize
+ // Since this approach may cause inconsistencies in
+ // the floating point <, >, == comparisons, depending on the floating
+ // point model with which the compiler was built, we're going
+ // to scale both sides by multiplying with
+ // A.ObjectSize * B.ObjectSize. This ends up factoring away
+ // the division and, with it, the need for any floating point
+ // arithmetic.
+ DensityAScaled = static_cast<uint64_t>(A.ObjectNumUses) *
+ static_cast<uint64_t>(B.ObjectSize);
+ DensityBScaled = static_cast<uint64_t>(B.ObjectNumUses) *
+ static_cast<uint64_t>(A.ObjectSize);
+
+ // If the two densities are equal, prioritize highest alignment
+ // objects. This allows for similar alignment objects
+ // to be packed together (given the same density).
+ // There's room for improvement here, also, since we can pack
+ // similar alignment (different density) objects next to each
+ // other to save padding. This will also require further
+ // complexity/iterations, and the overall gain isn't worth it,
+ // in general. Something to keep in mind, though.
+ if (DensityAScaled == DensityBScaled)
+ return A.ObjectAlignment < B.ObjectAlignment;
+
+ return DensityAScaled < DensityBScaled;
+ }
+};
+} // namespace
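
A quick worked check of the cross-multiplied densities, mirroring the example in the comment above (a 16B object used 5 times vs. a 4B object used once):

    #include <cassert>
    #include <cstdint>

    int main() {
      uint64_t UsesA = 5, SizeA = 16; // 16-byte object, 5 references
      uint64_t UsesB = 1, SizeB = 4;  // 4-byte object, 1 reference
      // Densities 5/16 and 1/4, both scaled by SizeA * SizeB = 64:
      uint64_t DensityAScaled = UsesA * SizeB; // 20
      uint64_t DensityBScaled = UsesB * SizeA; // 16
      // B compares less than A, so A lands at the high-priority end.
      assert(DensityBScaled < DensityAScaled);
      return 0;
    }
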
+
+// Order the symbols in the local stack.
+// We want to place the local stack objects in some sort of sensible order.
+// The heuristic we use is to try and pack them according to static number
+// of uses and size of object in order to minimize code size.
+void X86FrameLowering::orderFrameObjects(
+ const MachineFunction &MF, SmallVectorImpl<int> &ObjectsToAllocate) const {
+ const MachineFrameInfo *MFI = MF.getFrameInfo();
+
+ // Don't waste time if there's nothing to do.
+ if (ObjectsToAllocate.empty())
+ return;
+
+ // Create an array of all MFI objects. We won't need all of these
+ // objects, but we're going to create a full array of them to make
+ // it easier to index into when we're counting "uses" down below.
+ // We want to be able to easily/cheaply access an object by simply
+ // indexing into it, instead of having to search for it every time.
+ std::vector<X86FrameSortingObject> SortingObjects(MFI->getObjectIndexEnd());
+
+ // Walk the objects we care about and mark them as such in our working
+ // struct.
+ for (auto &Obj : ObjectsToAllocate) {
+ SortingObjects[Obj].IsValid = true;
+ SortingObjects[Obj].ObjectIndex = Obj;
+ SortingObjects[Obj].ObjectAlignment = MFI->getObjectAlignment(Obj);
+ // Set the size.
+ int ObjectSize = MFI->getObjectSize(Obj);
+ if (ObjectSize == 0)
+ // Variable size. Just use 4.
+ SortingObjects[Obj].ObjectSize = 4;
+ else
+ SortingObjects[Obj].ObjectSize = ObjectSize;
+ }
+
+ // Count the number of uses for each object.
+ for (auto &MBB : MF) {
+ for (auto &MI : MBB) {
+ if (MI.isDebugValue())
+ continue;
+ for (const MachineOperand &MO : MI.operands()) {
+ // Check to see if it's a local stack symbol.
+ if (!MO.isFI())
+ continue;
+ int Index = MO.getIndex();
+ // Check to see if it falls within our range, and is tagged
+ // to require ordering.
+ if (Index >= 0 && Index < MFI->getObjectIndexEnd() &&
+ SortingObjects[Index].IsValid)
+ SortingObjects[Index].ObjectNumUses++;
+ }
+ }
+ }
+
+ // Sort the objects using X86FrameSortingComparator (see its comment for
+ // details).
+ std::stable_sort(SortingObjects.begin(), SortingObjects.end(),
+ X86FrameSortingComparator());
+
+ // Now modify the original list to represent the final order we want. The
+ // order depends on whether we access objects from the stack pointer or
+ // the frame pointer. For SP, the objects we want at smaller offsets should
+ // end up at the end of the list; for FP, the order is flipped.
+ int i = 0;
+ for (auto &Obj : SortingObjects) {
+ // All invalid items are sorted at the end, so it's safe to stop.
+ if (!Obj.IsValid)
+ break;
+ ObjectsToAllocate[i++] = Obj.ObjectIndex;
+ }
+
+ // Flip it if we're accessing off of the FP.
+ if (!TRI->needsStackRealignment(MF) && hasFP(MF))
+ std::reverse(ObjectsToAllocate.begin(), ObjectsToAllocate.end());
+}
+
unsigned X86FrameLowering::getWinEHParentFrameOffset(const MachineFunction &MF) const {
// RDX, the parent frame pointer, is homed into 16(%rsp) in the prologue.
unsigned Offset = 16;
@@ -2691,14 +2948,30 @@ void X86FrameLowering::processFunctionBeforeFrameFinalized(
// were no fixed objects, use offset -SlotSize, which is immediately after the
// return address. Fixed objects have negative frame indices.
MachineFrameInfo *MFI = MF.getFrameInfo();
+ WinEHFuncInfo &EHInfo = *MF.getWinEHFuncInfo();
int64_t MinFixedObjOffset = -SlotSize;
for (int I = MFI->getObjectIndexBegin(); I < 0; ++I)
MinFixedObjOffset = std::min(MinFixedObjOffset, MFI->getObjectOffset(I));
+ for (WinEHTryBlockMapEntry &TBME : EHInfo.TryBlockMap) {
+ for (WinEHHandlerType &H : TBME.HandlerArray) {
+ int FrameIndex = H.CatchObj.FrameIndex;
+ if (FrameIndex != INT_MAX) {
+ // Ensure alignment.
+ unsigned Align = MFI->getObjectAlignment(FrameIndex);
+ MinFixedObjOffset -= std::abs(MinFixedObjOffset) % Align;
+ MinFixedObjOffset -= MFI->getObjectSize(FrameIndex);
+ MFI->setObjectOffset(FrameIndex, MinFixedObjOffset);
+ }
+ }
+ }
+
+ // Ensure alignment.
+ MinFixedObjOffset -= std::abs(MinFixedObjOffset) % 8;
int64_t UnwindHelpOffset = MinFixedObjOffset - SlotSize;
int UnwindHelpFI =
MFI->CreateFixedObject(SlotSize, UnwindHelpOffset, /*Immutable=*/false);
- MF.getWinEHFuncInfo()->UnwindHelpFrameIdx = UnwindHelpFI;
+ EHInfo.UnwindHelpFrameIdx = UnwindHelpFI;
// Store -2 into UnwindHelp on function entry. We have to scan forwards past
// other frame setup instructions.