Diffstat (limited to 'lib/Target/X86/X86ISelLowering.h')
-rw-r--r-- | lib/Target/X86/X86ISelLowering.h | 188 |
1 file changed, 124 insertions, 64 deletions
diff --git a/lib/Target/X86/X86ISelLowering.h b/lib/Target/X86/X86ISelLowering.h
index ffed46c733aa..2f7b8ba6e694 100644
--- a/lib/Target/X86/X86ISelLowering.h
+++ b/lib/Target/X86/X86ISelLowering.h
@@ -85,7 +85,7 @@ namespace llvm {
       /// as.
       FST,

-      /// CALL/TAILCALL - These operations represent an abstract X86 call
+      /// CALL - These operations represent an abstract X86 call
       /// instruction, which includes a bunch of information. In particular the
       /// operands of these node are:
       ///
@@ -102,12 +102,8 @@
       ///      #1 - The first register result value (optional)
       ///      #2 - The second register result value (optional)
       ///
-      /// The CALL vs TAILCALL distinction boils down to whether the callee is
-      /// known not to modify the caller's stack frame, as is standard with
-      /// LLVM.
       CALL,
-      TAILCALL,
-
+
       /// RDTSC_DAG - This operation implements the lowering for
       /// readcyclecounter
       RDTSC_DAG,
@@ -208,17 +204,6 @@
       LCMPXCHG_DAG,
       LCMPXCHG8_DAG,

-      // ATOMADD64_DAG, ATOMSUB64_DAG, ATOMOR64_DAG, ATOMAND64_DAG,
-      // ATOMXOR64_DAG, ATOMNAND64_DAG, ATOMSWAP64_DAG -
-      // Atomic 64-bit binary operations.
-      ATOMADD64_DAG,
-      ATOMSUB64_DAG,
-      ATOMOR64_DAG,
-      ATOMXOR64_DAG,
-      ATOMAND64_DAG,
-      ATOMNAND64_DAG,
-      ATOMSWAP64_DAG,
-
       // FNSTCW16m - Store FP control world into i16 memory.
       FNSTCW16m,
@@ -241,10 +226,29 @@
       // ADD, SUB, SMUL, UMUL, etc. - Arithmetic operations with FLAGS results.
       ADD, SUB, SMUL, UMUL,
-      INC, DEC,
+      INC, DEC, OR, XOR, AND,

       // MUL_IMM - X86 specific multiply by immediate.
-      MUL_IMM
+      MUL_IMM,
+
+      // PTEST - Vector bitwise comparisons
+      PTEST,
+
+      // VASTART_SAVE_XMM_REGS - Save xmm argument registers to the stack,
+      // according to %al. An operator is needed so that this can be expanded
+      // with control flow.
+      VASTART_SAVE_XMM_REGS,
+
+      // ATOMADD64_DAG, ATOMSUB64_DAG, ATOMOR64_DAG, ATOMAND64_DAG,
+      // ATOMXOR64_DAG, ATOMNAND64_DAG, ATOMSWAP64_DAG -
+      // Atomic 64-bit binary operations.
+      ATOMADD64_DAG = ISD::FIRST_TARGET_MEMORY_OPCODE,
+      ATOMSUB64_DAG,
+      ATOMOR64_DAG,
+      ATOMXOR64_DAG,
+      ATOMAND64_DAG,
+      ATOMNAND64_DAG,
+      ATOMSWAP64_DAG
     };
   }
@@ -333,6 +337,15 @@
     /// the specified isShuffleMask VECTOR_SHUFFLE mask with PSHUFLW
     /// instructions.
     unsigned getShufflePSHUFLWImmediate(SDNode *N);
+
+    /// isZeroNode - Returns true if Elt is a constant zero or a floating point
+    /// constant +0.0.
+    bool isZeroNode(SDValue Elt);
+
+    /// isOffsetSuitableForCodeModel - Returns true of the given offset can be
+    /// fit into displacement field of the instruction.
+    bool isOffsetSuitableForCodeModel(int64_t Offset, CodeModel::Model M,
+                                      bool hasSymbolicDisplacement = true);
   }

   //===--------------------------------------------------------------------===//
@@ -374,12 +387,17 @@
     /// getOptimalMemOpType - Returns the target specific optimal type for load
     /// and store operations as a result of memset, memcpy, and memmove
-    /// lowering. It returns MVT::iAny if SelectionDAG should be responsible for
+    /// lowering. It returns EVT::iAny if SelectionDAG should be responsible for
     /// determining it.
-    virtual
-    MVT getOptimalMemOpType(uint64_t Size, unsigned Align,
-                            bool isSrcConst, bool isSrcStr,
-                            SelectionDAG &DAG) const;
+    virtual EVT getOptimalMemOpType(uint64_t Size, unsigned Align,
+                                    bool isSrcConst, bool isSrcStr,
+                                    SelectionDAG &DAG) const;
+
+    /// allowsUnalignedMemoryAccesses - Returns true if the target allows
+    /// unaligned memory accesses. of the specified type.
+    virtual bool allowsUnalignedMemoryAccesses(EVT VT) const {
+      return true;
+    }

     /// LowerOperation - Provide custom lowering hooks for some operations.
     ///
@@ -395,7 +413,8 @@
     virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;

     virtual MachineBasicBlock *EmitInstrWithCustomInserter(MachineInstr *MI,
-                                                           MachineBasicBlock *MBB) const;
+                                                         MachineBasicBlock *MBB,
+                    DenseMap<MachineBasicBlock*, MachineBasicBlock*> *EM) const;

     /// getTargetNodeName - This method returns the name of a target specific
@@ -403,7 +422,7 @@
     virtual const char *getTargetNodeName(unsigned Opcode) const;

     /// getSetCCResultType - Return the ISD::SETCC ValueType
-    virtual MVT getSetCCResultType(MVT VT) const;
+    virtual MVT::SimpleValueType getSetCCResultType(EVT VT) const;

     /// computeMaskedBitsForTargetNode - Determine which of the bits specified
     /// in Mask are known to be either zero or one and return them in the
@@ -420,13 +439,15 @@

     SDValue getReturnAddressFrameIndex(SelectionDAG &DAG);

+    virtual bool ExpandInlineAsm(CallInst *CI) const;
+
     ConstraintType getConstraintType(const std::string &Constraint) const;

     std::vector<unsigned>
       getRegClassForInlineAsmConstraint(const std::string &Constraint,
-                                        MVT VT) const;
+                                        EVT VT) const;

-    virtual const char *LowerXConstraint(MVT ConstraintVT) const;
+    virtual const char *LowerXConstraint(EVT ConstraintVT) const;

     /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
     /// vector. If it is invalid, don't add anything to Ops. If hasMemory is
@@ -444,7 +465,7 @@
     /// error, this returns a register number of 0.
     std::pair<unsigned, const TargetRegisterClass*>
       getRegForInlineAsmConstraint(const std::string &Constraint,
-                                   MVT VT) const;
+                                   EVT VT) const;

     /// isLegalAddressingMode - Return true if the addressing mode represented
     /// by AM is legal for this target, for a load/store of the specified type.
@@ -454,7 +475,7 @@
     /// type Ty1 to type Ty2. e.g. On x86 it's free to truncate a i32 value in
     /// register EAX to i16 by referencing its sub-register AX.
     virtual bool isTruncateFree(const Type *Ty1, const Type *Ty2) const;
-    virtual bool isTruncateFree(MVT VT1, MVT VT2) const;
+    virtual bool isTruncateFree(EVT VT1, EVT VT2) const;

     /// isZExtFree - Return true if any actual instruction that defines a
     /// value of type Ty1 implicit zero-extends the value to Ty2 in the result
@@ -465,31 +486,31 @@
     /// all instructions that define 32-bit values implicit zero-extend the
     /// result out to 64 bits.
     virtual bool isZExtFree(const Type *Ty1, const Type *Ty2) const;
-    virtual bool isZExtFree(MVT VT1, MVT VT2) const;
+    virtual bool isZExtFree(EVT VT1, EVT VT2) const;

     /// isNarrowingProfitable - Return true if it's profitable to narrow
     /// operations of type VT1 to VT2. e.g. on x86, it's profitable to narrow
     /// from i32 to i8 but not from i32 to i16.
-    virtual bool isNarrowingProfitable(MVT VT1, MVT VT2) const;
+    virtual bool isNarrowingProfitable(EVT VT1, EVT VT2) const;

     /// isShuffleMaskLegal - Targets can use this to indicate that they only
     /// support *some* VECTOR_SHUFFLE operations, those with specific masks.
     /// By default, if a target supports the VECTOR_SHUFFLE node, all mask
     /// values are assumed to be legal.
     virtual bool isShuffleMaskLegal(const SmallVectorImpl<int> &Mask,
-                                    MVT VT) const;
+                                    EVT VT) const;

     /// isVectorClearMaskLegal - Similar to isShuffleMaskLegal. This is
     /// used by Targets can use this to indicate if there is a suitable
     /// VECTOR_SHUFFLE that can be used to replace a VAND with a constant
     /// pool entry.
     virtual bool isVectorClearMaskLegal(const SmallVectorImpl<int> &Mask,
-                                        MVT VT) const;
+                                        EVT VT) const;

     /// ShouldShrinkFPConstant - If true, then instruction selection should
     /// seek to shrink the FP constant of the specified type to a smaller type
     /// in order to save space and / or reduce runtime.
-    virtual bool ShouldShrinkFPConstant(MVT VT) const {
+    virtual bool ShouldShrinkFPConstant(EVT VT) const {
       // Don't shrink FP constpool if SSE2 is available since cvtss2sd is more
       // expensive than a straight movsd. On the other hand, it's important to
       // shrink long double fp constant since fldt is very slow.
@@ -497,11 +518,14 @@
     }

     /// IsEligibleForTailCallOptimization - Check whether the call is eligible
-    /// for tail call optimization. Target which want to do tail call
+    /// for tail call optimization. Targets which want to do tail call
     /// optimization should implement this function.
-    virtual bool IsEligibleForTailCallOptimization(CallSDNode *TheCall,
-                                                   SDValue Ret,
-                                                   SelectionDAG &DAG) const;
+    virtual bool
+    IsEligibleForTailCallOptimization(SDValue Callee,
+                                      CallingConv::ID CalleeCC,
+                                      bool isVarArg,
+                                      const SmallVectorImpl<ISD::InputArg> &Ins,
+                                      SelectionDAG& DAG) const;

     virtual const X86Subtarget* getSubtarget() {
       return Subtarget;
@@ -509,17 +533,17 @@

     /// isScalarFPTypeInSSEReg - Return true if the specified scalar FP type is
     /// computed in an SSE register, not on the X87 floating point stack.
-    bool isScalarFPTypeInSSEReg(MVT VT) const {
+    bool isScalarFPTypeInSSEReg(EVT VT) const {
       return (VT == MVT::f64 && X86ScalarSSEf64) || // f64 is when SSE2
       (VT == MVT::f32 && X86ScalarSSEf32);          // f32 is when SSE1
     }

     /// getWidenVectorType: given a vector type, returns the type to widen
     /// to (e.g., v7i8 to v8i8). If the vector type is legal, it returns itself.
-    /// If there is no vector type that we want to widen to, returns MVT::Other
+    /// If there is no vector type that we want to widen to, returns EVT::Other
     /// When and were to widen is target dependent based on the cost of
     /// scalarizing vs using the wider vector type.
-    virtual MVT getWidenVectorType(MVT VT) const;
+    virtual EVT getWidenVectorType(EVT VT) const;

     /// createFastISel - This method returns a target specific FastISel object,
     /// or null if the target does not support "fast" ISel.
@@ -554,28 +578,30 @@
     bool X86ScalarSSEf32;
     bool X86ScalarSSEf64;

-    SDNode *LowerCallResult(SDValue Chain, SDValue InFlag, CallSDNode *TheCall,
-                            unsigned CallingConv, SelectionDAG &DAG);
-
-    SDValue LowerMemArgument(SDValue Op, SelectionDAG &DAG,
-                             const CCValAssign &VA, MachineFrameInfo *MFI,
-                             unsigned CC, SDValue Root, unsigned i);
-
-    SDValue LowerMemOpCallTo(CallSDNode *TheCall, SelectionDAG &DAG,
-                             const SDValue &StackPtr,
-                             const CCValAssign &VA, SDValue Chain,
-                             SDValue Arg, ISD::ArgFlagsTy Flags);
+    SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
+                            CallingConv::ID CallConv, bool isVarArg,
+                            const SmallVectorImpl<ISD::InputArg> &Ins,
+                            DebugLoc dl, SelectionDAG &DAG,
+                            SmallVectorImpl<SDValue> &InVals);
+    SDValue LowerMemArgument(SDValue Chain,
+                             CallingConv::ID CallConv,
+                             const SmallVectorImpl<ISD::InputArg> &ArgInfo,
+                             DebugLoc dl, SelectionDAG &DAG,
+                             const CCValAssign &VA, MachineFrameInfo *MFI,
+                             unsigned i);
+    SDValue LowerMemOpCallTo(SDValue Chain, SDValue StackPtr, SDValue Arg,
+                             DebugLoc dl, SelectionDAG &DAG,
+                             const CCValAssign &VA,
+                             ISD::ArgFlagsTy Flags);

     // Call lowering helpers.
-    bool IsCalleePop(bool isVarArg, unsigned CallingConv);
-    bool CallRequiresGOTPtrInReg(bool Is64Bit, bool IsTailCall);
-    bool CallRequiresFnAddressInReg(bool Is64Bit, bool IsTailCall);
+    bool IsCalleePop(bool isVarArg, CallingConv::ID CallConv);
     SDValue EmitTailCallLoadRetAddr(SelectionDAG &DAG, SDValue &OutRetAddr,
                                     SDValue Chain, bool IsTailCall, bool Is64Bit,
                                     int FPDiff, DebugLoc dl);

-    CCAssignFn *CCAssignFnForNode(unsigned CallingConv) const;
-    NameDecorationStyle NameDecorationForFORMAL_ARGUMENTS(SDValue Op);
+    CCAssignFn *CCAssignFnForNode(CallingConv::ID CallConv) const;
+    NameDecorationStyle NameDecorationForCallConv(CallingConv::ID CallConv);
     unsigned GetAlignedArgumentStackSize(unsigned StackSize, SelectionDAG &DAG);

     std::pair<SDValue,SDValue> FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG,
@@ -595,7 +621,7 @@
     SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG);
     SDValue LowerExternalSymbol(SDValue Op, SelectionDAG &DAG);
     SDValue LowerShift(SDValue Op, SelectionDAG &DAG);
-    SDValue BuildFILD(SDValue Op, MVT SrcVT, SDValue Chain, SDValue StackSlot,
+    SDValue BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain, SDValue StackSlot,
                       SelectionDAG &DAG);
     SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG);
     SDValue LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG);
@@ -612,10 +638,7 @@
     SDValue LowerBRCOND(SDValue Op, SelectionDAG &DAG);
     SDValue LowerMEMSET(SDValue Op, SelectionDAG &DAG);
     SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG);
-    SDValue LowerCALL(SDValue Op, SelectionDAG &DAG);
-    SDValue LowerRET(SDValue Op, SelectionDAG &DAG);
     SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG);
-    SDValue LowerFORMAL_ARGUMENTS(SDValue Op, SelectionDAG &DAG);
     SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG);
     SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG);
     SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG);
@@ -635,6 +658,26 @@
     SDValue LowerLOAD_SUB(SDValue Op, SelectionDAG &DAG);
     SDValue LowerREADCYCLECOUNTER(SDValue Op, SelectionDAG &DAG);

+    virtual SDValue
+      LowerFormalArguments(SDValue Chain,
+                           CallingConv::ID CallConv, bool isVarArg,
+                           const SmallVectorImpl<ISD::InputArg> &Ins,
+                           DebugLoc dl, SelectionDAG &DAG,
+                           SmallVectorImpl<SDValue> &InVals);
+    virtual SDValue
+      LowerCall(SDValue Chain, SDValue Callee,
+                CallingConv::ID CallConv, bool isVarArg, bool isTailCall,
+                const SmallVectorImpl<ISD::OutputArg> &Outs,
+                const SmallVectorImpl<ISD::InputArg> &Ins,
+                DebugLoc dl, SelectionDAG &DAG,
+                SmallVectorImpl<SDValue> &InVals);
+
+    virtual SDValue
+      LowerReturn(SDValue Chain,
+                  CallingConv::ID CallConv, bool isVarArg,
+                  const SmallVectorImpl<ISD::OutputArg> &Outs,
+                  DebugLoc dl, SelectionDAG &DAG);
+
     void ReplaceATOMIC_BINARY_64(SDNode *N, SmallVectorImpl<SDValue> &Results,
                                  SelectionDAG &DAG, unsigned NewOp);
@@ -651,9 +694,17 @@
                                          const Value *DstSV, uint64_t DstSVOff,
                                          const Value *SrcSV, uint64_t SrcSVOff);

+    /// Utility function to emit string processing sse4.2 instructions
+    /// that return in xmm0.
+    /// This takes the instruction to expand, the associated machine basic
+    /// block, the number of args, and whether or not the second arg is
+    /// in memory or not.
+    MachineBasicBlock *EmitPCMP(MachineInstr *BInstr, MachineBasicBlock *BB,
+                                unsigned argNum, bool inMem) const;
+
     /// Utility function to emit atomic bitwise operations (and, or, xor).
-    // It takes the bitwise instruction to expand, the associated machine basic
-    // block, and the associated X86 opcodes for reg/reg and reg/imm.
+    /// It takes the bitwise instruction to expand, the associated machine basic
+    /// block, and the associated X86 opcodes for reg/reg and reg/imm.
     MachineBasicBlock *EmitAtomicBitwiseWithCustomInserter(
                                                     MachineInstr *BInstr,
                                                     MachineBasicBlock *BB,
@@ -683,6 +734,15 @@
                                                     MachineBasicBlock *BB,
                                                     unsigned cmovOpc) const;

+    /// Utility function to emit the xmm reg save portion of va_start.
+    MachineBasicBlock *EmitVAStartSaveXMMRegsWithCustomInserter(
+                                                   MachineInstr *BInstr,
+                                                   MachineBasicBlock *BB) const;
+
+    MachineBasicBlock *EmitLoweredSelect(MachineInstr *I,
+                                         MachineBasicBlock *BB,
+                    DenseMap<MachineBasicBlock*, MachineBasicBlock*> *EM) const;
+
     /// Emit nodes that will be selected as "test Op0,Op0", or something
     /// equivalent, for use with the given x86 condition code.
     SDValue EmitTest(SDValue Op0, unsigned X86CC, SelectionDAG &DAG);
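One detail in the diff above worth calling out: the 64-bit atomic pseudo-opcodes (ATOMADD64_DAG and friends) are moved so that they start at ISD::FIRST_TARGET_MEMORY_OPCODE instead of simply following MUL_IMM. The snippet below is not part of the patch; it is a minimal sketch, assuming the SelectionDAG headers of this LLVM revision, of what that placement buys: generic code can tell from the opcode value alone that a target-defined node references memory. The helper name isTargetMemoryNode is hypothetical.

// Sketch only: why X86ISD::ATOMADD64_DAG and friends are rebased onto
// ISD::FIRST_TARGET_MEMORY_OPCODE. Any target opcode at or above that value
// is understood to touch memory, so generic SelectionDAG code can classify
// such nodes without any X86-specific knowledge.
#include "llvm/CodeGen/SelectionDAGNodes.h"

static bool isTargetMemoryNode(const llvm::SDNode *N) {
  return N->getOpcode() >= llvm::ISD::FIRST_TARGET_MEMORY_OPCODE;
}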