Diffstat (limited to 'include/llvm')
-rw-r--r--  include/llvm/ADT/DAGDeltaAlgorithm.h  75
-rw-r--r--  include/llvm/ADT/DenseMap.h  1
-rw-r--r--  include/llvm/ADT/EquivalenceClasses.h  2
-rw-r--r--  include/llvm/ADT/FoldingSet.h  83
-rw-r--r--  include/llvm/ADT/ImmutableIntervalMap.h  12
-rw-r--r--  include/llvm/ADT/PostOrderIterator.h  17
-rw-r--r--  include/llvm/ADT/SetVector.h  8
-rw-r--r--  include/llvm/ADT/SmallPtrSet.h  49
-rw-r--r--  include/llvm/ADT/SmallVector.h  160
-rw-r--r--  include/llvm/ADT/Statistic.h  4
-rw-r--r--  include/llvm/ADT/Triple.h  7
-rw-r--r--  include/llvm/ADT/ValueMap.h  6
-rw-r--r--  include/llvm/ADT/ilist.h  1
-rw-r--r--  include/llvm/AbstractTypeUser.h  3
-rw-r--r--  include/llvm/Analysis/AliasAnalysis.h  21
-rw-r--r--  include/llvm/Analysis/CFGPrinter.h  6
-rw-r--r--  include/llvm/Analysis/CaptureTracking.h  6
-rw-r--r--  include/llvm/Analysis/CodeMetrics.h  72
-rw-r--r--  include/llvm/Analysis/DebugInfo.h  9
-rw-r--r--  include/llvm/Analysis/DominatorInternals.h  18
-rw-r--r--  include/llvm/Analysis/Dominators.h  22
-rw-r--r--  include/llvm/Analysis/InlineCost.h  45
-rw-r--r--  include/llvm/Analysis/IntervalIterator.h  22
-rw-r--r--  include/llvm/Analysis/Loads.h  51
-rw-r--r--  include/llvm/Analysis/LoopInfo.h  59
-rw-r--r--  include/llvm/Analysis/MemoryBuiltins.h  4
-rw-r--r--  include/llvm/Analysis/ScalarEvolution.h  8
-rw-r--r--  include/llvm/Analysis/ScalarEvolutionExpander.h  15
-rw-r--r--  include/llvm/Analysis/ValueTracking.h  2
-rw-r--r--  include/llvm/Bitcode/ReaderWriter.h  3
-rw-r--r--  include/llvm/CodeGen/AsmPrinter.h  142
-rw-r--r--  include/llvm/CodeGen/CallingConvLower.h  9
-rw-r--r--  include/llvm/CodeGen/FastISel.h  61
-rw-r--r--  include/llvm/CodeGen/FunctionLoweringInfo.h  154
-rw-r--r--  include/llvm/CodeGen/GCMetadata.h  96
-rw-r--r--  include/llvm/CodeGen/GCMetadataPrinter.h  28
-rw-r--r--  include/llvm/CodeGen/ISDOpcodes.h  7
-rw-r--r--  include/llvm/CodeGen/LinkAllCodegenComponents.h  1
-rw-r--r--  include/llvm/CodeGen/LiveInterval.h  101
-rw-r--r--  include/llvm/CodeGen/LiveIntervalAnalysis.h  17
-rw-r--r--  include/llvm/CodeGen/MachineBasicBlock.h  21
-rw-r--r--  include/llvm/CodeGen/MachineFrameInfo.h  12
-rw-r--r--  include/llvm/CodeGen/MachineFunctionPass.h  9
-rw-r--r--  include/llvm/CodeGen/MachineInstr.h  30
-rw-r--r--  include/llvm/CodeGen/MachineJumpTableInfo.h  2
-rw-r--r--  include/llvm/CodeGen/MachineLoopInfo.h  4
-rw-r--r--  include/llvm/CodeGen/MachineOperand.h  16
-rw-r--r--  include/llvm/CodeGen/MachineRegisterInfo.h  15
-rw-r--r--  include/llvm/CodeGen/Passes.h  15
-rw-r--r--  include/llvm/CodeGen/PostRAHazardRecognizer.h  94
-rw-r--r--  include/llvm/CodeGen/RegisterCoalescer.h  90
-rw-r--r--  include/llvm/CodeGen/RegisterScavenging.h  11
-rw-r--r--  include/llvm/CodeGen/RuntimeLibcalls.h  34
-rw-r--r--  include/llvm/CodeGen/SelectionDAG.h  39
-rw-r--r--  include/llvm/CodeGen/SelectionDAGISel.h  15
-rw-r--r--  include/llvm/CodeGen/SelectionDAGNodes.h  133
-rw-r--r--  include/llvm/CodeGen/SlotIndexes.h  61
-rw-r--r--  include/llvm/Config/config.h.in  3
-rw-r--r--  include/llvm/ExecutionEngine/ExecutionEngine.h  4
-rw-r--r--  include/llvm/GlobalValue.h  17
-rw-r--r--  include/llvm/InlineAsm.h  3
-rw-r--r--  include/llvm/InstrTypes.h  10
-rw-r--r--  include/llvm/Instructions.h  77
-rw-r--r--  include/llvm/IntrinsicInst.h  64
-rw-r--r--  include/llvm/Intrinsics.td  6
-rw-r--r--  include/llvm/LinkAllPasses.h  1
-rw-r--r--  include/llvm/MC/MCAssembler.h  4
-rw-r--r--  include/llvm/MC/MCContext.h  22
-rw-r--r--  include/llvm/MC/MCDirectives.h  3
-rw-r--r--  include/llvm/MC/MCObjectStreamer.h  56
-rw-r--r--  include/llvm/MC/MCObjectWriter.h  2
-rw-r--r--  include/llvm/MC/MCParser/AsmLexer.h  6
-rw-r--r--  include/llvm/MC/MCParser/AsmParser.h  42
-rw-r--r--  include/llvm/MC/MCParser/MCAsmLexer.h  7
-rw-r--r--  include/llvm/MC/MCParser/MCAsmParser.h  32
-rw-r--r--  include/llvm/MC/MCParser/MCAsmParserExtension.h  66
-rw-r--r--  include/llvm/MC/MCSection.h  6
-rw-r--r--  include/llvm/MC/MCSectionCOFF.h  52
-rw-r--r--  include/llvm/MC/MCStreamer.h  10
-rw-r--r--  include/llvm/MC/SectionKind.h  64
-rw-r--r--  include/llvm/Module.h  29
-rw-r--r--  include/llvm/Pass.h  27
-rw-r--r--  include/llvm/PassAnalysisSupport.h  13
-rw-r--r--  include/llvm/PassManagers.h  5
-rw-r--r--  include/llvm/PassSupport.h  8
-rw-r--r--  include/llvm/Support/CFG.h  2
-rw-r--r--  include/llvm/Support/COFF.h  183
-rw-r--r--  include/llvm/Support/CallSite.h  12
-rw-r--r--  include/llvm/Support/Dwarf.h  87
-rw-r--r--  include/llvm/Support/ELF.h  264
-rw-r--r--  include/llvm/Support/IRBuilder.h  113
-rw-r--r--  include/llvm/Support/IRReader.h  6
-rw-r--r--  include/llvm/Support/MemoryBuffer.h  31
-rw-r--r--  include/llvm/Support/Timer.h  6
-rw-r--r--  include/llvm/Support/raw_ostream.h  4
-rw-r--r--  include/llvm/SymbolTableListTraits.h  5
-rw-r--r--  include/llvm/System/DataTypes.h.cmake  52
-rw-r--r--  include/llvm/System/Path.h  8
-rw-r--r--  include/llvm/Target/Target.td  28
-rw-r--r--  include/llvm/Target/TargetAsmParser.h  4
-rw-r--r--  include/llvm/Target/TargetCallingConv.h  142
-rw-r--r--  include/llvm/Target/TargetInstrDesc.h  6
-rw-r--r--  include/llvm/Target/TargetInstrInfo.h  141
-rw-r--r--  include/llvm/Target/TargetInstrItineraries.h  3
-rw-r--r--  include/llvm/Target/TargetLowering.h  282
-rw-r--r--  include/llvm/Target/TargetOpcodes.h  48
-rw-r--r--  include/llvm/Target/TargetRegisterInfo.h  60
-rw-r--r--  include/llvm/Transforms/IPO.h  5
-rw-r--r--  include/llvm/Transforms/Utils/BasicBlockUtils.h  18
-rw-r--r--  include/llvm/Transforms/Utils/BuildLibCalls.h  4
-rw-r--r--  include/llvm/Transforms/Utils/Cloning.h  30
-rw-r--r--  include/llvm/Transforms/Utils/Local.h  11
-rw-r--r--  include/llvm/Type.h  6
-rw-r--r--  include/llvm/Use.h  1
-rw-r--r--  include/llvm/Value.h  4
115 files changed, 2880 insertions, 1333 deletions
diff --git a/include/llvm/ADT/DAGDeltaAlgorithm.h b/include/llvm/ADT/DAGDeltaAlgorithm.h
new file mode 100644
index 000000000000..99ed15c0d60f
--- /dev/null
+++ b/include/llvm/ADT/DAGDeltaAlgorithm.h
@@ -0,0 +1,75 @@
+//===--- DAGDeltaAlgorithm.h - A DAG Minimization Algorithm ----*- C++ -*--===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_DAGDELTAALGORITHM_H
+#define LLVM_ADT_DAGDELTAALGORITHM_H
+
+#include <vector>
+#include <set>
+
+namespace llvm {
+
+/// DAGDeltaAlgorithm - Implements a "delta debugging" algorithm for minimizing
+/// directed acyclic graphs using a predicate function.
+///
+/// The result of the algorithm is a subset of the input change set which is
+/// guaranteed to satisfy the predicate, assuming that the input set did. For
+/// well formed predicates, the result set is guaranteed to be such that
+/// removing any single element not required by the dependencies on the other
+/// elements would falsify the predicate.
+///
+/// The DAG should be used to represent dependencies in the changes which are
+/// likely to hold across the predicate function. That is, for a particular
+/// changeset S and predicate P:
+///
+/// P(S) => P(S union pred(S))
+///
+/// The minimization algorithm uses this dependency information to attempt to
+/// eagerly prune large subsets of changes. As with \see DeltaAlgorithm, the DAG
+/// is not required to satisfy this property, but the algorithm will run
+/// substantially fewer tests with appropriate dependencies. \see DeltaAlgorithm
+/// for more information on the properties which the predicate function itself
+/// should satisfy.
+class DAGDeltaAlgorithm {
+public:
+ typedef unsigned change_ty;
+ typedef std::pair<change_ty, change_ty> edge_ty;
+
+ // FIXME: Use a decent data structure.
+ typedef std::set<change_ty> changeset_ty;
+ typedef std::vector<changeset_ty> changesetlist_ty;
+
+public:
+ virtual ~DAGDeltaAlgorithm() {}
+
+ /// Run - Minimize the DAG formed by the \arg Changes vertices and the \arg
+ /// Dependencies edges by executing \see ExecuteOneTest() on subsets of
+ /// changes and returning the smallest set which still satisfies the test
+ /// predicate and the input \arg Dependencies.
+ ///
+ /// \param Changes The list of changes.
+ ///
+ /// \param Dependencies The list of dependencies amongst changes. For each
+ /// (x,y) in \arg Dependencies, both x and y must be in \arg Changes. The
+ /// minimization algorithm guarantees that for each tested changed set S, x
+ /// \in S implies y \in S. It is an error to have cyclic dependencies.
+ changeset_ty Run(const changeset_ty &Changes,
+ const std::vector<edge_ty> &Dependencies);
+
+ /// UpdatedSearchState - Callback used when the search state changes.
+ virtual void UpdatedSearchState(const changeset_ty &Changes,
+ const changesetlist_ty &Sets,
+ const changeset_ty &Required) {}
+
+ /// ExecuteOneTest - Execute a single test predicate on the change set \arg S.
+ virtual bool ExecuteOneTest(const changeset_ty &S) = 0;
+};
+
+} // end namespace llvm
+
+#endif
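
A minimal sketch (not part of the patch) of how a client might drive the interface above: subclass DAGDeltaAlgorithm and supply the test predicate. The stillFails() helper is hypothetical and stands in for whatever property the client wants to preserve.

    // Hedged usage sketch; assumes the header added above.
    #include "llvm/ADT/DAGDeltaAlgorithm.h"

    // Hypothetical client predicate: does the property of interest still hold?
    bool stillFails(const llvm::DAGDeltaAlgorithm::changeset_ty &S);

    namespace {
    class FailureMinimizer : public llvm::DAGDeltaAlgorithm {
      // Called by Run() on candidate subsets of the change set.
      virtual bool ExecuteOneTest(const changeset_ty &S) {
        return stillFails(S);
      }
    };
    } // end anonymous namespace

    // Typical driver:
    //   FailureMinimizer M;
    //   DAGDeltaAlgorithm::changeset_ty Result = M.Run(Changes, Dependencies);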
diff --git a/include/llvm/ADT/DenseMap.h b/include/llvm/ADT/DenseMap.h
index 5c994734193c..c53e255e1c7c 100644
--- a/include/llvm/ADT/DenseMap.h
+++ b/include/llvm/ADT/DenseMap.h
@@ -22,6 +22,7 @@
#include <new>
#include <utility>
#include <cassert>
+#include <cstddef>
#include <cstring>
namespace llvm {
diff --git a/include/llvm/ADT/EquivalenceClasses.h b/include/llvm/ADT/EquivalenceClasses.h
index 91a14294516e..07a5edfdb6ca 100644
--- a/include/llvm/ADT/EquivalenceClasses.h
+++ b/include/llvm/ADT/EquivalenceClasses.h
@@ -169,7 +169,7 @@ public:
/// getOrInsertLeaderValue - Return the leader for the specified value that is
/// in the set. If the member is not in the set, it is inserted, then
/// returned.
- const ElemTy &getOrInsertLeaderValue(const ElemTy &V) const {
+ const ElemTy &getOrInsertLeaderValue(const ElemTy &V) {
member_iterator MI = findLeader(insert(V));
assert(MI != member_end() && "Value is not in the set!");
return *MI;
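
The const qualifier had to go because the call may insert the value. A small sketch of the resulting usage, assuming the existing EquivalenceClasses API (unionSets, insert):

    // Hedged sketch: getOrInsertLeaderValue now requires a non-const object
    // because it may insert V before returning its class leader.
    #include "llvm/ADT/EquivalenceClasses.h"

    void example() {
      llvm::EquivalenceClasses<int> EC;
      EC.unionSets(1, 2);                           // class {1, 2}
      const int &A = EC.getOrInsertLeaderValue(2);  // already a member
      const int &B = EC.getOrInsertLeaderValue(7);  // inserted as its own class
      (void)A; (void)B;
    }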
diff --git a/include/llvm/ADT/FoldingSet.h b/include/llvm/ADT/FoldingSet.h
index e8979bb076de..fc8490abf739 100644
--- a/include/llvm/ADT/FoldingSet.h
+++ b/include/llvm/ADT/FoldingSet.h
@@ -166,6 +166,14 @@ public:
/// FindNodeOrInsertPos.
void InsertNode(Node *N, void *InsertPos);
+ /// InsertNode - Insert the specified node into the folding set, knowing that
+ /// it is not already in the folding set.
+ void InsertNode(Node *N) {
+ Node *Inserted = GetOrInsertNode(N);
+ (void)Inserted;
+ assert(Inserted == N && "Node already inserted!");
+ }
+
/// size - Returns the number of nodes in the folding set.
unsigned size() const { return NumNodes; }
@@ -196,6 +204,10 @@ protected:
template<typename T> struct FoldingSetTrait {
static inline void Profile(const T& X, FoldingSetNodeID& ID) { X.Profile(ID);}
static inline void Profile(T& X, FoldingSetNodeID& ID) { X.Profile(ID); }
+ template <typename Ctx>
+ static inline void Profile(T &X, FoldingSetNodeID &ID, Ctx Context) {
+ X.Profile(ID, Context);
+ }
};
//===--------------------------------------------------------------------===//
@@ -322,6 +334,77 @@ public:
};
//===----------------------------------------------------------------------===//
+/// ContextualFoldingSet - This template class is a further refinement
+/// of FoldingSet which provides a context argument when calling
+/// Profile on its nodes. Currently, that argument is fixed at
+/// initialization time.
+///
+/// T must be a subclass of FoldingSetNode and implement a Profile
+/// function with signature
+/// void Profile(llvm::FoldingSetNodeID &, Ctx);
+template <class T, class Ctx>
+class ContextualFoldingSet : public FoldingSetImpl {
+ // Unfortunately, this can't derive from FoldingSet<T> because the
+ // construction vtable for FoldingSet<T> requires
+ // FoldingSet<T>::GetNodeProfile to be instantiated, which in turn
+ // requires a single-argument T::Profile().
+
+private:
+ Ctx Context;
+
+ /// GetNodeProfile - Each instantiation of the FoldingSet needs to provide a
+ /// way to convert nodes into a unique specifier.
+ virtual void GetNodeProfile(FoldingSetNodeID &ID,
+ FoldingSetImpl::Node *N) const {
+ T *TN = static_cast<T *>(N);
+
+ // We must use explicit template arguments in case Ctx is a
+ // reference type.
+ FoldingSetTrait<T>::template Profile<Ctx>(*TN, ID, Context);
+ }
+
+public:
+ explicit ContextualFoldingSet(Ctx Context, unsigned Log2InitSize = 6)
+ : FoldingSetImpl(Log2InitSize), Context(Context)
+ {}
+
+ Ctx getContext() const { return Context; }
+
+
+ typedef FoldingSetIterator<T> iterator;
+ iterator begin() { return iterator(Buckets); }
+ iterator end() { return iterator(Buckets+NumBuckets); }
+
+ typedef FoldingSetIterator<const T> const_iterator;
+ const_iterator begin() const { return const_iterator(Buckets); }
+ const_iterator end() const { return const_iterator(Buckets+NumBuckets); }
+
+ typedef FoldingSetBucketIterator<T> bucket_iterator;
+
+ bucket_iterator bucket_begin(unsigned hash) {
+ return bucket_iterator(Buckets + (hash & (NumBuckets-1)));
+ }
+
+ bucket_iterator bucket_end(unsigned hash) {
+ return bucket_iterator(Buckets + (hash & (NumBuckets-1)), true);
+ }
+
+ /// GetOrInsertNode - If there is an existing simple Node exactly
+ /// equal to the specified node, return it. Otherwise, insert 'N'
+ /// and return it instead.
+ T *GetOrInsertNode(Node *N) {
+ return static_cast<T *>(FoldingSetImpl::GetOrInsertNode(N));
+ }
+
+ /// FindNodeOrInsertPos - Look up the node specified by ID. If it
+ /// exists, return it. If not, return the insertion token that will
+ /// make insertion faster.
+ T *FindNodeOrInsertPos(const FoldingSetNodeID &ID, void *&InsertPos) {
+ return static_cast<T *>(FoldingSetImpl::FindNodeOrInsertPos(ID, InsertPos));
+ }
+};
+
+//===----------------------------------------------------------------------===//
/// FoldingSetIteratorImpl - This is the common iterator support shared by all
/// folding sets, which knows how to walk the folding set hash table.
class FoldingSetIteratorImpl {
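
A sketch of the node shape ContextualFoldingSet expects: a two-argument Profile that takes the context. The node and context types below are invented for illustration.

    // Hedged sketch of a ContextualFoldingSet client.
    #include "llvm/ADT/FoldingSet.h"

    struct MyContext { /* whatever Profile needs to consult */ };

    struct MyNode : public llvm::FoldingSetNode {
      int Value;
      explicit MyNode(int V) : Value(V) {}
      // Two-argument Profile, as required by ContextualFoldingSet<MyNode, MyContext*>.
      void Profile(llvm::FoldingSetNodeID &ID, MyContext *) const {
        ID.AddInteger(Value);
      }
    };

    // Usage:
    //   MyContext C;
    //   llvm::ContextualFoldingSet<MyNode, MyContext*> Set(&C);
    //   MyNode *N = Set.GetOrInsertNode(new MyNode(42));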
diff --git a/include/llvm/ADT/ImmutableIntervalMap.h b/include/llvm/ADT/ImmutableIntervalMap.h
index f33fb1eb0a82..7aa315570f7c 100644
--- a/include/llvm/ADT/ImmutableIntervalMap.h
+++ b/include/llvm/ADT/ImmutableIntervalMap.h
@@ -125,9 +125,11 @@ private:
key_type_ref KCurrent = ImutInfo::KeyOfValue(this->Value(T));
if (ImutInfo::isLess(K, KCurrent))
- return this->Balance(Add_internal(V, this->Left(T)), this->Value(T), this->Right(T));
+ return this->Balance(Add_internal(V, this->Left(T)), this->Value(T),
+ this->Right(T));
else
- return this->Balance(this->Left(T), this->Value(T), Add_internal(V, this->Right(T)));
+ return this->Balance(this->Left(T), this->Value(T),
+ Add_internal(V, this->Right(T)));
}
// Remove all overlaps from T.
@@ -150,9 +152,11 @@ private:
// If current key does not overlap the inserted key.
if (CurrentK.getStart() > K.getEnd())
- return this->Balance(RemoveOverlap(this->Left(T), K, Changed), this->Value(T), this->Right(T));
+ return this->Balance(RemoveOverlap(this->Left(T), K, Changed),
+ this->Value(T), this->Right(T));
else if (CurrentK.getEnd() < K.getStart())
- return this->Balance(this->Left(T), this->Value(T), RemoveOverlap(this->Right(T), K, Changed));
+ return this->Balance(this->Left(T), this->Value(T),
+ RemoveOverlap(this->Right(T), K, Changed));
// Current key overlaps with the inserted key.
// Remove the current key.
diff --git a/include/llvm/ADT/PostOrderIterator.h b/include/llvm/ADT/PostOrderIterator.h
index 8315bc9f9ed5..47e5b2bd4ad0 100644
--- a/include/llvm/ADT/PostOrderIterator.h
+++ b/include/llvm/ADT/PostOrderIterator.h
@@ -19,7 +19,6 @@
#include "llvm/ADT/GraphTraits.h"
#include "llvm/ADT/SmallPtrSet.h"
#include <set>
-#include <stack>
#include <vector>
namespace llvm {
@@ -52,21 +51,21 @@ class po_iterator : public std::iterator<std::forward_iterator_tag,
// VisitStack - Used to maintain the ordering. Top = current block
// First element is basic block pointer, second is the 'next child' to visit
- std::stack<std::pair<NodeType *, ChildItTy> > VisitStack;
+ std::vector<std::pair<NodeType *, ChildItTy> > VisitStack;
void traverseChild() {
- while (VisitStack.top().second != GT::child_end(VisitStack.top().first)) {
- NodeType *BB = *VisitStack.top().second++;
+ while (VisitStack.back().second != GT::child_end(VisitStack.back().first)) {
+ NodeType *BB = *VisitStack.back().second++;
if (!this->Visited.count(BB)) { // If the block is not visited...
this->Visited.insert(BB);
- VisitStack.push(std::make_pair(BB, GT::child_begin(BB)));
+ VisitStack.push_back(std::make_pair(BB, GT::child_begin(BB)));
}
}
}
inline po_iterator(NodeType *BB) {
this->Visited.insert(BB);
- VisitStack.push(std::make_pair(BB, GT::child_begin(BB)));
+ VisitStack.push_back(std::make_pair(BB, GT::child_begin(BB)));
traverseChild();
}
inline po_iterator() {} // End is when stack is empty.
@@ -75,7 +74,7 @@ class po_iterator : public std::iterator<std::forward_iterator_tag,
po_iterator_storage<SetType, ExtStorage>(S) {
if(!S.count(BB)) {
this->Visited.insert(BB);
- VisitStack.push(std::make_pair(BB, GT::child_begin(BB)));
+ VisitStack.push_back(std::make_pair(BB, GT::child_begin(BB)));
traverseChild();
}
}
@@ -102,7 +101,7 @@ public:
inline bool operator!=(const _Self& x) const { return !operator==(x); }
inline pointer operator*() const {
- return VisitStack.top().first;
+ return VisitStack.back().first;
}
// This is a nonstandard operator-> that dereferences the pointer an extra
@@ -112,7 +111,7 @@ public:
inline NodeType *operator->() const { return operator*(); }
inline _Self& operator++() { // Preincrement
- VisitStack.pop();
+ VisitStack.pop_back();
if (!VisitStack.empty())
traverseChild();
return *this;
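
The change above swaps std::stack for a std::vector used as a stack; the three stack operations map directly, as the standalone snippet below illustrates, while the vector keeps its elements contiguous and inspectable.

    // Hedged illustration of the vector-as-stack idiom adopted above.
    #include <utility>
    #include <vector>

    void stackIdiom() {
      std::vector<std::pair<int, int> > Stack;
      Stack.push_back(std::make_pair(1, 0));   // push()
      std::pair<int, int> &Top = Stack.back(); // top()
      (void)Top;
      Stack.pop_back();                        // pop()
    }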
diff --git a/include/llvm/ADT/SetVector.h b/include/llvm/ADT/SetVector.h
index fab133af4c03..bf8286c1d840 100644
--- a/include/llvm/ADT/SetVector.h
+++ b/include/llvm/ADT/SetVector.h
@@ -143,6 +143,14 @@ public:
vector_.pop_back();
}
+ bool operator==(const SetVector &that) const {
+ return vector_ == that.vector_;
+ }
+
+ bool operator!=(const SetVector &that) const {
+ return vector_ != that.vector_;
+ }
+
private:
set_type set_; ///< The set.
vector_type vector_; ///< The vector.
diff --git a/include/llvm/ADT/SmallPtrSet.h b/include/llvm/ADT/SmallPtrSet.h
index ef0812592be1..424bdba5a20e 100644
--- a/include/llvm/ADT/SmallPtrSet.h
+++ b/include/llvm/ADT/SmallPtrSet.h
@@ -46,8 +46,10 @@ class SmallPtrSetIteratorImpl;
class SmallPtrSetImpl {
friend class SmallPtrSetIteratorImpl;
protected:
- /// CurArray - This is the current set of buckets. If it points to
- /// SmallArray, then the set is in 'small mode'.
+ /// SmallArray - Points to a fixed size set of buckets, used in 'small mode'.
+ const void **SmallArray;
+ /// CurArray - This is the current set of buckets. If equal to SmallArray,
+ /// then the set is in 'small mode'.
const void **CurArray;
/// CurArraySize - The allocated size of CurArray, always a power of two.
/// Note that CurArray points to an array that has CurArraySize+1 elements in
@@ -57,15 +59,13 @@ protected:
// If small, this is # elts allocated consequtively
unsigned NumElements;
unsigned NumTombstones;
- const void *SmallArray[1]; // Must be last ivar.
// Helper to copy construct a SmallPtrSet.
- SmallPtrSetImpl(const SmallPtrSetImpl& that);
- explicit SmallPtrSetImpl(unsigned SmallSize) {
+ SmallPtrSetImpl(const void **SmallStorage, const SmallPtrSetImpl& that);
+ explicit SmallPtrSetImpl(const void **SmallStorage, unsigned SmallSize) :
+ SmallArray(SmallStorage), CurArray(SmallStorage), CurArraySize(SmallSize) {
assert(SmallSize && (SmallSize & (SmallSize-1)) == 0 &&
"Initial size must be a power of two!");
- CurArray = &SmallArray[0];
- CurArraySize = SmallSize;
// The end pointer, always valid, is set to a valid element to help the
// iterator.
CurArray[SmallSize] = 0;
@@ -123,7 +123,7 @@ protected:
}
private:
- bool isSmall() const { return CurArray == &SmallArray[0]; }
+ bool isSmall() const { return CurArray == SmallArray; }
unsigned Hash(const void *Ptr) const {
return static_cast<unsigned>(((uintptr_t)Ptr >> 4) & (CurArraySize-1));
@@ -199,29 +199,29 @@ public:
}
};
-/// NextPowerOfTwo - This is a helper template that rounds N up to the next
-/// power of two.
+/// RoundUpToPowerOfTwo - This is a helper template that rounds N up to the next
+/// power of two (which means N itself if N is already a power of two).
template<unsigned N>
-struct NextPowerOfTwo;
+struct RoundUpToPowerOfTwo;
-/// NextPowerOfTwoH - If N is not a power of two, increase it. This is a helper
-/// template used to implement NextPowerOfTwo.
+/// RoundUpToPowerOfTwoH - If N is not a power of two, increase it. This is a
+/// helper template used to implement RoundUpToPowerOfTwo.
template<unsigned N, bool isPowerTwo>
-struct NextPowerOfTwoH {
+struct RoundUpToPowerOfTwoH {
enum { Val = N };
};
template<unsigned N>
-struct NextPowerOfTwoH<N, false> {
+struct RoundUpToPowerOfTwoH<N, false> {
enum {
// We could just use NextVal = N+1, but this converges faster. N|(N-1) sets
// the right-most zero bits to one all at once, e.g. 0b0011000 -> 0b0011111.
- Val = NextPowerOfTwo<(N|(N-1)) + 1>::Val
+ Val = RoundUpToPowerOfTwo<(N|(N-1)) + 1>::Val
};
};
template<unsigned N>
-struct NextPowerOfTwo {
- enum { Val = NextPowerOfTwoH<N, (N&(N-1)) == 0>::Val };
+struct RoundUpToPowerOfTwo {
+ enum { Val = RoundUpToPowerOfTwoH<N, (N&(N-1)) == 0>::Val };
};
@@ -232,16 +232,17 @@ struct NextPowerOfTwo {
template<class PtrType, unsigned SmallSize>
class SmallPtrSet : public SmallPtrSetImpl {
// Make sure that SmallSize is a power of two, round up if not.
- enum { SmallSizePowTwo = NextPowerOfTwo<SmallSize>::Val };
- void *SmallArray[SmallSizePowTwo];
+ enum { SmallSizePowTwo = RoundUpToPowerOfTwo<SmallSize>::Val };
+ /// SmallStorage - Fixed size storage used in 'small mode'. The extra element
+ /// ensures that the end iterator actually points to valid memory.
+ const void *SmallStorage[SmallSizePowTwo+1];
typedef PointerLikeTypeTraits<PtrType> PtrTraits;
public:
- SmallPtrSet() : SmallPtrSetImpl(NextPowerOfTwo<SmallSizePowTwo>::Val) {}
- SmallPtrSet(const SmallPtrSet &that) : SmallPtrSetImpl(that) {}
+ SmallPtrSet() : SmallPtrSetImpl(SmallStorage, SmallSizePowTwo) {}
+ SmallPtrSet(const SmallPtrSet &that) : SmallPtrSetImpl(SmallStorage, that) {}
template<typename It>
- SmallPtrSet(It I, It E)
- : SmallPtrSetImpl(NextPowerOfTwo<SmallSizePowTwo>::Val) {
+ SmallPtrSet(It I, It E) : SmallPtrSetImpl(SmallStorage, SmallSizePowTwo) {
insert(I, E);
}
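
RoundUpToPowerOfTwo<N>::Val is a compile-time constant, which is why it can size the inline storage array above; a tiny sketch:

    // Hedged sketch; assumes SmallPtrSet.h as patched above.
    #include "llvm/ADT/SmallPtrSet.h"

    char Rounded[llvm::RoundUpToPowerOfTwo<5>::Val];  // 8 bytes: 5 rounds up
    char Exact[llvm::RoundUpToPowerOfTwo<16>::Val];   // 16 bytes: already a power of two

    llvm::SmallPtrSet<void*, 5> Pointers;             // inline storage rounded up to 8 slots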
diff --git a/include/llvm/ADT/SmallVector.h b/include/llvm/ADT/SmallVector.h
index 18c8619bf93a..fa61d207bd30 100644
--- a/include/llvm/ADT/SmallVector.h
+++ b/include/llvm/ADT/SmallVector.h
@@ -17,6 +17,8 @@
#include "llvm/Support/type_traits.h"
#include <algorithm>
#include <cassert>
+#include <cstddef>
+#include <cstdlib>
#include <cstring>
#include <memory>
@@ -70,35 +72,35 @@ protected:
#endif
} FirstEl;
// Space after 'FirstEl' is clobbered, do not add any instance vars after it.
-
+
protected:
SmallVectorBase(size_t Size)
: BeginX(&FirstEl), EndX(&FirstEl), CapacityX((char*)&FirstEl+Size) {}
-
+
/// isSmall - Return true if this is a smallvector which has not had dynamic
/// memory allocated for it.
bool isSmall() const {
return BeginX == static_cast<const void*>(&FirstEl);
}
-
+
/// size_in_bytes - This returns size()*sizeof(T).
size_t size_in_bytes() const {
return size_t((char*)EndX - (char*)BeginX);
}
-
+
/// capacity_in_bytes - This returns capacity()*sizeof(T).
size_t capacity_in_bytes() const {
return size_t((char*)CapacityX - (char*)BeginX);
}
-
+
/// grow_pod - This is an implementation of the grow() method which only works
/// on POD-like datatypes and is out of line to reduce code duplication.
void grow_pod(size_t MinSizeInBytes, size_t TSize);
-
+
public:
bool empty() const { return BeginX == EndX; }
};
-
+
template <typename T>
class SmallVectorTemplateCommon : public SmallVectorBase {
@@ -106,21 +108,21 @@ protected:
void setEnd(T *P) { this->EndX = P; }
public:
SmallVectorTemplateCommon(size_t Size) : SmallVectorBase(Size) {}
-
+
typedef size_t size_type;
typedef ptrdiff_t difference_type;
typedef T value_type;
typedef T *iterator;
typedef const T *const_iterator;
-
+
typedef std::reverse_iterator<const_iterator> const_reverse_iterator;
typedef std::reverse_iterator<iterator> reverse_iterator;
-
+
typedef T &reference;
typedef const T &const_reference;
typedef T *pointer;
typedef const T *const_pointer;
-
+
// forward iterator creation methods.
iterator begin() { return (iterator)this->BeginX; }
const_iterator begin() const { return (const_iterator)this->BeginX; }
@@ -130,7 +132,7 @@ protected:
iterator capacity_ptr() { return (iterator)this->CapacityX; }
const_iterator capacity_ptr() const { return (const_iterator)this->CapacityX;}
public:
-
+
// reverse iterator creation methods.
reverse_iterator rbegin() { return reverse_iterator(end()); }
const_reverse_iterator rbegin() const{ return const_reverse_iterator(end()); }
@@ -139,16 +141,16 @@ public:
size_type size() const { return end()-begin(); }
size_type max_size() const { return size_type(-1) / sizeof(T); }
-
+
/// capacity - Return the total number of elements in the currently allocated
/// buffer.
size_t capacity() const { return capacity_ptr() - begin(); }
-
+
/// data - Return a pointer to the vector's buffer, even if empty().
pointer data() { return pointer(begin()); }
/// data - Return a pointer to the vector's buffer, even if empty().
const_pointer data() const { return const_pointer(begin()); }
-
+
reference operator[](unsigned idx) {
assert(begin() + idx < end());
return begin()[idx];
@@ -172,7 +174,7 @@ public:
return end()[-1];
}
};
-
+
/// SmallVectorTemplateBase<isPodLike = false> - This is where we put method
/// implementations that are designed to work with non-POD-like T's.
template <typename T, bool isPodLike>
@@ -186,14 +188,14 @@ public:
E->~T();
}
}
-
+
/// uninitialized_copy - Copy the range [I, E) onto the uninitialized memory
/// starting with "Dest", constructing elements into it as needed.
template<typename It1, typename It2>
static void uninitialized_copy(It1 I, It1 E, It2 Dest) {
std::uninitialized_copy(I, E, Dest);
}
-
+
/// grow - double the size of the allocated memory, guaranteeing space for at
/// least one more element or MinSize if specified.
void grow(size_t MinSize = 0);
@@ -207,34 +209,34 @@ void SmallVectorTemplateBase<T, isPodLike>::grow(size_t MinSize) {
size_t NewCapacity = 2*CurCapacity;
if (NewCapacity < MinSize)
NewCapacity = MinSize;
- T *NewElts = static_cast<T*>(operator new(NewCapacity*sizeof(T)));
-
+ T *NewElts = static_cast<T*>(malloc(NewCapacity*sizeof(T)));
+
// Copy the elements over.
this->uninitialized_copy(this->begin(), this->end(), NewElts);
-
+
// Destroy the original elements.
destroy_range(this->begin(), this->end());
-
+
// If this wasn't grown from the inline copy, deallocate the old space.
if (!this->isSmall())
- operator delete(this->begin());
-
+ free(this->begin());
+
this->setEnd(NewElts+CurSize);
this->BeginX = NewElts;
this->CapacityX = this->begin()+NewCapacity;
}
-
-
+
+
/// SmallVectorTemplateBase<isPodLike = true> - This is where we put method
/// implementations that are designed to work with POD-like T's.
template <typename T>
class SmallVectorTemplateBase<T, true> : public SmallVectorTemplateCommon<T> {
public:
SmallVectorTemplateBase(size_t Size) : SmallVectorTemplateCommon<T>(Size) {}
-
+
// No need to do a destroy loop for POD's.
static void destroy_range(T *, T *) {}
-
+
/// uninitialized_copy - Copy the range [I, E) onto the uninitialized memory
/// starting with "Dest", constructing elements into it as needed.
template<typename It1, typename It2>
@@ -259,33 +261,35 @@ public:
this->grow_pod(MinSize*sizeof(T), sizeof(T));
}
};
-
-
+
+
/// SmallVectorImpl - This class consists of common code factored out of the
/// SmallVector class to reduce code duplication based on the SmallVector 'N'
/// template parameter.
template <typename T>
class SmallVectorImpl : public SmallVectorTemplateBase<T, isPodLike<T>::value> {
typedef SmallVectorTemplateBase<T, isPodLike<T>::value > SuperClass;
+
+ SmallVectorImpl(const SmallVectorImpl&); // DISABLED.
public:
typedef typename SuperClass::iterator iterator;
typedef typename SuperClass::size_type size_type;
-
+
// Default ctor - Initialize to empty.
explicit SmallVectorImpl(unsigned N)
: SmallVectorTemplateBase<T, isPodLike<T>::value>(N*sizeof(T)) {
}
-
+
~SmallVectorImpl() {
// Destroy the constructed elements in the vector.
this->destroy_range(this->begin(), this->end());
-
+
// If this wasn't grown from the inline copy, deallocate the old space.
if (!this->isSmall())
- operator delete(this->begin());
+ free(this->begin());
}
-
-
+
+
void clear() {
this->destroy_range(this->begin(), this->end());
this->EndX = this->BeginX;
@@ -319,7 +323,7 @@ public:
if (this->capacity() < N)
this->grow(N);
}
-
+
void push_back(const T &Elt) {
if (this->EndX < this->CapacityX) {
Retry:
@@ -330,21 +334,21 @@ public:
this->grow();
goto Retry;
}
-
+
void pop_back() {
this->setEnd(this->end()-1);
this->end()->~T();
}
-
+
T pop_back_val() {
T Result = this->back();
pop_back();
return Result;
}
-
-
+
+
void swap(SmallVectorImpl &RHS);
-
+
/// append - Add the specified range to the end of the SmallVector.
///
template<typename in_iter>
@@ -353,26 +357,26 @@ public:
// Grow allocated space if needed.
if (NumInputs > size_type(this->capacity_ptr()-this->end()))
this->grow(this->size()+NumInputs);
-
+
// Copy the new elements over.
// TODO: NEED To compile time dispatch on whether in_iter is a random access
// iterator to use the fast uninitialized_copy.
std::uninitialized_copy(in_start, in_end, this->end());
this->setEnd(this->end() + NumInputs);
}
-
+
/// append - Add the specified range to the end of the SmallVector.
///
void append(size_type NumInputs, const T &Elt) {
// Grow allocated space if needed.
if (NumInputs > size_type(this->capacity_ptr()-this->end()))
this->grow(this->size()+NumInputs);
-
+
// Copy the new elements over.
std::uninitialized_fill_n(this->end(), NumInputs, Elt);
this->setEnd(this->end() + NumInputs);
}
-
+
void assign(unsigned NumElts, const T &Elt) {
clear();
if (this->capacity() < NumElts)
@@ -380,7 +384,7 @@ public:
this->setEnd(this->begin()+NumElts);
construct_range(this->begin(), this->end(), Elt);
}
-
+
iterator erase(iterator I) {
iterator N = I;
// Shift all elts down one.
@@ -389,7 +393,7 @@ public:
pop_back();
return(N);
}
-
+
iterator erase(iterator S, iterator E) {
iterator N = S;
// Shift all elts down.
@@ -399,13 +403,13 @@ public:
this->setEnd(I);
return(N);
}
-
+
iterator insert(iterator I, const T &Elt) {
if (I == this->end()) { // Important special case for empty vector.
push_back(Elt);
return this->end()-1;
}
-
+
if (this->EndX < this->CapacityX) {
Retry:
new (this->end()) T(this->back());
@@ -420,22 +424,22 @@ public:
I = this->begin()+EltNo;
goto Retry;
}
-
+
iterator insert(iterator I, size_type NumToInsert, const T &Elt) {
if (I == this->end()) { // Important special case for empty vector.
append(NumToInsert, Elt);
return this->end()-1;
}
-
+
// Convert iterator to elt# to avoid invalidating iterator when we reserve()
size_t InsertElt = I - this->begin();
-
+
// Ensure there is enough space.
reserve(static_cast<unsigned>(this->size() + NumToInsert));
-
+
// Uninvalidate the iterator.
I = this->begin()+InsertElt;
-
+
// If there are more elements between the insertion point and the end of the
// range than there are being inserted, we can use a simple approach to
// insertion. Since we already reserved space, we know that this won't
@@ -443,48 +447,48 @@ public:
if (size_t(this->end()-I) >= NumToInsert) {
T *OldEnd = this->end();
append(this->end()-NumToInsert, this->end());
-
+
// Copy the existing elements that get replaced.
std::copy_backward(I, OldEnd-NumToInsert, OldEnd);
-
+
std::fill_n(I, NumToInsert, Elt);
return I;
}
-
+
// Otherwise, we're inserting more elements than exist already, and we're
// not inserting at the end.
-
+
// Copy over the elements that we're about to overwrite.
T *OldEnd = this->end();
this->setEnd(this->end() + NumToInsert);
size_t NumOverwritten = OldEnd-I;
this->uninitialized_copy(I, OldEnd, this->end()-NumOverwritten);
-
+
// Replace the overwritten part.
std::fill_n(I, NumOverwritten, Elt);
-
+
// Insert the non-overwritten middle part.
std::uninitialized_fill_n(OldEnd, NumToInsert-NumOverwritten, Elt);
return I;
}
-
+
template<typename ItTy>
iterator insert(iterator I, ItTy From, ItTy To) {
if (I == this->end()) { // Important special case for empty vector.
append(From, To);
return this->end()-1;
}
-
+
size_t NumToInsert = std::distance(From, To);
// Convert iterator to elt# to avoid invalidating iterator when we reserve()
size_t InsertElt = I - this->begin();
-
+
// Ensure there is enough space.
reserve(static_cast<unsigned>(this->size() + NumToInsert));
-
+
// Uninvalidate the iterator.
I = this->begin()+InsertElt;
-
+
// If there are more elements between the insertion point and the end of the
// range than there are being inserted, we can use a simple approach to
// insertion. Since we already reserved space, we know that this won't
@@ -492,37 +496,37 @@ public:
if (size_t(this->end()-I) >= NumToInsert) {
T *OldEnd = this->end();
append(this->end()-NumToInsert, this->end());
-
+
// Copy the existing elements that get replaced.
std::copy_backward(I, OldEnd-NumToInsert, OldEnd);
-
+
std::copy(From, To, I);
return I;
}
-
+
// Otherwise, we're inserting more elements than exist already, and we're
// not inserting at the end.
-
+
// Copy over the elements that we're about to overwrite.
T *OldEnd = this->end();
this->setEnd(this->end() + NumToInsert);
size_t NumOverwritten = OldEnd-I;
this->uninitialized_copy(I, OldEnd, this->end()-NumOverwritten);
-
+
// Replace the overwritten part.
for (; NumOverwritten > 0; --NumOverwritten) {
*I = *From;
++I; ++From;
}
-
+
// Insert the non-overwritten middle part.
this->uninitialized_copy(From, To, OldEnd);
return I;
}
-
+
const SmallVectorImpl
&operator=(const SmallVectorImpl &RHS);
-
+
bool operator==(const SmallVectorImpl &RHS) const {
if (this->size() != RHS.size()) return false;
return std::equal(this->begin(), this->end(), RHS.begin());
@@ -530,12 +534,12 @@ public:
bool operator!=(const SmallVectorImpl &RHS) const {
return !(*this == RHS);
}
-
+
bool operator<(const SmallVectorImpl &RHS) const {
return std::lexicographical_compare(this->begin(), this->end(),
RHS.begin(), RHS.end());
}
-
+
/// set_size - Set the array size to \arg N, which the current array must have
/// enough capacity for.
///
@@ -549,14 +553,14 @@ public:
assert(N <= this->capacity());
this->setEnd(this->begin() + N);
}
-
+
private:
static void construct_range(T *S, T *E, const T &Elt) {
for (; S != E; ++S)
new (S) T(Elt);
}
};
-
+
template <typename T>
void SmallVectorImpl<T>::swap(SmallVectorImpl<T> &RHS) {
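
With the copy constructor of SmallVectorImpl disabled above, the usual pattern is to pass SmallVectorImpl<T>& across function boundaries and let each caller pick its own inline size; a short sketch:

    // Hedged usage sketch of the SmallVector/SmallVectorImpl split.
    #include "llvm/ADT/SmallVector.h"

    void fill(llvm::SmallVectorImpl<int> &Out) {
      for (int i = 0; i != 4; ++i)
        Out.push_back(i);
    }

    void caller() {
      llvm::SmallVector<int, 8> Buf;  // 8 elements of inline storage
      fill(Buf);                      // heap (malloc) only if 8 is exceeded
    }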
diff --git a/include/llvm/ADT/Statistic.h b/include/llvm/ADT/Statistic.h
index c593c58c1274..3a1319f1090c 100644
--- a/include/llvm/ADT/Statistic.h
+++ b/include/llvm/ADT/Statistic.h
@@ -56,6 +56,10 @@ public:
}
const Statistic &operator++() {
+ // FIXME: This function and all those that follow carefully use an
+ // atomic operation to update the value safely in the presence of
+ // concurrent accesses, but not to read the return value, so the
+ // return value is not thread safe.
sys::AtomicIncrement(&Value);
return init();
}
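
The counter is normally declared through the STATISTIC macro defined elsewhere in this header; a sketch of the increment the FIXME refers to (atomic update, non-synchronized read of the result):

    // Hedged sketch; DEBUG_TYPE must be defined before Statistic.h is included.
    #define DEBUG_TYPE "example-pass"
    #include "llvm/ADT/Statistic.h"

    STATISTIC(NumWidgets, "Number of widgets processed");

    void countOne() {
      ++NumWidgets;  // atomic increment; the returned value is not thread safe
    }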
diff --git a/include/llvm/ADT/Triple.h b/include/llvm/ADT/Triple.h
index be31ea012831..feade6a56fbd 100644
--- a/include/llvm/ADT/Triple.h
+++ b/include/llvm/ADT/Triple.h
@@ -100,7 +100,8 @@ public:
Psp,
Solaris,
Win32,
- Haiku
+ Haiku,
+ Minix
};
private:
@@ -242,8 +243,8 @@ public:
/// environment components with a single string.
void setOSAndEnvironmentName(StringRef Str);
- /// getArchNameForAssembler - Get an architecture name that is understood by the
- /// target assembler.
+ /// getArchNameForAssembler - Get an architecture name that is understood by
+ /// the target assembler.
const char *getArchNameForAssembler();
/// @}
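
With the new enumerator, OS checks can name Minix directly; the string side of the mapping lives in the Triple parser, which is not shown in this hunk.

    // Hedged sketch using the enumerator added above.
    #include "llvm/ADT/Triple.h"

    bool isMinixTarget(const llvm::Triple &T) {
      return T.getOS() == llvm::Triple::Minix;
    }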
diff --git a/include/llvm/ADT/ValueMap.h b/include/llvm/ADT/ValueMap.h
index 6f57fe8399bc..9e30bd4e6f73 100644
--- a/include/llvm/ADT/ValueMap.h
+++ b/include/llvm/ADT/ValueMap.h
@@ -59,16 +59,16 @@ struct ValueMapConfig {
struct ExtraData {};
template<typename ExtraDataT>
- static void onRAUW(const ExtraDataT &Data, KeyT Old, KeyT New) {}
+ static void onRAUW(const ExtraDataT & /*Data*/, KeyT /*Old*/, KeyT /*New*/) {}
template<typename ExtraDataT>
- static void onDelete(const ExtraDataT &Data, KeyT Old) {}
+ static void onDelete(const ExtraDataT &/*Data*/, KeyT /*Old*/) {}
/// Returns a mutex that should be acquired around any changes to the map.
/// This is only acquired from the CallbackVH (and held around calls to onRAUW
/// and onDelete) and not inside other ValueMap methods. NULL means that no
/// mutex is necessary.
template<typename ExtraDataT>
- static sys::Mutex *getMutex(const ExtraDataT &Data) { return NULL; }
+ static sys::Mutex *getMutex(const ExtraDataT &/*Data*/) { return NULL; }
};
/// See the file comment.
diff --git a/include/llvm/ADT/ilist.h b/include/llvm/ADT/ilist.h
index e4d26ddfa501..9479d00f31be 100644
--- a/include/llvm/ADT/ilist.h
+++ b/include/llvm/ADT/ilist.h
@@ -39,6 +39,7 @@
#define LLVM_ADT_ILIST_H
#include <cassert>
+#include <cstddef>
#include <iterator>
namespace llvm {
diff --git a/include/llvm/AbstractTypeUser.h b/include/llvm/AbstractTypeUser.h
index b6cceb4011ad..81f5c5c7680d 100644
--- a/include/llvm/AbstractTypeUser.h
+++ b/include/llvm/AbstractTypeUser.h
@@ -146,6 +146,7 @@ class PATypeHolder {
mutable const Type *Ty;
void destroy();
public:
+ PATypeHolder() : Ty(0) {}
PATypeHolder(const Type *ty) : Ty(ty) {
addRef();
}
@@ -153,7 +154,7 @@ public:
addRef();
}
- ~PATypeHolder() { if (Ty) dropRef(); }
+ ~PATypeHolder() { dropRef(); }
operator Type *() const { return get(); }
Type *get() const;
diff --git a/include/llvm/Analysis/AliasAnalysis.h b/include/llvm/Analysis/AliasAnalysis.h
index 9f411350a791..e611a35fc983 100644
--- a/include/llvm/Analysis/AliasAnalysis.h
+++ b/include/llvm/Analysis/AliasAnalysis.h
@@ -165,27 +165,6 @@ public:
/// ModRefInfo - Whether the pointer is loaded or stored to/from.
///
ModRefResult ModRefInfo;
-
- /// AccessType - Specific fine-grained access information for the argument.
- /// If none of these classifications is general enough, the
- /// getModRefBehavior method should not return AccessesArguments*. If a
- /// record is not returned for a particular argument, the argument is never
- /// dead and never dereferenced.
- enum AccessType {
- /// ScalarAccess - The pointer is dereferenced.
- ///
- ScalarAccess,
-
- /// ArrayAccess - The pointer is indexed through as an array of elements.
- ///
- ArrayAccess,
-
- /// ElementAccess ?? P->F only?
-
- /// CallsThrough - Indirect calls are made through the specified function
- /// pointer.
- CallsThrough
- };
};
/// getModRefBehavior - Return the behavior when calling the given call site.
diff --git a/include/llvm/Analysis/CFGPrinter.h b/include/llvm/Analysis/CFGPrinter.h
index 6ad2e5a5b1f9..ac8f59602dab 100644
--- a/include/llvm/Analysis/CFGPrinter.h
+++ b/include/llvm/Analysis/CFGPrinter.h
@@ -1,4 +1,4 @@
-//===-- CFGPrinter.h - CFG printer external interface ------------*- C++ -*-===//
+//===-- CFGPrinter.h - CFG printer external interface -----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -43,8 +43,8 @@ struct DOTGraphTraits<const Function*> : public DefaultDOTGraphTraits {
return OS.str();
}
- static std::string getCompleteNodeLabel(const BasicBlock *Node,
- const Function *Graph) {
+ static std::string getCompleteNodeLabel(const BasicBlock *Node,
+ const Function *Graph) {
std::string Str;
raw_string_ostream OS(Str);
diff --git a/include/llvm/Analysis/CaptureTracking.h b/include/llvm/Analysis/CaptureTracking.h
index 493ecf517141..b3390f47d2f3 100644
--- a/include/llvm/Analysis/CaptureTracking.h
+++ b/include/llvm/Analysis/CaptureTracking.h
@@ -21,9 +21,9 @@ namespace llvm {
/// by the enclosing function (which is required to exist). This routine can
/// be expensive, so consider caching the results. The boolean ReturnCaptures
/// specifies whether returning the value (or part of it) from the function
- /// counts as capturing it or not. The boolean StoreCaptures specified whether
- /// storing the value (or part of it) into memory anywhere automatically
- /// counts as capturing it or not.
+ /// counts as capturing it or not. The boolean StoreCaptures specifies
+ /// whether storing the value (or part of it) into memory anywhere
+ /// automatically counts as capturing it or not.
bool PointerMayBeCaptured(const Value *V,
bool ReturnCaptures,
bool StoreCaptures);
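
A sketch of a call site for the function documented above, counting both returns and stores as captures:

    // Hedged usage sketch of PointerMayBeCaptured.
    #include "llvm/Analysis/CaptureTracking.h"

    bool mayEscape(const llvm::Value *Ptr) {
      return llvm::PointerMayBeCaptured(Ptr,
                                        /*ReturnCaptures=*/true,
                                        /*StoreCaptures=*/true);
    }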
diff --git a/include/llvm/Analysis/CodeMetrics.h b/include/llvm/Analysis/CodeMetrics.h
new file mode 100644
index 000000000000..58096f1f15b1
--- /dev/null
+++ b/include/llvm/Analysis/CodeMetrics.h
@@ -0,0 +1,72 @@
+//===- CodeMetrics.h - Measures the weight of a function---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements various weight measurements for a function, helping
+// the Inliner and PartialSpecialization decide whether to duplicate its
+// contents.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_CODEMETRICS_H
+#define LLVM_ANALYSIS_CODEMETRICS_H
+
+namespace llvm {
+ // CodeMetrics - Calculate size and a few similar metrics for a set of
+ // basic blocks.
+ struct CodeMetrics {
+ /// NeverInline - True if this callee should never be inlined into a
+ /// caller.
+ // bool NeverInline;
+
+ // True if this function contains a call to setjmp or _setjmp
+ bool callsSetJmp;
+
+ // True if this function calls itself
+ bool isRecursive;
+
+ // True if this function contains one or more indirect branches
+ bool containsIndirectBr;
+
+ /// usesDynamicAlloca - True if this function calls alloca (in the C sense).
+ bool usesDynamicAlloca;
+
+ /// NumInsts, NumBlocks - Keep track of how large each function is, which
+ /// is used to estimate the code size cost of inlining it.
+ unsigned NumInsts, NumBlocks;
+
+ /// NumBBInsts - Keeps track of basic block code size estimates.
+ DenseMap<const BasicBlock *, unsigned> NumBBInsts;
+
+ /// NumCalls - Keep track of the number of calls to 'big' functions.
+ unsigned NumCalls;
+
+ /// NumVectorInsts - Keep track of how many instructions produce vector
+ /// values. The inliner is being more aggressive with inlining vector
+ /// kernels.
+ unsigned NumVectorInsts;
+
+ /// NumRets - Keep track of how many Ret instructions the block contains.
+ unsigned NumRets;
+
+ CodeMetrics() : callsSetJmp(false), isRecursive(false),
+ containsIndirectBr(false), usesDynamicAlloca(false),
+ NumInsts(0), NumBlocks(0), NumCalls(0), NumVectorInsts(0),
+ NumRets(0) {}
+
+ /// analyzeBasicBlock - Add information about the specified basic block
+ /// to the current structure.
+ void analyzeBasicBlock(const BasicBlock *BB);
+
+ /// analyzeFunction - Add information about the specified function
+ /// to the current structure.
+ void analyzeFunction(Function *F);
+ };
+}
+
+#endif
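
A sketch of how a client such as an inlining heuristic might consume the struct declared above; the size threshold is arbitrary and only for illustration.

    // Hedged usage sketch of CodeMetrics. The includes are ordered so that
    // DenseMap, BasicBlock and Function are visible before CodeMetrics.h,
    // which does not pull in those dependencies itself.
    #include "llvm/ADT/DenseMap.h"
    #include "llvm/Function.h"
    #include "llvm/Analysis/CodeMetrics.h"

    bool looksCheapToDuplicate(llvm::Function *F) {
      llvm::CodeMetrics Metrics;
      Metrics.analyzeFunction(F);
      if (Metrics.callsSetJmp || Metrics.containsIndirectBr)
        return false;                  // duplicating these is rarely profitable
      return Metrics.NumInsts < 100;   // illustrative size threshold
    }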
diff --git a/include/llvm/Analysis/DebugInfo.h b/include/llvm/Analysis/DebugInfo.h
index 473b127247f3..a85b6bc76967 100644
--- a/include/llvm/Analysis/DebugInfo.h
+++ b/include/llvm/Analysis/DebugInfo.h
@@ -56,6 +56,7 @@ namespace llvm {
}
GlobalVariable *getGlobalVariableField(unsigned Elt) const;
+ Function *getFunctionField(unsigned Elt) const;
public:
explicit DIDescriptor() : DbgNode(0) {}
@@ -409,6 +410,8 @@ namespace llvm {
/// describes - Return true if this subprogram provides debugging
/// information for the function F.
bool describes(const Function *F);
+
+ Function *getFunction() const { return getFunctionField(16); }
};
/// DIGlobalVariable - This is a wrapper for a global variable.
@@ -577,7 +580,8 @@ namespace llvm {
unsigned RunTimeVer = 0);
/// CreateFile - Create a new descriptor for the specified file.
- DIFile CreateFile(StringRef Filename, StringRef Directory, DICompileUnit CU);
+ DIFile CreateFile(StringRef Filename, StringRef Directory,
+ DICompileUnit CU);
/// CreateEnumerator - Create a single enumerator value.
DIEnumerator CreateEnumerator(StringRef Name, uint64_t Val);
@@ -658,7 +662,8 @@ namespace llvm {
unsigned VIndex = 0,
DIType = DIType(),
bool isArtificial = 0,
- bool isOptimized = false);
+ bool isOptimized = false,
+ Function *Fn = 0);
/// CreateSubprogramDefinition - Create new subprogram descriptor for the
/// given declaration.
diff --git a/include/llvm/Analysis/DominatorInternals.h b/include/llvm/Analysis/DominatorInternals.h
index 8cea96d35609..0419688a53bd 100644
--- a/include/llvm/Analysis/DominatorInternals.h
+++ b/include/llvm/Analysis/DominatorInternals.h
@@ -152,8 +152,9 @@ void Compress(DominatorTreeBase<typename GraphT::NodeType>& DT,
}
template<class GraphT>
-typename GraphT::NodeType* Eval(DominatorTreeBase<typename GraphT::NodeType>& DT,
- typename GraphT::NodeType *V) {
+typename GraphT::NodeType*
+Eval(DominatorTreeBase<typename GraphT::NodeType>& DT,
+ typename GraphT::NodeType *V) {
typename DominatorTreeBase<typename GraphT::NodeType>::InfoRec &VInfo =
DT.Info[V];
#if !BALANCE_IDOM_TREE
@@ -265,14 +266,17 @@ void Calculate(DominatorTreeBase<typename GraphTraits<NodeT>::NodeType>& DT,
// initialize the semi dominator to point to the parent node
WInfo.Semi = WInfo.Parent;
- for (typename GraphTraits<Inverse<NodeT> >::ChildIteratorType CI =
- GraphTraits<Inverse<NodeT> >::child_begin(W),
- E = GraphTraits<Inverse<NodeT> >::child_end(W); CI != E; ++CI)
- if (DT.Info.count(*CI)) { // Only if this predecessor is reachable!
- unsigned SemiU = DT.Info[Eval<GraphT>(DT, *CI)].Semi;
+ typedef GraphTraits<Inverse<NodeT> > InvTraits;
+ for (typename InvTraits::ChildIteratorType CI =
+ InvTraits::child_begin(W),
+ E = InvTraits::child_end(W); CI != E; ++CI) {
+ typename InvTraits::NodeType *N = *CI;
+ if (DT.Info.count(N)) { // Only if this predecessor is reachable!
+ unsigned SemiU = DT.Info[Eval<GraphT>(DT, N)].Semi;
if (SemiU < WInfo.Semi)
WInfo.Semi = SemiU;
}
+ }
DT.Info[DT.Vertex[WInfo.Semi]].Bucket.push_back(W);
diff --git a/include/llvm/Analysis/Dominators.h b/include/llvm/Analysis/Dominators.h
index f8103103a0e3..1979d3f6820b 100644
--- a/include/llvm/Analysis/Dominators.h
+++ b/include/llvm/Analysis/Dominators.h
@@ -246,22 +246,25 @@ protected:
typename GraphT::NodeType* NewBBSucc = *GraphT::child_begin(NewBB);
std::vector<typename GraphT::NodeType*> PredBlocks;
- for (typename GraphTraits<Inverse<N> >::ChildIteratorType PI =
- GraphTraits<Inverse<N> >::child_begin(NewBB),
- PE = GraphTraits<Inverse<N> >::child_end(NewBB); PI != PE; ++PI)
+ typedef GraphTraits<Inverse<N> > InvTraits;
+ for (typename InvTraits::ChildIteratorType PI =
+ InvTraits::child_begin(NewBB),
+ PE = InvTraits::child_end(NewBB); PI != PE; ++PI)
PredBlocks.push_back(*PI);
- assert(!PredBlocks.empty() && "No predblocks??");
+ assert(!PredBlocks.empty() && "No predblocks?");
bool NewBBDominatesNewBBSucc = true;
- for (typename GraphTraits<Inverse<N> >::ChildIteratorType PI =
- GraphTraits<Inverse<N> >::child_begin(NewBBSucc),
- E = GraphTraits<Inverse<N> >::child_end(NewBBSucc); PI != E; ++PI)
- if (*PI != NewBB && !DT.dominates(NewBBSucc, *PI) &&
- DT.isReachableFromEntry(*PI)) {
+ for (typename InvTraits::ChildIteratorType PI =
+ InvTraits::child_begin(NewBBSucc),
+ E = InvTraits::child_end(NewBBSucc); PI != E; ++PI) {
+ typename InvTraits::NodeType *ND = *PI;
+ if (ND != NewBB && !DT.dominates(NewBBSucc, ND) &&
+ DT.isReachableFromEntry(ND)) {
NewBBDominatesNewBBSucc = false;
break;
}
+ }
// Find NewBB's immediate dominator and create new dominator tree node for
// NewBB.
@@ -704,7 +707,6 @@ public:
}
~DominatorTree() {
- DT->releaseMemory();
delete DT;
}
diff --git a/include/llvm/Analysis/InlineCost.h b/include/llvm/Analysis/InlineCost.h
index cac7cfe2455e..462bddd53307 100644
--- a/include/llvm/Analysis/InlineCost.h
+++ b/include/llvm/Analysis/InlineCost.h
@@ -19,6 +19,7 @@
#include <vector>
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/ValueMap.h"
+#include "llvm/Analysis/CodeMetrics.h"
namespace llvm {
@@ -29,46 +30,6 @@ namespace llvm {
template<class PtrType, unsigned SmallSize>
class SmallPtrSet;
- // CodeMetrics - Calculate size and a few similar metrics for a set of
- // basic blocks.
- struct CodeMetrics {
- /// NeverInline - True if this callee should never be inlined into a
- /// caller.
- bool NeverInline;
-
- /// usesDynamicAlloca - True if this function calls alloca (in the C sense).
- bool usesDynamicAlloca;
-
- /// NumInsts, NumBlocks - Keep track of how large each function is, which
- /// is used to estimate the code size cost of inlining it.
- unsigned NumInsts, NumBlocks;
-
- /// NumBBInsts - Keeps track of basic block code size estimates.
- DenseMap<const BasicBlock *, unsigned> NumBBInsts;
-
- /// NumCalls - Keep track of the number of calls to 'big' functions.
- unsigned NumCalls;
-
- /// NumVectorInsts - Keep track of how many instructions produce vector
- /// values. The inliner is being more aggressive with inlining vector
- /// kernels.
- unsigned NumVectorInsts;
-
- /// NumRets - Keep track of how many Ret instructions the block contains.
- unsigned NumRets;
-
- CodeMetrics() : NeverInline(false), usesDynamicAlloca(false), NumInsts(0),
- NumBlocks(0), NumCalls(0), NumVectorInsts(0), NumRets(0) {}
-
- /// analyzeBasicBlock - Add information about the specified basic block
- /// to the current structure.
- void analyzeBasicBlock(const BasicBlock *BB);
-
- /// analyzeFunction - Add information about the specified function
- /// to the current structure.
- void analyzeFunction(Function *F);
- };
-
namespace InlineConstants {
// Various magic constants used to adjust heuristics.
const int InstrCost = 5;
@@ -163,6 +124,10 @@ namespace llvm {
/// analyzeFunction - Add information about the specified function
/// to the current structure.
void analyzeFunction(Function *F);
+
+ /// NeverInline - Returns true if the function should never be
+ /// inlined into any caller.
+ bool NeverInline();
};
// The Function* for a function can be changed (by ArgumentPromotion);
diff --git a/include/llvm/Analysis/IntervalIterator.h b/include/llvm/Analysis/IntervalIterator.h
index d842840b66b5..82b3294cc504 100644
--- a/include/llvm/Analysis/IntervalIterator.h
+++ b/include/llvm/Analysis/IntervalIterator.h
@@ -36,9 +36,9 @@
#include "llvm/Analysis/IntervalPartition.h"
#include "llvm/Function.h"
#include "llvm/Support/CFG.h"
-#include <stack>
-#include <set>
#include <algorithm>
+#include <set>
+#include <vector>
namespace llvm {
@@ -88,7 +88,7 @@ inline void addNodeToInterval(Interval *Int, Interval *I) {
template<class NodeTy, class OrigContainer_t, class GT = GraphTraits<NodeTy*>,
class IGT = GraphTraits<Inverse<NodeTy*> > >
class IntervalIterator {
- std::stack<std::pair<Interval*, typename Interval::succ_iterator> > IntStack;
+ std::vector<std::pair<Interval*, typename Interval::succ_iterator> > IntStack;
std::set<BasicBlock*> Visited;
OrigContainer_t *OrigContainer;
bool IOwnMem; // If True, delete intervals when done with them
@@ -116,15 +116,15 @@ public:
if (IOwnMem)
while (!IntStack.empty()) {
delete operator*();
- IntStack.pop();
+ IntStack.pop_back();
}
}
inline bool operator==(const _Self& x) const { return IntStack == x.IntStack;}
inline bool operator!=(const _Self& x) const { return !operator==(x); }
- inline const Interval *operator*() const { return IntStack.top().first; }
- inline Interval *operator*() { return IntStack.top().first; }
+ inline const Interval *operator*() const { return IntStack.back().first; }
+ inline Interval *operator*() { return IntStack.back().first; }
inline const Interval *operator->() const { return operator*(); }
inline Interval *operator->() { return operator*(); }
@@ -133,8 +133,8 @@ public:
do {
// All of the intervals on the stack have been visited. Try visiting
// their successors now.
- Interval::succ_iterator &SuccIt = IntStack.top().second,
- EndIt = succ_end(IntStack.top().first);
+ Interval::succ_iterator &SuccIt = IntStack.back().second,
+ EndIt = succ_end(IntStack.back().first);
while (SuccIt != EndIt) { // Loop over all interval succs
bool Done = ProcessInterval(getSourceGraphNode(OrigContainer, *SuccIt));
++SuccIt; // Increment iterator
@@ -142,10 +142,10 @@ public:
}
// Free interval memory... if necessary
- if (IOwnMem) delete IntStack.top().first;
+ if (IOwnMem) delete IntStack.back().first;
// We ran out of successors for this interval... pop off the stack
- IntStack.pop();
+ IntStack.pop_back();
} while (!IntStack.empty());
return *this;
@@ -175,7 +175,7 @@ private:
E = GT::child_end(Node); I != E; ++I)
ProcessNode(Int, getSourceGraphNode(OrigContainer, *I));
- IntStack.push(std::make_pair(Int, succ_begin(Int)));
+ IntStack.push_back(std::make_pair(Int, succ_begin(Int)));
return true;
}
diff --git a/include/llvm/Analysis/Loads.h b/include/llvm/Analysis/Loads.h
new file mode 100644
index 000000000000..1574262dd6d3
--- /dev/null
+++ b/include/llvm/Analysis/Loads.h
@@ -0,0 +1,51 @@
+//===- Loads.h - Local load analysis --------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares simple local analyses for load instructions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_LOADS_H
+#define LLVM_ANALYSIS_LOADS_H
+
+#include "llvm/BasicBlock.h"
+
+namespace llvm {
+
+class AliasAnalysis;
+class TargetData;
+
+/// isSafeToLoadUnconditionally - Return true if we know that executing a load
+/// from this value cannot trap. If it is not obviously safe to load from the
+/// specified pointer, we do a quick local scan of the basic block containing
+/// ScanFrom, to determine if the address is already accessed.
+bool isSafeToLoadUnconditionally(Value *V, Instruction *ScanFrom,
+ unsigned Align, const TargetData *TD = 0);
+
+/// FindAvailableLoadedValue - Scan the ScanBB block backwards (starting at
+/// the instruction before ScanFrom) checking to see if we have the value at
+/// the memory address *Ptr locally available within a small number of
+/// instructions. If the value is available, return it.
+///
+/// If not, return the iterator for the last validated instruction that the
+/// value would be live through. If we scanned the entire block and didn't
+/// find something that invalidates *Ptr or provides it, ScanFrom would be
+/// left at begin() and this returns null. ScanFrom could also be left
+///
+/// MaxInstsToScan specifies the maximum instructions to scan in the block.
+/// If it is set to 0, it will scan the whole block. You can also optionally
+/// specify an alias analysis implementation, which makes this more precise.
+Value *FindAvailableLoadedValue(Value *Ptr, BasicBlock *ScanBB,
+ BasicBlock::iterator &ScanFrom,
+ unsigned MaxInstsToScan = 6,
+ AliasAnalysis *AA = 0);
+
+}
+
+#endif
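
A sketch of the two entry points declared above as they might be used from a pass that wants to forward a loaded value:

    // Hedged usage sketch of the Loads.h interface.
    #include "llvm/Analysis/Loads.h"
    #include "llvm/Instructions.h"

    llvm::Value *tryForwardLoad(llvm::LoadInst *LI) {
      llvm::Value *Ptr = LI->getPointerOperand();

      // Is the load known not to trap if speculated?
      if (!llvm::isSafeToLoadUnconditionally(Ptr, LI, LI->getAlignment()))
        return 0;

      // Scan backwards from the load for an already-available value.
      llvm::BasicBlock::iterator ScanFrom = LI;
      return llvm::FindAvailableLoadedValue(Ptr, LI->getParent(), ScanFrom);
    }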
diff --git a/include/llvm/Analysis/LoopInfo.h b/include/llvm/Analysis/LoopInfo.h
index 2babc25cb140..9455fd840326 100644
--- a/include/llvm/Analysis/LoopInfo.h
+++ b/include/llvm/Analysis/LoopInfo.h
@@ -256,6 +256,27 @@ public:
///
BlockT *getLoopPreheader() const {
// Keep track of nodes outside the loop branching to the header...
+ BlockT *Out = getLoopPredecessor();
+ if (!Out) return 0;
+
+ // Make sure there is only one exit out of the preheader.
+ typedef GraphTraits<BlockT*> BlockTraits;
+ typename BlockTraits::ChildIteratorType SI = BlockTraits::child_begin(Out);
+ ++SI;
+ if (SI != BlockTraits::child_end(Out))
+ return 0; // Multiple exits from the block, must not be a preheader.
+
+ // The predecessor has exactly one successor, so it is a preheader.
+ return Out;
+ }
+
+ /// getLoopPredecessor - If the given loop's header has exactly one unique
+ /// predecessor outside the loop, return it. Otherwise return null.
+ /// This is less strict than the loop "preheader" concept, which requires
+ /// the predecessor to have exactly one successor.
+ ///
+ BlockT *getLoopPredecessor() const {
+ // Keep track of nodes outside the loop branching to the header...
BlockT *Out = 0;
// Loop over the predecessors of the header node...
@@ -264,22 +285,17 @@ public:
typedef GraphTraits<Inverse<BlockT*> > InvBlockTraits;
for (typename InvBlockTraits::ChildIteratorType PI =
InvBlockTraits::child_begin(Header),
- PE = InvBlockTraits::child_end(Header); PI != PE; ++PI)
- if (!contains(*PI)) { // If the block is not in the loop...
- if (Out && Out != *PI)
+ PE = InvBlockTraits::child_end(Header); PI != PE; ++PI) {
+ typename InvBlockTraits::NodeType *N = *PI;
+ if (!contains(N)) { // If the block is not in the loop...
+ if (Out && Out != N)
return 0; // Multiple predecessors outside the loop
- Out = *PI;
+ Out = N;
}
+ }
// Make sure there is only one exit out of the preheader.
assert(Out && "Header of loop has no predecessors from outside loop?");
- typename BlockTraits::ChildIteratorType SI = BlockTraits::child_begin(Out);
- ++SI;
- if (SI != BlockTraits::child_end(Out))
- return 0; // Multiple exits from the block, must not be a preheader.
-
- // If there is exactly one preheader, return it. If there was zero, then
- // Out is still null.
return Out;
}
@@ -293,11 +309,13 @@ public:
typename InvBlockTraits::ChildIteratorType PE =
InvBlockTraits::child_end(Header);
BlockT *Latch = 0;
- for (; PI != PE; ++PI)
- if (contains(*PI)) {
+ for (; PI != PE; ++PI) {
+ typename InvBlockTraits::NodeType *N = *PI;
+ if (contains(N)) {
if (Latch) return 0;
- Latch = *PI;
+ Latch = N;
}
+ }
return Latch;
}
@@ -409,10 +427,11 @@ public:
for (typename InvBlockTraits::ChildIteratorType PI =
InvBlockTraits::child_begin(BB), PE = InvBlockTraits::child_end(BB);
PI != PE; ++PI) {
- if (std::binary_search(LoopBBs.begin(), LoopBBs.end(), *PI))
+ typename InvBlockTraits::NodeType *N = *PI;
+ if (std::binary_search(LoopBBs.begin(), LoopBBs.end(), N))
HasInsideLoopPreds = true;
else
- OutsideLoopPreds.push_back(*PI);
+ OutsideLoopPreds.push_back(N);
}
if (BB == getHeader()) {
@@ -743,9 +762,11 @@ public:
typedef GraphTraits<Inverse<BlockT*> > InvBlockTraits;
for (typename InvBlockTraits::ChildIteratorType I =
InvBlockTraits::child_begin(BB), E = InvBlockTraits::child_end(BB);
- I != E; ++I)
- if (DT.dominates(BB, *I)) // If BB dominates its predecessor...
- TodoStack.push_back(*I);
+ I != E; ++I) {
+ typename InvBlockTraits::NodeType *N = *I;
+ if (DT.dominates(BB, N)) // If BB dominates its predecessor...
+ TodoStack.push_back(N);
+ }
if (TodoStack.empty()) return 0; // No backedges to this block...
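
A short sketch (assumed client code, not from the patch) contrasting the relaxed getLoopPredecessor query with the stricter getLoopPreheader refactored above; the helper name is illustrative.

#include "llvm/Analysis/LoopInfo.h"
using namespace llvm;

static bool hasUniqueOutsideEntry(Loop *L) {
  // Strong form: one outside predecessor whose only successor is the header,
  // so it is a safe place to hoist loop-invariant code.
  if (L->getLoopPreheader())
    return true;

  // Relaxed form: one outside predecessor, but it may branch elsewhere too,
  // so it is useful for analysis, not as an insertion point.
  return L->getLoopPredecessor() != 0;
}
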
diff --git a/include/llvm/Analysis/MemoryBuiltins.h b/include/llvm/Analysis/MemoryBuiltins.h
index a7f42c9e37f2..a4f916227b8b 100644
--- a/include/llvm/Analysis/MemoryBuiltins.h
+++ b/include/llvm/Analysis/MemoryBuiltins.h
@@ -72,8 +72,8 @@ Value *getMallocArraySize(CallInst *CI, const TargetData *TD,
// free Call Utility Functions.
//
-/// isFreeCall - Returns true if the value is a call to the builtin free()
-bool isFreeCall(const Value *I);
+/// isFreeCall - Returns non-null if the value is a call to the builtin free()
+const CallInst *isFreeCall(const Value *I);
} // End llvm namespace
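
A hedged sketch of the new calling convention: isFreeCall now hands back the CallInst itself, so callers no longer need to re-cast. The scanning helper below is illustrative only.

#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Instructions.h"
using namespace llvm;

static const CallInst *findFirstFreeCall(const BasicBlock *BB) {
  for (BasicBlock::const_iterator I = BB->begin(), E = BB->end(); I != E; ++I)
    if (const CallInst *FreeCall = isFreeCall(I))
      return FreeCall; // operands/attributes can be inspected directly now
  return 0;
}
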
diff --git a/include/llvm/Analysis/ScalarEvolution.h b/include/llvm/Analysis/ScalarEvolution.h
index d3a8d8f4fe51..8da3af0c7a56 100644
--- a/include/llvm/Analysis/ScalarEvolution.h
+++ b/include/llvm/Analysis/ScalarEvolution.h
@@ -343,10 +343,6 @@ namespace llvm {
BackedgeTakenInfo HowManyLessThans(const SCEV *LHS, const SCEV *RHS,
const Loop *L, bool isSigned);
- /// getLoopPredecessor - If the given loop's header has exactly one unique
- /// predecessor outside the loop, return it. Otherwise return null.
- BasicBlock *getLoopPredecessor(const Loop *L);
-
/// getPredecessorWithUniqueSuccessorForBB - Return a predecessor of BB
/// (which may not be an immediate predecessor) which has exactly one
/// successor from which BB is reachable, or null if no such block is
@@ -530,10 +526,6 @@ namespace llvm {
/// widening.
const SCEV *getTruncateOrNoop(const SCEV *V, const Type *Ty);
- /// getIntegerSCEV - Given a SCEVable type, create a constant for the
- /// specified signed integer value and return a SCEV for the constant.
- const SCEV *getIntegerSCEV(int64_t Val, const Type *Ty);
-
/// getUMaxFromMismatchedTypes - Promote the operands to the wider of
/// the types using zero-extension, and then perform a umax operation
/// with them.
diff --git a/include/llvm/Analysis/ScalarEvolutionExpander.h b/include/llvm/Analysis/ScalarEvolutionExpander.h
index baf6946b8cf8..9501555aacf1 100644
--- a/include/llvm/Analysis/ScalarEvolutionExpander.h
+++ b/include/llvm/Analysis/ScalarEvolutionExpander.h
@@ -32,6 +32,7 @@ namespace llvm {
std::map<std::pair<const SCEV *, Instruction *>, AssertingVH<Value> >
InsertedExpressions;
std::set<Value*> InsertedValues;
+ std::set<Value*> InsertedPostIncValues;
/// PostIncLoops - Addrecs referring to any of the given loops are expanded
/// in post-inc mode. For example, expanding {1,+,1}<L> in post-inc mode
@@ -102,6 +103,10 @@ namespace llvm {
/// clearPostInc - Disable all post-inc expansion.
void clearPostInc() {
PostIncLoops.clear();
+
+ // When we change the post-inc loop set, cached expansions may no
+ // longer be valid.
+ InsertedPostIncValues.clear();
}
/// disableCanonicalMode - Disable the behavior of expanding expressions in
@@ -123,6 +128,14 @@ namespace llvm {
/// of work to avoid inserting an obviously redundant operation.
Value *InsertBinop(Instruction::BinaryOps Opcode, Value *LHS, Value *RHS);
+ /// ReuseOrCreateCast - Arrange for there to be a cast of V to Ty at IP,
+ /// reusing an existing cast if a suitable one exists, moving an existing
+ /// cast if a suitable one exists but isn't in the right place, or
+ /// creating a new one.
+ Value *ReuseOrCreateCast(Value *V, const Type *Ty,
+ Instruction::CastOps Op,
+ BasicBlock::iterator IP);
+
/// InsertNoopCastOfTo - Insert a cast of V to the specified type,
/// which must be possible with a noop cast, doing what we can to
/// share the casts.
@@ -146,7 +159,7 @@ namespace llvm {
/// inserted by the code rewriter. If so, the client should not modify the
/// instruction.
bool isInsertedInstruction(Instruction *I) const {
- return InsertedValues.count(I);
+ return InsertedValues.count(I) || InsertedPostIncValues.count(I);
}
Value *visitConstant(const SCEVConstant *S) {
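
A small sketch (assumed client code) of the widened isInsertedInstruction check, which now also covers post-inc expansions, so rewriting passes skip the expander's own output.

#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Instructions.h"
using namespace llvm;

static bool canRewriteUser(SCEVExpander &Rewriter, Instruction *UserInst) {
  // Touching instructions the expander created (normal or post-inc) could
  // invalidate its cached expressions, so leave them alone.
  return !Rewriter.isInsertedInstruction(UserInst);
}
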
diff --git a/include/llvm/Analysis/ValueTracking.h b/include/llvm/Analysis/ValueTracking.h
index d58089748413..b9634f04ec4c 100644
--- a/include/llvm/Analysis/ValueTracking.h
+++ b/include/llvm/Analysis/ValueTracking.h
@@ -97,7 +97,7 @@ namespace llvm {
- /// FindScalarValue - Given an aggregrate and an sequence of indices, see if
+ /// FindInsertedValue - Given an aggregate and a sequence of indices, see if
/// the scalar value indexed is already around as a register, for example if
/// it were inserted directly into the aggregate.
///
diff --git a/include/llvm/Bitcode/ReaderWriter.h b/include/llvm/Bitcode/ReaderWriter.h
index 45eb801a8c03..a186964743dc 100644
--- a/include/llvm/Bitcode/ReaderWriter.h
+++ b/include/llvm/Bitcode/ReaderWriter.h
@@ -40,7 +40,8 @@ namespace llvm {
std::string *ErrMsg = 0);
/// WriteBitcodeToFile - Write the specified module to the specified
- /// raw output stream.
+ /// raw output stream. For streams where it matters, the given stream
+ /// should be in "binary" mode.
void WriteBitcodeToFile(const Module *M, raw_ostream &Out);
/// WriteBitcodeToStream - Write the specified module to the specified
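
A sketch of honoring the new "binary mode" note when writing bitcode to a file; the raw_fd_ostream F_Binary flag is assumed to be the right spelling in this tree.

#include "llvm/Bitcode/ReaderWriter.h"
#include "llvm/Module.h"
#include "llvm/Support/raw_ostream.h"
#include <string>
using namespace llvm;

static bool emitBitcodeFile(const Module *M, const char *Path) {
  std::string ErrInfo;
  // Open in binary mode so the bitcode is not mangled on platforms where
  // text/binary translation matters (assumed F_Binary flag).
  raw_fd_ostream Out(Path, ErrInfo, raw_fd_ostream::F_Binary);
  if (!ErrInfo.empty())
    return false;
  WriteBitcodeToFile(M, Out);
  return true;
}
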
diff --git a/include/llvm/CodeGen/AsmPrinter.h b/include/llvm/CodeGen/AsmPrinter.h
index 243ddbb5da34..7ca6c6257f2e 100644
--- a/include/llvm/CodeGen/AsmPrinter.h
+++ b/include/llvm/CodeGen/AsmPrinter.h
@@ -64,7 +64,7 @@ namespace llvm {
/// Target machine description.
///
TargetMachine &TM;
-
+
/// Target Asm Printer information.
///
const MCAsmInfo *MAI;
@@ -73,13 +73,13 @@ namespace llvm {
/// streaming. This owns all of the global MC-related objects for the
/// generated translation unit.
MCContext &OutContext;
-
+
/// OutStreamer - This is the MCStreamer object for the file we are
/// generating. This contains the transient state for the current
/// translation unit that we are generating (such as the current section
/// etc).
MCStreamer &OutStreamer;
-
+
/// The current machine function.
const MachineFunction *MF;
@@ -94,30 +94,30 @@ namespace llvm {
/// beginning of each call to runOnMachineFunction().
///
MCSymbol *CurrentFnSym;
-
+
private:
// GCMetadataPrinters - The garbage collection metadata printer table.
void *GCMetadataPrinters; // Really a DenseMap.
-
+
/// VerboseAsm - Emit comments in assembly output if this is true.
///
bool VerboseAsm;
static char ID;
-
+
/// If VerboseAsm is set, a pointer to the loop info for this
/// function.
MachineLoopInfo *LI;
/// DD - If the target supports dwarf debug info, this pointer is non-null.
DwarfDebug *DD;
-
+
/// DE - If the target supports dwarf exception info, this pointer is
/// non-null.
DwarfException *DE;
-
+
protected:
explicit AsmPrinter(TargetMachine &TM, MCStreamer &Streamer);
-
+
public:
virtual ~AsmPrinter();
@@ -128,7 +128,7 @@ namespace llvm {
/// getFunctionNumber - Return a unique ID for the current function.
///
unsigned getFunctionNumber() const;
-
+
/// getObjFileLowering - Return information about object file lowering.
const TargetLoweringObjectFile &getObjFileLowering() const;
@@ -137,16 +137,16 @@ namespace llvm {
/// getCurrentSection() - Return the current section we are emitting to.
const MCSection *getCurrentSection() const;
-
-
+
+
//===------------------------------------------------------------------===//
// MachineFunctionPass Implementation.
//===------------------------------------------------------------------===//
-
+
/// getAnalysisUsage - Record analysis usage.
- ///
+ ///
void getAnalysisUsage(AnalysisUsage &AU) const;
-
+
/// doInitialization - Set up the AsmPrinter when we are working on a new
/// module. If your pass overrides this, it must make sure to explicitly
/// call this implementation.
@@ -155,7 +155,7 @@ namespace llvm {
/// doFinalization - Shut down the asmprinter. If you override this in your
/// pass, you must make sure to call it explicitly.
bool doFinalization(Module &M);
-
+
/// runOnMachineFunction - Emit the specified function out to the
/// OutStreamer.
virtual bool runOnMachineFunction(MachineFunction &MF) {
@@ -163,20 +163,20 @@ namespace llvm {
EmitFunctionHeader();
EmitFunctionBody();
return false;
- }
-
+ }
+
//===------------------------------------------------------------------===//
// Coarse grained IR lowering routines.
//===------------------------------------------------------------------===//
-
+
/// SetupMachineFunction - This should be called when a new MachineFunction
/// is being processed from runOnMachineFunction.
void SetupMachineFunction(MachineFunction &MF);
-
+
/// EmitFunctionHeader - This method emits the header for the current
/// function.
void EmitFunctionHeader();
-
+
/// EmitFunctionBody - This method emits the body and trailer for a
/// function.
void EmitFunctionBody();
@@ -187,15 +187,15 @@ namespace llvm {
/// the code generator.
///
virtual void EmitConstantPool();
-
- /// EmitJumpTableInfo - Print assembly representations of the jump tables
- /// used by the current function to the current output stream.
+
+ /// EmitJumpTableInfo - Print assembly representations of the jump tables
+ /// used by the current function to the current output stream.
///
void EmitJumpTableInfo();
-
+
/// EmitGlobalVariable - Emit the specified global variable to the .s file.
virtual void EmitGlobalVariable(const GlobalVariable *GV);
-
+
/// EmitSpecialLLVMGlobal - Check to see if the specified global is a
/// special global used by LLVM. If so, emit it and return true, otherwise
/// do nothing and return false.
@@ -208,54 +208,54 @@ namespace llvm {
/// if required for correctness.
///
void EmitAlignment(unsigned NumBits, const GlobalValue *GV = 0) const;
-
+
/// EmitBasicBlockStart - This method prints the label for the specified
/// MachineBasicBlock, an alignment (if present) and a comment describing
/// it if appropriate.
void EmitBasicBlockStart(const MachineBasicBlock *MBB) const;
-
+
/// EmitGlobalConstant - Print a general LLVM constant to the .s file.
void EmitGlobalConstant(const Constant *CV, unsigned AddrSpace = 0);
-
-
+
+
//===------------------------------------------------------------------===//
// Overridable Hooks
//===------------------------------------------------------------------===//
-
+
// Targets can, or in the case of EmitInstruction, must implement these to
// customize output.
-
+
/// EmitStartOfAsmFile - This virtual method can be overridden by targets
/// that want to emit something at the start of their file.
virtual void EmitStartOfAsmFile(Module &) {}
-
+
/// EmitEndOfAsmFile - This virtual method can be overridden by targets that
/// want to emit something at the end of their file.
virtual void EmitEndOfAsmFile(Module &) {}
-
+
/// EmitFunctionBodyStart - Targets can override this to emit stuff before
/// the first basic block in the function.
virtual void EmitFunctionBodyStart() {}
-
+
/// EmitFunctionBodyEnd - Targets can override this to emit stuff after
/// the last basic block in the function.
virtual void EmitFunctionBodyEnd() {}
-
+
/// EmitInstruction - Targets should implement this to emit instructions.
virtual void EmitInstruction(const MachineInstr *) {
assert(0 && "EmitInstruction not implemented");
}
-
+
virtual void EmitFunctionEntryLabel();
-
+
virtual void EmitMachineConstantPoolValue(MachineConstantPoolValue *MCPV);
-
+
/// isBlockOnlyReachableByFallthrough - Return true if the basic block has
/// exactly one predecessor and the control transfer mechanism between
/// the predecessor and this block is a fall-through.
virtual bool
isBlockOnlyReachableByFallthrough(const MachineBasicBlock *MBB) const;
-
+
//===------------------------------------------------------------------===//
// Symbol Lowering Routines.
//===------------------------------------------------------------------===//
@@ -264,23 +264,23 @@ namespace llvm {
/// GetTempSymbol - Return the MCSymbol corresponding to the assembler
/// temporary label with the specified stem and unique ID.
MCSymbol *GetTempSymbol(StringRef Name, unsigned ID) const;
-
+
/// GetTempSymbol - Return an assembler temporary label with the specified
/// stem.
MCSymbol *GetTempSymbol(StringRef Name) const;
-
-
+
+
/// GetSymbolWithGlobalValueBase - Return the MCSymbol for a symbol with
/// global value name as its base, with the specified suffix, and where the
/// symbol is forced to have private linkage if ForcePrivate is true.
MCSymbol *GetSymbolWithGlobalValueBase(const GlobalValue *GV,
StringRef Suffix,
bool ForcePrivate = true) const;
-
+
/// GetExternalSymbolSymbol - Return the MCSymbol for the specified
/// ExternalSymbol.
MCSymbol *GetExternalSymbolSymbol(StringRef Sym) const;
-
+
/// GetCPISymbol - Return the symbol for the specified constant pool entry.
MCSymbol *GetCPISymbol(unsigned CPID) const;
@@ -302,42 +302,42 @@ namespace llvm {
public:
/// printOffset - This is just convenient handler for printing offsets.
void printOffset(int64_t Offset, raw_ostream &OS) const;
-
+
/// EmitInt8 - Emit a byte directive and value.
///
void EmitInt8(int Value) const;
-
+
/// EmitInt16 - Emit a short directive and value.
///
void EmitInt16(int Value) const;
-
+
/// EmitInt32 - Emit a long directive and value.
///
void EmitInt32(int Value) const;
-
+
/// EmitLabelDifference - Emit something like ".long Hi-Lo" where the size
/// in bytes of the directive is specified by Size and Hi/Lo specify the
/// labels. This implicitly uses .set if it is available.
void EmitLabelDifference(const MCSymbol *Hi, const MCSymbol *Lo,
unsigned Size) const;
-
- /// EmitLabelOffsetDifference - Emit something like ".long Hi+Offset-Lo"
+
+ /// EmitLabelOffsetDifference - Emit something like ".long Hi+Offset-Lo"
/// where the size in bytes of the directive is specified by Size and Hi/Lo
/// specify the labels. This implicitly uses .set if it is available.
void EmitLabelOffsetDifference(const MCSymbol *Hi, uint64_t Offset,
const MCSymbol *Lo, unsigned Size) const;
-
+
//===------------------------------------------------------------------===//
// Dwarf Emission Helper Routines
//===------------------------------------------------------------------===//
-
+
/// EmitSLEB128 - emit the specified signed leb128 value.
void EmitSLEB128(int Value, const char *Desc = 0) const;
-
+
/// EmitULEB128 - emit the specified unsigned leb128 value.
void EmitULEB128(unsigned Value, const char *Desc = 0,
unsigned PadTo = 0) const;
-
+
/// EmitCFAByte - Emit a .byte 42 directive for a DW_CFA_xxx value.
void EmitCFAByte(unsigned Val) const;
@@ -346,15 +346,15 @@ namespace llvm {
/// describing the encoding. Desc is a string saying what the encoding is
/// specifying (e.g. "LSDA").
void EmitEncodingByte(unsigned Val, const char *Desc = 0) const;
-
+
/// GetSizeOfEncodedValue - Return the size of the encoding in bytes.
unsigned GetSizeOfEncodedValue(unsigned Encoding) const;
-
+
/// EmitReference - Emit a reference to a label with a specified encoding.
///
void EmitReference(const MCSymbol *Sym, unsigned Encoding) const;
void EmitReference(const GlobalValue *GV, unsigned Encoding) const;
-
+
/// EmitSectionOffset - Emit the 4-byte offset of Label from the start of
/// its section. This can be done with a special directive if the target
/// supports it (e.g. cygwin) or by emitting it as an offset from a label at
@@ -372,20 +372,20 @@ namespace llvm {
//===------------------------------------------------------------------===//
// Dwarf Lowering Routines
//===------------------------------------------------------------------===//
-
+
/// EmitFrameMoves - Emit frame instructions to describe the layout of the
/// frame.
- void EmitFrameMoves(const std::vector<MachineMove> &Moves,
+ void EmitFrameMoves(const std::vector<MachineMove> &Moves,
MCSymbol *BaseLabel, bool isEH) const;
-
-
+
+
//===------------------------------------------------------------------===//
// Inline Asm Support
//===------------------------------------------------------------------===//
public:
// These are hooks that targets can override to implement inline asm
// support. These should probably be moved out of AsmPrinter someday.
-
+
/// PrintSpecial - Print information related to the specified machine instr
/// that is independent of the operand, and may be independent of the instr
/// itself. This can be useful for portably encoding the comment character
@@ -394,7 +394,7 @@ namespace llvm {
/// for their own strange codes.
virtual void PrintSpecial(const MachineInstr *MI, raw_ostream &OS,
const char *Code) const;
-
+
/// PrintAsmOperand - Print the specified operand of MI, an INLINEASM
/// instruction, using the specified assembler variant. Targets should
/// override this to format as appropriate. This method can return true if
@@ -402,16 +402,16 @@ namespace llvm {
virtual bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
unsigned AsmVariant, const char *ExtraCode,
raw_ostream &OS);
-
+
/// PrintAsmMemoryOperand - Print the specified operand of MI, an INLINEASM
/// instruction, using the specified assembler variant as an address.
/// Targets should override this to format as appropriate. This method can
/// return true if the operand is erroneous.
virtual bool PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNo,
- unsigned AsmVariant,
+ unsigned AsmVariant,
const char *ExtraCode,
raw_ostream &OS);
-
+
private:
/// Private state for PrintSpecial()
// Assign a unique ID to this machine instruction.
@@ -422,7 +422,7 @@ namespace llvm {
/// EmitInlineAsm - Emit a blob of inline asm to the output streamer.
void EmitInlineAsm(StringRef Str, unsigned LocCookie) const;
-
+
/// EmitInlineAsm - This method formats and emits the specified machine
/// instruction that is an inline asm.
void EmitInlineAsm(const MachineInstr *MI) const;
@@ -430,13 +430,13 @@ namespace llvm {
//===------------------------------------------------------------------===//
// Internal Implementation Details
//===------------------------------------------------------------------===//
-
+
/// EmitVisibility - This emits visibility information about the symbol, if
/// this is supported by the target.
void EmitVisibility(MCSymbol *Sym, unsigned Visibility) const;
-
+
void EmitLinkage(unsigned Linkage, MCSymbol *GVSym) const;
-
+
void EmitJumpTableEntry(const MachineJumpTableInfo *MJTI,
const MachineBasicBlock *MBB,
unsigned uid) const;
diff --git a/include/llvm/CodeGen/CallingConvLower.h b/include/llvm/CodeGen/CallingConvLower.h
index 45a2757d3783..7911907e8943 100644
--- a/include/llvm/CodeGen/CallingConvLower.h
+++ b/include/llvm/CodeGen/CallingConvLower.h
@@ -17,14 +17,13 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/ValueTypes.h"
-#include "llvm/CodeGen/SelectionDAGNodes.h"
+#include "llvm/Target/TargetCallingConv.h"
#include "llvm/CallingConv.h"
namespace llvm {
class TargetRegisterInfo;
class TargetMachine;
class CCState;
- class SDNode;
/// CCValAssign - Represent assignment of one arg/retval to a location.
class CCValAssign {
@@ -35,6 +34,9 @@ public:
ZExt, // The value is zero extended in the location.
AExt, // The value is extended with undefined upper bits.
BCvt, // The value is bit-converted in the location.
+ VExt, // The value is vector-widened in the location.
+ // FIXME: Not implemented yet. Code that uses AExt to mean
+ // vector-widen should be fixed to use VExt instead.
Indirect // The location contains pointer to the value.
// TODO: a subset of the value is in the location.
};
@@ -186,8 +188,7 @@ public:
/// CheckReturn - Analyze the return values of a function, returning
/// true if the return can be performed without sret-demotion, and
/// false otherwise.
- bool CheckReturn(const SmallVectorImpl<EVT> &OutTys,
- const SmallVectorImpl<ISD::ArgFlagsTy> &ArgsFlags,
+ bool CheckReturn(const SmallVectorImpl<ISD::OutputArg> &ArgsFlags,
CCAssignFn Fn);
/// AnalyzeCallOperands - Analyze the outgoing arguments to a call,
diff --git a/include/llvm/CodeGen/FastISel.h b/include/llvm/CodeGen/FastISel.h
index 005c7bcf21d9..7f3a7c776931 100644
--- a/include/llvm/CodeGen/FastISel.h
+++ b/include/llvm/CodeGen/FastISel.h
@@ -19,11 +19,13 @@
#include "llvm/ADT/SmallSet.h"
#endif
#include "llvm/CodeGen/ValueTypes.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
namespace llvm {
class AllocaInst;
class ConstantFP;
+class FunctionLoweringInfo;
class Instruction;
class MachineBasicBlock;
class MachineConstantPool;
@@ -36,22 +38,15 @@ class TargetInstrInfo;
class TargetLowering;
class TargetMachine;
class TargetRegisterClass;
+class TargetRegisterInfo;
/// FastISel - This is a fast-path instruction selection class that
/// generates poor code and doesn't support illegal types or non-trivial
/// lowering, but runs quickly.
class FastISel {
protected:
- MachineBasicBlock *MBB;
DenseMap<const Value *, unsigned> LocalValueMap;
- DenseMap<const Value *, unsigned> &ValueMap;
- DenseMap<const BasicBlock *, MachineBasicBlock *> &MBBMap;
- DenseMap<const AllocaInst *, int> &StaticAllocaMap;
- std::vector<std::pair<MachineInstr*, unsigned> > &PHINodesToUpdate;
-#ifndef NDEBUG
- SmallSet<const Instruction *, 8> &CatchInfoLost;
-#endif
- MachineFunction &MF;
+ FunctionLoweringInfo &FuncInfo;
MachineRegisterInfo &MRI;
MachineFrameInfo &MFI;
MachineConstantPool &MCP;
@@ -60,23 +55,22 @@ protected:
const TargetData &TD;
const TargetInstrInfo &TII;
const TargetLowering &TLI;
- bool IsBottomUp;
+ const TargetRegisterInfo &TRI;
+ MachineInstr *LastLocalValue;
public:
+ /// getLastLocalValue - Return the position of the last instruction
+ /// emitted for materializing constants for use in the current block.
+ MachineInstr *getLastLocalValue() { return LastLocalValue; }
+
+ /// setLastLocalValue - Update the position of the last instruction
+ /// emitted for materializing constants for use in the current block.
+ void setLastLocalValue(MachineInstr *I) { LastLocalValue = I; }
+
/// startNewBlock - Set the current block to which generated machine
/// instructions will be appended, and clear the local CSE map.
///
- void startNewBlock(MachineBasicBlock *mbb) {
- setCurrentBlock(mbb);
- LocalValueMap.clear();
- }
-
- /// setCurrentBlock - Set the current block to which generated machine
- /// instructions will be appended.
- ///
- void setCurrentBlock(MachineBasicBlock *mbb) {
- MBB = mbb;
- }
+ void startNewBlock();
/// getCurDebugLoc() - Return current debug location information.
DebugLoc getCurDebugLoc() const { return DL; }
@@ -108,18 +102,21 @@ public:
/// index value.
std::pair<unsigned, bool> getRegForGEPIndex(const Value *V);
+ /// recomputeInsertPt - Reset InsertPt to prepare for inserting instructions
+ /// into the current block.
+ void recomputeInsertPt();
+
+ /// enterLocalValueArea - Prepare InsertPt to begin inserting instructions
+ /// into the local value area and return the old insert position.
+ MachineBasicBlock::iterator enterLocalValueArea();
+
+ /// leaveLocalValueArea - Reset InsertPt to the given old insert position.
+ void leaveLocalValueArea(MachineBasicBlock::iterator OldInsertPt);
+
virtual ~FastISel();
protected:
- FastISel(MachineFunction &mf,
- DenseMap<const Value *, unsigned> &vm,
- DenseMap<const BasicBlock *, MachineBasicBlock *> &bm,
- DenseMap<const AllocaInst *, int> &am,
- std::vector<std::pair<MachineInstr*, unsigned> > &PHINodesToUpdate
-#ifndef NDEBUG
- , SmallSet<const Instruction *, 8> &cil
-#endif
- );
+ explicit FastISel(FunctionLoweringInfo &funcInfo);
/// TargetSelectInstruction - This method is called by target-independent
/// code when the normal FastISel process fails to select an instruction.
@@ -286,7 +283,7 @@ protected:
/// FastEmitBranch - Emit an unconditional branch to the given block,
/// unless it is the immediate (fall-through) successor, and update
/// the CFG.
- void FastEmitBranch(MachineBasicBlock *MBB);
+ void FastEmitBranch(MachineBasicBlock *MBB, DebugLoc DL);
unsigned UpdateValueMap(const Value* I, unsigned Reg);
@@ -305,6 +302,8 @@ protected:
}
private:
+ bool SelectLoad(const User *I);
+
bool SelectBinaryOp(const User *I, unsigned ISDOpcode);
bool SelectFNeg(const User *I);
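
A sketch of the insert-point protocol implied by enterLocalValueArea/leaveLocalValueArea above; the constant-materialization step is hypothetical and only indicated by a comment.

#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
using namespace llvm;

static unsigned materializeInLocalValueArea(FastISel &FastIS) {
  // Redirect emission to the local-value area at the top of the block.
  MachineBasicBlock::iterator SavedInsertPt = FastIS.enterLocalValueArea();

  unsigned Reg = 0;
  // ...emit the instructions that define the constant here; placing them in
  // the local-value area lets later instructions in the block reuse them...

  // Restore the normal insert point for regular instruction selection.
  FastIS.leaveLocalValueArea(SavedInsertPt);
  return Reg;
}
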
diff --git a/include/llvm/CodeGen/FunctionLoweringInfo.h b/include/llvm/CodeGen/FunctionLoweringInfo.h
new file mode 100644
index 000000000000..c49d1edb20f2
--- /dev/null
+++ b/include/llvm/CodeGen/FunctionLoweringInfo.h
@@ -0,0 +1,154 @@
+//===-- FunctionLoweringInfo.h - Lower functions from LLVM IR to CodeGen --===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This implements routines for translating functions from LLVM IR into
+// Machine IR.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_FUNCTIONLOWERINGINFO_H
+#define LLVM_CODEGEN_FUNCTIONLOWERINGINFO_H
+
+#include "llvm/InlineAsm.h"
+#include "llvm/Instructions.h"
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallVector.h"
+#ifndef NDEBUG
+#include "llvm/ADT/SmallSet.h"
+#endif
+#include "llvm/CodeGen/ValueTypes.h"
+#include "llvm/CodeGen/ISDOpcodes.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/Support/CallSite.h"
+#include <vector>
+
+namespace llvm {
+
+class AllocaInst;
+class BasicBlock;
+class CallInst;
+class Function;
+class GlobalVariable;
+class Instruction;
+class MachineInstr;
+class MachineBasicBlock;
+class MachineFunction;
+class MachineModuleInfo;
+class MachineRegisterInfo;
+class TargetLowering;
+class Value;
+
+//===--------------------------------------------------------------------===//
+/// FunctionLoweringInfo - This contains information that is global to a
+/// function that is used when lowering a region of the function.
+///
+class FunctionLoweringInfo {
+public:
+ const TargetLowering &TLI;
+ const Function *Fn;
+ MachineFunction *MF;
+ MachineRegisterInfo *RegInfo;
+
+ /// CanLowerReturn - true iff the function's return value can be lowered to
+ /// registers.
+ bool CanLowerReturn;
+
+ /// DemoteRegister - if CanLowerReturn is false, DemoteRegister is a vreg
+ /// allocated to hold a pointer to the hidden sret parameter.
+ unsigned DemoteRegister;
+
+ /// MBBMap - A mapping from LLVM basic blocks to their machine code entry.
+ DenseMap<const BasicBlock*, MachineBasicBlock *> MBBMap;
+
+ /// ValueMap - Since we emit code for the function a basic block at a time,
+ /// we must remember which virtual registers hold the values for
+ /// cross-basic-block values.
+ DenseMap<const Value*, unsigned> ValueMap;
+
+ /// StaticAllocaMap - Keep track of frame indices for fixed sized allocas in
+ /// the entry block. This allows the allocas to be efficiently referenced
+ /// anywhere in the function.
+ DenseMap<const AllocaInst*, int> StaticAllocaMap;
+
+ /// ArgDbgValues - A list of DBG_VALUE instructions created during isel for
+ /// function arguments that are inserted after scheduling is completed.
+ SmallVector<MachineInstr*, 8> ArgDbgValues;
+
+ /// RegFixups - Registers which need to be replaced after isel is done.
+ DenseMap<unsigned, unsigned> RegFixups;
+
+ /// MBB - The current block.
+ MachineBasicBlock *MBB;
+
+ /// InsertPt - The current insert position inside the current block.
+ MachineBasicBlock::iterator InsertPt;
+
+#ifndef NDEBUG
+ SmallSet<const Instruction *, 8> CatchInfoLost;
+ SmallSet<const Instruction *, 8> CatchInfoFound;
+#endif
+
+ struct LiveOutInfo {
+ unsigned NumSignBits;
+ APInt KnownOne, KnownZero;
+ LiveOutInfo() : NumSignBits(0), KnownOne(1, 0), KnownZero(1, 0) {}
+ };
+
+ /// LiveOutRegInfo - Information about live out vregs, indexed by their
+ /// register number offset by 'FirstVirtualRegister'.
+ std::vector<LiveOutInfo> LiveOutRegInfo;
+
+ /// PHINodesToUpdate - A list of phi instructions whose operand list will
+ /// be updated after processing the current basic block.
+ /// TODO: This isn't per-function state, it's per-basic-block state. But
+ /// there's no other convenient place for it to live right now.
+ std::vector<std::pair<MachineInstr*, unsigned> > PHINodesToUpdate;
+
+ explicit FunctionLoweringInfo(const TargetLowering &TLI);
+
+ /// set - Initialize this FunctionLoweringInfo with the given Function
+ /// and its associated MachineFunction.
+ ///
+ void set(const Function &Fn, MachineFunction &MF);
+
+ /// clear - Clear out all the function-specific state. This returns this
+ /// FunctionLoweringInfo to an empty state, ready to be used for a
+ /// different function.
+ void clear();
+
+ /// isExportedInst - Return true if the specified value is an instruction
+ /// exported from its block.
+ bool isExportedInst(const Value *V) {
+ return ValueMap.count(V);
+ }
+
+ unsigned CreateReg(EVT VT);
+
+ unsigned CreateRegs(const Type *Ty);
+
+ unsigned InitializeRegForValue(const Value *V) {
+ unsigned &R = ValueMap[V];
+ assert(R == 0 && "Already initialized this value register!");
+ return R = CreateRegs(V->getType());
+ }
+};
+
+/// AddCatchInfo - Extract the personality and type infos from an eh.selector
+/// call, and add them to the specified machine basic block.
+void AddCatchInfo(const CallInst &I,
+ MachineModuleInfo *MMI, MachineBasicBlock *MBB);
+
+/// CopyCatchInfo - Copy catch information from DestBB to SrcBB.
+void CopyCatchInfo(const BasicBlock *SrcBB, const BasicBlock *DestBB,
+ MachineModuleInfo *MMI, FunctionLoweringInfo &FLI);
+
+} // end namespace llvm
+
+#endif
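
A sketch of the per-function lifecycle implied by set() and clear(); the selection step in the middle stands in for the real SelectionDAG/FastISel drivers and is not part of the patch.

#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/Function.h"
using namespace llvm;

static void lowerOneFunction(FunctionLoweringInfo &FuncInfo,
                             const Function &Fn, MachineFunction &MF) {
  // Populate MBBMap, ValueMap, StaticAllocaMap, CanLowerReturn, etc.
  FuncInfo.set(Fn, MF);

  // ...run instruction selection; cross-block values receive vregs through
  // FuncInfo.InitializeRegForValue(V) and are found again via ValueMap...

  // Return the object to an empty state, ready for the next function.
  FuncInfo.clear();
}
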
diff --git a/include/llvm/CodeGen/GCMetadata.h b/include/llvm/CodeGen/GCMetadata.h
index 6de69cd82907..b401068140b0 100644
--- a/include/llvm/CodeGen/GCMetadata.h
+++ b/include/llvm/CodeGen/GCMetadata.h
@@ -1,4 +1,4 @@
-//===-- GCMetadata.h - Garbage collector metadata -------------------------===//
+//===-- GCMetadata.h - Garbage collector metadata ---------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -14,7 +14,7 @@
//
// The GCFunctionInfo class logs the data necessary to build a type accurate
// stack map. The code generator outputs:
-//
+//
// - Safe points as specified by the GCStrategy's NeededSafePoints.
// - Stack offsets for GC roots, as specified by calls to llvm.gcroot
//
@@ -42,10 +42,10 @@ namespace llvm {
class GCStrategy;
class Constant;
class MCSymbol;
-
+
namespace GC {
/// PointKind - The type of a collector-safe point.
- ///
+ ///
enum PointKind {
Loop, //< Instr is a loop (backwards branch).
Return, //< Instr is a return instruction.
@@ -53,138 +53,138 @@ namespace llvm {
PostCall //< Instr is the return address of a call.
};
}
-
+
/// GCPoint - Metadata for a collector-safe point in machine code.
- ///
+ ///
struct GCPoint {
GC::PointKind Kind; //< The kind of the safe point.
MCSymbol *Label; //< A label.
-
+
GCPoint(GC::PointKind K, MCSymbol *L) : Kind(K), Label(L) {}
};
-
+
/// GCRoot - Metadata for a pointer to an object managed by the garbage
/// collector.
struct GCRoot {
int Num; //< Usually a frame index.
int StackOffset; //< Offset from the stack pointer.
const Constant *Metadata;//< Metadata straight from the call to llvm.gcroot.
-
+
GCRoot(int N, const Constant *MD) : Num(N), StackOffset(-1), Metadata(MD) {}
};
-
-
+
+
/// GCFunctionInfo - Garbage collection metadata for a single function.
- ///
+ ///
class GCFunctionInfo {
public:
typedef std::vector<GCPoint>::iterator iterator;
typedef std::vector<GCRoot>::iterator roots_iterator;
typedef std::vector<GCRoot>::const_iterator live_iterator;
-
+
private:
const Function &F;
GCStrategy &S;
uint64_t FrameSize;
std::vector<GCRoot> Roots;
std::vector<GCPoint> SafePoints;
-
+
// FIXME: Liveness. A 2D BitVector, perhaps?
- //
+ //
// BitVector Liveness;
- //
+ //
// bool islive(int point, int root) =
// Liveness[point * SafePoints.size() + root]
- //
+ //
// The bit vector is the more compact representation where >3.2% of roots
// are live per safe point (1.5% on 64-bit hosts).
-
+
public:
GCFunctionInfo(const Function &F, GCStrategy &S);
~GCFunctionInfo();
-
+
/// getFunction - Return the function to which this metadata applies.
- ///
+ ///
const Function &getFunction() const { return F; }
-
+
/// getStrategy - Return the GC strategy for the function.
- ///
+ ///
GCStrategy &getStrategy() { return S; }
-
+
/// addStackRoot - Registers a root that lives on the stack. Num is the
/// stack object ID for the alloca (if the code generator is
// using MachineFrameInfo).
void addStackRoot(int Num, const Constant *Metadata) {
Roots.push_back(GCRoot(Num, Metadata));
}
-
+
/// addSafePoint - Notes the existence of a safe point. Num is the ID of the
- /// label just prior to the safe point (if the code generator is using
+ /// label just prior to the safe point (if the code generator is using
/// MachineModuleInfo).
void addSafePoint(GC::PointKind Kind, MCSymbol *Label) {
SafePoints.push_back(GCPoint(Kind, Label));
}
-
+
/// getFrameSize/setFrameSize - Records the function's frame size.
- ///
+ ///
uint64_t getFrameSize() const { return FrameSize; }
void setFrameSize(uint64_t S) { FrameSize = S; }
-
+
/// begin/end - Iterators for safe points.
- ///
+ ///
iterator begin() { return SafePoints.begin(); }
iterator end() { return SafePoints.end(); }
size_t size() const { return SafePoints.size(); }
-
+
/// roots_begin/roots_end - Iterators for all roots in the function.
- ///
+ ///
roots_iterator roots_begin() { return Roots.begin(); }
roots_iterator roots_end () { return Roots.end(); }
size_t roots_size() const { return Roots.size(); }
-
+
/// live_begin/live_end - Iterators for live roots at a given safe point.
- ///
+ ///
live_iterator live_begin(const iterator &p) { return roots_begin(); }
live_iterator live_end (const iterator &p) { return roots_end(); }
size_t live_size(const iterator &p) const { return roots_size(); }
};
-
-
+
+
/// GCModuleInfo - Garbage collection metadata for a whole module.
- ///
+ ///
class GCModuleInfo : public ImmutablePass {
typedef StringMap<GCStrategy*> strategy_map_type;
typedef std::vector<GCStrategy*> list_type;
typedef DenseMap<const Function*,GCFunctionInfo*> finfo_map_type;
-
+
strategy_map_type StrategyMap;
list_type StrategyList;
finfo_map_type FInfoMap;
-
+
GCStrategy *getOrCreateStrategy(const Module *M, const std::string &Name);
-
+
public:
typedef list_type::const_iterator iterator;
-
+
static char ID;
-
+
GCModuleInfo();
~GCModuleInfo();
-
+
/// clear - Resets the pass. The metadata deleter pass calls this.
- ///
+ ///
void clear();
-
+
/// begin/end - Iterators for used strategies.
- ///
+ ///
iterator begin() const { return StrategyList.begin(); }
iterator end() const { return StrategyList.end(); }
-
+
/// get - Look up function metadata.
- ///
+ ///
GCFunctionInfo &getFunctionInfo(const Function &F);
};
-
+
}
#endif
diff --git a/include/llvm/CodeGen/GCMetadataPrinter.h b/include/llvm/CodeGen/GCMetadataPrinter.h
index 3703545bb6f6..17a265300000 100644
--- a/include/llvm/CodeGen/GCMetadataPrinter.h
+++ b/include/llvm/CodeGen/GCMetadataPrinter.h
@@ -25,49 +25,49 @@
#include "llvm/Support/Registry.h"
namespace llvm {
-
+
class GCMetadataPrinter;
-
+
/// GCMetadataPrinterRegistry - The GC assembly printer registry uses all the
/// defaults from Registry.
typedef Registry<GCMetadataPrinter> GCMetadataPrinterRegistry;
-
+
/// GCMetadataPrinter - Emits GC metadata as assembly code.
- ///
+ ///
class GCMetadataPrinter {
public:
typedef GCStrategy::list_type list_type;
typedef GCStrategy::iterator iterator;
-
+
private:
GCStrategy *S;
-
+
friend class AsmPrinter;
-
+
protected:
// May only be subclassed.
GCMetadataPrinter();
-
+
// Do not implement.
GCMetadataPrinter(const GCMetadataPrinter &);
GCMetadataPrinter &operator=(const GCMetadataPrinter &);
-
+
public:
GCStrategy &getStrategy() { return *S; }
const Module &getModule() const { return S->getModule(); }
-
+
/// begin/end - Iterate over the collected function metadata.
iterator begin() { return S->begin(); }
iterator end() { return S->end(); }
-
+
/// beginAssembly/finishAssembly - Emit module metadata as assembly code.
virtual void beginAssembly(AsmPrinter &AP);
-
+
virtual void finishAssembly(AsmPrinter &AP);
-
+
virtual ~GCMetadataPrinter();
};
-
+
}
#endif
diff --git a/include/llvm/CodeGen/ISDOpcodes.h b/include/llvm/CodeGen/ISDOpcodes.h
index a5e9dd5fe84a..69de5986dd8f 100644
--- a/include/llvm/CodeGen/ISDOpcodes.h
+++ b/include/llvm/CodeGen/ISDOpcodes.h
@@ -130,7 +130,7 @@ namespace ISD {
/// This node represents a target intrinsic function with no side effects.
/// The first operand is the ID number of the intrinsic from the
/// llvm::Intrinsic namespace. The operands to the intrinsic follow. The
- /// node has returns the result of the intrinsic.
+ /// node returns the result of the intrinsic.
INTRINSIC_WO_CHAIN,
/// RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...)
@@ -508,8 +508,9 @@ namespace ISD {
CALLSEQ_START, // Beginning of a call sequence
CALLSEQ_END, // End of a call sequence
- // VAARG - VAARG has three operands: an input chain, a pointer, and a
- // SRCVALUE. It returns a pair of values: the vaarg value and a new chain.
+ // VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE,
+ // and the alignment. It returns a pair of values: the vaarg value and a
+ // new chain.
VAARG,
// VACOPY - VACOPY has five operands: an input chain, a destination pointer,
diff --git a/include/llvm/CodeGen/LinkAllCodegenComponents.h b/include/llvm/CodeGen/LinkAllCodegenComponents.h
index b4c2f2fe13c1..cd8293de5069 100644
--- a/include/llvm/CodeGen/LinkAllCodegenComponents.h
+++ b/include/llvm/CodeGen/LinkAllCodegenComponents.h
@@ -33,7 +33,6 @@ namespace {
(void) llvm::createDeadMachineInstructionElimPass();
- (void) llvm::createLocalRegisterAllocator();
(void) llvm::createFastRegisterAllocator();
(void) llvm::createLinearScanRegisterAllocator();
(void) llvm::createPBQPRegisterAllocator();
diff --git a/include/llvm/CodeGen/LiveInterval.h b/include/llvm/CodeGen/LiveInterval.h
index 637f52bd0bdc..8d80efbc5c77 100644
--- a/include/llvm/CodeGen/LiveInterval.h
+++ b/include/llvm/CodeGen/LiveInterval.h
@@ -53,7 +53,7 @@ namespace llvm {
class VNInfo {
private:
enum {
- HAS_PHI_KILL = 1,
+ HAS_PHI_KILL = 1,
REDEF_BY_EC = 1 << 1,
IS_PHI_DEF = 1 << 2,
IS_UNUSED = 1 << 3,
@@ -67,22 +67,14 @@ namespace llvm {
} cr;
public:
- typedef SpecificBumpPtrAllocator<VNInfo> Allocator;
- typedef SmallVector<SlotIndex, 4> KillSet;
+ typedef BumpPtrAllocator Allocator;
/// The ID number of this value.
unsigned id;
-
+
/// The index of the defining instruction (if isDefAccurate() returns true).
SlotIndex def;
- KillSet kills;
-
- /*
- VNInfo(LiveIntervals &li_)
- : defflags(IS_UNUSED), id(~1U) { cr.copy = 0; }
- */
-
/// VNInfo constructor.
/// d is presumed to point to the actual defining instr. If it doesn't
/// setIsDefAccurate(false) should be called after construction.
@@ -91,7 +83,7 @@ namespace llvm {
/// VNInfo constructor, copies values from orig, except for the value number.
VNInfo(unsigned i, const VNInfo &orig)
- : flags(orig.flags), cr(orig.cr), id(i), def(orig.def), kills(orig.kills)
+ : flags(orig.flags), cr(orig.cr), id(i), def(orig.def)
{ }
/// Copy from the parameter into this VNInfo.
@@ -99,7 +91,6 @@ namespace llvm {
flags = src.flags;
cr = src.cr;
def = src.def;
- kills = src.kills;
}
/// Used for copying value number info.
@@ -114,7 +105,7 @@ namespace llvm {
/// This method should not be called on stack intervals as it may lead to
/// undefined behavior.
void setCopy(MachineInstr *c) { cr.copy = c; }
-
+
/// For a stack interval, returns the reg which this stack interval was
/// defined from.
/// For a register interval the behaviour of this method is undefined.
@@ -144,7 +135,7 @@ namespace llvm {
else
flags &= ~REDEF_BY_EC;
}
-
+
/// Returns true if this value is defined by a PHI instruction (or was,
/// PHI instructions may have been eliminated).
bool isPHIDef() const { return flags & IS_PHI_DEF; }
@@ -172,49 +163,9 @@ namespace llvm {
void setIsDefAccurate(bool defAccurate) {
if (defAccurate)
flags |= IS_DEF_ACCURATE;
- else
+ else
flags &= ~IS_DEF_ACCURATE;
}
-
- /// Returns true if the given index is a kill of this value.
- bool isKill(SlotIndex k) const {
- KillSet::const_iterator
- i = std::lower_bound(kills.begin(), kills.end(), k);
- return (i != kills.end() && *i == k);
- }
-
- /// addKill - Add a kill instruction index to the specified value
- /// number.
- void addKill(SlotIndex k) {
- if (kills.empty()) {
- kills.push_back(k);
- } else {
- KillSet::iterator
- i = std::lower_bound(kills.begin(), kills.end(), k);
- kills.insert(i, k);
- }
- }
-
- /// Remove the specified kill index from this value's kills list.
- /// Returns true if the value was present, otherwise returns false.
- bool removeKill(SlotIndex k) {
- KillSet::iterator i = std::lower_bound(kills.begin(), kills.end(), k);
- if (i != kills.end() && *i == k) {
- kills.erase(i);
- return true;
- }
- return false;
- }
-
- /// Remove all kills in the range [s, e).
- void removeKills(SlotIndex s, SlotIndex e) {
- KillSet::iterator
- si = std::lower_bound(kills.begin(), kills.end(), s),
- se = std::upper_bound(kills.begin(), kills.end(), e);
-
- kills.erase(si, se);
- }
-
};
/// LiveRange structure - This represents a simple register range in the
@@ -258,6 +209,8 @@ namespace llvm {
LiveRange(); // DO NOT IMPLEMENT
};
+ template <> struct isPodLike<LiveRange> { static const bool value = true; };
+
raw_ostream& operator<<(raw_ostream& os, const LiveRange &LR);
@@ -366,8 +319,8 @@ namespace llvm {
/// the instruction that defines the value number.
VNInfo *getNextValue(SlotIndex def, MachineInstr *CopyMI,
bool isDefAccurate, VNInfo::Allocator &VNInfoAllocator) {
- VNInfo *VNI = VNInfoAllocator.Allocate();
- new (VNI) VNInfo((unsigned)valnos.size(), def, CopyMI);
+ VNInfo *VNI =
+ new (VNInfoAllocator) VNInfo((unsigned)valnos.size(), def, CopyMI);
VNI->setIsDefAccurate(isDefAccurate);
valnos.push_back(VNI);
return VNI;
@@ -377,23 +330,12 @@ namespace llvm {
/// for the Value number.
VNInfo *createValueCopy(const VNInfo *orig,
VNInfo::Allocator &VNInfoAllocator) {
- VNInfo *VNI = VNInfoAllocator.Allocate();
- new (VNI) VNInfo((unsigned)valnos.size(), *orig);
+ VNInfo *VNI =
+ new (VNInfoAllocator) VNInfo((unsigned)valnos.size(), *orig);
valnos.push_back(VNI);
return VNI;
}
- /// addKills - Add a number of kills into the VNInfo kill vector. If this
- /// interval is live at a kill point, then the kill is not added.
- void addKills(VNInfo *VNI, const VNInfo::KillSet &kills) {
- for (unsigned i = 0, e = static_cast<unsigned>(kills.size());
- i != e; ++i) {
- if (!liveBeforeAndAt(kills[i])) {
- VNI->addKill(kills[i]);
- }
- }
- }
-
/// isOnlyLROfValNo - Return true if the specified live range is the only
/// one defined by its val#.
bool isOnlyLROfValNo(const LiveRange *LR) {
@@ -472,6 +414,17 @@ namespace llvm {
// range. If it does, then check if the previous live range ends at index-1.
bool liveBeforeAndAt(SlotIndex index) const;
+ /// killedAt - Return true if a live range ends at index. Note that the kill
+ /// point is not contained in the half-open live range. It is usually the
+ /// getDefIndex() slot following its last use.
+ bool killedAt(SlotIndex index) const;
+
+ /// killedInRange - Return true if the interval has kills in [Start,End).
+ /// Note that the kill point is considered the end of a live range, so it is
+ /// not contained in the live range. If a live range ends at End, it won't
+ /// be counted as a kill by this method.
+ bool killedInRange(SlotIndex Start, SlotIndex End) const;
+
/// getLiveRangeContaining - Return the live range that contains the
/// specified index, or null if there is none.
const LiveRange *getLiveRangeContaining(SlotIndex Idx) const {
@@ -486,6 +439,12 @@ namespace llvm {
return I == end() ? 0 : &*I;
}
+ /// getVNInfoAt - Return the VNInfo that is live at Idx, or NULL.
+ VNInfo *getVNInfoAt(SlotIndex Idx) const {
+ const_iterator I = FindLiveRangeContaining(Idx);
+ return I == end() ? 0 : I->valno;
+ }
+
/// FindLiveRangeContaining - Return an iterator to the live range that
/// contains the specified index, or end() if there is none.
const_iterator FindLiveRangeContaining(SlotIndex Idx) const;
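
With the per-VNInfo kill lists removed, clients query the interval directly. A minimal sketch, assuming the SlotIndex comes from the usual SlotIndexes/LiveIntervals machinery.

#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/SlotIndexes.h"
using namespace llvm;

static bool valueLiveAndKilledHere(const LiveInterval &LI, SlotIndex UseIdx) {
  // Which value number, if any, is live at this index?
  VNInfo *VNI = LI.getVNInfoAt(UseIdx);
  if (!VNI)
    return false;

  // The kill point is the end of the live range (not contained in it),
  // normally the def slot following the last use.
  return LI.killedAt(UseIdx.getDefIndex());
}
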
diff --git a/include/llvm/CodeGen/LiveIntervalAnalysis.h b/include/llvm/CodeGen/LiveIntervalAnalysis.h
index 32fa70970d06..5a0d81b5c203 100644
--- a/include/llvm/CodeGen/LiveIntervalAnalysis.h
+++ b/include/llvm/CodeGen/LiveIntervalAnalysis.h
@@ -133,10 +133,9 @@ namespace llvm {
bool conflictsWithPhysReg(const LiveInterval &li, VirtRegMap &vrm,
unsigned reg);
- /// conflictsWithSubPhysRegRef - Similar to conflictsWithPhysRegRef except
- /// it checks for sub-register reference and it can check use as well.
- bool conflictsWithSubPhysRegRef(LiveInterval &li, unsigned Reg,
- bool CheckUse,
+ /// conflictsWithAliasRef - Similar to conflictsWithPhysRegRef except
+ /// it checks for alias uses and defs.
+ bool conflictsWithAliasRef(LiveInterval &li, unsigned Reg,
SmallPtrSet<MachineInstr*,32> &JoinedCopies);
// Interval creation
@@ -229,10 +228,6 @@ namespace llvm {
VNInfo::Allocator& getVNInfoAllocator() { return VNInfoAllocator; }
- /// getVNInfoSourceReg - Helper function that parses the specified VNInfo
- /// copy field and returns the source register that defines it.
- unsigned getVNInfoSourceReg(const VNInfo *VNI) const;
-
virtual void getAnalysisUsage(AnalysisUsage &AU) const;
virtual void releaseMemory();
@@ -249,12 +244,6 @@ namespace llvm {
addIntervalsForSpills(const LiveInterval& i,
SmallVectorImpl<LiveInterval*> &SpillIs,
const MachineLoopInfo *loopInfo, VirtRegMap& vrm);
-
- /// addIntervalsForSpillsFast - Quickly create new intervals for spilled
- /// defs / uses without remat or splitting.
- std::vector<LiveInterval*>
- addIntervalsForSpillsFast(const LiveInterval &li,
- const MachineLoopInfo *loopInfo, VirtRegMap &vrm);
/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
/// around all defs and uses of the specified interval. Return true if it
diff --git a/include/llvm/CodeGen/MachineBasicBlock.h b/include/llvm/CodeGen/MachineBasicBlock.h
index cc651ca77b77..3cfc47ac4d84 100644
--- a/include/llvm/CodeGen/MachineBasicBlock.h
+++ b/include/llvm/CodeGen/MachineBasicBlock.h
@@ -19,6 +19,7 @@
namespace llvm {
+class Pass;
class BasicBlock;
class MachineFunction;
class MCSymbol;
@@ -258,6 +259,11 @@ public:
/// machine basic block (i.e., copies all the successors of fromMBB and
/// removes all the successors from fromMBB).
void transferSuccessors(MachineBasicBlock *fromMBB);
+
+ /// transferSuccessorsAndUpdatePHIs - Transfers all the successors, as
+ /// in transferSuccessors, and update PHI operands in the successor blocks
+ /// which refer to fromMBB to refer to this.
+ void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *fromMBB);
/// isSuccessor - Return true if the specified MBB is a successor of this
/// block.
@@ -276,11 +282,26 @@ public:
/// branch to do so (e.g., a table jump). True is a conservative answer.
bool canFallThrough();
+ /// Returns a pointer to the first instruction in this block that is not a
+ /// PHINode instruction. When adding instructions to the beginning of the
+ /// basic block, they should be added before the returned value, not before
+ /// the first instruction, which might be a PHI.
+ /// Returns end() if there's no non-PHI instruction.
+ iterator getFirstNonPHI();
+
/// getFirstTerminator - returns an iterator to the first terminator
/// instruction of this basic block. If a terminator does not exist,
/// it returns end()
iterator getFirstTerminator();
+ /// SplitCriticalEdge - Split the critical edge from this block to the
+ /// given successor block, and return the newly created block, or null
+ /// if splitting is not possible.
+ ///
+ /// This function updates LiveVariables, MachineDominatorTree, and
+ /// MachineLoopInfo, as applicable.
+ MachineBasicBlock *SplitCriticalEdge(MachineBasicBlock *Succ, Pass *P);
+
void pop_front() { Insts.pop_front(); }
void pop_back() { Insts.pop_back(); }
void push_back(MachineInstr *MI) { Insts.push_back(MI); }
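
A sketch tying together the two additions above: split a critical edge when possible, otherwise insert after the successor's PHIs. The pass pointer P is whichever pass drives the transformation; the BuildMI call is only indicated.

#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/Pass.h"
using namespace llvm;

static MachineBasicBlock *prepareEdgeForCode(MachineBasicBlock *FromMBB,
                                             MachineBasicBlock *ToMBB,
                                             Pass *P) {
  // A critical edge gets its own block so the new code runs only on this edge.
  if (MachineBasicBlock *NewMBB = FromMBB->SplitCriticalEdge(ToMBB, P))
    return NewMBB;

  // Otherwise the code can go into ToMBB itself, but only after its PHIs.
  MachineBasicBlock::iterator InsertPt = ToMBB->getFirstNonPHI();
  (void)InsertPt; // ...BuildMI(*ToMBB, InsertPt, ...) would go here...
  return ToMBB;
}
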
diff --git a/include/llvm/CodeGen/MachineFrameInfo.h b/include/llvm/CodeGen/MachineFrameInfo.h
index fe2c2986b59e..9471316d25d7 100644
--- a/include/llvm/CodeGen/MachineFrameInfo.h
+++ b/include/llvm/CodeGen/MachineFrameInfo.h
@@ -33,16 +33,14 @@ class BitVector;
/// callee saved register in the current frame.
class CalleeSavedInfo {
unsigned Reg;
- const TargetRegisterClass *RegClass;
int FrameIdx;
public:
- CalleeSavedInfo(unsigned R, const TargetRegisterClass *RC, int FI = 0)
- : Reg(R), RegClass(RC), FrameIdx(FI) {}
+ explicit CalleeSavedInfo(unsigned R, int FI = 0)
+ : Reg(R), FrameIdx(FI) {}
// Accessors.
unsigned getReg() const { return Reg; }
- const TargetRegisterClass *getRegClass() const { return RegClass; }
int getFrameIdx() const { return FrameIdx; }
void setFrameIdx(int FI) { FrameIdx = FI; }
};
@@ -100,8 +98,7 @@ class MachineFrameInfo {
// cannot alias any other memory objects.
bool isSpillSlot;
- StackObject(uint64_t Sz, unsigned Al, int64_t SP, bool IM,
- bool isSS)
+ StackObject(uint64_t Sz, unsigned Al, int64_t SP, bool IM, bool isSS)
: SPOffset(SP), Size(Sz), Alignment(Al), isImmutable(IM),
isSpillSlot(isSS) {}
};
@@ -352,8 +349,7 @@ public:
/// efficiency. By default, fixed objects are immutable. This returns an
/// index with a negative value.
///
- int CreateFixedObject(uint64_t Size, int64_t SPOffset,
- bool Immutable, bool isSS);
+ int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool Immutable);
/// isFixedObjectIndex - Returns true if the specified index corresponds to a
diff --git a/include/llvm/CodeGen/MachineFunctionPass.h b/include/llvm/CodeGen/MachineFunctionPass.h
index 1a2b12972aba..685e86824c31 100644
--- a/include/llvm/CodeGen/MachineFunctionPass.h
+++ b/include/llvm/CodeGen/MachineFunctionPass.h
@@ -34,9 +34,6 @@ protected:
explicit MachineFunctionPass(intptr_t ID) : FunctionPass(ID) {}
explicit MachineFunctionPass(void *ID) : FunctionPass(ID) {}
- /// createPrinterPass - Get a machine function printer pass.
- Pass *createPrinterPass(raw_ostream &O, const std::string &Banner) const;
-
/// runOnMachineFunction - This method must be overloaded to perform the
/// desired machine code transformation or analysis.
///
@@ -51,7 +48,11 @@ protected:
virtual void getAnalysisUsage(AnalysisUsage &AU) const;
private:
- bool runOnFunction(Function &F);
+ /// createPrinterPass - Get a machine function printer pass.
+ virtual Pass *createPrinterPass(raw_ostream &O,
+ const std::string &Banner) const;
+
+ virtual bool runOnFunction(Function &F);
};
} // End llvm namespace
diff --git a/include/llvm/CodeGen/MachineInstr.h b/include/llvm/CodeGen/MachineInstr.h
index cf691bb7f166..e67b2dda1141 100644
--- a/include/llvm/CodeGen/MachineInstr.h
+++ b/include/llvm/CodeGen/MachineInstr.h
@@ -215,9 +215,6 @@ public:
bool isKill() const { return getOpcode() == TargetOpcode::KILL; }
bool isImplicitDef() const { return getOpcode()==TargetOpcode::IMPLICIT_DEF; }
bool isInlineAsm() const { return getOpcode() == TargetOpcode::INLINEASM; }
- bool isExtractSubreg() const {
- return getOpcode() == TargetOpcode::EXTRACT_SUBREG;
- }
bool isInsertSubreg() const {
return getOpcode() == TargetOpcode::INSERT_SUBREG;
}
@@ -227,7 +224,22 @@ public:
bool isRegSequence() const {
return getOpcode() == TargetOpcode::REG_SEQUENCE;
}
-
+ bool isCopy() const {
+ return getOpcode() == TargetOpcode::COPY;
+ }
+
+ /// isCopyLike - Return true if the instruction behaves like a copy.
+ /// This does not include native copy instructions.
+ bool isCopyLike() const {
+ return isCopy() || isSubregToReg();
+ }
+
+ /// isIdentityCopy - Return true if the instruction is an identity copy.
+ bool isIdentityCopy() const {
+ return isCopy() && getOperand(0).getReg() == getOperand(1).getReg() &&
+ getOperand(0).getSubReg() == getOperand(1).getSubReg();
+ }
+
/// readsRegister - Return true if the MachineInstr reads the specified
/// register. If TargetRegisterInfo is passed, then it also checks if there
/// is a read of a super-register.
@@ -339,6 +351,11 @@ public:
/// copyPredicates - Copies predicate operand(s) from MI.
void copyPredicates(const MachineInstr *MI);
+ /// substituteRegister - Replace all occurrences of FromReg with ToReg:SubIdx,
+ /// properly composing subreg indices where necessary.
+ void substituteRegister(unsigned FromReg, unsigned ToReg, unsigned SubIdx,
+ const TargetRegisterInfo &RegInfo);
+
/// addRegisterKilled - We have determined MI kills a register. Look for the
/// operand that uses it and mark it as IsKill. If AddIfNotFound is true,
/// add an implicit operand if it's not found. Returns true if the operand
@@ -359,6 +376,11 @@ public:
void addRegisterDefined(unsigned IncomingReg,
const TargetRegisterInfo *RegInfo = 0);
+ /// setPhysRegsDeadExcept - Mark every physreg used by this instruction as dead
+ /// except those in the UsedRegs list.
+ void setPhysRegsDeadExcept(const SmallVectorImpl<unsigned> &UsedRegs,
+ const TargetRegisterInfo &TRI);
+
/// isSafeToMove - Return true if it is safe to move this instruction. If
/// SawStore is set to true, it means that there is a store (or call) between
/// the instruction's location and its intended destination.
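
A sketch of a cleanup loop built on the new copy predicates; the erase-as-you-go policy is illustrative, not the coalescer's actual logic.

#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineInstr.h"
using namespace llvm;

static unsigned removeIdentityCopies(MachineBasicBlock &MBB) {
  unsigned NumRemoved = 0;
  for (MachineBasicBlock::iterator I = MBB.begin(); I != MBB.end(); ) {
    MachineInstr *MI = I++;      // advance first, MI may be erased below
    if (MI->isIdentityCopy()) {  // COPY with matching reg and subreg index
      MI->eraseFromParent();
      ++NumRemoved;
    }
  }
  return NumRemoved;
}
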
diff --git a/include/llvm/CodeGen/MachineJumpTableInfo.h b/include/llvm/CodeGen/MachineJumpTableInfo.h
index 1b6ab2cd4cbc..62643497655a 100644
--- a/include/llvm/CodeGen/MachineJumpTableInfo.h
+++ b/include/llvm/CodeGen/MachineJumpTableInfo.h
@@ -74,7 +74,7 @@ private:
JTEntryKind EntryKind;
std::vector<MachineJumpTableEntry> JumpTables;
public:
- MachineJumpTableInfo(JTEntryKind Kind): EntryKind(Kind) {}
+ explicit MachineJumpTableInfo(JTEntryKind Kind): EntryKind(Kind) {}
JTEntryKind getEntryKind() const { return EntryKind; }
diff --git a/include/llvm/CodeGen/MachineLoopInfo.h b/include/llvm/CodeGen/MachineLoopInfo.h
index 8459a8db9a30..3b3e31e02afa 100644
--- a/include/llvm/CodeGen/MachineLoopInfo.h
+++ b/include/llvm/CodeGen/MachineLoopInfo.h
@@ -64,13 +64,13 @@ class MachineLoopInfo : public MachineFunctionPass {
void operator=(const MachineLoopInfo &); // do not implement
MachineLoopInfo(const MachineLoopInfo &); // do not implement
- LoopInfoBase<MachineBasicBlock, MachineLoop>& getBase() { return LI; }
-
public:
static char ID; // Pass identification, replacement for typeid
MachineLoopInfo() : MachineFunctionPass(&ID) {}
+ LoopInfoBase<MachineBasicBlock, MachineLoop>& getBase() { return LI; }
+
/// iterator/begin/end - The interface to the top-level loops in the current
/// function.
///
diff --git a/include/llvm/CodeGen/MachineOperand.h b/include/llvm/CodeGen/MachineOperand.h
index 31858ce8081b..afa2c298a273 100644
--- a/include/llvm/CodeGen/MachineOperand.h
+++ b/include/llvm/CodeGen/MachineOperand.h
@@ -27,6 +27,7 @@ class MachineInstr;
class MachineRegisterInfo;
class MDNode;
class TargetMachine;
+class TargetRegisterInfo;
class raw_ostream;
class MCSymbol;
@@ -246,7 +247,20 @@ public:
assert(isReg() && "Wrong MachineOperand accessor");
SubReg = (unsigned char)subReg;
}
-
+
+ /// substVirtReg - Substitute the current register with the virtual
+ /// subregister Reg:SubReg. Take any existing SubReg index into account,
+ /// using TargetRegisterInfo to compose the subreg indices if necessary.
+ /// Reg must be a virtual register, SubIdx can be 0.
+ ///
+ void substVirtReg(unsigned Reg, unsigned SubIdx, const TargetRegisterInfo&);
+
+ /// substPhysReg - Substitute the current register with the physical register
+ /// Reg, taking any existing SubReg into account. For instance,
+ /// substPhysReg(%EAX) will change %reg1024:sub_8bit to %AL.
+ ///
+ void substPhysReg(unsigned Reg, const TargetRegisterInfo&);
+
void setIsUse(bool Val = true) {
assert(isReg() && "Wrong MachineOperand accessor");
assert((Val || !isDebug()) && "Marking a debug operation as def");
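
For substVirtReg and substPhysReg, here is a minimal sketch of a caller; the helper name and loop are hypothetical, only the MachineOperand calls come from the declarations above:

    #include "llvm/CodeGen/MachineInstr.h"
    #include "llvm/Target/TargetRegisterInfo.h"

    // Point every operand of MI that refers to OldReg at NewReg:SubIdx,
    // letting substVirtReg compose any existing subregister index via TRI.
    static void rewriteToSubReg(llvm::MachineInstr *MI, unsigned OldReg,
                                unsigned NewReg, unsigned SubIdx,
                                const llvm::TargetRegisterInfo &TRI) {
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        llvm::MachineOperand &MO = MI->getOperand(i);
        if (MO.isReg() && MO.getReg() == OldReg)
          MO.substVirtReg(NewReg, SubIdx, TRI);
      }
    }
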
diff --git a/include/llvm/CodeGen/MachineRegisterInfo.h b/include/llvm/CodeGen/MachineRegisterInfo.h
index fa14fdc3f19f..066c91b36cf5 100644
--- a/include/llvm/CodeGen/MachineRegisterInfo.h
+++ b/include/llvm/CodeGen/MachineRegisterInfo.h
@@ -35,7 +35,7 @@ class MachineRegisterInfo {
/// RegClassVRegMap - This vector acts as a map from TargetRegisterClass to
/// virtual registers. For each target register class, it keeps a list of
/// virtual registers belonging to the class.
- std::vector<std::vector<unsigned> > RegClass2VRegMap;
+ std::vector<unsigned> *RegClass2VRegMap;
/// RegAllocHints - This vector records register allocation hints for virtual
/// registers. For each virtual register, it keeps a register and hint type
@@ -363,7 +363,18 @@ public:
defusechain_iterator operator++(int) { // Postincrement
defusechain_iterator tmp = *this; ++*this; return tmp;
}
-
+
+ /// skipInstruction - move forward until reaching a different instruction.
+ /// Return the skipped instruction that is no longer pointed to, or NULL if
+ /// already pointing to end().
+ MachineInstr *skipInstruction() {
+ if (!Op) return 0;
+ MachineInstr *MI = Op->getParent();
+ do ++*this;
+ while (Op && Op->getParent() == MI);
+ return MI;
+ }
+
MachineOperand &getOperand() const {
assert(Op && "Cannot dereference end iterator!");
return *Op;
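
skipInstruction is meant for def/use-chain walks that want to visit each instruction once, even when several of its operands use the register. A sketch under those assumptions (the enclosing function is invented):

    #include "llvm/CodeGen/MachineRegisterInfo.h"

    // Visit every instruction that reads or writes Reg exactly once.
    static void visitRegInstrs(llvm::MachineRegisterInfo &MRI, unsigned Reg) {
      for (llvm::MachineRegisterInfo::reg_iterator RI = MRI.reg_begin(Reg);
           llvm::MachineInstr *MI = RI.skipInstruction();) {
        // ... inspect or rewrite MI here; RI already points past it ...
        (void)MI;
      }
    }
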
diff --git a/include/llvm/CodeGen/Passes.h b/include/llvm/CodeGen/Passes.h
index 2f5d57640cd6..7445ec7c92a2 100644
--- a/include/llvm/CodeGen/Passes.h
+++ b/include/llvm/CodeGen/Passes.h
@@ -85,15 +85,10 @@ namespace llvm {
///
FunctionPass *createDeadMachineInstructionElimPass();
- /// Creates a register allocator as the user specified on the command line.
+ /// Creates a register allocator as the user specified on the command line, or
+ /// picks one that matches OptLevel.
///
- FunctionPass *createRegisterAllocator();
-
- /// LocalRegisterAllocation Pass - This pass register allocates the input code
- /// a basic block at a time, yielding code better than the simple register
- /// allocator, but not as good as a global allocator.
- ///
- FunctionPass *createLocalRegisterAllocator();
+ FunctionPass *createRegisterAllocator(CodeGenOpt::Level OptLevel);
/// FastRegisterAllocation Pass - This pass register allocates as fast as
/// possible. It is best suited for debug code where live ranges are short.
@@ -147,10 +142,6 @@ namespace llvm {
/// headers to target specific alignment boundary.
FunctionPass *createCodePlacementOptPass();
- /// getRegisterAllocator - This creates an instance of the register allocator
- /// for the Sparc.
- FunctionPass *getRegisterAllocator(TargetMachine &T);
-
/// IntrinsicLowering Pass - Performs target-independent LLVM IR
/// transformations for highly portable strategies.
FunctionPass *createGCLoweringPass();
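
Callers of createRegisterAllocator now pass the optimization level so the factory can choose an allocator when none was named on the command line. A hedged sketch of pass setup; PM, the helper name, and the header choices are assumptions, not part of this patch:

    #include "llvm/CodeGen/Passes.h"
    #include "llvm/PassManager.h"            // PassManagerBase
    #include "llvm/Target/TargetMachine.h"   // CodeGenOpt::Level

    // Add the allocator named on the command line, or one matching OptLevel.
    static void addRegAlloc(llvm::PassManagerBase &PM,
                            llvm::CodeGenOpt::Level OptLevel) {
      PM.add(llvm::createRegisterAllocator(OptLevel));
    }
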
diff --git a/include/llvm/CodeGen/PostRAHazardRecognizer.h b/include/llvm/CodeGen/PostRAHazardRecognizer.h
new file mode 100644
index 000000000000..24d73cb7860d
--- /dev/null
+++ b/include/llvm/CodeGen/PostRAHazardRecognizer.h
@@ -0,0 +1,94 @@
+//=- llvm/CodeGen/PostRAHazardRecognizer.h - Scheduling Support -*- C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the PostRAHazardRecognizer class, which
+// implements hazard-avoidance heuristics for scheduling, based on the
+// scheduling itineraries specified for the target.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_POSTRAHAZARDRECOGNIZER_H
+#define LLVM_CODEGEN_POSTRAHAZARDRECOGNIZER_H
+
+#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
+#include "llvm/System/DataTypes.h"
+
+#include <cassert>
+#include <cstring>
+#include <string>
+
+namespace llvm {
+
+class InstrItineraryData;
+class SUnit;
+
+class PostRAHazardRecognizer : public ScheduleHazardRecognizer {
+ // ScoreBoard to track function unit usage. ScoreBoard[0] is a
+ // mask of the FUs in use in the cycle currently being
+  // scheduled. ScoreBoard[1] is a mask for the next cycle. The
+ // ScoreBoard is used as a circular buffer with the current cycle
+ // indicated by Head.
+ class ScoreBoard {
+ unsigned *Data;
+
+ // The maximum number of cycles monitored by the Scoreboard. This
+ // value is determined based on the target itineraries to ensure
+ // that all hazards can be tracked.
+ size_t Depth;
+    // Index into the Scoreboard that represents the current cycle.
+ size_t Head;
+ public:
+ ScoreBoard():Data(NULL), Depth(0), Head(0) { }
+ ~ScoreBoard() {
+ delete[] Data;
+ }
+
+ size_t getDepth() const { return Depth; }
+ unsigned& operator[](size_t idx) const {
+ assert(Depth && "ScoreBoard was not initialized properly!");
+
+ return Data[(Head + idx) % Depth];
+ }
+
+ void reset(size_t d = 1) {
+ if (Data == NULL) {
+ Depth = d;
+ Data = new unsigned[Depth];
+ }
+
+ memset(Data, 0, Depth * sizeof(Data[0]));
+ Head = 0;
+ }
+
+ void advance() {
+ Head = (Head + 1) % Depth;
+ }
+
+ // Print the scoreboard.
+ void dump() const;
+ };
+
+ // Itinerary data for the target.
+ const InstrItineraryData &ItinData;
+
+ ScoreBoard ReservedScoreboard;
+ ScoreBoard RequiredScoreboard;
+
+public:
+ PostRAHazardRecognizer(const InstrItineraryData &ItinData);
+
+ virtual HazardType getHazardType(SUnit *SU);
+ virtual void Reset();
+ virtual void EmitInstruction(SUnit *SU);
+ virtual void AdvanceCycle();
+};
+
+}
+
+#endif
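
The ScoreBoard above is a fixed-size circular buffer: slot 0 is always the current cycle and advance() rotates the window. A standalone illustration of the same indexing, simplified and not the class itself (the real recognizer clears slot 0 before advancing):

    #include <cassert>
    #include <cstring>

    struct MiniScoreboard {
      unsigned *Data;
      size_t Depth, Head;
      explicit MiniScoreboard(size_t D)
        : Data(new unsigned[D]), Depth(D), Head(0) {
        std::memset(Data, 0, Depth * sizeof(unsigned));
      }
      ~MiniScoreboard() { delete[] Data; }
      // Slot Idx is Idx cycles from now: [0] is the current cycle, [1] the next.
      unsigned &operator[](size_t Idx) {
        assert(Depth && "not initialized");
        return Data[(Head + Idx) % Depth];
      }
      // Rotate the window so the old [1] becomes the new [0].
      void advance() { Head = (Head + 1) % Depth; }
    };
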
diff --git a/include/llvm/CodeGen/RegisterCoalescer.h b/include/llvm/CodeGen/RegisterCoalescer.h
index 1490aa0172fb..7644433a33a1 100644
--- a/include/llvm/CodeGen/RegisterCoalescer.h
+++ b/include/llvm/CodeGen/RegisterCoalescer.h
@@ -25,6 +25,9 @@ namespace llvm {
class RegallocQuery;
class AnalysisUsage;
class MachineInstr;
+ class TargetRegisterInfo;
+ class TargetRegisterClass;
+ class TargetInstrInfo;
/// An abstract interface for register coalescers. Coalescers must
/// implement this interface to be part of the coalescer analysis
@@ -141,6 +144,93 @@ namespace llvm {
return true;
}
};
+
+
+ /// CoalescerPair - A helper class for register coalescers. When deciding if
+ /// two registers can be coalesced, CoalescerPair can determine if a copy
+ /// instruction would become an identity copy after coalescing.
+ class CoalescerPair {
+ const TargetInstrInfo &tii_;
+ const TargetRegisterInfo &tri_;
+
+ /// dstReg_ - The register that will be left after coalescing. It can be a
+ /// virtual or physical register.
+ unsigned dstReg_;
+
+    /// srcReg_ - The virtual register that will be coalesced into dstReg_.
+ unsigned srcReg_;
+
+    /// subIdx_ - The subregister index of srcReg_ in dstReg_. It is possible
+    /// to coalesce srcReg_ into a subreg of the larger dstReg_ when dstReg_
+    /// is a virtual register.
+ unsigned subIdx_;
+
+ /// partial_ - True when the original copy was a partial subregister copy.
+ bool partial_;
+
+ /// crossClass_ - True when both regs are virtual, and newRC is constrained.
+ bool crossClass_;
+
+    /// flipped_ - True when DstReg and SrcReg are reversed from the original copy
+ /// instruction.
+ bool flipped_;
+
+ /// newRC_ - The register class of the coalesced register, or NULL if dstReg_
+ /// is a physreg.
+ const TargetRegisterClass *newRC_;
+
+ /// compose - Compose subreg indices a and b, either may be 0.
+ unsigned compose(unsigned, unsigned) const;
+
+ /// isMoveInstr - Return true if MI is a move or subreg instruction.
+ bool isMoveInstr(const MachineInstr *MI, unsigned &Src, unsigned &Dst,
+ unsigned &SrcSub, unsigned &DstSub) const;
+
+ public:
+ CoalescerPair(const TargetInstrInfo &tii, const TargetRegisterInfo &tri)
+ : tii_(tii), tri_(tri), dstReg_(0), srcReg_(0), subIdx_(0),
+ partial_(false), crossClass_(false), flipped_(false), newRC_(0) {}
+
+ /// setRegisters - set registers to match the copy instruction MI. Return
+ /// false if MI is not a coalescable copy instruction.
+ bool setRegisters(const MachineInstr*);
+
+ /// flip - Swap srcReg_ and dstReg_. Return false if swapping is impossible
+ /// because dstReg_ is a physical register, or subIdx_ is set.
+ bool flip();
+
+ /// isCoalescable - Return true if MI is a copy instruction that will become
+ /// an identity copy after coalescing.
+ bool isCoalescable(const MachineInstr*) const;
+
+ /// isPhys - Return true if DstReg is a physical register.
+ bool isPhys() const { return !newRC_; }
+
+ /// isPartial - Return true if the original copy instruction did not copy the
+ /// full register, but was a subreg operation.
+ bool isPartial() const { return partial_; }
+
+    /// isCrossClass - Return true if DstReg is virtual and NewRC is a smaller
+    /// register class than DstReg's.
+ bool isCrossClass() const { return crossClass_; }
+
+ /// isFlipped - Return true when getSrcReg is the register being defined by
+ /// the original copy instruction.
+ bool isFlipped() const { return flipped_; }
+
+ /// getDstReg - Return the register (virtual or physical) that will remain
+ /// after coalescing.
+ unsigned getDstReg() const { return dstReg_; }
+
+ /// getSrcReg - Return the virtual register that will be coalesced away.
+ unsigned getSrcReg() const { return srcReg_; }
+
+ /// getSubIdx - Return the subregister index in DstReg that SrcReg will be
+ /// coalesced into, or 0.
+ unsigned getSubIdx() const { return subIdx_; }
+
+ /// getNewRC - Return the register class of the coalesced register.
+ const TargetRegisterClass *getNewRC() const { return newRC_; }
+ };
}
// Because of the way .a files work, we must force the SimpleRC
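
A sketch of how a coalescer might drive CoalescerPair; the policy checks are hypothetical, only the CoalescerPair calls come from the interface above:

    #include "llvm/CodeGen/MachineInstr.h"
    #include "llvm/CodeGen/RegisterCoalescer.h"

    // Classify a candidate copy before trying to join its registers.
    static bool shouldJoinCopy(const llvm::MachineInstr *Copy,
                               const llvm::TargetInstrInfo &TII,
                               const llvm::TargetRegisterInfo &TRI) {
      llvm::CoalescerPair CP(TII, TRI);
      if (!CP.setRegisters(Copy))
        return false;                      // not a coalescable copy
      if (CP.isPhys() && CP.isPartial())
        return false;                      // hypothetical policy choice
      // CP.getSrcReg() would be merged into CP.getDstReg(), possibly at
      // subregister index CP.getSubIdx(); CP.getNewRC() is the result's class.
      return true;
    }
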
diff --git a/include/llvm/CodeGen/RegisterScavenging.h b/include/llvm/CodeGen/RegisterScavenging.h
index 84b726d73fb3..246831c034d4 100644
--- a/include/llvm/CodeGen/RegisterScavenging.h
+++ b/include/llvm/CodeGen/RegisterScavenging.h
@@ -98,6 +98,10 @@ public:
/// getRegsUsed - return all registers currently in use in used.
void getRegsUsed(BitVector &used, bool includeReserved);
+  /// getRegsAvailable - Return, in Mask, all the registers in the register
+  /// class RC that are currently available.
+ void getRegsAvailable(const TargetRegisterClass *RC, BitVector &Mask);
+
  /// FindUnusedReg - Find an unused register of the specified register class.
/// Return 0 if none is found.
unsigned FindUnusedReg(const TargetRegisterClass *RegClass) const;
@@ -147,7 +151,12 @@ private:
/// Add Reg and its aliases to BV.
void addRegWithAliases(BitVector &BV, unsigned Reg);
- unsigned findSurvivorReg(MachineBasicBlock::iterator MI,
+ /// findSurvivorReg - Return the candidate register that is unused for the
+ /// longest after StartMI. UseMI is set to the instruction where the search
+ /// stopped.
+ ///
+ /// No more than InstrLimit instructions are inspected.
+ unsigned findSurvivorReg(MachineBasicBlock::iterator StartMI,
BitVector &Candidates,
unsigned InstrLimit,
MachineBasicBlock::iterator &UseMI);
diff --git a/include/llvm/CodeGen/RuntimeLibcalls.h b/include/llvm/CodeGen/RuntimeLibcalls.h
index 42ae5635f527..a51e82a6404a 100644
--- a/include/llvm/CodeGen/RuntimeLibcalls.h
+++ b/include/llvm/CodeGen/RuntimeLibcalls.h
@@ -247,6 +247,40 @@ namespace RTLIB {
// EXCEPTION HANDLING
UNWIND_RESUME,
+ // Family ATOMICs
+ SYNC_VAL_COMPARE_AND_SWAP_1,
+ SYNC_VAL_COMPARE_AND_SWAP_2,
+ SYNC_VAL_COMPARE_AND_SWAP_4,
+ SYNC_VAL_COMPARE_AND_SWAP_8,
+ SYNC_LOCK_TEST_AND_SET_1,
+ SYNC_LOCK_TEST_AND_SET_2,
+ SYNC_LOCK_TEST_AND_SET_4,
+ SYNC_LOCK_TEST_AND_SET_8,
+ SYNC_FETCH_AND_ADD_1,
+ SYNC_FETCH_AND_ADD_2,
+ SYNC_FETCH_AND_ADD_4,
+ SYNC_FETCH_AND_ADD_8,
+ SYNC_FETCH_AND_SUB_1,
+ SYNC_FETCH_AND_SUB_2,
+ SYNC_FETCH_AND_SUB_4,
+ SYNC_FETCH_AND_SUB_8,
+ SYNC_FETCH_AND_AND_1,
+ SYNC_FETCH_AND_AND_2,
+ SYNC_FETCH_AND_AND_4,
+ SYNC_FETCH_AND_AND_8,
+ SYNC_FETCH_AND_OR_1,
+ SYNC_FETCH_AND_OR_2,
+ SYNC_FETCH_AND_OR_4,
+ SYNC_FETCH_AND_OR_8,
+ SYNC_FETCH_AND_XOR_1,
+ SYNC_FETCH_AND_XOR_2,
+ SYNC_FETCH_AND_XOR_4,
+ SYNC_FETCH_AND_XOR_8,
+ SYNC_FETCH_AND_NAND_1,
+ SYNC_FETCH_AND_NAND_2,
+ SYNC_FETCH_AND_NAND_4,
+ SYNC_FETCH_AND_NAND_8,
+
UNKNOWN_LIBCALL
};
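
Targets usually bind these libcalls to runtime symbol names in their TargetLowering constructor. A one-line sketch, assuming it runs inside a TargetLowering subclass where setLibcallName is in scope, and that the target follows the usual __sync_* naming:

    // Map the 4-byte fetch-and-add libcall to its conventional runtime symbol.
    setLibcallName(RTLIB::SYNC_FETCH_AND_ADD_4, "__sync_fetch_and_add_4");
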
diff --git a/include/llvm/CodeGen/SelectionDAG.h b/include/llvm/CodeGen/SelectionDAG.h
index 97202bdce30f..de49d184131d 100644
--- a/include/llvm/CodeGen/SelectionDAG.h
+++ b/include/llvm/CodeGen/SelectionDAG.h
@@ -29,7 +29,6 @@
namespace llvm {
class AliasAnalysis;
-class FunctionLoweringInfo;
class MachineConstantPoolValue;
class MachineFunction;
class MDNode;
@@ -134,7 +133,6 @@ class SelectionDAG {
const TargetLowering &TLI;
const TargetSelectionDAGInfo &TSI;
MachineFunction *MF;
- FunctionLoweringInfo &FLI;
LLVMContext *Context;
/// EntryNode - The starting token.
@@ -187,7 +185,7 @@ class SelectionDAG {
SelectionDAG(const SelectionDAG&); // Do not implement.
public:
- SelectionDAG(const TargetMachine &TM, FunctionLoweringInfo &fli);
+ explicit SelectionDAG(const TargetMachine &TM);
~SelectionDAG();
/// init - Prepare this SelectionDAG to process code in the given
@@ -204,7 +202,6 @@ public:
const TargetMachine &getTarget() const { return TM; }
const TargetLowering &getTargetLoweringInfo() const { return TLI; }
const TargetSelectionDAGInfo &getSelectionDAGInfo() const { return TSI; }
- FunctionLoweringInfo &getFunctionLoweringInfo() const { return FLI; }
LLVMContext *getContext() const {return Context; }
/// viewGraph - Pop up a GraphViz/gv window with the DAG rendered using 'dot'.
@@ -351,13 +348,13 @@ public:
SDValue getTargetConstantFP(const ConstantFP &Val, EVT VT) {
return getConstantFP(Val, VT, true);
}
- SDValue getGlobalAddress(const GlobalValue *GV, EVT VT,
+ SDValue getGlobalAddress(const GlobalValue *GV, DebugLoc DL, EVT VT,
int64_t offset = 0, bool isTargetGA = false,
unsigned char TargetFlags = 0);
- SDValue getTargetGlobalAddress(const GlobalValue *GV, EVT VT,
+ SDValue getTargetGlobalAddress(const GlobalValue *GV, DebugLoc DL, EVT VT,
int64_t offset = 0,
unsigned char TargetFlags = 0) {
- return getGlobalAddress(GV, VT, offset, true, TargetFlags);
+ return getGlobalAddress(GV, DL, VT, offset, true, TargetFlags);
}
SDValue getFrameIndex(int FI, EVT VT, bool isTarget = false);
SDValue getTargetFrameIndex(int FI, EVT VT) {
@@ -585,7 +582,7 @@ public:
/// getVAArg - VAArg produces a result and token chain, and takes a pointer
/// and a source value as input.
SDValue getVAArg(EVT VT, DebugLoc dl, SDValue Chain, SDValue Ptr,
- SDValue SV);
+ SDValue SV, unsigned Align);
/// getAtomic - Gets a node for an atomic op, produces result and chain and
/// takes 3 operands
@@ -635,18 +632,20 @@ public:
SDValue getLoad(EVT VT, DebugLoc dl, SDValue Chain, SDValue Ptr,
const Value *SV, int SVOffset, bool isVolatile,
bool isNonTemporal, unsigned Alignment);
- SDValue getExtLoad(ISD::LoadExtType ExtType, DebugLoc dl, EVT VT,
+ SDValue getExtLoad(ISD::LoadExtType ExtType, EVT VT, DebugLoc dl,
SDValue Chain, SDValue Ptr, const Value *SV,
int SVOffset, EVT MemVT, bool isVolatile,
bool isNonTemporal, unsigned Alignment);
SDValue getIndexedLoad(SDValue OrigLoad, DebugLoc dl, SDValue Base,
- SDValue Offset, ISD::MemIndexedMode AM);
- SDValue getLoad(ISD::MemIndexedMode AM, DebugLoc dl, ISD::LoadExtType ExtType,
- EVT VT, SDValue Chain, SDValue Ptr, SDValue Offset,
+ SDValue Offset, ISD::MemIndexedMode AM);
+ SDValue getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
+ EVT VT, DebugLoc dl,
+ SDValue Chain, SDValue Ptr, SDValue Offset,
const Value *SV, int SVOffset, EVT MemVT,
bool isVolatile, bool isNonTemporal, unsigned Alignment);
- SDValue getLoad(ISD::MemIndexedMode AM, DebugLoc dl, ISD::LoadExtType ExtType,
- EVT VT, SDValue Chain, SDValue Ptr, SDValue Offset,
+ SDValue getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
+ EVT VT, DebugLoc dl,
+ SDValue Chain, SDValue Ptr, SDValue Offset,
EVT MemVT, MachineMemOperand *MMO);
/// getStore - Helper function to build ISD::STORE nodes.
@@ -681,15 +680,15 @@ public:
/// already exists. If the resultant node does not exist in the DAG, the
/// input node is returned. As a degenerate case, if you specify the same
/// input operands as the node already has, the input node is returned.
- SDValue UpdateNodeOperands(SDValue N, SDValue Op);
- SDValue UpdateNodeOperands(SDValue N, SDValue Op1, SDValue Op2);
- SDValue UpdateNodeOperands(SDValue N, SDValue Op1, SDValue Op2,
+ SDNode *UpdateNodeOperands(SDNode *N, SDValue Op);
+ SDNode *UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2);
+ SDNode *UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
SDValue Op3);
- SDValue UpdateNodeOperands(SDValue N, SDValue Op1, SDValue Op2,
+ SDNode *UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
SDValue Op3, SDValue Op4);
- SDValue UpdateNodeOperands(SDValue N, SDValue Op1, SDValue Op2,
+ SDNode *UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
SDValue Op3, SDValue Op4, SDValue Op5);
- SDValue UpdateNodeOperands(SDValue N,
+ SDNode *UpdateNodeOperands(SDNode *N,
const SDValue *Ops, unsigned NumOps);
/// SelectNodeTo - These are used for target selectors to *mutate* the
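
Two caller-visible changes in this file: global-address nodes now carry a DebugLoc, and UpdateNodeOperands traffics in SDNode* instead of SDValue. A sketch of adapted call sites; DAG, GV, DL, PtrVT, N, Op1 and Op2 are assumed to exist in the surrounding lowering code:

    // New signature: the DebugLoc comes right after the GlobalValue.
    llvm::SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, PtrVT);

    // UpdateNodeOperands now takes and returns an SDNode*.
    llvm::SDNode *Updated = DAG.UpdateNodeOperands(N, Op1, Op2);
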
diff --git a/include/llvm/CodeGen/SelectionDAGISel.h b/include/llvm/CodeGen/SelectionDAGISel.h
index 38175808ab41..01d05ddac11a 100644
--- a/include/llvm/CodeGen/SelectionDAGISel.h
+++ b/include/llvm/CodeGen/SelectionDAGISel.h
@@ -280,19 +280,16 @@ private:
SDNode *MorphNode(SDNode *Node, unsigned TargetOpc, SDVTList VTs,
const SDValue *Ops, unsigned NumOps, unsigned EmitNodeInfo);
- void PrepareEHLandingPad(MachineBasicBlock *BB);
+ void PrepareEHLandingPad();
void SelectAllBasicBlocks(const Function &Fn);
- void FinishBasicBlock(MachineBasicBlock *BB);
+ void FinishBasicBlock();
- MachineBasicBlock *SelectBasicBlock(MachineBasicBlock *BB,
- const BasicBlock *LLVMBB,
- BasicBlock::const_iterator Begin,
- BasicBlock::const_iterator End,
- bool &HadTailCall);
- MachineBasicBlock *CodeGenAndEmitDAG(MachineBasicBlock *BB);
+ void SelectBasicBlock(BasicBlock::const_iterator Begin,
+ BasicBlock::const_iterator End,
+ bool &HadTailCall);
+ void CodeGenAndEmitDAG();
void LowerArguments(const BasicBlock *BB);
- void ShrinkDemandedOps();
void ComputeLiveOutVRegInfo();
/// Create the scheduler. If a specific scheduler was specified
diff --git a/include/llvm/CodeGen/SelectionDAGNodes.h b/include/llvm/CodeGen/SelectionDAGNodes.h
index fd529b62ce63..4cf6f367edfb 100644
--- a/include/llvm/CodeGen/SelectionDAGNodes.h
+++ b/include/llvm/CodeGen/SelectionDAGNodes.h
@@ -549,6 +549,15 @@ public:
return FoundNode;
}
+ /// getFlaggedUser - If this node has a flag value with a user, return
+ /// the user (there is at most one). Otherwise return NULL.
+ SDNode *getFlaggedUser() const {
+ for (use_iterator UI = use_begin(), UE = use_end(); UI != UE; ++UI)
+ if (UI.getUse().get().getValueType() == MVT::Flag)
+ return *UI;
+ return 0;
+ }
+
/// getNumValues - Return the number of values defined/returned by this
/// operator.
///
@@ -1082,6 +1091,7 @@ public:
uint64_t getZExtValue() const { return Value->getZExtValue(); }
int64_t getSExtValue() const { return Value->getSExtValue(); }
+ bool isOne() const { return Value->isOne(); }
bool isNullValue() const { return Value->isNullValue(); }
bool isAllOnesValue() const { return Value->isAllOnesValue(); }
@@ -1130,7 +1140,7 @@ public:
}
bool isExactlyValue(const APFloat& V) const;
- bool isValueValidForType(EVT VT, const APFloat& Val);
+ static bool isValueValidForType(EVT VT, const APFloat& Val);
static bool classof(const ConstantFPSDNode *) { return true; }
static bool classof(const SDNode *N) {
@@ -1144,7 +1154,7 @@ class GlobalAddressSDNode : public SDNode {
int64_t Offset;
unsigned char TargetFlags;
friend class SelectionDAG;
- GlobalAddressSDNode(unsigned Opc, const GlobalValue *GA, EVT VT,
+ GlobalAddressSDNode(unsigned Opc, DebugLoc DL, const GlobalValue *GA, EVT VT,
int64_t o, unsigned char TargetFlags);
public:
@@ -1454,125 +1464,6 @@ public:
}
};
-namespace ISD {
- struct ArgFlagsTy {
- private:
- static const uint64_t NoFlagSet = 0ULL;
- static const uint64_t ZExt = 1ULL<<0; ///< Zero extended
- static const uint64_t ZExtOffs = 0;
- static const uint64_t SExt = 1ULL<<1; ///< Sign extended
- static const uint64_t SExtOffs = 1;
- static const uint64_t InReg = 1ULL<<2; ///< Passed in register
- static const uint64_t InRegOffs = 2;
- static const uint64_t SRet = 1ULL<<3; ///< Hidden struct-ret ptr
- static const uint64_t SRetOffs = 3;
- static const uint64_t ByVal = 1ULL<<4; ///< Struct passed by value
- static const uint64_t ByValOffs = 4;
- static const uint64_t Nest = 1ULL<<5; ///< Nested fn static chain
- static const uint64_t NestOffs = 5;
- static const uint64_t ByValAlign = 0xFULL << 6; //< Struct alignment
- static const uint64_t ByValAlignOffs = 6;
- static const uint64_t Split = 1ULL << 10;
- static const uint64_t SplitOffs = 10;
- static const uint64_t OrigAlign = 0x1FULL<<27;
- static const uint64_t OrigAlignOffs = 27;
- static const uint64_t ByValSize = 0xffffffffULL << 32; //< Struct size
- static const uint64_t ByValSizeOffs = 32;
-
- static const uint64_t One = 1ULL; //< 1 of this type, for shifts
-
- uint64_t Flags;
- public:
- ArgFlagsTy() : Flags(0) { }
-
- bool isZExt() const { return Flags & ZExt; }
- void setZExt() { Flags |= One << ZExtOffs; }
-
- bool isSExt() const { return Flags & SExt; }
- void setSExt() { Flags |= One << SExtOffs; }
-
- bool isInReg() const { return Flags & InReg; }
- void setInReg() { Flags |= One << InRegOffs; }
-
- bool isSRet() const { return Flags & SRet; }
- void setSRet() { Flags |= One << SRetOffs; }
-
- bool isByVal() const { return Flags & ByVal; }
- void setByVal() { Flags |= One << ByValOffs; }
-
- bool isNest() const { return Flags & Nest; }
- void setNest() { Flags |= One << NestOffs; }
-
- unsigned getByValAlign() const {
- return (unsigned)
- ((One << ((Flags & ByValAlign) >> ByValAlignOffs)) / 2);
- }
- void setByValAlign(unsigned A) {
- Flags = (Flags & ~ByValAlign) |
- (uint64_t(Log2_32(A) + 1) << ByValAlignOffs);
- }
-
- bool isSplit() const { return Flags & Split; }
- void setSplit() { Flags |= One << SplitOffs; }
-
- unsigned getOrigAlign() const {
- return (unsigned)
- ((One << ((Flags & OrigAlign) >> OrigAlignOffs)) / 2);
- }
- void setOrigAlign(unsigned A) {
- Flags = (Flags & ~OrigAlign) |
- (uint64_t(Log2_32(A) + 1) << OrigAlignOffs);
- }
-
- unsigned getByValSize() const {
- return (unsigned)((Flags & ByValSize) >> ByValSizeOffs);
- }
- void setByValSize(unsigned S) {
- Flags = (Flags & ~ByValSize) | (uint64_t(S) << ByValSizeOffs);
- }
-
- /// getArgFlagsString - Returns the flags as a string, eg: "zext align:4".
- std::string getArgFlagsString();
-
- /// getRawBits - Represent the flags as a bunch of bits.
- uint64_t getRawBits() const { return Flags; }
- };
-
- /// InputArg - This struct carries flags and type information about a
- /// single incoming (formal) argument or incoming (from the perspective
- /// of the caller) return value virtual register.
- ///
- struct InputArg {
- ArgFlagsTy Flags;
- EVT VT;
- bool Used;
-
- InputArg() : VT(MVT::Other), Used(false) {}
- InputArg(ISD::ArgFlagsTy flags, EVT vt, bool used)
- : Flags(flags), VT(vt), Used(used) {
- assert(VT.isSimple() &&
- "InputArg value type must be Simple!");
- }
- };
-
- /// OutputArg - This struct carries flags and a value for a
- /// single outgoing (actual) argument or outgoing (from the perspective
- /// of the caller) return value virtual register.
- ///
- struct OutputArg {
- ArgFlagsTy Flags;
- SDValue Val;
- bool IsFixed;
-
- OutputArg() : IsFixed(false) {}
- OutputArg(ISD::ArgFlagsTy flags, SDValue val, bool isfixed)
- : Flags(flags), Val(val), IsFixed(isfixed) {
- assert(Val.getValueType().isSimple() &&
- "OutputArg value type must be Simple!");
- }
- };
-}
-
/// VTSDNode - This class is used to represent EVT's, which are used
/// to parameterize some operations.
class VTSDNode : public SDNode {
diff --git a/include/llvm/CodeGen/SlotIndexes.h b/include/llvm/CodeGen/SlotIndexes.h
index 3c56d0d67dd9..f1f047b44ed2 100644
--- a/include/llvm/CodeGen/SlotIndexes.h
+++ b/include/llvm/CodeGen/SlotIndexes.h
@@ -23,6 +23,7 @@
#define LLVM_CODEGEN_SLOTINDEXES_H
#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/SmallVector.h"
@@ -663,15 +664,20 @@ namespace llvm {
MachineBasicBlock::iterator miItr(mi);
bool needRenumber = false;
IndexListEntry *newEntry;
-
+ // Get previous index, considering that not all instructions are indexed.
IndexListEntry *prevEntry;
- if (miItr == mbb->begin()) {
+ for (;;) {
// If mi is at the mbb beginning, get the prev index from the mbb.
- prevEntry = &mbbRangeItr->second.first.entry();
- } else {
- // Otherwise get it from the previous instr.
- MachineBasicBlock::iterator pItr(prior(miItr));
- prevEntry = &getInstructionIndex(pItr).entry();
+ if (miItr == mbb->begin()) {
+ prevEntry = &mbbRangeItr->second.first.entry();
+ break;
+ }
+ // Otherwise rewind until we find a mapped instruction.
+ Mi2IndexMap::const_iterator itr = mi2iMap.find(--miItr);
+ if (itr != mi2iMap.end()) {
+ prevEntry = &itr->second.entry();
+ break;
+ }
}
// Get next entry from previous entry.
@@ -757,6 +763,47 @@ namespace llvm {
mi2iMap.insert(std::make_pair(newMI, replaceBaseIndex));
}
+ /// Add the given MachineBasicBlock into the maps.
+ void insertMBBInMaps(MachineBasicBlock *mbb) {
+ MachineFunction::iterator nextMBB =
+ llvm::next(MachineFunction::iterator(mbb));
+ IndexListEntry *startEntry = createEntry(0, 0);
+ IndexListEntry *terminatorEntry = createEntry(0, 0);
+ IndexListEntry *nextEntry = 0;
+
+ if (nextMBB == mbb->getParent()->end()) {
+ nextEntry = getTail();
+ } else {
+ nextEntry = &getMBBStartIdx(nextMBB).entry();
+ }
+
+ insert(nextEntry, startEntry);
+ insert(nextEntry, terminatorEntry);
+
+ SlotIndex startIdx(startEntry, SlotIndex::LOAD);
+ SlotIndex terminatorIdx(terminatorEntry, SlotIndex::PHI_BIT);
+ SlotIndex endIdx(nextEntry, SlotIndex::LOAD);
+
+ terminatorGaps.insert(
+ std::make_pair(mbb, terminatorIdx));
+
+ mbb2IdxMap.insert(
+ std::make_pair(mbb, std::make_pair(startIdx, endIdx)));
+
+ idx2MBBMap.push_back(IdxMBBPair(startIdx, mbb));
+
+ if (MachineFunction::iterator(mbb) != mbb->getParent()->begin()) {
+ // Have to update the end index of the previous block.
+ MachineBasicBlock *priorMBB =
+ llvm::prior(MachineFunction::iterator(mbb));
+ mbb2IdxMap[priorMBB].second = startIdx;
+ }
+
+ renumberIndexes();
+ std::sort(idx2MBBMap.begin(), idx2MBBMap.end(), Idx2MBBCompare());
+
+ }
+
};
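
insertMBBInMaps is for passes that create new blocks while SlotIndexes is live. A hedged fragment; MF, OrigMBB and Indexes are assumed, and the actual splitting logic is elided:

    // After materializing a new block right after OrigMBB, register it with
    // SlotIndexes so index queries on the new block stay consistent.
    llvm::MachineBasicBlock *NewMBB =
        MF.CreateMachineBasicBlock(OrigMBB->getBasicBlock());
    MF.insert(llvm::next(llvm::MachineFunction::iterator(OrigMBB)), NewMBB);
    Indexes->insertMBBInMaps(NewMBB);
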
diff --git a/include/llvm/Config/config.h.in b/include/llvm/Config/config.h.in
index 99d2ab5c15b3..d12f82a9012c 100644
--- a/include/llvm/Config/config.h.in
+++ b/include/llvm/Config/config.h.in
@@ -63,6 +63,9 @@
/* Define to 1 if you have the `closedir' function. */
#undef HAVE_CLOSEDIR
+/* Define to 1 if you have the <CrashReporterClient.h> header file. */
+#undef HAVE_CRASHREPORTERCLIENT_H
+
/* Define to 1 if you have the <ctype.h> header file. */
#undef HAVE_CTYPE_H
diff --git a/include/llvm/ExecutionEngine/ExecutionEngine.h b/include/llvm/ExecutionEngine/ExecutionEngine.h
index c3f190233e22..3287b39a3c95 100644
--- a/include/llvm/ExecutionEngine/ExecutionEngine.h
+++ b/include/llvm/ExecutionEngine/ExecutionEngine.h
@@ -174,8 +174,8 @@ public:
CodeGenOpt::Level OptLevel =
CodeGenOpt::Default,
bool GVsWithCode = true,
- CodeModel::Model CMM =
- CodeModel::Default);
+ CodeModel::Model CMM =
+ CodeModel::Default);
/// addModule - Add a Module to the list of modules that we can JIT from.
/// Note that this takes ownership of the Module: when the ExecutionEngine is
diff --git a/include/llvm/GlobalValue.h b/include/llvm/GlobalValue.h
index 658967d81a34..d175080a6674 100644
--- a/include/llvm/GlobalValue.h
+++ b/include/llvm/GlobalValue.h
@@ -40,6 +40,7 @@ public:
InternalLinkage, ///< Rename collisions when linking (static functions).
PrivateLinkage, ///< Like Internal, but omit from symbol table.
LinkerPrivateLinkage, ///< Like Private, but linker removes.
+ LinkerPrivateWeakLinkage, ///< Like LinkerPrivate, but weak.
DLLImportLinkage, ///< Function to be imported from DLL
DLLExportLinkage, ///< Function to be accessible from DLL.
ExternalWeakLinkage,///< ExternalWeak linkage description.
@@ -132,11 +133,14 @@ public:
return Linkage == PrivateLinkage;
}
static bool isLinkerPrivateLinkage(LinkageTypes Linkage) {
- return Linkage==LinkerPrivateLinkage;
+ return Linkage == LinkerPrivateLinkage;
+ }
+ static bool isLinkerPrivateWeakLinkage(LinkageTypes Linkage) {
+ return Linkage == LinkerPrivateWeakLinkage;
}
static bool isLocalLinkage(LinkageTypes Linkage) {
return isInternalLinkage(Linkage) || isPrivateLinkage(Linkage) ||
- isLinkerPrivateLinkage(Linkage);
+ isLinkerPrivateLinkage(Linkage) || isLinkerPrivateWeakLinkage(Linkage);
}
static bool isDLLImportLinkage(LinkageTypes Linkage) {
return Linkage == DLLImportLinkage;
@@ -158,7 +162,8 @@ public:
return (Linkage == WeakAnyLinkage ||
Linkage == LinkOnceAnyLinkage ||
Linkage == CommonLinkage ||
- Linkage == ExternalWeakLinkage);
+ Linkage == ExternalWeakLinkage ||
+ Linkage == LinkerPrivateWeakLinkage);
}
/// isWeakForLinker - Whether the definition of this global may be replaced at
@@ -170,7 +175,8 @@ public:
Linkage == LinkOnceAnyLinkage ||
Linkage == LinkOnceODRLinkage ||
Linkage == CommonLinkage ||
- Linkage == ExternalWeakLinkage);
+ Linkage == ExternalWeakLinkage ||
+ Linkage == LinkerPrivateWeakLinkage);
}
bool hasExternalLinkage() const { return isExternalLinkage(Linkage); }
@@ -187,6 +193,9 @@ public:
bool hasInternalLinkage() const { return isInternalLinkage(Linkage); }
bool hasPrivateLinkage() const { return isPrivateLinkage(Linkage); }
bool hasLinkerPrivateLinkage() const { return isLinkerPrivateLinkage(Linkage); }
+ bool hasLinkerPrivateWeakLinkage() const {
+ return isLinkerPrivateWeakLinkage(Linkage);
+ }
bool hasLocalLinkage() const { return isLocalLinkage(Linkage); }
bool hasDLLImportLinkage() const { return isDLLImportLinkage(Linkage); }
bool hasDLLExportLinkage() const { return isDLLExportLinkage(Linkage); }
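
The new linkage kind participates in both the local-linkage and weak-for-linker predicates, as the hunks above show. A tiny sketch of what that means for clients; GV is an assumed const llvm::GlobalValue*, with <cassert> available:

    if (GV->hasLinkerPrivateWeakLinkage()) {
      // The new kind is both local, like linker_private, and weak for the
      // linker, so its definition may still be dropped or replaced.
      assert(GV->hasLocalLinkage() && GV->isWeakForLinker());
    }
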
diff --git a/include/llvm/InlineAsm.h b/include/llvm/InlineAsm.h
index f4d125b4dc39..105b1bcd94c5 100644
--- a/include/llvm/InlineAsm.h
+++ b/include/llvm/InlineAsm.h
@@ -154,7 +154,8 @@ public:
Op_InputChain = 0,
Op_AsmString = 1,
Op_MDNode = 2,
- Op_FirstOperand = 3,
+ Op_IsAlignStack = 3,
+ Op_FirstOperand = 4,
Kind_RegUse = 1,
Kind_RegDef = 2,
diff --git a/include/llvm/InstrTypes.h b/include/llvm/InstrTypes.h
index 49cdd6a04e39..6715416afa1c 100644
--- a/include/llvm/InstrTypes.h
+++ b/include/llvm/InstrTypes.h
@@ -612,7 +612,7 @@ public:
/// A lossless cast is one that does not alter the basic value. It implies
/// a no-op cast but is more stringent, preventing things like int->float,
- /// long->double, int->ptr, or vector->anything.
+ /// long->double, or int->ptr.
/// @returns true iff the cast is lossless.
/// @brief Determine if this is a lossless cast.
bool isLosslessCast() const;
@@ -625,6 +625,14 @@ public:
/// platform. Generally, the result of TargetData::getIntPtrType() should be
/// passed in. If that's not available, use Type::Int64Ty, which will make
/// the isNoopCast call conservative.
+ /// @brief Determine if the described cast is a no-op cast.
+ static bool isNoopCast(
+ Instruction::CastOps Opcode, ///< Opcode of cast
+ const Type *SrcTy, ///< SrcTy of cast
+ const Type *DstTy, ///< DstTy of cast
+ const Type *IntPtrTy ///< Integer type corresponding to Ptr types, or null
+ );
+
/// @brief Determine if this cast is a no-op cast.
bool isNoopCast(
const Type *IntPtrTy ///< Integer type corresponding to pointer
diff --git a/include/llvm/Instructions.h b/include/llvm/Instructions.h
index 413a595ab38a..af93a294cc88 100644
--- a/include/llvm/Instructions.h
+++ b/include/llvm/Instructions.h
@@ -235,6 +235,9 @@ public:
void setAlignment(unsigned Align);
+ Value *getValueOperand() { return getOperand(0); }
+ const Value *getValueOperand() const { return getOperand(0); }
+
Value *getPointerOperand() { return getOperand(1); }
const Value *getPointerOperand() const { return getOperand(1); }
static unsigned getPointerOperandIndex() { return 1U; }
@@ -883,14 +886,14 @@ public:
InputIterator ArgBegin, InputIterator ArgEnd,
const Twine &NameStr = "",
Instruction *InsertBefore = 0) {
- return new((unsigned)(ArgEnd - ArgBegin + 1))
+ return new(unsigned(ArgEnd - ArgBegin + 1))
CallInst(Func, ArgBegin, ArgEnd, NameStr, InsertBefore);
}
template<typename InputIterator>
static CallInst *Create(Value *Func,
InputIterator ArgBegin, InputIterator ArgEnd,
const Twine &NameStr, BasicBlock *InsertAtEnd) {
- return new((unsigned)(ArgEnd - ArgBegin + 1))
+ return new(unsigned(ArgEnd - ArgBegin + 1))
CallInst(Func, ArgBegin, ArgEnd, NameStr, InsertAtEnd);
}
static CallInst *Create(Value *F, Value *Actual,
@@ -919,6 +922,7 @@ public:
static Instruction *CreateMalloc(Instruction *InsertBefore,
const Type *IntPtrTy, const Type *AllocTy,
Value *AllocSize, Value *ArraySize = 0,
+ Function* MallocF = 0,
const Twine &Name = "");
static Instruction *CreateMalloc(BasicBlock *InsertAtEnd,
const Type *IntPtrTy, const Type *AllocTy,
@@ -926,7 +930,7 @@ public:
Function* MallocF = 0,
const Twine &Name = "");
/// CreateFree - Generate the IR for a call to the builtin free function.
- static void CreateFree(Value* Source, Instruction *InsertBefore);
+ static Instruction* CreateFree(Value* Source, Instruction *InsertBefore);
static Instruction* CreateFree(Value* Source, BasicBlock *InsertAtEnd);
~CallInst();
@@ -937,8 +941,33 @@ public:
unsigned(isTC));
}
+ /// @deprecated these "define hacks" will go away soon
+ /// @brief coerce out-of-tree code to abandon the low-level interfaces
+  /// @details see the comments below and switch to the high-level interfaces
+ /// - getOperand(0) ---> getCalledValue(), or possibly getCalledFunction
+ /// - setOperand(0, V) ---> setCalledFunction(V)
+ ///
+ /// in LLVM v2.8-only code
+ /// - getOperand(N+1) ---> getArgOperand(N)
+ /// - setOperand(N+1, V) ---> setArgOperand(N, V)
+ /// - getNumOperands() ---> getNumArgOperands()+1 // note the "+1"!
+ ///
+ /// in backward compatible code please consult llvm/Support/CallSite.h,
+ /// you should create a callsite using the CallInst pointer and call its
+ /// methods
+ ///
+# define public private
+# define protected private
/// Provide fast operand accessors
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
+# undef public
+# undef protected
+public:
+
+ enum { ArgOffset = 0 }; ///< temporary, do not use for new code!
+ unsigned getNumArgOperands() const { return getNumOperands() - 1; }
+ Value *getArgOperand(unsigned i) const { return getOperand(i + ArgOffset); }
+ void setArgOperand(unsigned i, Value *v) { setOperand(i + ArgOffset, v); }
/// getCallingConv/setCallingConv - Get or set the calling convention of this
/// function call.
@@ -974,7 +1003,7 @@ public:
/// @brief Return true if the call should not be inlined.
bool isNoInline() const { return paramHasAttr(~0, Attribute::NoInline); }
- void setIsNoInline(bool Value) {
+ void setIsNoInline(bool Value = true) {
if (Value) addAttribute(~0, Attribute::NoInline);
else removeAttribute(~0, Attribute::NoInline);
}
@@ -998,18 +1027,14 @@ public:
}
/// @brief Determine if the call cannot return.
- bool doesNotReturn() const {
- return paramHasAttr(~0, Attribute::NoReturn);
- }
+ bool doesNotReturn() const { return paramHasAttr(~0, Attribute::NoReturn); }
void setDoesNotReturn(bool DoesNotReturn = true) {
if (DoesNotReturn) addAttribute(~0, Attribute::NoReturn);
else removeAttribute(~0, Attribute::NoReturn);
}
/// @brief Determine if the call cannot unwind.
- bool doesNotThrow() const {
- return paramHasAttr(~0, Attribute::NoUnwind);
- }
+ bool doesNotThrow() const { return paramHasAttr(~0, Attribute::NoUnwind); }
void setDoesNotThrow(bool DoesNotThrow = true) {
if (DoesNotThrow) addAttribute(~0, Attribute::NoUnwind);
else removeAttribute(~0, Attribute::NoUnwind);
@@ -1031,17 +1056,17 @@ public:
/// indirect function invocation.
///
Function *getCalledFunction() const {
- return dyn_cast<Function>(Op<0>());
+ return dyn_cast<Function>(Op<ArgOffset -1>());
}
/// getCalledValue - Get a pointer to the function that is invoked by this
/// instruction.
- const Value *getCalledValue() const { return Op<0>(); }
- Value *getCalledValue() { return Op<0>(); }
+ const Value *getCalledValue() const { return Op<ArgOffset -1>(); }
+ Value *getCalledValue() { return Op<ArgOffset -1>(); }
/// setCalledFunction - Set the function called.
void setCalledFunction(Value* Fn) {
- Op<0>() = Fn;
+ Op<ArgOffset -1>() = Fn;
}
// Methods for support type inquiry through isa, cast, and dyn_cast:
@@ -1071,7 +1096,7 @@ CallInst::CallInst(Value *Func, InputIterator ArgBegin, InputIterator ArgEnd,
->getElementType())->getReturnType(),
Instruction::Call,
OperandTraits<CallInst>::op_end(this) - (ArgEnd - ArgBegin + 1),
- (unsigned)(ArgEnd - ArgBegin + 1), InsertAtEnd) {
+ unsigned(ArgEnd - ArgBegin + 1), InsertAtEnd) {
init(Func, ArgBegin, ArgEnd, NameStr,
typename std::iterator_traits<InputIterator>::iterator_category());
}
@@ -1083,11 +1108,15 @@ CallInst::CallInst(Value *Func, InputIterator ArgBegin, InputIterator ArgEnd,
->getElementType())->getReturnType(),
Instruction::Call,
OperandTraits<CallInst>::op_end(this) - (ArgEnd - ArgBegin + 1),
- (unsigned)(ArgEnd - ArgBegin + 1), InsertBefore) {
+ unsigned(ArgEnd - ArgBegin + 1), InsertBefore) {
init(Func, ArgBegin, ArgEnd, NameStr,
typename std::iterator_traits<InputIterator>::iterator_category());
}
+
+// Note: if you get compile errors about private methods then
+// please update your code to use the high-level operand
+// interfaces. See line 943 above.
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CallInst, Value)
//===----------------------------------------------------------------------===//
@@ -2432,6 +2461,10 @@ public:
/// Provide fast operand accessors
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
+ unsigned getNumArgOperands() const { return getNumOperands() - 3; }
+ Value *getArgOperand(unsigned i) const { return getOperand(i); }
+ void setArgOperand(unsigned i, Value *v) { setOperand(i, v); }
+
/// getCallingConv/setCallingConv - Get or set the calling convention of this
/// function call.
CallingConv::ID getCallingConv() const {
@@ -2465,11 +2498,11 @@ public:
/// @brief Return true if the call should not be inlined.
bool isNoInline() const { return paramHasAttr(~0, Attribute::NoInline); }
- void setIsNoInline(bool Value) {
+ void setIsNoInline(bool Value = true) {
if (Value) addAttribute(~0, Attribute::NoInline);
else removeAttribute(~0, Attribute::NoInline);
}
-
+
/// @brief Determine if the call does not access memory.
bool doesNotAccessMemory() const {
return paramHasAttr(~0, Attribute::ReadNone);
@@ -2489,18 +2522,14 @@ public:
}
/// @brief Determine if the call cannot return.
- bool doesNotReturn() const {
- return paramHasAttr(~0, Attribute::NoReturn);
- }
+ bool doesNotReturn() const { return paramHasAttr(~0, Attribute::NoReturn); }
void setDoesNotReturn(bool DoesNotReturn = true) {
if (DoesNotReturn) addAttribute(~0, Attribute::NoReturn);
else removeAttribute(~0, Attribute::NoReturn);
}
/// @brief Determine if the call cannot unwind.
- bool doesNotThrow() const {
- return paramHasAttr(~0, Attribute::NoUnwind);
- }
+ bool doesNotThrow() const { return paramHasAttr(~0, Attribute::NoUnwind); }
void setDoesNotThrow(bool DoesNotThrow = true) {
if (DoesNotThrow) addAttribute(~0, Attribute::NoUnwind);
else removeAttribute(~0, Attribute::NoUnwind);
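
The deprecation block in the CallInst hunk spells out the migration; in concrete terms, with CI an assumed CallInst* and N an argument index:

    // Old, operand-numbered style:        New, argument-numbered style:
    //   CI->getOperand(0)                   CI->getCalledValue()
    //   CI->getOperand(N + 1)               CI->getArgOperand(N)
    //   CI->getNumOperands() - 1            CI->getNumArgOperands()
    llvm::Value *Callee = CI->getCalledValue();
    llvm::Value *FirstArg =
        CI->getNumArgOperands() ? CI->getArgOperand(0) : 0;
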
diff --git a/include/llvm/IntrinsicInst.h b/include/llvm/IntrinsicInst.h
index 5b0e90f5cc07..48f2da9b76c6 100644
--- a/include/llvm/IntrinsicInst.h
+++ b/include/llvm/IntrinsicInst.h
@@ -43,7 +43,7 @@ namespace llvm {
Intrinsic::ID getIntrinsicID() const {
return (Intrinsic::ID)getCalledFunction()->getIntrinsicID();
}
-
+
// Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const IntrinsicInst *) { return true; }
static inline bool classof(const CallInst *I) {
@@ -74,7 +74,7 @@ namespace llvm {
static inline bool classof(const Value *V) {
return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
}
-
+
static Value *StripCast(Value *C);
};
@@ -83,7 +83,7 @@ namespace llvm {
class DbgDeclareInst : public DbgInfoIntrinsic {
public:
Value *getAddress() const;
- MDNode *getVariable() const { return cast<MDNode>(getOperand(2)); }
+ MDNode *getVariable() const { return cast<MDNode>(getArgOperand(1)); }
// Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const DbgDeclareInst *) { return true; }
@@ -103,9 +103,9 @@ namespace llvm {
Value *getValue();
uint64_t getOffset() const {
return cast<ConstantInt>(
- const_cast<Value*>(getOperand(2)))->getZExtValue();
+ const_cast<Value*>(getArgOperand(1)))->getZExtValue();
}
- MDNode *getVariable() const { return cast<MDNode>(getOperand(3)); }
+ MDNode *getVariable() const { return cast<MDNode>(getArgOperand(2)); }
// Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const DbgValueInst *) { return true; }
@@ -121,22 +121,22 @@ namespace llvm {
///
class MemIntrinsic : public IntrinsicInst {
public:
- Value *getRawDest() const { return const_cast<Value*>(getOperand(1)); }
+ Value *getRawDest() const { return const_cast<Value*>(getArgOperand(0)); }
- Value *getLength() const { return const_cast<Value*>(getOperand(3)); }
+ Value *getLength() const { return const_cast<Value*>(getArgOperand(2)); }
ConstantInt *getAlignmentCst() const {
- return cast<ConstantInt>(const_cast<Value*>(getOperand(4)));
+ return cast<ConstantInt>(const_cast<Value*>(getArgOperand(3)));
}
-
+
unsigned getAlignment() const {
return getAlignmentCst()->getZExtValue();
}
ConstantInt *getVolatileCst() const {
- return cast<ConstantInt>(const_cast<Value*>(getOperand(5)));
+ return cast<ConstantInt>(const_cast<Value*>(getArgOperand(4)));
}
bool isVolatile() const {
- return getVolatileCst()->getZExtValue() != 0;
+ return !getVolatileCst()->isZero();
}
/// getDest - This is just like getRawDest, but it strips off any cast
@@ -149,27 +149,27 @@ namespace llvm {
void setDest(Value *Ptr) {
assert(getRawDest()->getType() == Ptr->getType() &&
"setDest called with pointer of wrong type!");
- setOperand(1, Ptr);
+ setArgOperand(0, Ptr);
}
void setLength(Value *L) {
assert(getLength()->getType() == L->getType() &&
"setLength called with value of wrong type!");
- setOperand(3, L);
+ setArgOperand(2, L);
}
-
+
void setAlignment(Constant* A) {
- setOperand(4, A);
+ setArgOperand(3, A);
}
void setVolatile(Constant* V) {
- setOperand(5, V);
+ setArgOperand(4, V);
}
const Type *getAlignmentType() const {
- return getOperand(4)->getType();
+ return getArgOperand(3)->getType();
}
-
+
// Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const MemIntrinsic *) { return true; }
static inline bool classof(const IntrinsicInst *I) {
@@ -192,14 +192,14 @@ namespace llvm {
public:
/// get* - Return the arguments to the instruction.
///
- Value *getValue() const { return const_cast<Value*>(getOperand(2)); }
-
+ Value *getValue() const { return const_cast<Value*>(getArgOperand(1)); }
+
void setValue(Value *Val) {
assert(getValue()->getType() == Val->getType() &&
- "setSource called with pointer of wrong type!");
- setOperand(2, Val);
+ "setValue called with value of wrong type!");
+ setArgOperand(1, Val);
}
-
+
// Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const MemSetInst *) { return true; }
static inline bool classof(const IntrinsicInst *I) {
@@ -209,26 +209,26 @@ namespace llvm {
return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
}
};
-
+
/// MemTransferInst - This class wraps the llvm.memcpy/memmove intrinsics.
///
class MemTransferInst : public MemIntrinsic {
public:
/// get* - Return the arguments to the instruction.
///
- Value *getRawSource() const { return const_cast<Value*>(getOperand(2)); }
-
+ Value *getRawSource() const { return const_cast<Value*>(getArgOperand(1)); }
+
/// getSource - This is just like getRawSource, but it strips off any cast
/// instructions that feed it, giving the original input. The returned
/// value is guaranteed to be a pointer.
Value *getSource() const { return getRawSource()->stripPointerCasts(); }
-
+
void setSource(Value *Ptr) {
assert(getRawSource()->getType() == Ptr->getType() &&
"setSource called with pointer of wrong type!");
- setOperand(2, Ptr);
+ setArgOperand(1, Ptr);
}
-
+
// Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const MemTransferInst *) { return true; }
static inline bool classof(const IntrinsicInst *I) {
@@ -239,8 +239,8 @@ namespace llvm {
return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
}
};
-
-
+
+
/// MemCpyInst - This class wraps the llvm.memcpy intrinsic.
///
class MemCpyInst : public MemTransferInst {
@@ -282,7 +282,7 @@ namespace llvm {
return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
}
};
-
+
/// MemoryUseIntrinsic - This is the common base class for the memory use
/// marker intrinsics.
///
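
The intrinsic wrappers now go through getArgOperand rather than absolute operand numbers, which keeps them correct across the CallInst operand changes; client code keeps using the same accessors. For example, with MCI an assumed MemCpyInst*:

    llvm::Value *Dst = MCI->getDest();     // destination, casts stripped
    llvm::Value *Src = MCI->getSource();   // source, casts stripped
    llvm::Value *Len = MCI->getLength();   // number of bytes to copy
    bool IsVolatile  = MCI->isVolatile();
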
diff --git a/include/llvm/Intrinsics.td b/include/llvm/Intrinsics.td
index 2b4df54c6142..444f514d59be 100644
--- a/include/llvm/Intrinsics.td
+++ b/include/llvm/Intrinsics.td
@@ -309,10 +309,8 @@ let Properties = [IntrNoMem] in {
def int_eh_sjlj_lsda : Intrinsic<[llvm_ptr_ty]>;
def int_eh_sjlj_callsite: Intrinsic<[], [llvm_i32_ty]>;
}
-def int_eh_sjlj_setjmp : Intrinsic<[llvm_i32_ty], [llvm_ptr_ty]>,
- GCCBuiltin<"__builtin_setjmp">;
-def int_eh_sjlj_longjmp : Intrinsic<[], [llvm_ptr_ty]>,
- GCCBuiltin<"__builtin_longjmp">;
+def int_eh_sjlj_setjmp : Intrinsic<[llvm_i32_ty], [llvm_ptr_ty]>;
+def int_eh_sjlj_longjmp : Intrinsic<[], [llvm_ptr_ty]>;
//===---------------- Generic Variable Attribute Intrinsics----------------===//
//
diff --git a/include/llvm/LinkAllPasses.h b/include/llvm/LinkAllPasses.h
index c2375eaab11a..876703b90320 100644
--- a/include/llvm/LinkAllPasses.h
+++ b/include/llvm/LinkAllPasses.h
@@ -113,6 +113,7 @@ namespace {
(void) llvm::createSingleLoopExtractorPass();
(void) llvm::createStripSymbolsPass();
(void) llvm::createStripNonDebugSymbolsPass();
+ (void) llvm::createStripDeadDebugInfoPass();
(void) llvm::createStripDeadPrototypesPass();
(void) llvm::createTailCallEliminationPass();
(void) llvm::createTailDuplicationPass();
diff --git a/include/llvm/MC/MCAssembler.h b/include/llvm/MC/MCAssembler.h
index d9963ec4821a..07ca070ab288 100644
--- a/include/llvm/MC/MCAssembler.h
+++ b/include/llvm/MC/MCAssembler.h
@@ -354,7 +354,7 @@ public:
typedef FragmentListType::reverse_iterator reverse_iterator;
private:
- iplist<MCFragment> Fragments;
+ FragmentListType Fragments;
const MCSection *Section;
/// Ordinal - The section index in the assemblers section list.
@@ -641,7 +641,7 @@ public:
/// in the symbol table, or whether it can be discarded by the assembler. This
 /// also affects whether the assembler treats the label as potentially
/// defining a separate atom.
- bool isSymbolLinkerVisible(const MCSymbolData *SD) const;
+ bool isSymbolLinkerVisible(const MCSymbol &SD) const;
/// Emit the section contents using the given object writer.
//
diff --git a/include/llvm/MC/MCContext.h b/include/llvm/MC/MCContext.h
index 03b5fb0c1fe4..a57b5bf745d3 100644
--- a/include/llvm/MC/MCContext.h
+++ b/include/llvm/MC/MCContext.h
@@ -14,6 +14,7 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/Support/Allocator.h"
+#include "llvm/Support/raw_ostream.h"
namespace llvm {
class MCAsmInfo;
@@ -54,6 +55,17 @@ namespace llvm {
/// for the LocalLabelVal and adds it to the map if needed.
unsigned GetInstance(int64_t LocalLabelVal);
+    /// The file name of the log file from the environment variable
+    /// AS_SECURE_LOG_FILE, which must be set before the .secure_log_unique
+    /// directive is used or it is an error.
+ char *SecureLogFile;
+ /// The stream that gets written to for the .secure_log_unique directive.
+ raw_ostream *SecureLog;
+ /// Boolean toggled when .secure_log_unique / .secure_log_reset is seen to
+ /// catch errors if .secure_log_unique appears twice without
+ /// .secure_log_reset appearing between them.
+ bool SecureLogUsed;
+
/// Allocator - Allocator object used for creating machine code objects.
///
/// We use a bump pointer allocator to avoid the need to track all allocated
@@ -127,6 +139,16 @@ namespace llvm {
/// @}
+ char *getSecureLogFile() { return SecureLogFile; }
+ raw_ostream *getSecureLog() { return SecureLog; }
+ bool getSecureLogUsed() { return SecureLogUsed; }
+ void setSecureLog(raw_ostream *Value) {
+ SecureLog = Value;
+ }
+ void setSecureLogUsed(bool Value) {
+ SecureLogUsed = Value;
+ }
+
void *Allocate(unsigned Size, unsigned Align = 8) {
return Allocator.Allocate(Size, Align);
}
diff --git a/include/llvm/MC/MCDirectives.h b/include/llvm/MC/MCDirectives.h
index 1f7364d8124e..223b09ef7aee 100644
--- a/include/llvm/MC/MCDirectives.h
+++ b/include/llvm/MC/MCDirectives.h
@@ -38,7 +38,8 @@ enum MCSymbolAttr {
MCSA_Reference, ///< .reference (MachO)
MCSA_Weak, ///< .weak
MCSA_WeakDefinition, ///< .weak_definition (MachO)
- MCSA_WeakReference ///< .weak_reference (MachO)
+ MCSA_WeakReference, ///< .weak_reference (MachO)
+ MCSA_WeakDefAutoPrivate ///< .weak_def_can_be_hidden (MachO)
};
enum MCAssemblerFlag {
diff --git a/include/llvm/MC/MCObjectStreamer.h b/include/llvm/MC/MCObjectStreamer.h
new file mode 100644
index 000000000000..7b9ff00fc078
--- /dev/null
+++ b/include/llvm/MC/MCObjectStreamer.h
@@ -0,0 +1,56 @@
+//===- MCObjectStreamer.h - MCStreamer Object File Interface ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCOBJECTSTREAMER_H
+#define LLVM_MC_MCOBJECTSTREAMER_H
+
+#include "llvm/MC/MCStreamer.h"
+
+namespace llvm {
+class MCAssembler;
+class MCCodeEmitter;
+class MCSectionData;
+class TargetAsmBackend;
+class raw_ostream;
+
+/// \brief Streaming object file generation interface.
+///
+/// This class provides an implementation of the MCStreamer interface which is
+/// suitable for use with the assembler backend. Specific object file formats
+/// are expected to subclass this interface to implement directives specific
+/// to that file format or custom semantics expected by the object writer
+/// implementation.
+class MCObjectStreamer : public MCStreamer {
+ MCAssembler *Assembler;
+ MCSectionData *CurSectionData;
+
+protected:
+ MCObjectStreamer(MCContext &Context, TargetAsmBackend &TAB,
+ raw_ostream &_OS, MCCodeEmitter *_Emitter);
+ ~MCObjectStreamer();
+
+ MCSectionData *getCurrentSectionData() const {
+ return CurSectionData;
+ }
+
+public:
+ MCAssembler &getAssembler() { return *Assembler; }
+
+ /// @name MCStreamer Interface
+ /// @{
+
+ virtual void SwitchSection(const MCSection *Section);
+ virtual void Finish();
+
+ /// @}
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/include/llvm/MC/MCObjectWriter.h b/include/llvm/MC/MCObjectWriter.h
index e900584bbf7a..22eea7e022e3 100644
--- a/include/llvm/MC/MCObjectWriter.h
+++ b/include/llvm/MC/MCObjectWriter.h
@@ -162,6 +162,8 @@ public:
/// @}
};
+MCObjectWriter *createWinCOFFObjectWriter(raw_ostream &OS);
+
} // End llvm namespace
#endif
diff --git a/include/llvm/MC/MCParser/AsmLexer.h b/include/llvm/MC/MCParser/AsmLexer.h
index cf6eefb40831..21878899cac1 100644
--- a/include/llvm/MC/MCParser/AsmLexer.h
+++ b/include/llvm/MC/MCParser/AsmLexer.h
@@ -33,8 +33,6 @@ class AsmLexer : public MCAsmLexer {
const char *CurPtr;
const MemoryBuffer *CurBuf;
- const char *TokStart;
-
void operator=(const AsmLexer&); // DO NOT IMPLEMENT
AsmLexer(const AsmLexer&); // DO NOT IMPLEMENT
@@ -48,9 +46,7 @@ public:
void setBuffer(const MemoryBuffer *buf, const char *ptr = NULL);
- SMLoc getLoc() const;
-
- StringRef LexUntilEndOfStatement();
+ virtual StringRef LexUntilEndOfStatement();
bool isAtStartOfComment(char Char);
diff --git a/include/llvm/MC/MCParser/AsmParser.h b/include/llvm/MC/MCParser/AsmParser.h
index e929fd101df0..82b120bad421 100644
--- a/include/llvm/MC/MCParser/AsmParser.h
+++ b/include/llvm/MC/MCParser/AsmParser.h
@@ -26,6 +26,7 @@
namespace llvm {
class AsmCond;
class AsmToken;
+class MCAsmParserExtension;
class MCContext;
class MCExpr;
class MCInst;
@@ -36,11 +37,15 @@ class TargetAsmParser;
class Twine;
class AsmParser : public MCAsmParser {
+ AsmParser(const AsmParser &); // DO NOT IMPLEMENT
+ void operator=(const AsmParser &); // DO NOT IMPLEMENT
private:
AsmLexer Lexer;
MCContext &Ctx;
MCStreamer &Out;
SourceMgr &SrcMgr;
+ MCAsmParserExtension *GenericParser;
+ MCAsmParserExtension *PlatformParser;
TargetAsmParser *TargetParser;
/// This is the current buffer index we're lexing from as managed by the
@@ -54,26 +59,28 @@ private:
/// invoked after the directive identifier is read and is responsible for
/// parsing and validating the rest of the directive. The handler is passed
/// in the directive name and the location of the directive keyword.
- StringMap<bool(AsmParser::*)(StringRef, SMLoc)> DirectiveMap;
+ StringMap<std::pair<MCAsmParserExtension*, DirectiveHandler> > DirectiveMap;
public:
- AsmParser(SourceMgr &SM, MCContext &Ctx, MCStreamer &Out,
+ AsmParser(const Target &T, SourceMgr &SM, MCContext &Ctx, MCStreamer &Out,
const MCAsmInfo &MAI);
~AsmParser();
bool Run(bool NoInitialTextSection, bool NoFinalize = false);
-
- void AddDirectiveHandler(StringRef Directive,
- bool (AsmParser::*Handler)(StringRef, SMLoc)) {
- DirectiveMap[Directive] = Handler;
+ void AddDirectiveHandler(MCAsmParserExtension *Object,
+ StringRef Directive,
+ DirectiveHandler Handler) {
+ DirectiveMap[Directive] = std::make_pair(Object, Handler);
}
+
public:
TargetAsmParser &getTargetParser() const { return *TargetParser; }
- void setTargetParser(TargetAsmParser &P) { TargetParser = &P; }
+ void setTargetParser(TargetAsmParser &P);
/// @name MCAsmParser Interface
/// {
+ virtual SourceMgr &getSourceManager() { return SrcMgr; }
virtual MCAsmLexer &getLexer() { return Lexer; }
virtual MCContext &getContext() { return Ctx; }
virtual MCStreamer &getStreamer() { return Out; }
@@ -91,12 +98,8 @@ public:
/// }
private:
- MCSymbol *CreateSymbol(StringRef Name);
-
bool ParseStatement();
- bool TokError(const char *Msg);
-
void PrintMessage(SMLoc Loc, const std::string &Msg, const char *Type) const;
/// EnterIncludeFile - Enter the specified file. This returns true on failure.
@@ -115,10 +118,6 @@ private:
bool ParseIdentifier(StringRef &Res);
// Directive Parsing.
- bool ParseDirectiveDarwinSection(); // Darwin specific ".section".
- bool ParseDirectiveSectionSwitch(const char *Segment, const char *Section,
- unsigned TAA = 0, unsigned ImplicitAlign = 0,
- unsigned StubSize = 0);
bool ParseDirectiveAscii(bool ZeroTerminated); // ".ascii", ".asciiz"
bool ParseDirectiveValue(unsigned Size); // ".byte", ".long", ...
bool ParseDirectiveFill(); // ".fill"
@@ -132,17 +131,8 @@ private:
/// accepts a single symbol (which should be a label or an external).
bool ParseDirectiveSymbolAttribute(MCSymbolAttr Attr);
bool ParseDirectiveELFType(); // ELF specific ".type"
- bool ParseDirectiveDarwinSymbolDesc(); // Darwin specific ".desc"
- bool ParseDirectiveDarwinLsym(); // Darwin specific ".lsym"
bool ParseDirectiveComm(bool IsLocal); // ".comm" and ".lcomm"
- bool ParseDirectiveDarwinZerofill(); // Darwin specific ".zerofill"
- bool ParseDirectiveDarwinTBSS(); // Darwin specific ".tbss"
-
- // Darwin specific ".subsections_via_symbols"
- bool ParseDirectiveDarwinSubsectionsViaSymbols();
- // Darwin specific .dump and .load
- bool ParseDirectiveDarwinDumpOrLoad(SMLoc IDLoc, bool IsDump);
bool ParseDirectiveAbort(); // ".abort"
bool ParseDirectiveInclude(); // ".include"
@@ -152,10 +142,6 @@ private:
bool ParseDirectiveElse(SMLoc DirectiveLoc); // ".else"
bool ParseDirectiveEndIf(SMLoc DirectiveLoc); // .endif
- bool ParseDirectiveFile(StringRef, SMLoc DirectiveLoc); // ".file"
- bool ParseDirectiveLine(StringRef, SMLoc DirectiveLoc); // ".line"
- bool ParseDirectiveLoc(StringRef, SMLoc DirectiveLoc); // ".loc"
-
/// ParseEscapedString - Parse the current token as a string which may include
/// escaped characters and return the string contents.
bool ParseEscapedString(std::string &Data);
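
The directive table above now stores an (extension object, member-function pointer) pair per directive instead of a bare AsmParser method. A minimal sketch of how such an entry could be dispatched; the DispatchDirective helper is hypothetical, only the DirectiveHandler typedef and the pair layout come from the headers in this patch.

#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
#include "llvm/MC/MCParser/MCAsmParserExtension.h"
#include <utility>

using namespace llvm;

typedef std::pair<MCAsmParserExtension*, MCAsmParser::DirectiveHandler> HandlerEntry;

// Look up the directive and invoke the handler on the extension that
// registered it.  Returns true (error) for unknown directives, matching the
// usual AsmParser convention.
static bool DispatchDirective(StringMap<HandlerEntry> &DirectiveMap,
                              StringRef IDVal, SMLoc IDLoc) {
  StringMap<HandlerEntry>::iterator It = DirectiveMap.find(IDVal);
  if (It == DirectiveMap.end())
    return true;
  const HandlerEntry &Entry = It->getValue();
  return (Entry.first->*Entry.second)(IDVal, IDLoc);
}
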
diff --git a/include/llvm/MC/MCParser/MCAsmLexer.h b/include/llvm/MC/MCParser/MCAsmLexer.h
index bd1496f35a25..d690e810bd39 100644
--- a/include/llvm/MC/MCParser/MCAsmLexer.h
+++ b/include/llvm/MC/MCParser/MCAsmLexer.h
@@ -121,6 +121,8 @@ class MCAsmLexer {
MCAsmLexer(const MCAsmLexer &); // DO NOT IMPLEMENT
void operator=(const MCAsmLexer &); // DO NOT IMPLEMENT
protected: // Can only create subclasses.
+ const char *TokStart;
+
MCAsmLexer();
virtual AsmToken LexToken() = 0;
@@ -141,6 +143,11 @@ public:
return CurTok = LexToken();
}
+ virtual StringRef LexUntilEndOfStatement() = 0;
+
+ /// getLoc - Get the current source location.
+ SMLoc getLoc() const;
+
/// getTok - Get the current (last) lexed token.
const AsmToken &getTok() {
return CurTok;
diff --git a/include/llvm/MC/MCParser/MCAsmParser.h b/include/llvm/MC/MCParser/MCAsmParser.h
index 7f7f1b60f67d..d0ccd0f61748 100644
--- a/include/llvm/MC/MCParser/MCAsmParser.h
+++ b/include/llvm/MC/MCParser/MCAsmParser.h
@@ -15,35 +15,48 @@
namespace llvm {
class AsmToken;
class MCAsmLexer;
+class MCAsmParserExtension;
class MCContext;
class MCExpr;
class MCStreamer;
class SMLoc;
+class SourceMgr;
+class StringRef;
class Twine;
/// MCAsmParser - Generic assembler parser interface, for use by target specific
/// assembly parsers.
class MCAsmParser {
+public:
+ typedef bool (MCAsmParserExtension::*DirectiveHandler)(StringRef, SMLoc);
+
+private:
MCAsmParser(const MCAsmParser &); // DO NOT IMPLEMENT
void operator=(const MCAsmParser &); // DO NOT IMPLEMENT
protected: // Can only create subclasses.
MCAsmParser();
-
+
public:
virtual ~MCAsmParser();
+ virtual void AddDirectiveHandler(MCAsmParserExtension *Object,
+ StringRef Directive,
+ DirectiveHandler Handler) = 0;
+
+ virtual SourceMgr &getSourceManager() = 0;
+
virtual MCAsmLexer &getLexer() = 0;
virtual MCContext &getContext() = 0;
- /// getSteamer - Return the output streamer for the assembler.
+ /// getStreamer - Return the output streamer for the assembler.
virtual MCStreamer &getStreamer() = 0;
/// Warning - Emit a warning at the location \arg L, with the message \arg
/// Msg.
virtual void Warning(SMLoc L, const Twine &Msg) = 0;
- /// Warning - Emit an error at the location \arg L, with the message \arg
+ /// Error - Emit an error at the location \arg L, with the message \arg
/// Msg.
///
/// \return The return value is always true, as an idiomatic convenience to
@@ -53,10 +66,17 @@ public:
/// Lex - Get the next AsmToken in the stream, possibly handling file
/// inclusion first.
virtual const AsmToken &Lex() = 0;
-
+
/// getTok - Get the current AsmToken from the stream.
const AsmToken &getTok();
-
+
+ /// \brief Report an error at the current lexer location.
+ bool TokError(const char *Msg);
+
+ /// ParseIdentifier - Parse an identifier or string (as a quoted identifier)
+ /// and set \arg Res to the identifier contents.
+ virtual bool ParseIdentifier(StringRef &Res) = 0;
+
/// ParseExpression - Parse an arbitrary expression.
///
/// @param Res - The value of the expression. The result is undefined
@@ -64,7 +84,7 @@ public:
/// @result - False on success.
virtual bool ParseExpression(const MCExpr *&Res, SMLoc &EndLoc) = 0;
bool ParseExpression(const MCExpr *&Res);
-
+
/// ParseParenExpression - Parse an arbitrary expression, assuming that an
/// initial '(' has already been consumed.
///
diff --git a/include/llvm/MC/MCParser/MCAsmParserExtension.h b/include/llvm/MC/MCParser/MCAsmParserExtension.h
new file mode 100644
index 000000000000..ad9ccf79d12a
--- /dev/null
+++ b/include/llvm/MC/MCParser/MCAsmParserExtension.h
@@ -0,0 +1,66 @@
+//===-- llvm/MC/MCAsmParserExtension.h - Asm Parser Hooks -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCASMPARSEREXTENSION_H
+#define LLVM_MC_MCASMPARSEREXTENSION_H
+
+#include "llvm/MC/MCParser/MCAsmParser.h"
+#include "llvm/Support/SMLoc.h"
+
+namespace llvm {
+
+/// \brief Generic interface for extending the MCAsmParser,
+/// which is implemented by target and object file assembly parser
+/// implementations.
+class MCAsmParserExtension {
+ MCAsmParserExtension(const MCAsmParserExtension &); // DO NOT IMPLEMENT
+ void operator=(const MCAsmParserExtension &); // DO NOT IMPLEMENT
+
+ MCAsmParser *Parser;
+
+protected:
+ MCAsmParserExtension();
+
+public:
+ virtual ~MCAsmParserExtension();
+
+ /// \brief Initialize the extension for parsing using the given \arg
+ /// Parser. The extension should use the AsmParser interfaces to register its
+ /// parsing routines.
+ virtual void Initialize(MCAsmParser &Parser);
+
+ /// @name MCAsmParser Proxy Interfaces
+ /// @{
+
+ MCContext &getContext() { return getParser().getContext(); }
+ MCAsmLexer &getLexer() { return getParser().getLexer(); }
+ MCAsmParser &getParser() { return *Parser; }
+ SourceMgr &getSourceManager() { return getParser().getSourceManager(); }
+ MCStreamer &getStreamer() { return getParser().getStreamer(); }
+ void Warning(SMLoc L, const Twine &Msg) {
+ return getParser().Warning(L, Msg);
+ }
+ bool Error(SMLoc L, const Twine &Msg) {
+ return getParser().Error(L, Msg);
+ }
+
+ const AsmToken &Lex() { return getParser().Lex(); }
+
+ const AsmToken &getTok() { return getParser().getTok(); }
+
+ bool TokError(const char *Msg) {
+ return getParser().TokError(Msg);
+ }
+
+ /// @}
+};
+
+} // End llvm namespace
+
+#endif
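
A sketch of how a client of this new hook might look; ExampleAsmParser and its ".lsym" handler are illustrative, but the registration pattern follows the interfaces declared above.

#include "llvm/ADT/StringRef.h"
#include "llvm/MC/MCParser/MCAsmParserExtension.h"

namespace {
class ExampleAsmParser : public llvm::MCAsmParserExtension {
public:
  virtual void Initialize(llvm::MCAsmParser &Parser) {
    // Let the base class record the parser so the proxy accessors work.
    llvm::MCAsmParserExtension::Initialize(Parser);
    // Route ".lsym" back into this object.  The derived-to-base
    // member-pointer conversion needs an explicit cast.
    Parser.AddDirectiveHandler(this, ".lsym",
        static_cast<llvm::MCAsmParser::DirectiveHandler>(
            &ExampleAsmParser::ParseDirectiveLsym));
  }

  bool ParseDirectiveLsym(llvm::StringRef Directive, llvm::SMLoc Loc) {
    // A real handler would parse its operands here via getLexer()/Lex().
    return TokError("directive not supported in this sketch");
  }
};
} // end anonymous namespace
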
diff --git a/include/llvm/MC/MCSection.h b/include/llvm/MC/MCSection.h
index 808767c8da62..5c997357c9d3 100644
--- a/include/llvm/MC/MCSection.h
+++ b/include/llvm/MC/MCSection.h
@@ -23,7 +23,7 @@ namespace llvm {
class MCContext;
class MCAsmInfo;
class raw_ostream;
-
+
/// MCSection - Instances of this class represent a uniqued identifier for a
/// section in the current translation unit. The MCContext class uniques and
/// creates these.
@@ -49,7 +49,7 @@ namespace llvm {
SectionKind getKind() const { return Kind; }
SectionVariant getVariant() const { return Variant; }
-
+
virtual void PrintSwitchToSection(const MCAsmInfo &MAI,
raw_ostream &OS) const = 0;
@@ -63,7 +63,7 @@ namespace llvm {
static bool classof(const MCSection *) { return true; }
};
-
+
} // end namespace llvm
#endif
diff --git a/include/llvm/MC/MCSectionCOFF.h b/include/llvm/MC/MCSectionCOFF.h
index 938a38896fc2..f828e1060fe6 100644
--- a/include/llvm/MC/MCSectionCOFF.h
+++ b/include/llvm/MC/MCSectionCOFF.h
@@ -16,6 +16,8 @@
#include "llvm/MC/MCSection.h"
+#include "llvm/Support/COFF.h"
+
namespace llvm {
/// MCSectionCOFF - This represents a section on Windows
@@ -47,56 +49,6 @@ namespace llvm {
/// should be printed before the section name
bool ShouldOmitSectionDirective(StringRef Name, const MCAsmInfo &MAI) const;
- //FIXME: all COFF enumerations/flags should be standardized into one place...
- // Target/X86COFF.h doesn't seem right as COFF can be used for other targets,
- // MC/WinCOFF.h maybe right as it isn't target or entity specific, and it is
- // pretty low on the dependancy graph (is there any need to support non
- // windows COFF?)
- // here is good for section stuff, but others should go elsewhere
-
- /// Valid section flags.
- enum {
- IMAGE_SCN_TYPE_NO_PAD = 0x00000008,
- IMAGE_SCN_CNT_CODE = 0x00000020,
- IMAGE_SCN_CNT_INITIALIZED_DATA = 0x00000040,
- IMAGE_SCN_CNT_UNINITIALIZED_DATA = 0x00000080,
- IMAGE_SCN_LNK_OTHER = 0x00000100,
- IMAGE_SCN_LNK_INFO = 0x00000200,
- IMAGE_SCN_LNK_REMOVE = 0x00000800,
- IMAGE_SCN_LNK_COMDAT = 0x00001000,
- IMAGE_SCN_MEM_FARDATA = 0x00008000,
- IMAGE_SCN_MEM_PURGEABLE = 0x00020000,
- IMAGE_SCN_MEM_16BIT = 0x00020000,
- IMAGE_SCN_MEM_LOCKED = 0x00040000,
- IMAGE_SCN_MEM_PRELOAD = 0x00080000,
- /* these are handled elsewhere
- IMAGE_SCN_ALIGN_1BYTES = 0x00100000,
- IMAGE_SCN_ALIGN_2BYTES = 0x00200000,
- IMAGE_SCN_ALIGN_4BYTES = 0x00300000,
- IMAGE_SCN_ALIGN_8BYTES = 0x00400000,
- IMAGE_SCN_ALIGN_16BYTES = 0x00500000,
- IMAGE_SCN_ALIGN_32BYTES = 0x00600000,
- IMAGE_SCN_ALIGN_64BYTES = 0x00700000,
- */
- IMAGE_SCN_LNK_NRELOC_OVFL = 0x01000000,
- IMAGE_SCN_MEM_DISCARDABLE = 0x02000000,
- IMAGE_SCN_MEM_NOT_CACHED = 0x04000000,
- IMAGE_SCN_MEM_NOT_PAGED = 0x08000000,
- IMAGE_SCN_MEM_SHARED = 0x10000000,
- IMAGE_SCN_MEM_EXECUTE = 0x20000000,
- IMAGE_SCN_MEM_READ = 0x40000000,
- IMAGE_SCN_MEM_WRITE = 0x80000000
- };
-
- enum {
- IMAGE_COMDAT_SELECT_NODUPLICATES = 1,
- IMAGE_COMDAT_SELECT_ANY,
- IMAGE_COMDAT_SELECT_SAME_SIZE,
- IMAGE_COMDAT_SELECT_EXACT_MATCH,
- IMAGE_COMDAT_SELECT_ASSOCIATIVE,
- IMAGE_COMDAT_SELECT_LARGEST
- };
-
StringRef getSectionName() const { return SectionName; }
unsigned getCharacteristics() const { return Characteristics; }
int getSelection () const { return Selection; }
diff --git a/include/llvm/MC/MCStreamer.h b/include/llvm/MC/MCStreamer.h
index 0783159cbb8a..aca7dd3a3957 100644
--- a/include/llvm/MC/MCStreamer.h
+++ b/include/llvm/MC/MCStreamer.h
@@ -314,7 +314,7 @@ namespace llvm {
virtual void EmitRawText(StringRef String);
void EmitRawText(const Twine &String);
- /// Finish - Finish emission of machine code and flush any output.
+ /// Finish - Finish emission of machine code.
virtual void Finish() = 0;
};
@@ -341,12 +341,18 @@ namespace llvm {
MCCodeEmitter *CE = 0,
bool ShowInst = false);
- /// createMachOStreamer - Create a machine code streamer which will generative
+ /// createMachOStreamer - Create a machine code streamer which will generate
/// Mach-O format object files.
MCStreamer *createMachOStreamer(MCContext &Ctx, TargetAsmBackend &TAB,
raw_ostream &OS, MCCodeEmitter *CE,
bool RelaxAll = false);
+ /// createWinCOFFStreamer - Create a machine code streamer which will
+ /// generate Microsoft COFF format object files.
+ MCStreamer *createWinCOFFStreamer(MCContext &Ctx,
+ TargetAsmBackend &TAB,
+ MCCodeEmitter &CE, raw_ostream &OS);
+
/// createLoggingStreamer - Create a machine code streamer which just logs the
/// API calls and then dispatches to another streamer.
///
diff --git a/include/llvm/MC/SectionKind.h b/include/llvm/MC/SectionKind.h
index c9557f29c9a5..85a91c6b1698 100644
--- a/include/llvm/MC/SectionKind.h
+++ b/include/llvm/MC/SectionKind.h
@@ -29,10 +29,10 @@ class SectionKind {
enum Kind {
/// Metadata - Debug info sections or other metadata.
Metadata,
-
+
/// Text - Text section, used for functions and other executable code.
Text,
-
+
/// ReadOnly - Data that is never written to at program runtime by the
/// program or the dynamic linker. Things in the top-level readonly
/// SectionKind are not mergeable.
@@ -45,7 +45,7 @@ class SectionKind {
/// Mergeable1ByteCString - 1 byte mergable, null terminated, string.
Mergeable1ByteCString,
-
+
/// Mergeable2ByteCString - 2 byte mergable, null terminated, string.
Mergeable2ByteCString,
@@ -56,11 +56,11 @@ class SectionKind {
/// constants together. For example, this can be used to unique
/// constant pool entries etc.
MergeableConst,
-
+
/// MergeableConst4 - This is a section used by 4-byte constants,
/// for example, floats.
MergeableConst4,
-
+
/// MergeableConst8 - This is a section used by 8-byte constants,
/// for example, doubles.
MergeableConst8,
@@ -68,33 +68,33 @@ class SectionKind {
/// MergeableConst16 - This is a section used by 16-byte constants,
/// for example, vectors.
MergeableConst16,
-
+
/// Writeable - This is the base of all segments that need to be written
/// to during program runtime.
-
+
/// ThreadLocal - This is the base of all TLS segments. All TLS
/// objects must be writeable, otherwise there is no reason for them to
/// be thread local!
-
+
/// ThreadBSS - Zero-initialized TLS data objects.
ThreadBSS,
-
+
/// ThreadData - Initialized TLS data objects.
ThreadData,
-
+
/// GlobalWriteableData - Writeable data that is global (not thread
/// local).
-
+
/// BSS - Zero initialized writeable data.
BSS,
-
+
/// BSSLocal - This is BSS (zero initialized and writable) data
/// which has local linkage.
BSSLocal,
-
+
/// BSSExtern - This is BSS data with normal external linkage.
BSSExtern,
-
+
/// Common - Data with common linkage. These represent tentative
/// definitions, which always have a zero initializer and are never
/// marked 'constant'.
@@ -123,20 +123,20 @@ class SectionKind {
/// mark the pages these globals end up on as read-only after it is
/// done with its relocation phase.
ReadOnlyWithRel,
-
+
/// ReadOnlyWithRelLocal - This is data that is readonly by the
/// program, but must be writeable so that the dynamic linker
/// can perform relocations in it. This is used when we know
/// that all the relocations are to globals in this final
/// linked image.
ReadOnlyWithRelLocal
-
+
} K : 8;
public:
-
+
bool isMetadata() const { return K == Metadata; }
bool isText() const { return K == Text; }
-
+
bool isReadOnly() const {
return K == ReadOnly || isMergeableCString() ||
isMergeableConst();
@@ -149,7 +149,7 @@ public:
bool isMergeable1ByteCString() const { return K == Mergeable1ByteCString; }
bool isMergeable2ByteCString() const { return K == Mergeable2ByteCString; }
bool isMergeable4ByteCString() const { return K == Mergeable4ByteCString; }
-
+
bool isMergeableConst() const {
return K == MergeableConst || K == MergeableConst4 ||
K == MergeableConst8 || K == MergeableConst16;
@@ -157,38 +157,38 @@ public:
bool isMergeableConst4() const { return K == MergeableConst4; }
bool isMergeableConst8() const { return K == MergeableConst8; }
bool isMergeableConst16() const { return K == MergeableConst16; }
-
+
bool isWriteable() const {
return isThreadLocal() || isGlobalWriteableData();
}
-
+
bool isThreadLocal() const {
return K == ThreadData || K == ThreadBSS;
}
-
- bool isThreadBSS() const { return K == ThreadBSS; }
- bool isThreadData() const { return K == ThreadData; }
+
+ bool isThreadBSS() const { return K == ThreadBSS; }
+ bool isThreadData() const { return K == ThreadData; }
bool isGlobalWriteableData() const {
return isBSS() || isCommon() || isDataRel() || isReadOnlyWithRel();
}
-
+
bool isBSS() const { return K == BSS || K == BSSLocal || K == BSSExtern; }
bool isBSSLocal() const { return K == BSSLocal; }
bool isBSSExtern() const { return K == BSSExtern; }
-
+
bool isCommon() const { return K == Common; }
-
+
bool isDataRel() const {
return K == DataRel || K == DataRelLocal || K == DataNoRel;
}
-
+
bool isDataRelLocal() const {
return K == DataRelLocal || K == DataNoRel;
}
bool isDataNoRel() const { return K == DataNoRel; }
-
+
bool isReadOnlyWithRel() const {
return K == ReadOnlyWithRel || K == ReadOnlyWithRelLocal;
}
@@ -196,14 +196,14 @@ public:
bool isReadOnlyWithRelLocal() const {
return K == ReadOnlyWithRelLocal;
}
-private:
+private:
static SectionKind get(Kind K) {
SectionKind Res;
Res.K = K;
return Res;
}
public:
-
+
static SectionKind getMetadata() { return get(Metadata); }
static SectionKind getText() { return get(Text); }
static SectionKind getReadOnly() { return get(ReadOnly); }
@@ -234,7 +234,7 @@ public:
return get(ReadOnlyWithRelLocal);
}
};
-
+
} // end namespace llvm
#endif
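
SectionKind itself is unchanged here apart from whitespace, but since the interface is in view: kinds come from the static factories and are only inspected through the predicates. A tiny example restricted to entry points shown above:

#include "llvm/MC/SectionKind.h"
#include <cassert>

void classifyExample() {
  llvm::SectionKind K = llvm::SectionKind::getReadOnly();
  assert(K.isReadOnly());      // top-level read-only data
  assert(!K.isWriteable());    // neither thread-local nor global writeable data
  assert(!K.isText());         // and not executable code
}
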
diff --git a/include/llvm/Module.h b/include/llvm/Module.h
index 901fada3ebfc..5fc0418759af 100644
--- a/include/llvm/Module.h
+++ b/include/llvm/Module.h
@@ -197,11 +197,11 @@ public:
/// Get any module-scope inline assembly blocks.
/// @returns a string containing the module-scope inline assembly blocks.
const std::string &getModuleInlineAsm() const { return GlobalScopeAsm; }
-
+
/// @}
/// @name Module Level Mutators
/// @{
-
+
/// Set the module identifier.
void setModuleIdentifier(StringRef ID) { ModuleID = ID; }
@@ -235,12 +235,12 @@ public:
/// getMDKindID - Return a unique non-zero ID for the specified metadata kind.
/// This ID is uniqued across modules in the current LLVMContext.
unsigned getMDKindID(StringRef Name) const;
-
+
/// getMDKindNames - Populate client supplied SmallVector with the name for
/// custom metadata IDs registered in this LLVMContext. ID #0 is not used,
/// so it is filled in as an empty string.
void getMDKindNames(SmallVectorImpl<StringRef> &Result) const;
-
+
/// @}
/// @name Function Accessors
/// @{
@@ -277,7 +277,7 @@ public:
Constant *getOrInsertTargetIntrinsic(StringRef Name,
const FunctionType *Ty,
AttrListPtr AttributeList);
-
+
/// getFunction - Look up the specified function in the module symbol table.
/// If it does not exist, return null.
Function *getFunction(StringRef Name) const;
@@ -321,14 +321,14 @@ public:
/// @}
/// @name Named Metadata Accessors
/// @{
-
+
/// getNamedMetadata - Return the first NamedMDNode in the module with the
- /// specified name. This method returns null if a NamedMDNode with the
+ /// specified name. This method returns null if a NamedMDNode with the
/// specified name is not found.
- NamedMDNode *getNamedMetadata(StringRef Name) const;
+ NamedMDNode *getNamedMetadata(const Twine &Name) const;
- /// getOrInsertNamedMetadata - Return the first named MDNode in the module
- /// with the specified name. This method returns a new NamedMDNode if a
+ /// getOrInsertNamedMetadata - Return the first named MDNode in the module
+ /// with the specified name. This method returns a new NamedMDNode if a
/// NamedMDNode with the specified name is not found.
NamedMDNode *getOrInsertNamedMetadata(StringRef Name);
@@ -515,15 +515,16 @@ public:
const_named_metadata_iterator named_metadata_begin() const {
return NamedMDList.begin();
}
-
+
/// Get an iterator to the last named metadata.
named_metadata_iterator named_metadata_end() { return NamedMDList.end(); }
/// Get a constant iterator to the last named metadata.
const_named_metadata_iterator named_metadata_end() const {
return NamedMDList.end();
}
-
- /// Determine how many NamedMDNodes are in the Module's list of named metadata.
+
+ /// Determine how many NamedMDNodes are in the Module's list of named
+ /// metadata.
size_t named_metadata_size() const { return NamedMDList.size(); }
/// Determine if the list of named metadata is empty.
bool named_metadata_empty() const { return NamedMDList.empty(); }
@@ -535,7 +536,7 @@ public:
/// Print the module to an output stream with AssemblyAnnotationWriter.
void print(raw_ostream &OS, AssemblyAnnotationWriter *AAW) const;
-
+
/// Dump the module to stderr (for debugging).
void dump() const;
/// This function causes all the subinstructions to "let go" of all references
diff --git a/include/llvm/Pass.h b/include/llvm/Pass.h
index 8d0c47d7bbe3..5a5893140e05 100644
--- a/include/llvm/Pass.h
+++ b/include/llvm/Pass.h
@@ -31,7 +31,6 @@
#include "llvm/System/DataTypes.h"
-#include <cassert>
#include <string>
#include <utility>
#include <vector>
@@ -89,13 +88,8 @@ class Pass {
Pass(const Pass &); // DO NOT IMPLEMENT
public:
- explicit Pass(PassKind K, intptr_t pid) : Resolver(0), PassID(pid), Kind(K) {
- assert(pid && "pid cannot be 0");
- }
- explicit Pass(PassKind K, const void *pid)
- : Resolver(0), PassID((intptr_t)pid), Kind(K) {
- assert(pid && "pid cannot be 0");
- }
+ explicit Pass(PassKind K, intptr_t pid);
+ explicit Pass(PassKind K, const void *pid);
virtual ~Pass();
@@ -138,13 +132,8 @@ public:
virtual PassManagerType getPotentialPassManagerType() const;
// Access AnalysisResolver
- inline void setResolver(AnalysisResolver *AR) {
- assert(!Resolver && "Resolver is already set");
- Resolver = AR;
- }
- inline AnalysisResolver *getResolver() {
- return Resolver;
- }
+ void setResolver(AnalysisResolver *AR);
+ AnalysisResolver *getResolver() const { return Resolver; }
/// getAnalysisUsage - This function should be overriden by passes that need
/// analysis information to do their job. If a pass specifies that it uses a
@@ -170,11 +159,9 @@ public:
/// an analysis interface through multiple inheritance. If needed, it should
/// override this to adjust the this pointer as needed for the specified pass
/// info.
- virtual void *getAdjustedAnalysisPointer(const PassInfo *) {
- return this;
- }
- virtual ImmutablePass *getAsImmutablePass() { return 0; }
- virtual PMDataManager *getAsPMDataManager() { return 0; }
+ virtual void *getAdjustedAnalysisPointer(const PassInfo *);
+ virtual ImmutablePass *getAsImmutablePass();
+ virtual PMDataManager *getAsPMDataManager();
/// verifyAnalysis() - This member can be implemented by a analysis pass to
/// check state of analysis information.
diff --git a/include/llvm/PassAnalysisSupport.h b/include/llvm/PassAnalysisSupport.h
index d59be3c32a75..977d4f4e30d3 100644
--- a/include/llvm/PassAnalysisSupport.h
+++ b/include/llvm/PassAnalysisSupport.h
@@ -49,22 +49,13 @@ public:
// addRequired - Add the specified ID to the required set of the usage info
// for a pass.
//
- AnalysisUsage &addRequiredID(AnalysisID ID) {
- assert(ID && "Pass class not registered!");
- Required.push_back(ID);
- return *this;
- }
+ AnalysisUsage &addRequiredID(AnalysisID ID);
template<class PassClass>
AnalysisUsage &addRequired() {
return addRequiredID(Pass::getClassPassInfo<PassClass>());
}
- AnalysisUsage &addRequiredTransitiveID(AnalysisID ID) {
- assert(ID && "Pass class not registered!");
- Required.push_back(ID);
- RequiredTransitive.push_back(ID);
- return *this;
- }
+ AnalysisUsage &addRequiredTransitiveID(AnalysisID ID);
template<class PassClass>
AnalysisUsage &addRequiredTransitive() {
AnalysisID ID = Pass::getClassPassInfo<PassClass>();
diff --git a/include/llvm/PassManagers.h b/include/llvm/PassManagers.h
index ed1e80eae69c..81b7e7af816e 100644
--- a/include/llvm/PassManagers.h
+++ b/include/llvm/PassManagers.h
@@ -302,10 +302,7 @@ public:
/// through getAnalysis interface.
virtual void addLowerLevelRequiredPass(Pass *P, Pass *RequiredPass);
- virtual Pass * getOnTheFlyPass(Pass *P, const PassInfo *PI, Function &F) {
- assert (0 && "Unable to find on the fly pass");
- return NULL;
- }
+ virtual Pass *getOnTheFlyPass(Pass *P, const PassInfo *PI, Function &F);
/// Initialize available analysis information.
void initializeAnalysisInfo() {
diff --git a/include/llvm/PassSupport.h b/include/llvm/PassSupport.h
index b22998985324..b0183513386d 100644
--- a/include/llvm/PassSupport.h
+++ b/include/llvm/PassSupport.h
@@ -109,13 +109,7 @@ public:
}
/// createPass() - Use this method to create an instance of this pass.
- Pass *createPass() const {
- assert((!isAnalysisGroup() || NormalCtor) &&
- "No default implementation found for analysis group!");
- assert(NormalCtor &&
- "Cannot call createPass on PassInfo without default ctor!");
- return NormalCtor();
- }
+ Pass *createPass() const;
/// addInterfaceImplemented - This method is called when this pass is
/// registered as a member of an analysis group with the RegisterAnalysisGroup
diff --git a/include/llvm/Support/CFG.h b/include/llvm/Support/CFG.h
index f07c719f194d..9ba71fcca8a5 100644
--- a/include/llvm/Support/CFG.h
+++ b/include/llvm/Support/CFG.h
@@ -53,7 +53,7 @@ public:
assert(!It.atEnd() && "pred_iterator out of range!");
return cast<TerminatorInst>(*It)->getParent();
}
- inline pointer *operator->() const { return &(operator*()); }
+ inline pointer *operator->() const { return &operator*(); }
inline Self& operator++() { // Preincrement
assert(!It.atEnd() && "pred_iterator out of range!");
diff --git a/include/llvm/Support/COFF.h b/include/llvm/Support/COFF.h
new file mode 100644
index 000000000000..2d4e0544368b
--- /dev/null
+++ b/include/llvm/Support/COFF.h
@@ -0,0 +1,183 @@
+//===-- llvm/Support/COFF.h -------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains definitions used in Windows COFF files.
+//
+// Structures and enums defined within this file were created using
+// information from Microsoft's publicly available PE/COFF format document:
+//
+// Microsoft Portable Executable and Common Object File Format Specification
+// Revision 8.1 - February 15, 2008
+//
+// As of 5/2/2010, hosted by Microsoft at:
+// http://www.microsoft.com/whdc/system/platform/firmware/pecoff.mspx
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_WIN_COFF_H
+#define LLVM_SUPPORT_WIN_COFF_H
+
+#include "llvm/System/DataTypes.h"
+#include <cstring>
+
+namespace llvm {
+namespace COFF {
+
+ // Sizes in bytes of various things in the COFF format.
+ enum {
+ HeaderSize = 20,
+ NameSize = 8,
+ SymbolSize = 18,
+ SectionSize = 40,
+ RelocationSize = 10
+ };
+
+ struct header {
+ uint16_t Machine;
+ uint16_t NumberOfSections;
+ uint32_t TimeDateStamp;
+ uint32_t PointerToSymbolTable;
+ uint32_t NumberOfSymbols;
+ uint16_t SizeOfOptionalHeader;
+ uint16_t Characteristics;
+ };
+
+ struct symbol {
+ char Name[NameSize];
+ uint32_t Value;
+ uint16_t Type;
+ uint8_t StorageClass;
+ uint16_t SectionNumber;
+ uint8_t NumberOfAuxSymbols;
+ };
+
+ enum symbol_flags {
+ SF_TypeMask = 0x0000FFFF,
+ SF_TypeShift = 0,
+
+ SF_ClassMask = 0x00FF0000,
+ SF_ClassShift = 16,
+
+ SF_WeakReference = 0x01000000
+ };
+
+ enum symbol_storage_class {
+ IMAGE_SYM_CLASS_END_OF_FUNCTION = -1,
+ IMAGE_SYM_CLASS_NULL = 0,
+ IMAGE_SYM_CLASS_AUTOMATIC = 1,
+ IMAGE_SYM_CLASS_EXTERNAL = 2,
+ IMAGE_SYM_CLASS_STATIC = 3,
+ IMAGE_SYM_CLASS_REGISTER = 4,
+ IMAGE_SYM_CLASS_EXTERNAL_DEF = 5,
+ IMAGE_SYM_CLASS_LABEL = 6,
+ IMAGE_SYM_CLASS_UNDEFINED_LABEL = 7,
+ IMAGE_SYM_CLASS_MEMBER_OF_STRUCT = 8,
+ IMAGE_SYM_CLASS_ARGUMENT = 9,
+ IMAGE_SYM_CLASS_STRUCT_TAG = 10,
+ IMAGE_SYM_CLASS_MEMBER_OF_UNION = 11,
+ IMAGE_SYM_CLASS_UNION_TAG = 12,
+ IMAGE_SYM_CLASS_TYPE_DEFINITION = 13,
+ IMAGE_SYM_CLASS_UNDEFINED_STATIC = 14,
+ IMAGE_SYM_CLASS_ENUM_TAG = 15,
+ IMAGE_SYM_CLASS_MEMBER_OF_ENUM = 16,
+ IMAGE_SYM_CLASS_REGISTER_PARAM = 17,
+ IMAGE_SYM_CLASS_BIT_FIELD = 18,
+ IMAGE_SYM_CLASS_BLOCK = 100,
+ IMAGE_SYM_CLASS_FUNCTION = 101,
+ IMAGE_SYM_CLASS_END_OF_STRUCT = 102,
+ IMAGE_SYM_CLASS_FILE = 103,
+ IMAGE_SYM_CLASS_SECTION = 104,
+ IMAGE_SYM_CLASS_WEAK_EXTERNAL = 105,
+ IMAGE_SYM_CLASS_CLR_TOKEN = 107
+ };
+
+ struct section {
+ char Name[NameSize];
+ uint32_t VirtualSize;
+ uint32_t VirtualAddress;
+ uint32_t SizeOfRawData;
+ uint32_t PointerToRawData;
+ uint32_t PointerToRelocations;
+ uint32_t PointerToLineNumbers;
+ uint16_t NumberOfRelocations;
+ uint16_t NumberOfLineNumbers;
+ uint32_t Characteristics;
+ };
+
+ enum section_characteristics {
+ IMAGE_SCN_TYPE_NO_PAD = 0x00000008,
+ IMAGE_SCN_CNT_CODE = 0x00000020,
+ IMAGE_SCN_CNT_INITIALIZED_DATA = 0x00000040,
+ IMAGE_SCN_CNT_UNINITIALIZED_DATA = 0x00000080,
+ IMAGE_SCN_LNK_OTHER = 0x00000100,
+ IMAGE_SCN_LNK_INFO = 0x00000200,
+ IMAGE_SCN_LNK_REMOVE = 0x00000800,
+ IMAGE_SCN_LNK_COMDAT = 0x00001000,
+ IMAGE_SCN_GPREL = 0x00008000,
+ IMAGE_SCN_MEM_PURGEABLE = 0x00020000,
+ IMAGE_SCN_MEM_16BIT = 0x00020000,
+ IMAGE_SCN_MEM_LOCKED = 0x00040000,
+ IMAGE_SCN_MEM_PRELOAD = 0x00080000,
+ IMAGE_SCN_ALIGN_1BYTES = 0x00100000,
+ IMAGE_SCN_ALIGN_2BYTES = 0x00200000,
+ IMAGE_SCN_ALIGN_4BYTES = 0x00300000,
+ IMAGE_SCN_ALIGN_8BYTES = 0x00400000,
+ IMAGE_SCN_ALIGN_16BYTES = 0x00500000,
+ IMAGE_SCN_ALIGN_32BYTES = 0x00600000,
+ IMAGE_SCN_ALIGN_64BYTES = 0x00700000,
+ IMAGE_SCN_ALIGN_128BYTES = 0x00800000,
+ IMAGE_SCN_ALIGN_256BYTES = 0x00900000,
+ IMAGE_SCN_ALIGN_512BYTES = 0x00A00000,
+ IMAGE_SCN_ALIGN_1024BYTES = 0x00B00000,
+ IMAGE_SCN_ALIGN_2048BYTES = 0x00C00000,
+ IMAGE_SCN_ALIGN_4096BYTES = 0x00D00000,
+ IMAGE_SCN_ALIGN_8192BYTES = 0x00E00000,
+ IMAGE_SCN_LNK_NRELOC_OVFL = 0x01000000,
+ IMAGE_SCN_MEM_DISCARDABLE = 0x02000000,
+ IMAGE_SCN_MEM_NOT_CACHED = 0x04000000,
+ IMAGE_SCN_MEM_NOT_PAGED = 0x08000000,
+ IMAGE_SCN_MEM_SHARED = 0x10000000,
+ IMAGE_SCN_MEM_EXECUTE = 0x20000000,
+ IMAGE_SCN_MEM_READ = 0x40000000,
+ IMAGE_SCN_MEM_WRITE = 0x80000000
+ };
+
+ struct relocation {
+ uint32_t VirtualAddress;
+ uint32_t SymbolTableIndex;
+ uint16_t Type;
+ };
+
+ enum relocation_type_x86 {
+ IMAGE_REL_I386_ABSOLUTE = 0x0000,
+ IMAGE_REL_I386_DIR16 = 0x0001,
+ IMAGE_REL_I386_REL16 = 0x0002,
+ IMAGE_REL_I386_DIR32 = 0x0006,
+ IMAGE_REL_I386_DIR32NB = 0x0007,
+ IMAGE_REL_I386_SEG12 = 0x0009,
+ IMAGE_REL_I386_SECTION = 0x000A,
+ IMAGE_REL_I386_SECREL = 0x000B,
+ IMAGE_REL_I386_TOKEN = 0x000C,
+ IMAGE_REL_I386_SECREL7 = 0x000D,
+ IMAGE_REL_I386_REL32 = 0x0014
+ };
+
+ enum {
+ IMAGE_COMDAT_SELECT_NODUPLICATES = 1,
+ IMAGE_COMDAT_SELECT_ANY,
+ IMAGE_COMDAT_SELECT_SAME_SIZE,
+ IMAGE_COMDAT_SELECT_EXACT_MATCH,
+ IMAGE_COMDAT_SELECT_ASSOCIATIVE,
+ IMAGE_COMDAT_SELECT_LARGEST
+ };
+
+} // End namespace COFF.
+} // End namespace llvm.
+
+#endif
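
A small usage sketch for the new llvm::COFF namespace: section flags are plain bit masks that OR together, and the byte sizes are spelled out in the leading enum because the in-memory structs may carry padding, so sizeof(symbol) is not necessarily the on-disk SymbolSize of 18.

#include "llvm/Support/COFF.h"

// Characteristics for a typical executable ".text" section, built from the
// section_characteristics flags above.
unsigned getTextSectionCharacteristics() {
  using namespace llvm::COFF;
  return IMAGE_SCN_CNT_CODE
       | IMAGE_SCN_MEM_EXECUTE
       | IMAGE_SCN_MEM_READ;
}
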
diff --git a/include/llvm/Support/CallSite.h b/include/llvm/Support/CallSite.h
index 0650b61fbcfa..38ee08bedf82 100644
--- a/include/llvm/Support/CallSite.h
+++ b/include/llvm/Support/CallSite.h
@@ -204,9 +204,9 @@ public:
CALLSITE_DELEGATE_GETTER(isNoInline());
}
void setIsNoInline(bool Value = true) {
- CALLSITE_DELEGATE_GETTER(setIsNoInline(Value));
+ CALLSITE_DELEGATE_SETTER(setIsNoInline(Value));
}
-
+
/// @brief Determine if the call does not access memory.
bool doesNotAccessMemory() const {
CALLSITE_DELEGATE_GETTER(doesNotAccessMemory());
@@ -256,14 +256,14 @@ private:
/// Returns the operand number of the first argument
unsigned getArgumentOffset() const {
if (isCall())
- return 1; // Skip Function (ATM)
+ return CallInst::ArgOffset; // Skip Function (ATM)
else
return 0; // Args are at the front
}
unsigned getArgumentEndOffset() const {
if (isCall())
- return 0; // Unchanged (ATM)
+ return CallInst::ArgOffset ? 0 : 1; // Unchanged (ATM)
else
return 3; // Skip BB, BB, Function
}
@@ -273,7 +273,9 @@ private:
// of the op_*() functions here. See CallSite::getCallee.
//
if (isCall())
- return getInstruction()->op_begin(); // Unchanged (ATM)
+ return CallInst::ArgOffset
+ ? getInstruction()->op_begin() // Unchanged
+ : getInstruction()->op_end() - 1; // Skip Function
else
return getInstruction()->op_end() - 3; // Skip BB, BB, Function
}
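
Because CallInst::ArgOffset now hides whether the callee sits at the front or the back of the operand list, clients should walk arguments through CallSite rather than indexing operands directly. countPointerArgs below is an illustrative helper, not part of the patch:

#include "llvm/Instructions.h"
#include "llvm/Type.h"
#include "llvm/Support/CallSite.h"

// Count pointer-typed actual arguments without assuming any operand layout.
unsigned countPointerArgs(llvm::CallSite CS) {
  unsigned N = 0;
  for (llvm::CallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end();
       I != E; ++I)
    if ((*I)->getType()->isPointerTy())
      ++N;
  return N;
}
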
diff --git a/include/llvm/Support/Dwarf.h b/include/llvm/Support/Dwarf.h
index 3d25e0368b5a..3ca8d96dfc37 100644
--- a/include/llvm/Support/Dwarf.h
+++ b/include/llvm/Support/Dwarf.h
@@ -300,12 +300,99 @@ enum dwarf_constants {
DW_OP_ne = 0x2e,
DW_OP_lit0 = 0x30,
DW_OP_lit1 = 0x31,
+ DW_OP_lit2 = 0x32,
+ DW_OP_lit3 = 0x33,
+ DW_OP_lit4 = 0x34,
+ DW_OP_lit5 = 0x35,
+ DW_OP_lit6 = 0x36,
+ DW_OP_lit7 = 0x37,
+ DW_OP_lit8 = 0x38,
+ DW_OP_lit9 = 0x39,
+ DW_OP_lit10 = 0x3a,
+ DW_OP_lit11 = 0x3b,
+ DW_OP_lit12 = 0x3c,
+ DW_OP_lit13 = 0x3d,
+ DW_OP_lit14 = 0x3e,
+ DW_OP_lit15 = 0x3f,
+ DW_OP_lit16 = 0x40,
+ DW_OP_lit17 = 0x41,
+ DW_OP_lit18 = 0x42,
+ DW_OP_lit19 = 0x43,
+ DW_OP_lit20 = 0x44,
+ DW_OP_lit21 = 0x45,
+ DW_OP_lit22 = 0x46,
+ DW_OP_lit23 = 0x47,
+ DW_OP_lit24 = 0x48,
+ DW_OP_lit25 = 0x49,
+ DW_OP_lit26 = 0x4a,
+ DW_OP_lit27 = 0x4b,
+ DW_OP_lit28 = 0x4c,
+ DW_OP_lit29 = 0x4d,
+ DW_OP_lit30 = 0x4e,
DW_OP_lit31 = 0x4f,
DW_OP_reg0 = 0x50,
DW_OP_reg1 = 0x51,
+ DW_OP_reg2 = 0x52,
+ DW_OP_reg3 = 0x53,
+ DW_OP_reg4 = 0x54,
+ DW_OP_reg5 = 0x55,
+ DW_OP_reg6 = 0x56,
+ DW_OP_reg7 = 0x57,
+ DW_OP_reg8 = 0x58,
+ DW_OP_reg9 = 0x59,
+ DW_OP_reg10 = 0x5a,
+ DW_OP_reg11 = 0x5b,
+ DW_OP_reg12 = 0x5c,
+ DW_OP_reg13 = 0x5d,
+ DW_OP_reg14 = 0x5e,
+ DW_OP_reg15 = 0x5f,
+ DW_OP_reg16 = 0x60,
+ DW_OP_reg17 = 0x61,
+ DW_OP_reg18 = 0x62,
+ DW_OP_reg19 = 0x63,
+ DW_OP_reg20 = 0x64,
+ DW_OP_reg21 = 0x65,
+ DW_OP_reg22 = 0x66,
+ DW_OP_reg23 = 0x67,
+ DW_OP_reg24 = 0x68,
+ DW_OP_reg25 = 0x69,
+ DW_OP_reg26 = 0x6a,
+ DW_OP_reg27 = 0x6b,
+ DW_OP_reg28 = 0x6c,
+ DW_OP_reg29 = 0x6d,
+ DW_OP_reg30 = 0x6e,
DW_OP_reg31 = 0x6f,
DW_OP_breg0 = 0x70,
DW_OP_breg1 = 0x71,
+ DW_OP_breg2 = 0x72,
+ DW_OP_breg3 = 0x73,
+ DW_OP_breg4 = 0x74,
+ DW_OP_breg5 = 0x75,
+ DW_OP_breg6 = 0x76,
+ DW_OP_breg7 = 0x77,
+ DW_OP_breg8 = 0x78,
+ DW_OP_breg9 = 0x79,
+ DW_OP_breg10 = 0x7a,
+ DW_OP_breg11 = 0x7b,
+ DW_OP_breg12 = 0x7c,
+ DW_OP_breg13 = 0x7d,
+ DW_OP_breg14 = 0x7e,
+ DW_OP_breg15 = 0x7f,
+ DW_OP_breg16 = 0x80,
+ DW_OP_breg17 = 0x81,
+ DW_OP_breg18 = 0x82,
+ DW_OP_breg19 = 0x83,
+ DW_OP_breg20 = 0x84,
+ DW_OP_breg21 = 0x85,
+ DW_OP_breg22 = 0x86,
+ DW_OP_breg23 = 0x87,
+ DW_OP_breg24 = 0x88,
+ DW_OP_breg25 = 0x89,
+ DW_OP_breg26 = 0x8a,
+ DW_OP_breg27 = 0x8b,
+ DW_OP_breg28 = 0x8c,
+ DW_OP_breg29 = 0x8d,
+ DW_OP_breg30 = 0x8e,
DW_OP_breg31 = 0x8f,
DW_OP_regx = 0x90,
DW_OP_fbreg = 0x91,
diff --git a/include/llvm/Support/ELF.h b/include/llvm/Support/ELF.h
index d09db3998c45..6f939e7e3435 100644
--- a/include/llvm/Support/ELF.h
+++ b/include/llvm/Support/ELF.h
@@ -10,11 +10,10 @@
// This header contains common, non-processor-specific data structures and
// constants for the ELF file format.
//
-// The details of the ELF32 bits in this file are largely based on
-// the Tool Interface Standard (TIS) Executable and Linking Format
-// (ELF) Specification Version 1.2, May 1995. The ELF64 stuff is not
-// standardized, as far as I can tell. It was largely based on information
-// I found in OpenBSD header files.
+// The details of the ELF32 bits in this file are largely based on the Tool
+// Interface Standard (TIS) Executable and Linking Format (ELF) Specification
+// Version 1.2, May 1995. The ELF64 stuff is based on ELF-64 Object File Format
+// Version 1.5, Draft 2, May 1998 as well as OpenBSD header files.
//
//===----------------------------------------------------------------------===//
@@ -47,8 +46,23 @@ typedef uint16_t Elf64_Quarter;
// Object file magic string.
static const char ElfMagic[] = { 0x7f, 'E', 'L', 'F', '\0' };
+// e_ident size and indices.
+enum {
+ EI_MAG0 = 0, // File identification index.
+ EI_MAG1 = 1, // File identification index.
+ EI_MAG2 = 2, // File identification index.
+ EI_MAG3 = 3, // File identification index.
+ EI_CLASS = 4, // File class.
+ EI_DATA = 5, // Data encoding.
+ EI_VERSION = 6, // File version.
+ EI_OSABI = 7, // OS/ABI identification.
+ EI_ABIVERSION = 8, // ABI version.
+ EI_PAD = 9, // Start of padding bytes.
+ EI_NIDENT = 16 // Number of bytes in e_ident.
+};
+
struct Elf32_Ehdr {
- unsigned char e_ident[16]; // ELF Identification bytes
+ unsigned char e_ident[EI_NIDENT]; // ELF Identification bytes
Elf32_Half e_type; // Type of file (see ET_* below)
Elf32_Half e_machine; // Required architecture for this file (see EM_*)
Elf32_Word e_version; // Must be equal to 1
@@ -62,17 +76,17 @@ struct Elf32_Ehdr {
Elf32_Half e_shentsize; // Size of an entry in the section header table
Elf32_Half e_shnum; // Number of entries in the section header table
Elf32_Half e_shstrndx; // Sect hdr table index of sect name string table
- bool checkMagic () const {
- return (memcmp (e_ident, ElfMagic, strlen (ElfMagic))) == 0;
+ bool checkMagic() const {
+ return (memcmp(e_ident, ElfMagic, strlen(ElfMagic))) == 0;
}
- unsigned char getFileClass () const { return e_ident[4]; }
- unsigned char getDataEncoding () { return e_ident[5]; }
+ unsigned char getFileClass() const { return e_ident[EI_CLASS]; }
+ unsigned char getDataEncoding() const { return e_ident[EI_DATA]; }
};
// 64-bit ELF header. Fields are the same as for ELF32, but with different
// types (see above).
struct Elf64_Ehdr {
- unsigned char e_ident[16];
+ unsigned char e_ident[EI_NIDENT];
Elf64_Quarter e_type;
Elf64_Quarter e_machine;
Elf64_Half e_version;
@@ -86,6 +100,11 @@ struct Elf64_Ehdr {
Elf64_Quarter e_shentsize;
Elf64_Quarter e_shnum;
Elf64_Quarter e_shstrndx;
+ bool checkMagic() const {
+ return (memcmp(e_ident, ElfMagic, strlen(ElfMagic))) == 0;
+ }
+ unsigned char getFileClass() const { return e_ident[EI_CLASS]; }
+ unsigned char getDataEncoding() const { return e_ident[EI_DATA]; }
};
// File types
@@ -117,6 +136,7 @@ enum {
EM_860 = 7, // Intel 80860
EM_MIPS = 8, // MIPS R3000
EM_PPC = 20, // PowerPC
+ EM_PPC64 = 21, // PowerPC64
EM_ARM = 40, // ARM
EM_ALPHA = 41, // DEC Alpha
EM_SPARCV9 = 43, // SPARC V9
@@ -131,13 +151,69 @@ enum {
// Object file byte orderings.
enum {
+ ELFDATANONE = 0, // Invalid data encoding.
ELFDATA2LSB = 1, // Little-endian object file
ELFDATA2MSB = 2 // Big-endian object file
};
-// OS ABI identification -- unused.
+// OS ABI identification.
enum {
- ELFOSABI_NONE = 0
+ ELFOSABI_NONE = 0, // UNIX System V ABI
+ ELFOSABI_HPUX = 1, // HP-UX operating system
+ ELFOSABI_NETBSD = 2, // NetBSD
+ ELFOSABI_LINUX = 3, // GNU/Linux
+ ELFOSABI_HURD = 4, // GNU/Hurd
+ ELFOSABI_SOLARIS = 6, // Solaris
+ ELFOSABI_AIX = 7, // AIX
+ ELFOSABI_IRIX = 8, // IRIX
+ ELFOSABI_FREEBSD = 9, // FreeBSD
+ ELFOSABI_TRU64 = 10, // TRU64 UNIX
+ ELFOSABI_MODESTO = 11, // Novell Modesto
+ ELFOSABI_OPENBSD = 12, // OpenBSD
+ ELFOSABI_OPENVMS = 13, // OpenVMS
+ ELFOSABI_NSK = 14, // Hewlett-Packard Non-Stop Kernel
+ ELFOSABI_AROS = 15, // AROS
+ ELFOSABI_FENIXOS = 16, // FenixOS
+ ELFOSABI_C6000_ELFABI = 64, // Bare-metal TMS320C6000
+ ELFOSABI_C6000_LINUX = 65, // Linux TMS320C6000
+ ELFOSABI_ARM = 97, // ARM
+ ELFOSABI_STANDALONE = 255 // Standalone (embedded) application
+};
+
+// X86_64 relocations.
+enum {
+ R_X86_64_NONE = 0,
+ R_X86_64_64 = 1,
+ R_X86_64_PC32 = 2,
+ R_X86_64_GOT32 = 3,
+ R_X86_64_PLT32 = 4,
+ R_X86_64_COPY = 5,
+ R_X86_64_GLOB_DAT = 6,
+ R_X86_64_JUMP_SLOT = 7,
+ R_X86_64_RELATIVE = 8,
+ R_X86_64_GOTPCREL = 9,
+ R_X86_64_32 = 10,
+ R_X86_64_32S = 11,
+ R_X86_64_16 = 12,
+ R_X86_64_PC16 = 13,
+ R_X86_64_8 = 14,
+ R_X86_64_PC8 = 15,
+ R_X86_64_DTPMOD64 = 16,
+ R_X86_64_DTPOFF64 = 17,
+ R_X86_64_TPOFF64 = 18,
+ R_X86_64_TLSGD = 19,
+ R_X86_64_TLSLD = 20,
+ R_X86_64_DTPOFF32 = 21,
+ R_X86_64_GOTTPOFF = 22,
+ R_X86_64_TPOFF32 = 23,
+ R_X86_64_PC64 = 24,
+ R_X86_64_GOTOFF64 = 25,
+ R_X86_64_GOTPC32 = 26,
+ R_X86_64_SIZE32 = 32,
+ R_X86_64_SIZE64 = 33,
+ R_X86_64_GOTPC32_TLSDESC = 34,
+ R_X86_64_TLSDESC_CALL = 35,
+ R_X86_64_TLSDESC = 36
};
// Section header.
@@ -207,7 +283,7 @@ enum {
SHF_MASKPROC = 0xf0000000 // Bits indicating processor-specific flags.
};
-// Symbol table entries.
+// Symbol table entries for ELF32.
struct Elf32_Sym {
Elf32_Word st_name; // Symbol name (index into string table)
Elf32_Addr st_value; // Value or address associated with the symbol
@@ -218,11 +294,31 @@ struct Elf32_Sym {
// These accessors and mutators correspond to the ELF32_ST_BIND,
// ELF32_ST_TYPE, and ELF32_ST_INFO macros defined in the ELF specification:
- unsigned char getBinding () const { return st_info >> 4; }
- unsigned char getType () const { return st_info & 0x0f; }
- void setBinding (unsigned char b) { setBindingAndType (b, getType ()); }
- void setType (unsigned char t) { setBindingAndType (getBinding (), t); }
- void setBindingAndType (unsigned char b, unsigned char t) {
+ unsigned char getBinding() const { return st_info >> 4; }
+ unsigned char getType() const { return st_info & 0x0f; }
+ void setBinding(unsigned char b) { setBindingAndType(b, getType()); }
+ void setType(unsigned char t) { setBindingAndType(getBinding(), t); }
+ void setBindingAndType(unsigned char b, unsigned char t) {
+ st_info = (b << 4) + (t & 0x0f);
+ }
+};
+
+// Symbol table entries for ELF64.
+struct Elf64_Sym {
+ Elf64_Word st_name; // Symbol name (index into string table)
+ unsigned char st_info; // Symbol's type and binding attributes
+ unsigned char st_other; // Must be zero; reserved
+ Elf64_Half st_shndx; // Which section (header table index) it's defined in
+ Elf64_Addr st_value; // Value or address associated with the symbol
+ Elf64_Xword st_size; // Size of the symbol
+
+ // These accessors and mutators are identical to those defined for ELF32
+ // symbol table entries.
+ unsigned char getBinding() const { return st_info >> 4; }
+ unsigned char getType() const { return st_info & 0x0f; }
+ void setBinding(unsigned char b) { setBindingAndType(b, getType()); }
+ void setType(unsigned char t) { setBindingAndType(getBinding(), t); }
+ void setBindingAndType(unsigned char b, unsigned char t) {
st_info = (b << 4) + (t & 0x0f);
}
};
@@ -254,11 +350,11 @@ struct Elf32_Rel {
// These accessors and mutators correspond to the ELF32_R_SYM, ELF32_R_TYPE,
// and ELF32_R_INFO macros defined in the ELF specification:
- Elf32_Word getSymbol () const { return (r_info >> 8); }
- unsigned char getType () const { return (unsigned char) (r_info & 0x0ff); }
- void setSymbol (Elf32_Word s) { setSymbolAndType (s, getType ()); }
- void setType (unsigned char t) { setSymbolAndType (getSymbol(), t); }
- void setSymbolAndType (Elf32_Word s, unsigned char t) {
+ Elf32_Word getSymbol() const { return (r_info >> 8); }
+ unsigned char getType() const { return (unsigned char) (r_info & 0x0ff); }
+ void setSymbol(Elf32_Word s) { setSymbolAndType(s, getType()); }
+ void setType(unsigned char t) { setSymbolAndType(getSymbol(), t); }
+ void setSymbolAndType(Elf32_Word s, unsigned char t) {
r_info = (s << 8) + t;
};
};
@@ -271,16 +367,53 @@ struct Elf32_Rela {
// These accessors and mutators correspond to the ELF32_R_SYM, ELF32_R_TYPE,
// and ELF32_R_INFO macros defined in the ELF specification:
- Elf32_Word getSymbol () const { return (r_info >> 8); }
- unsigned char getType () const { return (unsigned char) (r_info & 0x0ff); }
- void setSymbol (Elf32_Word s) { setSymbolAndType (s, getType ()); }
- void setType (unsigned char t) { setSymbolAndType (getSymbol(), t); }
- void setSymbolAndType (Elf32_Word s, unsigned char t) {
+ Elf32_Word getSymbol() const { return (r_info >> 8); }
+ unsigned char getType() const { return (unsigned char) (r_info & 0x0ff); }
+ void setSymbol(Elf32_Word s) { setSymbolAndType(s, getType()); }
+ void setType(unsigned char t) { setSymbolAndType(getSymbol(), t); }
+ void setSymbolAndType(Elf32_Word s, unsigned char t) {
r_info = (s << 8) + t;
};
};
-// Program header.
+// Relocation entry, without explicit addend.
+struct Elf64_Rel {
+ Elf64_Addr r_offset; // Location (file byte offset, or program virtual addr).
+ Elf64_Xword r_info; // Symbol table index and type of relocation to apply.
+
+ // These accessors and mutators correspond to the ELF64_R_SYM, ELF64_R_TYPE,
+ // and ELF64_R_INFO macros defined in the ELF specification:
+ Elf64_Xword getSymbol() const { return (r_info >> 32); }
+ unsigned char getType() const {
+ return (unsigned char) (r_info & 0xffffffffL);
+ }
+ void setSymbol(Elf32_Word s) { setSymbolAndType(s, getType()); }
+ void setType(unsigned char t) { setSymbolAndType(getSymbol(), t); }
+ void setSymbolAndType(Elf64_Xword s, unsigned char t) {
+ r_info = (s << 32) + (t&0xffffffffL);
+ };
+};
+
+// Relocation entry with explicit addend.
+struct Elf64_Rela {
+ Elf64_Addr r_offset; // Location (file byte offset, or program virtual addr).
+ Elf64_Xword r_info; // Symbol table index and type of relocation to apply.
+ Elf64_Sxword r_addend; // Compute value for relocatable field by adding this.
+
+ // These accessors and mutators correspond to the ELF64_R_SYM, ELF64_R_TYPE,
+ // and ELF64_R_INFO macros defined in the ELF specification:
+ Elf64_Xword getSymbol() const { return (r_info >> 32); }
+ unsigned char getType() const {
+ return (unsigned char) (r_info & 0xffffffffL);
+ }
+ void setSymbol(Elf64_Xword s) { setSymbolAndType(s, getType()); }
+ void setType(unsigned char t) { setSymbolAndType(getSymbol(), t); }
+ void setSymbolAndType(Elf64_Xword s, unsigned char t) {
+ r_info = (s << 32) + (t&0xffffffffL);
+ };
+};
+
+// Program header for ELF32.
struct Elf32_Phdr {
Elf32_Word p_type; // Type of segment
Elf32_Off p_offset; // File offset where segment is located, in bytes
@@ -292,6 +425,18 @@ struct Elf32_Phdr {
Elf32_Word p_align; // Segment alignment constraint
};
+// Program header for ELF64.
+struct Elf64_Phdr {
+ Elf64_Word p_type; // Type of segment
+ Elf64_Word p_flags; // Segment flags
+ Elf64_Off p_offset; // File offset where segment is located, in bytes
+ Elf64_Addr p_vaddr; // Virtual address of beginning of segment
+ Elf64_Addr p_paddr; // Physical address of beginning of segment (OS-specific)
+ Elf64_Xword p_filesz; // Num. of bytes in file image of segment (may be zero)
+ Elf64_Xword p_memsz; // Num. of bytes in mem image of segment (may be zero)
+ Elf64_Xword p_align; // Segment alignment constraint
+};
+
// Segment types.
enum {
PT_NULL = 0, // Unused segment.
@@ -313,6 +458,65 @@ enum {
PF_MASKPROC = 0xf0000000 // Unspecified
};
+// Dynamic table entry for ELF32.
+struct Elf32_Dyn
+{
+ Elf32_Sword d_tag; // Type of dynamic table entry.
+ union
+ {
+ Elf32_Word d_val; // Integer value of entry.
+ Elf32_Addr d_ptr; // Pointer value of entry.
+ } d_un;
+};
+
+// Dynamic table entry for ELF64.
+struct Elf64_Dyn
+{
+ Elf64_Sxword d_tag; // Type of dynamic table entry.
+ union
+ {
+ Elf64_Xword d_val; // Integer value of entry.
+ Elf64_Addr d_ptr; // Pointer value of entry.
+ } d_un;
+};
+
+// Dynamic table entry tags.
+enum {
+ DT_NULL = 0, // Marks end of dynamic array.
+ DT_NEEDED = 1, // String table offset of needed library.
+ DT_PLTRELSZ = 2, // Size of relocation entries in PLT.
+ DT_PLTGOT = 3, // Address associated with linkage table.
+ DT_HASH = 4, // Address of symbolic hash table.
+ DT_STRTAB = 5, // Address of dynamic string table.
+ DT_SYMTAB = 6, // Address of dynamic symbol table.
+ DT_RELA = 7, // Address of relocation table (Rela entries).
+ DT_RELASZ = 8, // Size of Rela relocation table.
+ DT_RELAENT = 9, // Size of a Rela relocation entry.
+ DT_STRSZ = 10, // Total size of the string table.
+ DT_SYMENT = 11, // Size of a symbol table entry.
+ DT_INIT = 12, // Address of initialization function.
+ DT_FINI = 13, // Address of termination function.
+ DT_SONAME = 14, // String table offset of a shared objects name.
+ DT_RPATH = 15, // String table offset of library search path.
+ DT_SYMBOLIC = 16, // Changes symbol resolution algorithm.
+ DT_REL = 17, // Address of relocation table (Rel entries).
+ DT_RELSZ = 18, // Size of Rel relocation table.
+ DT_RELENT = 19, // Size of a Rel relocation entry.
+ DT_PLTREL = 20, // Type of relocation entry used for linking.
+ DT_DEBUG = 21, // Reserved for debugger.
+  DT_TEXTREL      = 22,        // Relocations exist for non-writable segments.
+ DT_JMPREL = 23, // Address of relocations associated with PLT.
+ DT_BIND_NOW = 24, // Process all relocations before execution.
+ DT_INIT_ARRAY = 25, // Pointer to array of initialization functions.
+ DT_FINI_ARRAY = 26, // Pointer to array of termination functions.
+ DT_INIT_ARRAYSZ = 27, // Size of DT_INIT_ARRAY.
+ DT_FINI_ARRAYSZ = 28, // Size of DT_FINI_ARRAY.
+ DT_LOOS = 0x60000000, // Start of environment specific tags.
+ DT_HIOS = 0x6FFFFFFF, // End of environment specific tags.
+ DT_LOPROC = 0x70000000, // Start of processor specific tags.
+ DT_HIPROC = 0x7FFFFFFF // End of processor specific tags.
+};
+
} // end namespace ELF
} // end namespace llvm
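
The new ELF64 entries mirror the ELF32 accessors, with r_info packing the symbol index into the high 32 bits and the relocation type into the low 32. A quick sanity-check sketch using only the types and constants added above:

#include "llvm/Support/ELF.h"
#include <cassert>

void relocationPackingExample() {
  using namespace llvm::ELF;
  Elf64_Rela RE;
  RE.r_offset = 0x1000;
  RE.r_addend = -4;                          // PC-relative fixups often use -4
  RE.setSymbolAndType(42, R_X86_64_PC32);    // symbol index 42, type 2
  assert(RE.getSymbol() == 42);
  assert(RE.getType() == R_X86_64_PC32);
}
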
diff --git a/include/llvm/Support/IRBuilder.h b/include/llvm/Support/IRBuilder.h
index 1fd965d3e773..4b1b1c0411af 100644
--- a/include/llvm/Support/IRBuilder.h
+++ b/include/llvm/Support/IRBuilder.h
@@ -97,6 +97,48 @@ public:
I->setDebugLoc(CurDbgLocation);
}
+ /// InsertPoint - A saved insertion point.
+ class InsertPoint {
+ BasicBlock *Block;
+ BasicBlock::iterator Point;
+
+ public:
+ /// Creates a new insertion point which doesn't point to anything.
+ InsertPoint() : Block(0) {}
+
+ /// Creates a new insertion point at the given location.
+ InsertPoint(BasicBlock *InsertBlock, BasicBlock::iterator InsertPoint)
+ : Block(InsertBlock), Point(InsertPoint) {}
+
+ /// isSet - Returns true if this insert point is set.
+ bool isSet() const { return (Block != 0); }
+
+ llvm::BasicBlock *getBlock() const { return Block; }
+ llvm::BasicBlock::iterator getPoint() const { return Point; }
+ };
+
+ /// saveIP - Returns the current insert point.
+ InsertPoint saveIP() const {
+ return InsertPoint(GetInsertBlock(), GetInsertPoint());
+ }
+
+ /// saveAndClearIP - Returns the current insert point, clearing it
+ /// in the process.
+ InsertPoint saveAndClearIP() {
+ InsertPoint IP(GetInsertBlock(), GetInsertPoint());
+ ClearInsertionPoint();
+ return IP;
+ }
+
+ /// restoreIP - Sets the current insert point to a previously-saved
+ /// location.
+ void restoreIP(InsertPoint IP) {
+ if (IP.isSet())
+ SetInsertPoint(IP.getBlock(), IP.getPoint());
+ else
+ ClearInsertionPoint();
+ }
+
//===--------------------------------------------------------------------===//
// Miscellaneous creation methods.
//===--------------------------------------------------------------------===//
@@ -106,33 +148,88 @@ public:
/// specified. If Name is specified, it is the name of the global variable
/// created.
Value *CreateGlobalString(const char *Str = "", const Twine &Name = "");
+
+ /// getInt1 - Get a constant value representing either true or false.
+ ConstantInt *getInt1(bool V) {
+ return ConstantInt::get(getInt1Ty(), V);
+ }
+
+ /// getTrue - Get the constant value for i1 true.
+ ConstantInt *getTrue() {
+ return ConstantInt::getTrue(Context);
+ }
+
+ /// getFalse - Get the constant value for i1 false.
+ ConstantInt *getFalse() {
+ return ConstantInt::getFalse(Context);
+ }
+
+ /// getInt8 - Get a constant 8-bit value.
+ ConstantInt *getInt8(int8_t C) {
+ return ConstantInt::getSigned(getInt8Ty(), C);
+ }
+
+ /// getInt8 - Get a constant 8-bit value.
+ ConstantInt *getInt8(uint8_t C) {
+ return ConstantInt::get(getInt8Ty(), C);
+ }
+
+ /// getInt16 - Get a constant 16-bit value.
+ ConstantInt *getInt16(int16_t C) {
+ return ConstantInt::getSigned(getInt16Ty(), C);
+ }
+
+ /// getInt16 - Get a constant 16-bit value.
+ ConstantInt *getInt16(uint16_t C) {
+ return ConstantInt::get(getInt16Ty(), C);
+ }
+
+ /// getInt32 - Get a constant 32-bit value.
+ ConstantInt *getInt32(int32_t C) {
+ return ConstantInt::getSigned(getInt32Ty(), C);
+ }
+
+ /// getInt32 - Get a constant 32-bit value.
+ ConstantInt *getInt32(uint32_t C) {
+ return ConstantInt::get(getInt32Ty(), C);
+ }
+
+ /// getInt64 - Get a constant 64-bit value.
+ ConstantInt *getInt64(int64_t C) {
+ return ConstantInt::getSigned(getInt64Ty(), C);
+ }
+
+ /// getInt64 - Get a constant 64-bit value.
+ ConstantInt *getInt64(uint64_t C) {
+ return ConstantInt::get(getInt64Ty(), C);
+ }
//===--------------------------------------------------------------------===//
// Type creation methods
//===--------------------------------------------------------------------===//
/// getInt1Ty - Fetch the type representing a single bit
- const Type *getInt1Ty() {
+ const IntegerType *getInt1Ty() {
return Type::getInt1Ty(Context);
}
/// getInt8Ty - Fetch the type representing an 8-bit integer.
- const Type *getInt8Ty() {
+ const IntegerType *getInt8Ty() {
return Type::getInt8Ty(Context);
}
/// getInt16Ty - Fetch the type representing a 16-bit integer.
- const Type *getInt16Ty() {
+ const IntegerType *getInt16Ty() {
return Type::getInt16Ty(Context);
}
/// getInt32Ty - Fetch the type resepresenting a 32-bit integer.
- const Type *getInt32Ty() {
+ const IntegerType *getInt32Ty() {
return Type::getInt32Ty(Context);
}
/// getInt64Ty - Fetch the type representing a 64-bit integer.
- const Type *getInt64Ty() {
+ const IntegerType *getInt64Ty() {
return Type::getInt64Ty(Context);
}
@@ -151,7 +248,7 @@ public:
return Type::getVoidTy(Context);
}
- const Type *getInt8PtrTy() {
+ const PointerType *getInt8PtrTy() {
return Type::getInt8PtrTy(Context);
}
@@ -624,8 +721,8 @@ public:
return Insert(GetElementPtrInst::Create(Ptr, IdxBegin, IdxEnd), Name);
}
template<typename InputIterator>
- Value *CreateInBoundsGEP(Value *Ptr, InputIterator IdxBegin, InputIterator IdxEnd,
- const Twine &Name = "") {
+ Value *CreateInBoundsGEP(Value *Ptr, InputIterator IdxBegin,
+ InputIterator IdxEnd, const Twine &Name = "") {
if (Constant *PC = dyn_cast<Constant>(Ptr)) {
// Every index must be constant.
InputIterator i;
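
Taken together, the IRBuilder additions let a caller park its insertion point, emit into another block, and come back, while the getInt*() helpers shorten constant creation. emitTempAdd and its Scratch block are hypothetical:

#include "llvm/Support/IRBuilder.h"

llvm::Value *emitTempAdd(llvm::IRBuilder<> &B, llvm::BasicBlock *Scratch,
                         llvm::Value *X) {
  // Remember where the caller was inserting.
  llvm::IRBuilder<>::InsertPoint Saved = B.saveIP();
  B.SetInsertPoint(Scratch);                       // emit into Scratch instead
  llvm::Value *Sum = B.CreateAdd(X, B.getInt32(7), "sum");
  B.restoreIP(Saved);                              // and put things back
  return Sum;
}
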
diff --git a/include/llvm/Support/IRReader.h b/include/llvm/Support/IRReader.h
index 0dfc302e242a..fe47c057558f 100644
--- a/include/llvm/Support/IRReader.h
+++ b/include/llvm/Support/IRReader.h
@@ -60,7 +60,8 @@ namespace llvm {
MemoryBuffer *F = MemoryBuffer::getFileOrSTDIN(Filename.c_str(), &ErrMsg);
if (F == 0) {
Err = SMDiagnostic(Filename,
- "Could not open input file '" + Filename + "'");
+ "Could not open input file "
+ "'" + Filename + "': " + ErrMsg);
return 0;
}
@@ -98,7 +99,8 @@ namespace llvm {
MemoryBuffer *F = MemoryBuffer::getFileOrSTDIN(Filename.c_str(), &ErrMsg);
if (F == 0) {
Err = SMDiagnostic(Filename,
- "Could not open input file '" + Filename + "'");
+ "Could not open input file "
+ "'" + Filename + "': " + ErrMsg);
return 0;
}
diff --git a/include/llvm/Support/MemoryBuffer.h b/include/llvm/Support/MemoryBuffer.h
index ef7af6968405..8a41aa5f94fa 100644
--- a/include/llvm/Support/MemoryBuffer.h
+++ b/include/llvm/Support/MemoryBuffer.h
@@ -26,17 +26,20 @@ namespace llvm {
/// into a memory buffer. In addition to basic access to the characters in the
/// file, this interface guarantees you can read one character past the end of
/// the file, and that this character will read as '\0'.
+///
+/// The '\0' guarantee is needed to support an optimization -- it's intended to
+/// be more efficient for clients which are reading all the data to stop
+/// reading when they encounter a '\0' than to continually check the file
+/// position to see if it has reached the end of the file.
class MemoryBuffer {
const char *BufferStart; // Start of the buffer.
const char *BufferEnd; // End of the buffer.
- /// MustDeleteBuffer - True if we allocated this buffer. If so, the
- /// destructor must know the delete[] it.
- bool MustDeleteBuffer;
+ MemoryBuffer(const MemoryBuffer &); // DO NOT IMPLEMENT
+ MemoryBuffer &operator=(const MemoryBuffer &); // DO NOT IMPLEMENT
protected:
- MemoryBuffer() : MustDeleteBuffer(false) {}
+ MemoryBuffer() {}
void init(const char *BufStart, const char *BufEnd);
- void initCopyOf(const char *BufStart, const char *BufEnd);
public:
virtual ~MemoryBuffer();
@@ -62,24 +65,27 @@ public:
std::string *ErrStr = 0,
int64_t FileSize = -1,
struct stat *FileInfo = 0);
+ static MemoryBuffer *getFile(const char *Filename,
+ std::string *ErrStr = 0,
+ int64_t FileSize = -1,
+ struct stat *FileInfo = 0);
/// getMemBuffer - Open the specified memory range as a MemoryBuffer. Note
/// that EndPtr[0] must be a null byte and be accessible!
static MemoryBuffer *getMemBuffer(StringRef InputData,
- const char *BufferName = "");
+ StringRef BufferName = "");
/// getMemBufferCopy - Open the specified memory range as a MemoryBuffer,
/// copying the contents and taking ownership of it. This has no requirements
/// on EndPtr[0].
static MemoryBuffer *getMemBufferCopy(StringRef InputData,
- const char *BufferName = "");
+ StringRef BufferName = "");
/// getNewMemBuffer - Allocate a new MemoryBuffer of the specified size that
/// is completely initialized to zeros. Note that the caller should
/// initialize the memory allocated by this method. The memory is owned by
/// the MemoryBuffer object.
- static MemoryBuffer *getNewMemBuffer(size_t Size,
- const char *BufferName = "");
+ static MemoryBuffer *getNewMemBuffer(size_t Size, StringRef BufferName = "");
/// getNewUninitMemBuffer - Allocate a new MemoryBuffer of the specified size
/// that is not initialized. Note that the caller should initialize the
@@ -89,7 +95,8 @@ public:
StringRef BufferName = "");
/// getSTDIN - Read all of stdin into a file buffer, and return it.
- static MemoryBuffer *getSTDIN();
+ /// If an error occurs, this returns null and fills in *ErrStr with a reason.
+ static MemoryBuffer *getSTDIN(std::string *ErrStr = 0);
/// getFileOrSTDIN - Open the specified file as a MemoryBuffer, or open stdin
@@ -99,6 +106,10 @@ public:
std::string *ErrStr = 0,
int64_t FileSize = -1,
struct stat *FileInfo = 0);
+ static MemoryBuffer *getFileOrSTDIN(const char *Filename,
+ std::string *ErrStr = 0,
+ int64_t FileSize = -1,
+ struct stat *FileInfo = 0);
};
} // end namespace llvm
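
With the buffer-name parameters now taking StringRef and getSTDIN/getFileOrSTDIN reporting a reason on failure, a caller no longer needs to build its own error text. openOrNull is an illustrative wrapper:

#include "llvm/Support/MemoryBuffer.h"
#include <string>

llvm::MemoryBuffer *openOrNull(const std::string &Path, std::string &Err) {
  llvm::MemoryBuffer *Buf =
      llvm::MemoryBuffer::getFileOrSTDIN(Path.c_str(), &Err);
  // On success the class guarantees *getBufferEnd() reads as '\0', which is
  // what lets lexers scan for a terminator instead of tracking the length.
  return Buf;   // null on failure, with Err holding the reason
}
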
diff --git a/include/llvm/Support/Timer.h b/include/llvm/Support/Timer.h
index 00dfeaa4fadf..f959136f86a0 100644
--- a/include/llvm/Support/Timer.h
+++ b/include/llvm/Support/Timer.h
@@ -150,8 +150,10 @@ public:
/// is primarily used for debugging and for hunting performance problems.
///
struct NamedRegionTimer : public TimeRegion {
- explicit NamedRegionTimer(StringRef Name);
- explicit NamedRegionTimer(StringRef Name, StringRef GroupName);
+ explicit NamedRegionTimer(StringRef Name,
+ bool Enabled = true);
+ explicit NamedRegionTimer(StringRef Name, StringRef GroupName,
+ bool Enabled = true);
};
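
The new Enabled flag means callers can key a NamedRegionTimer off an existing option instead of branching around the timer. TimePasses below stands in for whatever flag the client already has:

#include "llvm/Support/Timer.h"

static bool TimePasses = false;   // hypothetical client flag

void doTimedWork() {
  llvm::NamedRegionTimer T("My Phase", "My Group", TimePasses);
  // ... work to be measured; the region ends when T goes out of scope.
}
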
diff --git a/include/llvm/Support/raw_ostream.h b/include/llvm/Support/raw_ostream.h
index 90eaeea12dd8..bb9a52330d7f 100644
--- a/include/llvm/Support/raw_ostream.h
+++ b/include/llvm/Support/raw_ostream.h
@@ -234,8 +234,8 @@ public:
/// @param bold bold/brighter text, default false
/// @param bg if true change the background, default: change foreground
/// @returns itself so it can be used within << invocations
- virtual raw_ostream &changeColor(enum Colors, bool = false,
- bool = false) { return *this; }
+ virtual raw_ostream &changeColor(enum Colors, bool = false, bool = false) {
+ return *this; }
/// Resets the colors to terminal defaults. Call this when you are done
/// outputting colored text, or before program exit.
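
A small usage sketch of the color hooks (printError is a made-up helper; it assumes the usual Colors enumerators such as raw_ostream::RED and the resetColor() method documented above):

    #include "llvm/Support/raw_ostream.h"
    using namespace llvm;

    void printError(const char *Msg) {
      // changeColor returns the stream itself, so it composes with operator<<.
      errs().changeColor(raw_ostream::RED, /*bold=*/true) << "error: ";
      errs().resetColor();
      errs() << Msg << "\n";
    }
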
diff --git a/include/llvm/SymbolTableListTraits.h b/include/llvm/SymbolTableListTraits.h
index 39953e1a5809..91a4eb99ff0d 100644
--- a/include/llvm/SymbolTableListTraits.h
+++ b/include/llvm/SymbolTableListTraits.h
@@ -47,9 +47,8 @@ public:
/// of instructions, it returns the BasicBlock that owns them.
ItemParentClass *getListOwner() {
typedef iplist<ValueSubClass> ItemParentClass::*Sublist;
- Sublist Sub(ItemParentClass::
- getSublistAccess(static_cast<ValueSubClass*>(0)));
- size_t Offset(size_t(&((ItemParentClass*)0->*Sub)));
+ size_t Offset(size_t(&((ItemParentClass*)0->*ItemParentClass::
+ getSublistAccess(static_cast<ValueSubClass*>(0)))));
iplist<ValueSubClass>* Anchor(static_cast<iplist<ValueSubClass>*>(this));
return reinterpret_cast<ItemParentClass*>(reinterpret_cast<char*>(Anchor)-
Offset);
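
The rewritten getListOwner above recovers the owning object from the address of one of its embedded sublist members: a pointer-to-member applied to a null object pointer yields the member's byte offset, which is then subtracted from the sublist's address. The stand-alone sketch below shows the same idea with made-up Owner/List types; like offsetof, it leans on implementation-defined behavior and is shown only to make the pointer arithmetic explicit.

    #include <cstddef>

    struct List { int Dummy; };
    struct Owner {
      int Other;
      List Instructions;   // the embedded "sublist" member
      static List Owner::*getSublistAccess() { return &Owner::Instructions; }
    };

    // Given a pointer to the embedded List, recover the enclosing Owner.
    Owner *getListOwner(List *Anchor) {
      List Owner::*Sub = Owner::getSublistAccess();
      size_t Offset = size_t(&((Owner*)0->*Sub));  // byte offset of the member
      return reinterpret_cast<Owner*>(reinterpret_cast<char*>(Anchor) - Offset);
    }
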
diff --git a/include/llvm/System/DataTypes.h.cmake b/include/llvm/System/DataTypes.h.cmake
index d9ca273fedb2..9efe75a56ebc 100644
--- a/include/llvm/System/DataTypes.h.cmake
+++ b/include/llvm/System/DataTypes.h.cmake
@@ -109,41 +109,59 @@ typedef unsigned short uint16_t;
typedef signed char int8_t;
typedef unsigned char uint8_t;
typedef signed int ssize_t;
-#define INT8_MAX 127
-#define INT8_MIN -128
-#define UINT8_MAX 255
-#define INT16_MAX 32767
-#define INT16_MIN -32768
-#define UINT16_MAX 65535
-#define INT32_MAX 2147483647
-#define INT32_MIN -2147483648
-#define UINT32_MAX 4294967295U
+#ifndef INT8_MAX
+# define INT8_MAX 127
+#endif
+#ifndef INT8_MIN
+# define INT8_MIN -128
+#endif
+#ifndef UINT8_MAX
+# define UINT8_MAX 255
+#endif
+#ifndef INT16_MAX
+# define INT16_MAX 32767
+#endif
+#ifndef INT16_MIN
+# define INT16_MIN -32768
+#endif
+#ifndef UINT16_MAX
+# define UINT16_MAX 65535
+#endif
+#ifndef INT32_MAX
+# define INT32_MAX 2147483647
+#endif
+#ifndef INT32_MIN
+# define INT32_MIN -2147483648
+#endif
+#ifndef UINT32_MAX
+# define UINT32_MAX 4294967295U
+#endif
/* Certain compatibility updates to VC++ introduce the `cstdint'
* header, which defines the INT*_C macros. On default installs they
* are absent. */
#ifndef INT8_C
-# define INT8_C(C) C
+# define INT8_C(C) C##i8
#endif
#ifndef UINT8_C
-# define UINT8_C(C) C
+# define UINT8_C(C) C##ui8
#endif
#ifndef INT16_C
-# define INT16_C(C) C
+# define INT16_C(C) C##i16
#endif
#ifndef UINT16_C
-# define UINT16_C(C) C
+# define UINT16_C(C) C##ui16
#endif
#ifndef INT32_C
-# define INT32_C(C) C
+# define INT32_C(C) C##i32
#endif
#ifndef UINT32_C
-# define UINT32_C(C) C ## U
+# define UINT32_C(C) C##ui32
#endif
#ifndef INT64_C
-# define INT64_C(C) ((int64_t) C ## LL)
+# define INT64_C(C) C##i64
#endif
#ifndef UINT64_C
-# define UINT64_C(C) ((uint64_t) C ## ULL)
+# define UINT64_C(C) C##ui64
#endif
#endif /* _MSC_VER */
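
For illustration, with the MSVC branch above the INT*_C macros now expand to MSVC's suffixed literals instead of plain (or cast) constants, so 64-bit constants keep their full width; HighMask below is just an example name:

    #include "llvm/System/DataTypes.h"

    // Under _MSC_VER, UINT64_C(0xFFFFFFFF00000000) expands to
    // 0xFFFFFFFF00000000ui64; other hosts use the standard stdint.h macros.
    static const uint64_t HighMask = UINT64_C(0xFFFFFFFF00000000);
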
diff --git a/include/llvm/System/Path.h b/include/llvm/System/Path.h
index d4af478c6aa0..0461769f9710 100644
--- a/include/llvm/System/Path.h
+++ b/include/llvm/System/Path.h
@@ -292,14 +292,6 @@ namespace sys {
/// @name Disk Accessors
/// @{
public:
- /// This function determines if the path name in this object references
- /// the root (top level directory) of the file system. The details of what
- /// is considered the "root" may vary from system to system so this method
- /// will do the necessary checking.
- /// @returns true iff the path name references the root directory.
- /// @brief Determines if the path references the root directory.
- bool isRootDirectory() const;
-
/// This function determines if the path name is absolute, as opposed to
/// relative.
/// @brief Determine if the path is absolute.
diff --git a/include/llvm/Target/Target.td b/include/llvm/Target/Target.td
index ca551e586a51..9a89dc942d6c 100644
--- a/include/llvm/Target/Target.td
+++ b/include/llvm/Target/Target.td
@@ -203,7 +203,6 @@ class Instruction {
bit canFoldAsLoad = 0; // Can this be folded as a simple memory operand?
bit mayLoad = 0; // Is it possible for this inst to read memory?
bit mayStore = 0; // Is it possible for this inst to write memory?
- bit isTwoAddress = 0; // Is this a two address instruction?
bit isConvertibleToThreeAddress = 0; // Can this 2-addr instruction promote?
bit isCommutable = 0; // Is this 3 operand instruction commutable?
bit isTerminator = 0; // Is this part of the terminator for a basic block?
@@ -244,7 +243,7 @@ class Instruction {
string DisableEncoding = "";
/// Target-specific flags. This becomes the TSFlags field in TargetInstrDesc.
- bits<32> TSFlags = 0;
+ bits<64> TSFlags = 0;
}
/// Predicates - These are extra conditionals which are turned into instruction
@@ -397,24 +396,23 @@ class InstrInfo {
}
// Standard Pseudo Instructions.
-let isCodeGenOnly = 1 in {
+// This list must match TargetOpcodes.h and CodeGenTarget.cpp.
+// Only these instructions are allowed in the TargetOpcode namespace.
+let isCodeGenOnly = 1, Namespace = "TargetOpcode" in {
def PHI : Instruction {
let OutOperandList = (outs);
let InOperandList = (ins variable_ops);
let AsmString = "PHINODE";
- let Namespace = "TargetOpcode";
}
def INLINEASM : Instruction {
let OutOperandList = (outs);
let InOperandList = (ins variable_ops);
let AsmString = "";
- let Namespace = "TargetOpcode";
}
def DBG_LABEL : Instruction {
let OutOperandList = (outs);
let InOperandList = (ins i32imm:$id);
let AsmString = "";
- let Namespace = "TargetOpcode";
let hasCtrlDep = 1;
let isNotDuplicable = 1;
}
@@ -422,7 +420,6 @@ def EH_LABEL : Instruction {
let OutOperandList = (outs);
let InOperandList = (ins i32imm:$id);
let AsmString = "";
- let Namespace = "TargetOpcode";
let hasCtrlDep = 1;
let isNotDuplicable = 1;
}
@@ -430,7 +427,6 @@ def GC_LABEL : Instruction {
let OutOperandList = (outs);
let InOperandList = (ins i32imm:$id);
let AsmString = "";
- let Namespace = "TargetOpcode";
let hasCtrlDep = 1;
let isNotDuplicable = 1;
}
@@ -438,21 +434,18 @@ def KILL : Instruction {
let OutOperandList = (outs);
let InOperandList = (ins variable_ops);
let AsmString = "";
- let Namespace = "TargetOpcode";
let neverHasSideEffects = 1;
}
def EXTRACT_SUBREG : Instruction {
let OutOperandList = (outs unknown:$dst);
let InOperandList = (ins unknown:$supersrc, i32imm:$subidx);
let AsmString = "";
- let Namespace = "TargetOpcode";
let neverHasSideEffects = 1;
}
def INSERT_SUBREG : Instruction {
let OutOperandList = (outs unknown:$dst);
let InOperandList = (ins unknown:$supersrc, unknown:$subsrc, i32imm:$subidx);
let AsmString = "";
- let Namespace = "TargetOpcode";
let neverHasSideEffects = 1;
let Constraints = "$supersrc = $dst";
}
@@ -460,7 +453,6 @@ def IMPLICIT_DEF : Instruction {
let OutOperandList = (outs unknown:$dst);
let InOperandList = (ins);
let AsmString = "";
- let Namespace = "TargetOpcode";
let neverHasSideEffects = 1;
let isReMaterializable = 1;
let isAsCheapAsAMove = 1;
@@ -469,14 +461,12 @@ def SUBREG_TO_REG : Instruction {
let OutOperandList = (outs unknown:$dst);
let InOperandList = (ins unknown:$implsrc, unknown:$subsrc, i32imm:$subidx);
let AsmString = "";
- let Namespace = "TargetOpcode";
let neverHasSideEffects = 1;
}
def COPY_TO_REGCLASS : Instruction {
let OutOperandList = (outs unknown:$dst);
let InOperandList = (ins unknown:$src, i32imm:$regclass);
let AsmString = "";
- let Namespace = "TargetOpcode";
let neverHasSideEffects = 1;
let isAsCheapAsAMove = 1;
}
@@ -484,15 +474,19 @@ def DBG_VALUE : Instruction {
let OutOperandList = (outs);
let InOperandList = (ins variable_ops);
let AsmString = "DBG_VALUE";
- let Namespace = "TargetOpcode";
let isAsCheapAsAMove = 1;
}
-
def REG_SEQUENCE : Instruction {
let OutOperandList = (outs unknown:$dst);
let InOperandList = (ins variable_ops);
let AsmString = "";
- let Namespace = "TargetOpcode";
+ let neverHasSideEffects = 1;
+ let isAsCheapAsAMove = 1;
+}
+def COPY : Instruction {
+ let OutOperandList = (outs unknown:$dst);
+ let InOperandList = (ins unknown:$src);
+ let AsmString = "";
let neverHasSideEffects = 1;
let isAsCheapAsAMove = 1;
}
diff --git a/include/llvm/Target/TargetAsmParser.h b/include/llvm/Target/TargetAsmParser.h
index 85315c155d8a..dc2b2364e595 100644
--- a/include/llvm/Target/TargetAsmParser.h
+++ b/include/llvm/Target/TargetAsmParser.h
@@ -10,6 +10,8 @@
#ifndef LLVM_TARGET_TARGETPARSER_H
#define LLVM_TARGET_TARGETPARSER_H
+#include "llvm/MC/MCParser/MCAsmParserExtension.h"
+
namespace llvm {
class MCInst;
class StringRef;
@@ -20,7 +22,7 @@ class MCParsedAsmOperand;
template <typename T> class SmallVectorImpl;
/// TargetAsmParser - Generic interface to target specific assembly parsers.
-class TargetAsmParser {
+class TargetAsmParser : public MCAsmParserExtension {
TargetAsmParser(const TargetAsmParser &); // DO NOT IMPLEMENT
void operator=(const TargetAsmParser &); // DO NOT IMPLEMENT
protected: // Can only create subclasses.
diff --git a/include/llvm/Target/TargetCallingConv.h b/include/llvm/Target/TargetCallingConv.h
new file mode 100644
index 000000000000..f368a2e38c42
--- /dev/null
+++ b/include/llvm/Target/TargetCallingConv.h
@@ -0,0 +1,142 @@
+//===-- llvm/Target/TargetCallingConv.h - Calling Convention ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines types for working with calling-convention information.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TARGET_TARGETCALLINGCONV_H
+#define LLVM_TARGET_TARGETCALLINGCONV_H
+
+namespace llvm {
+
+namespace ISD {
+ struct ArgFlagsTy {
+ private:
+ static const uint64_t NoFlagSet = 0ULL;
+ static const uint64_t ZExt = 1ULL<<0; ///< Zero extended
+ static const uint64_t ZExtOffs = 0;
+ static const uint64_t SExt = 1ULL<<1; ///< Sign extended
+ static const uint64_t SExtOffs = 1;
+ static const uint64_t InReg = 1ULL<<2; ///< Passed in register
+ static const uint64_t InRegOffs = 2;
+ static const uint64_t SRet = 1ULL<<3; ///< Hidden struct-ret ptr
+ static const uint64_t SRetOffs = 3;
+ static const uint64_t ByVal = 1ULL<<4; ///< Struct passed by value
+ static const uint64_t ByValOffs = 4;
+ static const uint64_t Nest = 1ULL<<5; ///< Nested fn static chain
+ static const uint64_t NestOffs = 5;
+ static const uint64_t ByValAlign = 0xFULL << 6; //< Struct alignment
+ static const uint64_t ByValAlignOffs = 6;
+ static const uint64_t Split = 1ULL << 10;
+ static const uint64_t SplitOffs = 10;
+ static const uint64_t OrigAlign = 0x1FULL<<27;
+ static const uint64_t OrigAlignOffs = 27;
+ static const uint64_t ByValSize = 0xffffffffULL << 32; //< Struct size
+ static const uint64_t ByValSizeOffs = 32;
+
+ static const uint64_t One = 1ULL; //< 1 of this type, for shifts
+
+ uint64_t Flags;
+ public:
+ ArgFlagsTy() : Flags(0) { }
+
+ bool isZExt() const { return Flags & ZExt; }
+ void setZExt() { Flags |= One << ZExtOffs; }
+
+ bool isSExt() const { return Flags & SExt; }
+ void setSExt() { Flags |= One << SExtOffs; }
+
+ bool isInReg() const { return Flags & InReg; }
+ void setInReg() { Flags |= One << InRegOffs; }
+
+ bool isSRet() const { return Flags & SRet; }
+ void setSRet() { Flags |= One << SRetOffs; }
+
+ bool isByVal() const { return Flags & ByVal; }
+ void setByVal() { Flags |= One << ByValOffs; }
+
+ bool isNest() const { return Flags & Nest; }
+ void setNest() { Flags |= One << NestOffs; }
+
+ unsigned getByValAlign() const {
+ return (unsigned)
+ ((One << ((Flags & ByValAlign) >> ByValAlignOffs)) / 2);
+ }
+ void setByValAlign(unsigned A) {
+ Flags = (Flags & ~ByValAlign) |
+ (uint64_t(Log2_32(A) + 1) << ByValAlignOffs);
+ }
+
+ bool isSplit() const { return Flags & Split; }
+ void setSplit() { Flags |= One << SplitOffs; }
+
+ unsigned getOrigAlign() const {
+ return (unsigned)
+ ((One << ((Flags & OrigAlign) >> OrigAlignOffs)) / 2);
+ }
+ void setOrigAlign(unsigned A) {
+ Flags = (Flags & ~OrigAlign) |
+ (uint64_t(Log2_32(A) + 1) << OrigAlignOffs);
+ }
+
+ unsigned getByValSize() const {
+ return (unsigned)((Flags & ByValSize) >> ByValSizeOffs);
+ }
+ void setByValSize(unsigned S) {
+ Flags = (Flags & ~ByValSize) | (uint64_t(S) << ByValSizeOffs);
+ }
+
+ /// getArgFlagsString - Returns the flags as a string, eg: "zext align:4".
+ std::string getArgFlagsString();
+
+ /// getRawBits - Represent the flags as a bunch of bits.
+ uint64_t getRawBits() const { return Flags; }
+ };
+
+ /// InputArg - This struct carries flags and type information about a
+ /// single incoming (formal) argument or incoming (from the perspective
+ /// of the caller) return value virtual register.
+ ///
+ struct InputArg {
+ ArgFlagsTy Flags;
+ EVT VT;
+ bool Used;
+
+ InputArg() : VT(MVT::Other), Used(false) {}
+ InputArg(ArgFlagsTy flags, EVT vt, bool used)
+ : Flags(flags), VT(vt), Used(used) {
+ assert(VT.isSimple() &&
+ "InputArg value type must be Simple!");
+ }
+ };
+
+ /// OutputArg - This struct carries flags and a value for a
+ /// single outgoing (actual) argument or outgoing (from the perspective
+ /// of the caller) return value virtual register.
+ ///
+ struct OutputArg {
+ ArgFlagsTy Flags;
+ EVT VT;
+
+    /// IsFixed - Is this a "fixed" value, i.e. not passed through a vararg "...".
+ bool IsFixed;
+
+ OutputArg() : IsFixed(false) {}
+ OutputArg(ArgFlagsTy flags, EVT vt, bool isfixed)
+ : Flags(flags), VT(vt), IsFixed(isfixed) {
+ assert(VT.isSimple() &&
+ "OutputArg value type must be Simple!");
+ }
+ };
+}
+
+} // end llvm namespace
+
+#endif
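
Note that the alignment fields above are not stored directly: setByValAlign stores Log2_32(A) + 1 in a four-bit field, and getByValAlign reconstructs the value as (1 << stored) / 2, so an unset field decodes to 0. A small round-trip sketch (argFlagsExample is illustrative only; in-tree the header is reached through TargetLowering.h rather than included on its own):

    #include "llvm/Target/TargetLowering.h"  // pulls in TargetCallingConv.h
    #include <cassert>

    void argFlagsExample() {
      llvm::ISD::ArgFlagsTy Flags;
      Flags.setByVal();
      Flags.setByValAlign(16);               // stored as Log2_32(16) + 1 == 5
      Flags.setByValSize(64);
      assert(Flags.isByVal());
      assert(Flags.getByValAlign() == 16);   // (1 << 5) / 2 == 16
      assert(Flags.getByValSize() == 64);
    }
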
diff --git a/include/llvm/Target/TargetInstrDesc.h b/include/llvm/Target/TargetInstrDesc.h
index adc37e16e45f..8f0a6cb1a68e 100644
--- a/include/llvm/Target/TargetInstrDesc.h
+++ b/include/llvm/Target/TargetInstrDesc.h
@@ -15,6 +15,8 @@
#ifndef LLVM_TARGET_TARGETINSTRDESC_H
#define LLVM_TARGET_TARGETINSTRDESC_H
+#include "llvm/System/DataTypes.h"
+
namespace llvm {
class TargetRegisterClass;
@@ -53,7 +55,7 @@ public:
///
/// NOTE: This member should be considered to be private, all access should go
/// through "getRegClass(TRI)" below.
- unsigned short RegClass;
+ short RegClass;
/// Flags - These are flags from the TOI::OperandFlags enum.
unsigned short Flags;
@@ -131,7 +133,7 @@ public:
unsigned short SchedClass; // enum identifying instr sched class
const char * Name; // Name of the instruction record in td file
unsigned Flags; // Flags identifying machine instr class
- unsigned TSFlags; // Target Specific Flag values
+ uint64_t TSFlags; // Target Specific Flag values
const unsigned *ImplicitUses; // Registers implicitly read by this instr
const unsigned *ImplicitDefs; // Registers implicitly defined by this instr
const TargetRegisterClass **RCBarriers; // Reg classes completely "clobbered"
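
With TSFlags widened to 64 bits (here and in Target.td above), targets have room for encoding bits beyond position 31. The sketch below shows the usual mask-and-shift pattern; MyTargetII and its field layout are hypothetical, not taken from any in-tree target:

    #include "llvm/System/DataTypes.h"

    namespace MyTargetII {
      // Hypothetical layout; fields at or above bit 32 are why TSFlags is uint64_t.
      static const uint64_t FormShift   = 0;
      static const uint64_t FormMask    = 0x3fULL << FormShift;
      static const uint64_t DomainShift = 40;
      static const uint64_t DomainMask  = 0x3ULL  << DomainShift;
    }

    static inline unsigned getFormat(uint64_t TSFlags) {
      return unsigned((TSFlags & MyTargetII::FormMask) >> MyTargetII::FormShift);
    }
    static inline unsigned getDomain(uint64_t TSFlags) {
      return unsigned((TSFlags & MyTargetII::DomainMask) >> MyTargetII::DomainShift);
    }
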
diff --git a/include/llvm/Target/TargetInstrInfo.h b/include/llvm/Target/TargetInstrInfo.h
index 2e5697e46ef9..6e6991432052 100644
--- a/include/llvm/Target/TargetInstrInfo.h
+++ b/include/llvm/Target/TargetInstrInfo.h
@@ -20,12 +20,14 @@
namespace llvm {
class CalleeSavedInfo;
+class InstrItineraryData;
class LiveVariables;
class MCAsmInfo;
class MachineMemOperand;
class MDNode;
class MCInst;
class SDNode;
+class ScheduleHazardRecognizer;
class SelectionDAG;
class TargetRegisterClass;
class TargetRegisterInfo;
@@ -120,10 +122,6 @@ public:
SrcReg == DstReg)
return true;
- if (MI.getOpcode() == TargetOpcode::EXTRACT_SUBREG &&
- MI.getOperand(0).getReg() == MI.getOperand(1).getReg())
- return true;
-
if ((MI.getOpcode() == TargetOpcode::INSERT_SUBREG ||
MI.getOpcode() == TargetOpcode::SUBREG_TO_REG) &&
MI.getOperand(0).getReg() == MI.getOperand(2).getReg())
@@ -194,11 +192,22 @@ public:
/// reMaterialize - Re-issue the specified 'original' instruction at the
/// specific location targeting a new destination register.
+ /// The register in Orig->getOperand(0).getReg() will be substituted by
+ /// DestReg:SubIdx. Any existing subreg index is preserved or composed with
+ /// SubIdx.
virtual void reMaterialize(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
unsigned DestReg, unsigned SubIdx,
const MachineInstr *Orig,
- const TargetRegisterInfo *TRI) const = 0;
+ const TargetRegisterInfo &TRI) const = 0;
+
+ /// scheduleTwoAddrSource - Schedule the copy / re-mat of the source of the
+  /// two-address instruction inserted by the two-address pass.
+ virtual void scheduleTwoAddrSource(MachineInstr *SrcMI,
+ MachineInstr *UseMI,
+ const TargetRegisterInfo &TRI) const {
+ // Do nothing.
+ }
/// duplicate - Create a duplicate of the Orig instruction in MF. This is like
/// MachineFunction::CloneMachineInstr(), but the target may update operands
@@ -224,23 +233,19 @@ public:
return 0;
}
- /// commuteInstruction - If a target has any instructions that are commutable,
- /// but require converting to a different instruction or making non-trivial
- /// changes to commute them, this method can overloaded to do this. The
- /// default implementation of this method simply swaps the first two operands
- /// of MI and returns it.
- ///
- /// If a target wants to make more aggressive changes, they can construct and
- /// return a new machine instruction. If an instruction cannot commute, it
- /// can also return null.
- ///
- /// If NewMI is true, then a new machine instruction must be created.
- ///
+  /// commuteInstruction - If a target has any instructions that are
+  /// commutable but require converting to different instructions or making
+  /// non-trivial changes to commute them, this method can be overloaded to do
+  /// that. The default implementation simply swaps the commutable operands.
+  /// If NewMI is false, MI is modified in place and returned; otherwise, a
+  /// new machine instruction is created and returned. Do not call this
+  /// method for a non-commutable instruction; even for commutable instructions
+  /// it may fail and return null in some cases.
virtual MachineInstr *commuteInstruction(MachineInstr *MI,
bool NewMI = false) const = 0;
/// findCommutedOpIndices - If the specified MI is commutable, return the two
- /// operand indices that would swap value. Return true if the instruction
+ /// operand indices that would swap value. Return false if the instruction
/// is not in a form which this routine understands.
virtual bool findCommutedOpIndices(MachineInstr *MI, unsigned &SrcOpIdx1,
unsigned &SrcOpIdx2) const = 0;
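
A sketch of how a client might combine the two hooks under the semantics documented above (tryCommute is a hypothetical helper; TII is assumed to point at a valid TargetInstrInfo):

    #include "llvm/CodeGen/MachineInstr.h"
    #include "llvm/Target/TargetInstrInfo.h"
    using namespace llvm;

    // Commute MI in place if the target understands it; otherwise return MI unchanged.
    MachineInstr *tryCommute(const TargetInstrInfo *TII, MachineInstr *MI) {
      unsigned Idx1, Idx2;
      if (!TII->findCommutedOpIndices(MI, Idx1, Idx2))
        return MI;                                   // not a form we understand
      // NewMI == false: modify MI in place; the call may still fail and return null.
      if (MachineInstr *Commuted = TII->commuteInstruction(MI, false))
        return Commuted;
      return MI;
    }
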
@@ -302,25 +307,60 @@ public:
/// branch to analyze. At least this much must be implemented, else tail
/// merging needs to be disabled.
virtual unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
- MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond) const {
+ MachineBasicBlock *FBB,
+ const SmallVectorImpl<MachineOperand> &Cond,
+ DebugLoc DL) const {
assert(0 && "Target didn't implement TargetInstrInfo::InsertBranch!");
return 0;
}
+
+  /// ReplaceTailWithBranchTo - Delete the instruction Tail and everything
+ /// after it, replacing it with an unconditional branch to NewDest. This is
+ /// used by the tail merging pass.
+ virtual void ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
+ MachineBasicBlock *NewDest) const = 0;
+
+ /// isLegalToSplitMBBAt - Return true if it's legal to split the given basic
+  /// block at the specified instruction (i.e. the instruction would be the start
+ /// of a new basic block).
+ virtual bool isLegalToSplitMBBAt(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI) const {
+ return true;
+ }
+
+  /// isProfitableToIfCvt - Return true if it's profitable to if-convert the
+  /// first "NumInstrs" instructions of the specified basic block.
+ virtual
+ bool isProfitableToIfCvt(MachineBasicBlock &MBB, unsigned NumInstrs) const {
+ return false;
+ }
- /// copyRegToReg - Emit instructions to copy between a pair of registers. It
- /// returns false if the target does not how to copy between the specified
- /// registers.
- virtual bool copyRegToReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- unsigned DestReg, unsigned SrcReg,
- const TargetRegisterClass *DestRC,
- const TargetRegisterClass *SrcRC,
- DebugLoc DL) const {
- assert(0 && "Target didn't implement TargetInstrInfo::copyRegToReg!");
+  /// isProfitableToIfCvt - Second variant of isProfitableToIfCvt. This one
+  /// checks for the case where the two basic blocks from the true and false
+  /// paths of an if-then-else (diamond) are predicated on mutually exclusive
+  /// predicates.
+ virtual bool
+ isProfitableToIfCvt(MachineBasicBlock &TMBB, unsigned NumTInstrs,
+ MachineBasicBlock &FMBB, unsigned NumFInstrs) const {
+ return false;
+ }
+
+  /// isProfitableToDupForIfCvt - Return true if it's profitable for the
+  /// if-converter to duplicate a specific number of instructions in the
+  /// specified MBB to enable if-conversion.
+  virtual bool
+  isProfitableToDupForIfCvt(MachineBasicBlock &MBB, unsigned NumInstrs) const {
return false;
}
+ /// copyPhysReg - Emit instructions to copy a pair of physical registers.
+ virtual void copyPhysReg(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI, DebugLoc DL,
+ unsigned DestReg, unsigned SrcReg,
+ bool KillSrc) const {
+ assert(0 && "Target didn't implement TargetInstrInfo::copyPhysReg!");
+ }
+
/// storeRegToStackSlot - Store the specified register of the given register
/// class to the specified stack frame index. The store instruction is to be
/// added to the given machine basic block before the specified machine
@@ -387,19 +427,17 @@ public:
/// foldMemoryOperand - Attempt to fold a load or store of the specified stack
/// slot into the specified machine instruction for the specified operand(s).
/// If this is possible, a new instruction is returned with the specified
- /// operand folded, otherwise NULL is returned. The client is responsible for
- /// removing the old instruction and adding the new one in the instruction
- /// stream.
- MachineInstr* foldMemoryOperand(MachineFunction &MF,
- MachineInstr* MI,
+ /// operand folded, otherwise NULL is returned.
+ /// The new instruction is inserted before MI, and the client is responsible
+ /// for removing the old instruction.
+ MachineInstr* foldMemoryOperand(MachineBasicBlock::iterator MI,
const SmallVectorImpl<unsigned> &Ops,
int FrameIndex) const;
/// foldMemoryOperand - Same as the previous version except it allows folding
/// of any load and store from / to any address, not just from a specific
/// stack slot.
- MachineInstr* foldMemoryOperand(MachineFunction &MF,
- MachineInstr* MI,
+ MachineInstr* foldMemoryOperand(MachineBasicBlock::iterator MI,
const SmallVectorImpl<unsigned> &Ops,
MachineInstr* LoadMI) const;
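
A sketch of calling the stack-slot form above from a spiller-like client (tryFoldReload is a hypothetical helper; Ops carries the operand indices of MI that refer to the register being reloaded, FI its stack slot):

    #include "llvm/ADT/SmallVector.h"
    #include "llvm/CodeGen/MachineBasicBlock.h"
    #include "llvm/Target/TargetInstrInfo.h"
    using namespace llvm;

    // Try to fold a reload from frame index FI into operand OpNo of MI.
    // On success the folded instruction has already been inserted before MI,
    // and the caller is responsible for erasing the old MI.
    MachineInstr *tryFoldReload(const TargetInstrInfo *TII,
                                MachineBasicBlock::iterator MI,
                                unsigned OpNo, int FI) {
      SmallVector<unsigned, 1> Ops;
      Ops.push_back(OpNo);
      return TII->foldMemoryOperand(MI, Ops, FI);
    }
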
@@ -429,9 +467,7 @@ public:
/// folding is possible.
virtual
bool canFoldMemoryOperand(const MachineInstr *MI,
- const SmallVectorImpl<unsigned> &Ops) const {
- return false;
- }
+ const SmallVectorImpl<unsigned> &Ops) const =0;
/// unfoldMemoryOperand - Separate a single instruction which folded a load or
/// a store or a load and a store into two or more instructions. If this is
@@ -548,6 +584,13 @@ public:
return true;
}
+ /// isSchedulingBoundary - Test if the given instruction should be
+ /// considered a scheduling boundary. This primarily includes labels and
+ /// terminators.
+ virtual bool isSchedulingBoundary(const MachineInstr *MI,
+ const MachineBasicBlock *MBB,
+ const MachineFunction &MF) const = 0;
+
/// GetInstSize - Returns the size of the specified Instruction.
///
virtual unsigned GetInstSizeInBytes(const MachineInstr *MI) const {
@@ -564,6 +607,12 @@ public:
/// length.
virtual unsigned getInlineAsmLength(const char *Str,
const MCAsmInfo &MAI) const;
+
+  /// CreateTargetPostRAHazardRecognizer - Allocate and return a hazard
+  /// recognizer to use for this target when scheduling the machine
+  /// instructions after register allocation.
+ virtual ScheduleHazardRecognizer*
+ CreateTargetPostRAHazardRecognizer(const InstrItineraryData&) const = 0;
};
/// TargetInstrInfoImpl - This is the default implementation of
@@ -575,22 +624,32 @@ protected:
TargetInstrInfoImpl(const TargetInstrDesc *desc, unsigned NumOpcodes)
: TargetInstrInfo(desc, NumOpcodes) {}
public:
+ virtual void ReplaceTailWithBranchTo(MachineBasicBlock::iterator OldInst,
+ MachineBasicBlock *NewDest) const;
virtual MachineInstr *commuteInstruction(MachineInstr *MI,
bool NewMI = false) const;
virtual bool findCommutedOpIndices(MachineInstr *MI, unsigned &SrcOpIdx1,
unsigned &SrcOpIdx2) const;
+ virtual bool canFoldMemoryOperand(const MachineInstr *MI,
+ const SmallVectorImpl<unsigned> &Ops) const;
virtual bool PredicateInstruction(MachineInstr *MI,
const SmallVectorImpl<MachineOperand> &Pred) const;
virtual void reMaterialize(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
unsigned DestReg, unsigned SubReg,
const MachineInstr *Orig,
- const TargetRegisterInfo *TRI) const;
+ const TargetRegisterInfo &TRI) const;
virtual MachineInstr *duplicate(MachineInstr *Orig,
MachineFunction &MF) const;
virtual bool produceSameValue(const MachineInstr *MI0,
const MachineInstr *MI1) const;
+ virtual bool isSchedulingBoundary(const MachineInstr *MI,
+ const MachineBasicBlock *MBB,
+ const MachineFunction &MF) const;
virtual unsigned GetFunctionSizeInBytes(const MachineFunction &MF) const;
+
+ virtual ScheduleHazardRecognizer *
+ CreateTargetPostRAHazardRecognizer(const InstrItineraryData&) const;
};
} // End llvm namespace
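
Since copyRegToReg is gone, the copyPhysReg hook above is where targets now emit their register-to-register moves. The sketch below shows the typical shape of such an override written as a free function; HYPO::MOVrr is a hypothetical move opcode, and real targets pick the opcode from the source and destination register classes:

    #include "llvm/CodeGen/MachineBasicBlock.h"
    #include "llvm/CodeGen/MachineInstrBuilder.h"
    #include "llvm/Support/DebugLoc.h"
    #include "llvm/Target/TargetInstrInfo.h"
    using namespace llvm;

    namespace HYPO { enum { MOVrr = 1 }; }  // hypothetical target move opcode

    // Typical body of a copyPhysReg override: one move, kill flag forwarded.
    static void copyPhysRegSketch(const TargetInstrInfo &TII,
                                  MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator MI, DebugLoc DL,
                                  unsigned DestReg, unsigned SrcReg,
                                  bool KillSrc) {
      BuildMI(MBB, MI, DL, TII.get(HYPO::MOVrr), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc));
    }
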
diff --git a/include/llvm/Target/TargetInstrItineraries.h b/include/llvm/Target/TargetInstrItineraries.h
index 3dfa8bc10bfe..39648c233fa8 100644
--- a/include/llvm/Target/TargetInstrItineraries.h
+++ b/include/llvm/Target/TargetInstrItineraries.h
@@ -106,7 +106,8 @@ struct InstrItinerary {
/// Instruction itinerary Data - Itinerary data supplied by a subtarget to be
/// used by a target.
///
-struct InstrItineraryData {
+class InstrItineraryData {
+public:
const InstrStage *Stages; ///< Array of stages selected
const unsigned *OperandCycles; ///< Array of operand cycles selected
const InstrItinerary *Itineratries; ///< Array of itineraries selected
diff --git a/include/llvm/Target/TargetLowering.h b/include/llvm/Target/TargetLowering.h
index 5efebe637e10..2b6e4fa8526d 100644
--- a/include/llvm/Target/TargetLowering.h
+++ b/include/llvm/Target/TargetLowering.h
@@ -24,6 +24,7 @@
#include "llvm/CallingConv.h"
#include "llvm/InlineAsm.h"
+#include "llvm/Attributes.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/ADT/APFloat.h"
@@ -32,6 +33,7 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/DebugLoc.h"
+#include "llvm/Target/TargetCallingConv.h"
#include "llvm/Target/TargetMachine.h"
#include <climits>
#include <map>
@@ -42,6 +44,7 @@ namespace llvm {
class CallInst;
class Function;
class FastISel;
+ class FunctionLoweringInfo;
class MachineBasicBlock;
class MachineFunction;
class MachineFrameInfo;
@@ -114,7 +117,7 @@ public:
/// isSelectExpensive - Return true if the select operation is expensive for
/// this target.
bool isSelectExpensive() const { return SelectIsExpensive; }
-
+
/// isIntDivCheap() - Return true if integer divide is usually cheaper than
/// a sequence of several shifts, adds, and multiplies for this target.
bool isIntDivCheap() const { return IntDivIsCheap; }
@@ -131,10 +134,10 @@ public:
virtual
MVT::SimpleValueType getSetCCResultType(EVT VT) const;
- /// getCmpLibcallReturnType - Return the ValueType for comparison
+ /// getCmpLibcallReturnType - Return the ValueType for comparison
/// libcalls. Comparison libcalls include floating point comparison calls,
/// and Ordered/Unordered check calls on floating point numbers.
- virtual
+ virtual
MVT::SimpleValueType getCmpLibcallReturnType() const;
/// getBooleanContents - For targets without i1 registers, this gives the
@@ -208,7 +211,7 @@ public:
ValueTypeActions[I] = Action;
}
};
-
+
const ValueTypeActionImpl &getValueTypeActions() const {
return ValueTypeActions;
}
@@ -229,7 +232,7 @@ public:
/// returns the integer type to transform to.
EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const {
if (VT.isSimple()) {
- assert((unsigned)VT.getSimpleVT().SimpleTy <
+ assert((unsigned)VT.getSimpleVT().SimpleTy <
array_lengthof(TransformToType));
EVT NVT = TransformToType[VT.getSimpleVT().SimpleTy];
assert(getTypeAction(Context, NVT) != Promote &&
@@ -256,7 +259,7 @@ public:
return EVT::getIntegerVT(Context, VT.getSizeInBits() / 2);
else
// Promote to a power of two size, avoiding multi-step promotion.
- return getTypeAction(Context, NVT) == Promote ?
+ return getTypeAction(Context, NVT) == Promote ?
getTypeToTransformTo(Context, NVT) : NVT;
}
assert(0 && "Unsupported extended type!");
@@ -302,11 +305,11 @@ public:
/// intrinsic will need to map to a MemIntrinsicNode (touches memory). If
/// this is the case, it returns true and stores the intrinsic
/// information into the IntrinsicInfo that was passed to the function.
- struct IntrinsicInfo {
+ struct IntrinsicInfo {
unsigned opc; // target opcode
EVT memVT; // memory VT
const Value* ptrVal; // value representing memory location
- int offset; // offset off of ptrVal
+ int offset; // offset off of ptrVal
unsigned align; // alignment
bool vol; // is volatile?
bool readMem; // reads memory?
@@ -324,7 +327,7 @@ public:
virtual bool isFPImmLegal(const APFloat &Imm, EVT VT) const {
return false;
}
-
+
/// isShuffleMaskLegal - Targets can use this to indicate that they only
/// support *some* VECTOR_SHUFFLE operations, those with specific masks.
/// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
@@ -446,7 +449,7 @@ public:
"Table isn't big enough!");
unsigned Ty = (unsigned)VT.getSimpleVT().SimpleTy;
return (LegalizeAction)(IndexedModeActions[Ty][IdxMode] & 0x0f);
- }
+ }
/// isIndexedStoreLegal - Return true if the specified indexed load is legal
/// on this target.
@@ -492,7 +495,7 @@ public:
assert((VT.isInteger() || VT.isFloatingPoint()) &&
"Cannot autopromote this type, add it with AddPromotedToType.");
-
+
EVT NVT = VT;
do {
NVT = (MVT::SimpleValueType)(NVT.getSimpleVT().SimpleTy+1);
@@ -516,14 +519,14 @@ public:
/// function arguments in the caller parameter area. This is the actual
/// alignment, not its logarithm.
virtual unsigned getByValTypeAlignment(const Type *Ty) const;
-
+
/// getRegisterType - Return the type of registers that this ValueType will
/// eventually require.
EVT getRegisterType(MVT VT) const {
assert((unsigned)VT.SimpleTy < array_lengthof(RegisterTypeForVT));
return RegisterTypeForVT[VT.SimpleTy];
}
-
+
/// getRegisterType - Return the type of registers that this ValueType will
/// eventually require.
EVT getRegisterType(LLVMContext &Context, EVT VT) const {
@@ -606,7 +609,7 @@ public:
/// of the specified type. This is used, for example, in situations where an
/// array copy/move/set is converted to a sequence of store operations. Its
/// use helps to ensure that such replacements don't generate code that causes
- /// an alignment error (trap) on the target machine.
+ /// an alignment error (trap) on the target machine.
/// @brief Determine if the target supports unaligned memory accesses.
virtual bool allowsUnalignedMemoryAccesses(EVT VT) const {
return false;
@@ -637,7 +640,7 @@ public:
MachineFunction &MF) const {
return MVT::Other;
}
-
+
/// usesUnderscoreSetJmp - Determine if we should use _setjmp or setjmp
/// to implement llvm.setjmp.
bool usesUnderscoreSetJmp() const {
@@ -683,17 +686,10 @@ public:
return JumpBufAlignment;
}
- /// getIfCvtBlockLimit - returns the target specific if-conversion block size
- /// limit. Any block whose size is greater should not be predicated.
- unsigned getIfCvtBlockSizeLimit() const {
- return IfCvtBlockSizeLimit;
- }
-
- /// getIfCvtDupBlockLimit - returns the target specific size limit for a
- /// block to be considered for duplication. Any block whose size is greater
- /// should not be duplicated to facilitate its predication.
- unsigned getIfCvtDupBlockSizeLimit() const {
- return IfCvtDupBlockSizeLimit;
+  /// getMinStackArgumentAlignment - Return the minimum stack alignment of an
+ /// argument.
+ unsigned getMinStackArgumentAlignment() const {
+ return MinStackArgumentAlignment;
}
/// getPrefLoopAlignment - return the preferred loop alignment.
@@ -701,7 +697,14 @@ public:
unsigned getPrefLoopAlignment() const {
return PrefLoopAlignment;
}
-
+
+  /// getShouldFoldAtomicFences - Return whether the combiner should fold
+ /// fence MEMBARRIER instructions into the atomic intrinsic instructions.
+ ///
+ bool getShouldFoldAtomicFences() const {
+ return ShouldFoldAtomicFences;
+ }
+
/// getPreIndexedAddressParts - returns true by value, base pointer and
/// offset pointer and addressing mode by reference if the node's address
/// can be legally represented as pre-indexed load / store address.
@@ -711,7 +714,7 @@ public:
SelectionDAG &DAG) const {
return false;
}
-
+
/// getPostIndexedAddressParts - returns true by value, base pointer and
/// offset pointer and addressing mode by reference if this node can be
/// combined with a load / store to form a post-indexed load / store.
@@ -721,12 +724,12 @@ public:
SelectionDAG &DAG) const {
return false;
}
-
+
/// getJumpTableEncoding - Return the entry encoding for a jump table in the
/// current function. The returned value is a member of the
/// MachineJumpTableInfo::JTEntryKind enum.
virtual unsigned getJumpTableEncoding() const;
-
+
virtual const MCExpr *
LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,
const MachineBasicBlock *MBB, unsigned uid,
@@ -734,7 +737,7 @@ public:
assert(0 && "Need to implement this hook if target has custom JTIs");
return 0;
}
-
+
/// getPICJumpTableRelocaBase - Returns relocation base for the given PIC
/// jumptable.
virtual SDValue getPICJumpTableRelocBase(SDValue Table,
@@ -746,7 +749,7 @@ public:
virtual const MCExpr *
getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
unsigned JTI, MCContext &Ctx) const;
-
+
/// isOffsetFoldingLegal - Return true if folding a constant offset
/// with the given GlobalAddress is legal. It is frequently not legal in
/// PIC relocation models.
@@ -755,36 +758,42 @@ public:
/// getFunctionAlignment - Return the Log2 alignment of this function.
virtual unsigned getFunctionAlignment(const Function *) const = 0;
+  /// getStackCookieLocation - Return true if the target stores stack
+  /// protector cookies at a fixed offset in some non-standard address
+  /// space, and if so populate AddressSpace and Offset with the cookie's
+  /// location.
+ virtual bool getStackCookieLocation(unsigned &AddressSpace, unsigned &Offset) const {
+ return false;
+ }
+
//===--------------------------------------------------------------------===//
// TargetLowering Optimization Methods
//
-
+
/// TargetLoweringOpt - A convenience struct that encapsulates a DAG, and two
/// SDValues for returning information from TargetLowering to its clients
- /// that want to combine
+ /// that want to combine
struct TargetLoweringOpt {
SelectionDAG &DAG;
bool LegalTys;
bool LegalOps;
- bool ShrinkOps;
SDValue Old;
SDValue New;
explicit TargetLoweringOpt(SelectionDAG &InDAG,
- bool LT, bool LO,
- bool Shrink = false) :
- DAG(InDAG), LegalTys(LT), LegalOps(LO), ShrinkOps(Shrink) {}
+ bool LT, bool LO) :
+ DAG(InDAG), LegalTys(LT), LegalOps(LO) {}
bool LegalTypes() const { return LegalTys; }
bool LegalOperations() const { return LegalOps; }
-
- bool CombineTo(SDValue O, SDValue N) {
- Old = O;
- New = N;
+
+ bool CombineTo(SDValue O, SDValue N) {
+ Old = O;
+ New = N;
return true;
}
-
- /// ShrinkDemandedConstant - Check to see if the specified operand of the
+
+ /// ShrinkDemandedConstant - Check to see if the specified operand of the
/// specified instruction is a constant integer. If so, check to see if
/// there are any bits set in the constant that are not demanded. If so,
/// shrink the constant and return true.
@@ -797,25 +806,25 @@ public:
bool ShrinkDemandedOp(SDValue Op, unsigned BitWidth, const APInt &Demanded,
DebugLoc dl);
};
-
+
/// SimplifyDemandedBits - Look at Op. At this point, we know that only the
/// DemandedMask bits of the result of Op are ever used downstream. If we can
/// use this information to simplify Op, create a new simplified DAG node and
- /// return true, returning the original and new nodes in Old and New.
- /// Otherwise, analyze the expression and return a mask of KnownOne and
- /// KnownZero bits for the expression (used to simplify the caller).
- /// The KnownZero/One bits may only be accurate for those bits in the
+ /// return true, returning the original and new nodes in Old and New.
+ /// Otherwise, analyze the expression and return a mask of KnownOne and
+ /// KnownZero bits for the expression (used to simplify the caller).
+ /// The KnownZero/One bits may only be accurate for those bits in the
/// DemandedMask.
- bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedMask,
+ bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedMask,
APInt &KnownZero, APInt &KnownOne,
TargetLoweringOpt &TLO, unsigned Depth = 0) const;
-
+
/// computeMaskedBitsForTargetNode - Determine which of the bits specified in
- /// Mask are known to be either zero or one and return them in the
+ /// Mask are known to be either zero or one and return them in the
/// KnownZero/KnownOne bitsets.
virtual void computeMaskedBitsForTargetNode(const SDValue Op,
const APInt &Mask,
- APInt &KnownZero,
+ APInt &KnownZero,
APInt &KnownOne,
const SelectionDAG &DAG,
unsigned Depth = 0) const;
@@ -825,7 +834,7 @@ public:
/// DAG Combiner.
virtual unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
unsigned Depth = 0) const;
-
+
struct DAGCombinerInfo {
void *DC; // The DAG Combiner object.
bool BeforeLegalize;
@@ -833,15 +842,15 @@ public:
bool CalledByLegalizer;
public:
SelectionDAG &DAG;
-
+
DAGCombinerInfo(SelectionDAG &dag, bool bl, bool blo, bool cl, void *dc)
: DC(dc), BeforeLegalize(bl), BeforeLegalizeOps(blo),
CalledByLegalizer(cl), DAG(dag) {}
-
+
bool isBeforeLegalize() const { return BeforeLegalize; }
bool isBeforeLegalizeOps() const { return BeforeLegalizeOps; }
bool isCalledByLegalizer() const { return CalledByLegalizer; }
-
+
void AddToWorklist(SDNode *N);
SDValue CombineTo(SDNode *N, const std::vector<SDValue> &To,
bool AddTo = true);
@@ -851,7 +860,7 @@ public:
void CommitTargetLoweringOpt(const TargetLoweringOpt &TLO);
};
- /// SimplifySetCC - Try to simplify a setcc built with the specified operands
+ /// SimplifySetCC - Try to simplify a setcc built with the specified operands
/// and cc. If it is unable to simplify it, return a null SDValue.
SDValue SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
ISD::CondCode Cond, bool foldBooleans,
@@ -892,7 +901,7 @@ public:
virtual bool IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const {
return false;
}
-
+
//===--------------------------------------------------------------------===//
// TargetLowering Configuration Methods - These methods should be invoked by
// the derived class constructor to configure this object for the target.
@@ -932,7 +941,7 @@ protected:
void setStackPointerRegisterToSaveRestore(unsigned R) {
StackPointerRegisterToSaveRestore = R;
}
-
+
/// setExceptionPointerRegister - If set to a physical register, this sets
/// the register that receives the exception address on entry to a landing
/// pad.
@@ -955,12 +964,12 @@ protected:
/// expensive, and if possible, should be replaced by an alternate sequence
/// of instructions not containing an integer divide.
void setIntDivIsCheap(bool isCheap = true) { IntDivIsCheap = isCheap; }
-
+
/// setPow2DivIsCheap - Tells the code generator that it shouldn't generate
/// srl/add/sra for a signed divide by power of two, and let the target handle
/// it.
void setPow2DivIsCheap(bool isCheap = true) { Pow2DivIsCheap = isCheap; }
-
+
/// addRegisterClass - Add the specified register class as an available
/// regclass for the specified value type. This indicates the selector can
/// handle values of that class natively.
@@ -983,7 +992,7 @@ protected:
assert(Op < array_lengthof(OpActions[0]) && "Table isn't big enough!");
OpActions[(unsigned)VT.SimpleTy][Op] = (uint8_t)Action;
}
-
+
/// setLoadExtAction - Indicate that the specified load with extension does
/// not work with the specified type and indicate what to do about it.
void setLoadExtAction(unsigned ExtType, MVT VT,
@@ -993,7 +1002,7 @@ protected:
"Table isn't big enough!");
LoadExtActions[VT.SimpleTy][ExtType] = (uint8_t)Action;
}
-
+
/// setTruncStoreAction - Indicate that the specified truncating store does
/// not work with the specified type and indicate what to do about it.
void setTruncStoreAction(MVT ValVT, MVT MemVT,
@@ -1018,7 +1027,7 @@ protected:
IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] &= ~0xf0;
IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] |= ((uint8_t)Action) <<4;
}
-
+
/// setIndexedStoreAction - Indicate that the specified indexed store does or
/// does not work with the specified type and indicate what to do about
/// it. NOTE: All indexed mode stores are initialized to Expand in
@@ -1033,7 +1042,7 @@ protected:
IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] &= ~0x0f;
IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] |= ((uint8_t)Action);
}
-
+
/// setCondCodeAction - Indicate that the specified condition code is or isn't
/// supported on the target and indicate what to do about it.
void setCondCodeAction(ISD::CondCode CC, MVT VT,
@@ -1060,7 +1069,7 @@ protected:
assert(unsigned(NT >> 3) < array_lengthof(TargetDAGCombineArray));
TargetDAGCombineArray[NT >> 3] |= 1 << (NT&7);
}
-
+
/// setJumpBufSize - Set the target's required jmp_buf buffer size (in
/// bytes); default is 200
void setJumpBufSize(unsigned Size) {
@@ -1073,25 +1082,24 @@ protected:
JumpBufAlignment = Align;
}
- /// setIfCvtBlockSizeLimit - Set the target's if-conversion block size
- /// limit (in number of instructions); default is 2.
- void setIfCvtBlockSizeLimit(unsigned Limit) {
- IfCvtBlockSizeLimit = Limit;
- }
-
- /// setIfCvtDupBlockSizeLimit - Set the target's block size limit (in number
- /// of instructions) to be considered for code duplication during
- /// if-conversion; default is 2.
- void setIfCvtDupBlockSizeLimit(unsigned Limit) {
- IfCvtDupBlockSizeLimit = Limit;
- }
-
/// setPrefLoopAlignment - Set the target's preferred loop alignment. The
/// default of zero means the target does not care about loop alignment.
void setPrefLoopAlignment(unsigned Align) {
PrefLoopAlignment = Align;
}
-
+
+ /// setMinStackArgumentAlignment - Set the minimum stack alignment of an
+ /// argument.
+ void setMinStackArgumentAlignment(unsigned Align) {
+ MinStackArgumentAlignment = Align;
+ }
+
+ /// setShouldFoldAtomicFences - Set if the target's implementation of the
+ /// atomic operation intrinsics includes locking. Default is false.
+ void setShouldFoldAtomicFences(bool fold) {
+ ShouldFoldAtomicFences = fold;
+ }
+
public:
//===--------------------------------------------------------------------===//
// Lowering methods - These methods must be implemented by targets so that
@@ -1151,6 +1159,7 @@ public:
LowerCall(SDValue Chain, SDValue Callee,
CallingConv::ID CallConv, bool isVarArg, bool &isTailCall,
const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const {
@@ -1163,9 +1172,8 @@ public:
/// registers. If false is returned, an sret-demotion is performed.
///
virtual bool CanLowerReturn(CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<EVT> &OutTys,
- const SmallVectorImpl<ISD::ArgFlagsTy> &ArgsFlags,
- SelectionDAG &DAG) const
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ LLVMContext &Context) const
{
// Return true by default to get preexisting behavior.
return true;
@@ -1179,6 +1187,7 @@ public:
virtual SDValue
LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
DebugLoc dl, SelectionDAG &DAG) const {
assert(0 && "Not Implemented");
return SDValue(); // this is here to silence compiler errors
@@ -1200,7 +1209,7 @@ public:
SmallVectorImpl<SDValue> &Results,
SelectionDAG &DAG) const;
- /// LowerOperation - This callback is invoked for operations that are
+ /// LowerOperation - This callback is invoked for operations that are
/// unsupported by the target, which are registered to use 'custom' lowering,
/// and whose defined values are all legal.
/// If the target has no operations that require custom lowering, it need not
@@ -1227,23 +1236,14 @@ public:
/// createFastISel - This method returns a target specific FastISel object,
/// or null if the target does not support "fast" ISel.
- virtual FastISel *
- createFastISel(MachineFunction &,
- DenseMap<const Value *, unsigned> &,
- DenseMap<const BasicBlock *, MachineBasicBlock *> &,
- DenseMap<const AllocaInst *, int> &,
- std::vector<std::pair<MachineInstr*, unsigned> > &
-#ifndef NDEBUG
- , SmallSet<const Instruction *, 8> &CatchInfoLost
-#endif
- ) const {
+ virtual FastISel *createFastISel(FunctionLoweringInfo &funcInfo) const {
return 0;
}
//===--------------------------------------------------------------------===//
// Inline Asm Support hooks
//
-
+
/// ExpandInlineAsm - This hook allows the target to expand an inline asm
/// call to be explicit llvm code if it wants to. This is useful for
/// turning simple inline asms into LLVM intrinsics, which gives the
@@ -1251,7 +1251,7 @@ public:
virtual bool ExpandInlineAsm(CallInst *CI) const {
return false;
}
-
+
enum ConstraintType {
C_Register, // Constraint represents specific register(s).
C_RegisterClass, // Constraint represents any of register(s) in class.
@@ -1259,7 +1259,7 @@ public:
C_Other, // Something else.
C_Unknown // Unsupported constraint.
};
-
+
/// AsmOperandInfo - This contains information for each constraint that we are
/// lowering.
struct AsmOperandInfo : public InlineAsm::ConstraintInfo {
@@ -1271,25 +1271,25 @@ public:
/// ConstraintType - Information about the constraint code, e.g. Register,
/// RegisterClass, Memory, Other, Unknown.
TargetLowering::ConstraintType ConstraintType;
-
+
/// CallOperandVal - If this is the result output operand or a
/// clobber, this is null, otherwise it is the incoming operand to the
/// CallInst. This gets modified as the asm is processed.
Value *CallOperandVal;
-
+
/// ConstraintVT - The ValueType for the operand value.
EVT ConstraintVT;
-
+
/// isMatchingInputConstraint - Return true if this is an input operand that
/// is a matching constraint like "4".
bool isMatchingInputConstraint() const;
-
+
/// getMatchedOperand - If this is an input matching constraint, this method
/// returns the output operand it matches.
unsigned getMatchedOperand() const;
-
+
AsmOperandInfo(const InlineAsm::ConstraintInfo &info)
- : InlineAsm::ConstraintInfo(info),
+ : InlineAsm::ConstraintInfo(info),
ConstraintType(TargetLowering::C_Unknown),
CallOperandVal(0), ConstraintVT(MVT::Other) {
}
@@ -1299,21 +1299,19 @@ public:
/// type to use for the specific AsmOperandInfo, setting
/// OpInfo.ConstraintCode and OpInfo.ConstraintType. If the actual operand
/// being passed in is available, it can be passed in as Op, otherwise an
- /// empty SDValue can be passed. If hasMemory is true it means one of the asm
- /// constraint of the inline asm instruction being processed is 'm'.
+ /// empty SDValue can be passed.
virtual void ComputeConstraintToUse(AsmOperandInfo &OpInfo,
SDValue Op,
- bool hasMemory,
SelectionDAG *DAG = 0) const;
-
+
/// getConstraintType - Given a constraint, return the type of constraint it
/// is for this target.
virtual ConstraintType getConstraintType(const std::string &Constraint) const;
-
+
/// getRegClassForInlineAsmConstraint - Given a constraint letter (e.g. "r"),
/// return a list of registers that can be used to satisfy the constraint.
/// This should only be used for C_RegisterClass constraints.
- virtual std::vector<unsigned>
+ virtual std::vector<unsigned>
getRegClassForInlineAsmConstraint(const std::string &Constraint,
EVT VT) const;
@@ -1327,29 +1325,26 @@ public:
///
/// This should only be used for C_Register constraints. On error,
/// this returns a register number of 0 and a null register class pointer.
- virtual std::pair<unsigned, const TargetRegisterClass*>
+ virtual std::pair<unsigned, const TargetRegisterClass*>
getRegForInlineAsmConstraint(const std::string &Constraint,
EVT VT) const;
-
+
/// LowerXConstraint - try to replace an X constraint, which matches anything,
/// with another that has more specific requirements based on the type of the
/// corresponding operand. This returns null if there is no replacement to
/// make.
virtual const char *LowerXConstraint(EVT ConstraintVT) const;
-
+
/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
- /// vector. If it is invalid, don't add anything to Ops. If hasMemory is true
- /// it means one of the asm constraint of the inline asm instruction being
- /// processed is 'm'.
+ /// vector. If it is invalid, don't add anything to Ops.
virtual void LowerAsmOperandForConstraint(SDValue Op, char ConstraintLetter,
- bool hasMemory,
std::vector<SDValue> &Ops,
SelectionDAG &DAG) const;
-
+
//===--------------------------------------------------------------------===//
// Instruction Emitting Hooks
//
-
+
// EmitInstrWithCustomInserter - This method should be implemented by targets
// that mark instructions with the 'usesCustomInserter' flag. These
// instructions are special in various ways, which require special support to
@@ -1378,7 +1373,7 @@ public:
int64_t Scale;
AddrMode() : BaseGV(0), BaseOffs(0), HasBaseReg(false), Scale(0) {}
};
-
+
/// isLegalAddressingMode - Return true if the addressing mode represented by
/// AM is legal for this target, for a load/store of the specified type.
/// The type may be VoidTy, in which case only return true if the addressing
@@ -1431,9 +1426,9 @@ public:
//===--------------------------------------------------------------------===//
// Div utility functions
//
- SDValue BuildSDIV(SDNode *N, SelectionDAG &DAG,
+ SDValue BuildSDIV(SDNode *N, SelectionDAG &DAG,
std::vector<SDNode*>* Created) const;
- SDValue BuildUDIV(SDNode *N, SelectionDAG &DAG,
+ SDValue BuildUDIV(SDNode *N, SelectionDAG &DAG,
std::vector<SDNode*>* Created) const;
@@ -1470,7 +1465,7 @@ public:
void setLibcallCallingConv(RTLIB::Libcall Call, CallingConv::ID CC) {
LibcallCallingConvs[Call] = CC;
}
-
+
/// getLibcallCallingConv - Get the CallingConv that should be used for the
/// specified libcall.
CallingConv::ID getLibcallCallingConv(RTLIB::Libcall Call) const {
@@ -1499,12 +1494,12 @@ private:
/// a real cost model is in place. If we ever optimize for size, this will be
/// set to true unconditionally.
bool IntDivIsCheap;
-
+
/// Pow2DivIsCheap - Tells the code generator that it shouldn't generate
/// srl/add/sra for a signed divide by power of two, and let the target handle
/// it.
bool Pow2DivIsCheap;
-
+
/// UseUnderscoreSetJmp - This target prefers to use _setjmp to implement
/// llvm.setjmp. Defaults to false.
bool UseUnderscoreSetJmp;
@@ -1524,26 +1519,28 @@ private:
/// SchedPreferenceInfo - The target scheduling preference: shortest possible
/// total cycles or lowest register usage.
Sched::Preference SchedPreferenceInfo;
-
+
/// JumpBufSize - The size, in bytes, of the target's jmp_buf buffers
unsigned JumpBufSize;
-
+
/// JumpBufAlignment - The alignment, in bytes, of the target's jmp_buf
/// buffers
unsigned JumpBufAlignment;
- /// IfCvtBlockSizeLimit - The maximum allowed size for a block to be
- /// if-converted.
- unsigned IfCvtBlockSizeLimit;
-
- /// IfCvtDupBlockSizeLimit - The maximum allowed size for a block to be
- /// duplicated during if-conversion.
- unsigned IfCvtDupBlockSizeLimit;
+ /// MinStackArgumentAlignment - The minimum alignment that any argument
+ /// on the stack needs to have.
+ ///
+ unsigned MinStackArgumentAlignment;
/// PrefLoopAlignment - The preferred loop alignment.
///
unsigned PrefLoopAlignment;
+ /// ShouldFoldAtomicFences - Whether fencing MEMBARRIER instructions should
+ /// be folded into the enclosed atomic intrinsic instruction by the
+ /// combiner.
+ bool ShouldFoldAtomicFences;
+
/// StackPointerRegisterToSaveRestore - If set to a physical register, this
/// specifies the register that llvm.savestack/llvm.restorestack should save
/// and restore.
@@ -1583,12 +1580,12 @@ private:
/// operations that are not should be described. Note that operations on
/// non-legal value types are not described here.
uint8_t OpActions[MVT::LAST_VALUETYPE][ISD::BUILTIN_OP_END];
-
+
/// LoadExtActions - For each load extension type and each value type,
/// keep a LegalizeAction that indicates how instruction selection should deal
/// with a load of a specific value type and extension type.
uint8_t LoadExtActions[MVT::LAST_VALUETYPE][ISD::LAST_LOADEXT_TYPE];
-
+
/// TruncStoreActions - For each value type pair keep a LegalizeAction that
/// indicates whether a truncating store of a specific value type and
/// truncating type is legal.
@@ -1600,7 +1597,7 @@ private:
/// value_type for the reference. The second dimension represents the various
/// modes for load store.
uint8_t IndexedModeActions[MVT::LAST_VALUETYPE][ISD::LAST_INDEXED_MODE];
-
+
/// CondCodeActions - For each condition code (ISD::CondCode) keep a
/// LegalizeAction that indicates how instruction selection should
/// deal with the condition code.
@@ -1615,7 +1612,7 @@ private:
/// which sets a bit in this array.
unsigned char
TargetDAGCombineArray[(ISD::BUILTIN_OP_END+CHAR_BIT-1)/CHAR_BIT];
-
+
/// PromoteToType - For operations that must be promoted to a specific type,
/// this holds the destination type. This map should be sparse, so don't hold
/// it as an array.
@@ -1676,6 +1673,15 @@ protected:
/// optimization.
bool benefitFromCodePlacementOpt;
};
+
+/// GetReturnInfo - Given an LLVM IR type and return type attributes,
+/// compute the return value EVTs and flags, and optionally also
+/// the offsets, if the return value is being lowered to memory.
+void GetReturnInfo(const Type* ReturnType, Attributes attr,
+ SmallVectorImpl<ISD::OutputArg> &Outs,
+ const TargetLowering &TLI,
+ SmallVectorImpl<uint64_t> *Offsets = 0);
+
} // end llvm namespace
#endif
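
A sketch of the intended use of GetReturnInfo above: build the ISD::OutputArg list for a function's return value and hand it to the new CanLowerReturn signature. canLowerReturnFor is a hypothetical helper, and the way the return attributes are fetched is an assumption about the IR-level API rather than something this header specifies:

    #include "llvm/Function.h"
    #include "llvm/ADT/SmallVector.h"
    #include "llvm/Target/TargetLowering.h"
    using namespace llvm;

    bool canLowerReturnFor(const Function &F, const TargetLowering &TLI) {
      SmallVector<ISD::OutputArg, 4> Outs;
      // Assumed attribute accessors; adjust to the actual Function/Attributes API.
      GetReturnInfo(F.getReturnType(), F.getAttributes().getRetAttributes(),
                    Outs, TLI);
      return TLI.CanLowerReturn(F.getCallingConv(), F.isVarArg(),
                                Outs, F.getContext());
    }
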
diff --git a/include/llvm/Target/TargetOpcodes.h b/include/llvm/Target/TargetOpcodes.h
index c4deaa8fbc19..cb772ecd77de 100644
--- a/include/llvm/Target/TargetOpcodes.h
+++ b/include/llvm/Target/TargetOpcodes.h
@@ -15,52 +15,54 @@
#define LLVM_TARGET_TARGETOPCODES_H
namespace llvm {
-
+
/// Invariant opcodes: All instruction sets have these as their low opcodes.
+///
+/// Every instruction defined here must also appear in Target.td and the order
+/// must be the same as in CodeGenTarget.cpp.
+///
namespace TargetOpcode {
- enum {
+ enum {
PHI = 0,
INLINEASM = 1,
DBG_LABEL = 2,
EH_LABEL = 3,
GC_LABEL = 4,
-
+
/// KILL - This instruction is a noop that is used only to adjust the
/// liveness of registers. This can be useful when dealing with
/// sub-registers.
KILL = 5,
-
+
/// EXTRACT_SUBREG - This instruction takes two operands: a register
/// that has subregisters, and a subregister index. It returns the
/// extracted subregister value. This is commonly used to implement
/// truncation operations on target architectures which support it.
EXTRACT_SUBREG = 6,
-
- /// INSERT_SUBREG - This instruction takes three operands: a register
- /// that has subregisters, a register providing an insert value, and a
- /// subregister index. It returns the value of the first register with
- /// the value of the second register inserted. The first register is
- /// often defined by an IMPLICIT_DEF, as is commonly used to implement
+
+ /// INSERT_SUBREG - This instruction takes three operands: a register that
+ /// has subregisters, a register providing an insert value, and a
+ /// subregister index. It returns the value of the first register with the
+ /// value of the second register inserted. The first register is often
+ /// defined by an IMPLICIT_DEF, because it is commonly used to implement
/// anyext operations on target architectures which support it.
INSERT_SUBREG = 7,
-
+
/// IMPLICIT_DEF - This is the MachineInstr-level equivalent of undef.
IMPLICIT_DEF = 8,
-
- /// SUBREG_TO_REG - This instruction is similar to INSERT_SUBREG except
- /// that the first operand is an immediate integer constant. This constant
- /// is often zero, as is commonly used to implement zext operations on
- /// target architectures which support it, such as with x86-64 (with
- /// zext from i32 to i64 via implicit zero-extension).
+
+ /// SUBREG_TO_REG - This instruction is similar to INSERT_SUBREG except that
+ /// the first operand is an immediate integer constant. This constant is
+ /// often zero, because it is commonly used to assert that the instruction
+ /// defining the register implicitly clears the high bits.
SUBREG_TO_REG = 9,
-
+
/// COPY_TO_REGCLASS - This instruction is a placeholder for a plain
/// register-to-register copy into a specific register class. This is only
/// used between instruction selection and MachineInstr creation, before
/// virtual registers have been created for all the instructions, and it's
/// only needed in cases where the register classes implied by the
- /// instructions are insufficient. The actual MachineInstrs to perform
- /// the copy are emitted with the TargetInstrInfo::copyRegToReg hook.
+ /// instructions are insufficient. It is emitted as a COPY MachineInstr.
COPY_TO_REGCLASS = 10,
/// DBG_VALUE - a mapping of the llvm.dbg.value intrinsic
@@ -72,7 +74,11 @@ namespace TargetOpcode {
/// e.g. v1027 = REG_SEQUENCE v1024, 3, v1025, 4, v1026, 5
/// After register coalescing references of v1024 should be replaced with
/// v1027:3, v1025 with v1027:4, etc.
- REG_SEQUENCE = 12
+ REG_SEQUENCE = 12,
+
+ /// COPY - Target-independent register copy. This instruction can also be
+ /// used to copy between subregisters of virtual registers.
+ COPY = 13
};
} // end namespace TargetOpcode
} // end namespace llvm
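As a hedged illustration of the new opcode (not code from this patch), a pass working on machine code might recognize a target-independent copy as below, assuming a MachineInstr *MI; getOpcode(), getOperand(), and getReg() are the standard MachineInstr/MachineOperand accessors:
    if (MI->getOpcode() == TargetOpcode::COPY) {
      // Operand 0 is the destination and operand 1 the source; either operand
      // may carry a subregister index when it refers to a virtual register.
      unsigned DstReg = MI->getOperand(0).getReg();
      unsigned SrcReg = MI->getOperand(1).getReg();
      // ... handle the copy ...
      (void)DstReg; (void)SrcReg;
    }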
diff --git a/include/llvm/Target/TargetRegisterInfo.h b/include/llvm/Target/TargetRegisterInfo.h
index 7c37b73a2b99..f6ac2b7b1a8f 100644
--- a/include/llvm/Target/TargetRegisterInfo.h
+++ b/include/llvm/Target/TargetRegisterInfo.h
@@ -115,6 +115,11 @@ public:
return RegSet.count(Reg);
}
+ /// contains - Return true if both registers are in this class.
+ bool contains(unsigned Reg1, unsigned Reg2) const {
+ return contains(Reg1) && contains(Reg2);
+ }
+
/// hasType - return true if this TargetRegisterClass has the ValueType vt.
///
bool hasType(EVT vt) const {
@@ -313,11 +318,11 @@ public:
return Reg >= FirstVirtualRegister;
}
- /// getPhysicalRegisterRegClass - Returns the Register Class of a physical
- /// register of the given type. If type is EVT::Other, then just return any
- /// register class the register belongs to.
- virtual const TargetRegisterClass *
- getPhysicalRegisterRegClass(unsigned Reg, EVT VT = MVT::Other) const;
+ /// getMinimalPhysRegClass - Returns the Register Class of a physical
+ /// register of the given type, picking the smallest register class of the
+ /// right type that contains this physreg.
+ const TargetRegisterClass *
+ getMinimalPhysRegClass(unsigned Reg, EVT VT = MVT::Other) const;
/// getAllocatableSet - Returns a bitset indexed by register number
/// indicating if a register is allocatable or not. If a register class is
@@ -438,11 +443,6 @@ public:
virtual const unsigned* getCalleeSavedRegs(const MachineFunction *MF = 0)
const = 0;
- /// getCalleeSavedRegClasses - Return a null-terminated list of the preferred
- /// register classes to spill each callee saved register with. The order and
- /// length of this list match the getCalleeSaveRegs() list.
- virtual const TargetRegisterClass* const *getCalleeSavedRegClasses(
- const MachineFunction *MF) const =0;
/// getReservedRegs - Returns a bitset indexed by physical register number
/// indicating if a register is a special register that has particular uses
@@ -456,7 +456,7 @@ public:
virtual unsigned getSubReg(unsigned RegNo, unsigned Index) const = 0;
/// getSubRegIndex - For a given register pair, return the sub-register index
- /// if the are second register is a sub-register of the first. Return zero
+ /// if the second register is a sub-register of the first. Return zero
/// otherwise.
virtual unsigned getSubRegIndex(unsigned RegNo, unsigned SubRegNo) const = 0;
@@ -470,14 +470,15 @@ public:
return 0;
}
- /// canCombinedSubRegIndex - Given a register class and a list of sub-register
- /// indices, return true if it's possible to combine the sub-register indices
- /// into one that corresponds to a larger sub-register. Return the new sub-
- /// register index by reference. Note the new index by be zero if the given
- /// sub-registers combined to form the whole register.
- virtual bool canCombinedSubRegIndex(const TargetRegisterClass *RC,
- SmallVectorImpl<unsigned> &SubIndices,
- unsigned &NewSubIdx) const {
+ /// canCombineSubRegIndices - Given a register class and a list of
+ /// subregister indices, return true if it's possible to combine the
+ /// subregister indices into one that corresponds to a larger
+ /// subregister. Return the new subregister index by reference. Note the
+ /// new index may be zero if the given subregisters can be combined to
+ /// form the whole register.
+ virtual bool canCombineSubRegIndices(const TargetRegisterClass *RC,
+ SmallVectorImpl<unsigned> &SubIndices,
+ unsigned &NewSubIdx) const {
return 0;
}
@@ -490,6 +491,23 @@ public:
return 0;
}
+ /// composeSubRegIndices - Return the subregister index you get from composing
+ /// two subregister indices.
+ ///
+ /// If R:a:b is the same register as R:c, then composeSubRegIndices(a, b)
+ /// returns c. Note that composeSubRegIndices does not detect illegal
+ /// compositions: if R does not have a subreg a, or R:a does not have a
+ /// subreg b, it will not report an error.
+ ///
+ /// The ARM register Q0 has two D subregs dsub_0:D0 and dsub_1:D1. It also has
+ /// ssub_0:S0 - ssub_3:S3 subregs.
+ /// If you compose subreg indices dsub_1 and ssub_0, you get ssub_2.
+ ///
+ virtual unsigned composeSubRegIndices(unsigned a, unsigned b) const {
+ // This default implementation is correct for most targets.
+ return b;
+ }
+
//===--------------------------------------------------------------------===//
// Register Class Information
//
@@ -506,8 +524,8 @@ public:
/// getRegClass - Returns the register class associated with the enumeration
/// value. See class TargetOperandInfo.
const TargetRegisterClass *getRegClass(unsigned i) const {
- assert(i <= getNumRegClasses() && "Register Class ID out of range");
- return i ? RegClassBegin[i - 1] : NULL;
+ assert(i < getNumRegClasses() && "Register Class ID out of range");
+ return RegClassBegin[i];
}
/// getPointerRegClass - Returns a TargetRegisterClass used for pointer
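To make the composition rule in the composeSubRegIndices comment concrete, here is a small sketch; TRI is assumed to be the ARM TargetRegisterInfo, and the ARM::dsub_1 / ARM::ssub_0 / ARM::ssub_2 enumerator names are assumed spellings of the subregister indices the comment refers to:
    // Q0 = [dsub_0:D0, dsub_1:D1] = [ssub_0:S0 ... ssub_3:S3].
    // Composing dsub_1 with ssub_0 selects the first S register inside D1,
    // which is S2, i.e. Q0:ssub_2.
    unsigned Idx = TRI->composeSubRegIndices(ARM::dsub_1, ARM::ssub_0);
    assert(Idx == ARM::ssub_2 && "unexpected composed subregister index");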
diff --git a/include/llvm/Transforms/IPO.h b/include/llvm/Transforms/IPO.h
index 5e1790442293..8fb4b63c6463 100644
--- a/include/llvm/Transforms/IPO.h
+++ b/include/llvm/Transforms/IPO.h
@@ -45,6 +45,11 @@ ModulePass *createStripNonDebugSymbolsPass();
ModulePass *createStripDebugDeclarePass();
//===----------------------------------------------------------------------===//
+//
+// This pass removes debug info for unused symbols.
+ModulePass *createStripDeadDebugInfoPass();
+
+//===----------------------------------------------------------------------===//
/// createLowerSetJmpPass - This function lowers the setjmp/longjmp intrinsics
/// to invoke/unwind instructions. This should really be part of the C/C++
/// front-end, but it's so much easier to write transformations in LLVM proper.
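A minimal sketch of scheduling the new StripDeadDebugInfo pass, assuming a Module *M that has already been loaded; the wrapper function name is hypothetical and error handling is omitted:
    #include "llvm/PassManager.h"
    #include "llvm/Transforms/IPO.h"

    void stripDeadDebugInfo(llvm::Module *M) {
      llvm::PassManager PM;
      PM.add(llvm::createStripDeadDebugInfoPass());
      PM.run(*M);  // drops debug info that only describes unused symbols
    }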
diff --git a/include/llvm/Transforms/Utils/BasicBlockUtils.h b/include/llvm/Transforms/Utils/BasicBlockUtils.h
index 5279e9620f0a..0f5445077bef 100644
--- a/include/llvm/Transforms/Utils/BasicBlockUtils.h
+++ b/include/llvm/Transforms/Utils/BasicBlockUtils.h
@@ -66,24 +66,6 @@ void ReplaceInstWithInst(BasicBlock::InstListType &BIL,
//
void ReplaceInstWithInst(Instruction *From, Instruction *To);
-/// FindAvailableLoadedValue - Scan the ScanBB block backwards (starting at the
-/// instruction before ScanFrom) checking to see if we have the value at the
-/// memory address *Ptr locally available within a small number of instructions.
-/// If the value is available, return it.
-///
-/// If not, return the iterator for the last validated instruction that the
-/// value would be live through. If we scanned the entire block and didn't find
-/// something that invalidates *Ptr or provides it, ScanFrom would be left at
-/// begin() and this returns null. ScanFrom could also be left
-///
-/// MaxInstsToScan specifies the maximum instructions to scan in the block. If
-/// it is set to 0, it will scan the whole block. You can also optionally
-/// specify an alias analysis implementation, which makes this more precise.
-Value *FindAvailableLoadedValue(Value *Ptr, BasicBlock *ScanBB,
- BasicBlock::iterator &ScanFrom,
- unsigned MaxInstsToScan = 6,
- AliasAnalysis *AA = 0);
-
/// FindFunctionBackedges - Analyze the specified function to find all of the
/// loop backedges in the function and return them. This is a relatively cheap
/// (compared to computing dominators and loop info) analysis.
diff --git a/include/llvm/Transforms/Utils/BuildLibCalls.h b/include/llvm/Transforms/Utils/BuildLibCalls.h
index 6df3469ec064..c75c14277ff6 100644
--- a/include/llvm/Transforms/Utils/BuildLibCalls.h
+++ b/include/llvm/Transforms/Utils/BuildLibCalls.h
@@ -34,6 +34,10 @@ namespace llvm {
/// and the return value has 'i8*' type.
Value *EmitStrChr(Value *Ptr, char C, IRBuilder<> &B, const TargetData *TD);
+ /// EmitStrNCmp - Emit a call to the strncmp function to the builder.
+ Value *EmitStrNCmp(Value *Ptr1, Value *Ptr2, Value *Len, IRBuilder<> &B,
+ const TargetData *TD);
+
/// EmitStrCpy - Emit a call to the strcpy function to the builder, for the
/// specified pointer arguments.
Value *EmitStrCpy(Value *Dst, Value *Src, IRBuilder<> &B,
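A hedged sketch of how the new EmitStrNCmp helper might be invoked from a simplification routine; the i8* values P1 and P2, the length value Len, the IRBuilder<> B, and the TargetData *TD are all assumed to be set up by the caller:
    // Emits a call to strncmp(P1, P2, Len) at B's insertion point; the result
    // is the usual i32 negative/zero/positive comparison value.
    Value *Cmp = EmitStrNCmp(P1, P2, Len, B, TD);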
diff --git a/include/llvm/Transforms/Utils/Cloning.h b/include/llvm/Transforms/Utils/Cloning.h
index 22bdc99ac18d..1ca4981ccbbe 100644
--- a/include/llvm/Transforms/Utils/Cloning.h
+++ b/include/llvm/Transforms/Utils/Cloning.h
@@ -18,7 +18,7 @@
#ifndef LLVM_TRANSFORMS_UTILS_CLONING_H
#define LLVM_TRANSFORMS_UTILS_CLONING_H
-#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/ValueMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Support/ValueHandle.h"
@@ -46,7 +46,7 @@ class AllocaInst;
/// CloneModule - Return an exact copy of the specified module
///
Module *CloneModule(const Module *M);
-Module *CloneModule(const Module *M, DenseMap<const Value*, Value*> &ValueMap);
+Module *CloneModule(const Module *M, ValueMap<const Value*, Value*> &VMap);
/// ClonedCodeInfo - This struct can be used to capture information about code
/// being cloned, while it is being cloned.
@@ -89,7 +89,7 @@ struct ClonedCodeInfo {
/// incoming edges.
///
/// The correlation between instructions in the source and result basic blocks
-/// is recorded in the ValueMap map.
+/// is recorded in the VMap map.
///
/// If you have a particular suffix you'd like to use to add to any cloned
/// names, specify it as the optional third parameter.
@@ -102,34 +102,34 @@ struct ClonedCodeInfo {
/// parameter.
///
BasicBlock *CloneBasicBlock(const BasicBlock *BB,
- DenseMap<const Value*, Value*> &ValueMap,
+ ValueMap<const Value*, Value*> &VMap,
const Twine &NameSuffix = "", Function *F = 0,
ClonedCodeInfo *CodeInfo = 0);
/// CloneLoop - Clone Loop. Clone dominator info for loop insiders. Populate
-/// ValueMap using old blocks to new blocks mapping.
+/// VMap using old blocks to new blocks mapping.
Loop *CloneLoop(Loop *L, LPPassManager *LPM, LoopInfo *LI,
- DenseMap<const Value *, Value *> &ValueMap, Pass *P);
+ ValueMap<const Value *, Value *> &VMap, Pass *P);
/// CloneFunction - Return a copy of the specified function, but without
/// embedding the function into another module. Also, any references specified
-/// in the ValueMap are changed to refer to their mapped value instead of the
-/// original one. If any of the arguments to the function are in the ValueMap,
-/// the arguments are deleted from the resultant function. The ValueMap is
+/// in the VMap are changed to refer to their mapped value instead of the
+/// original one. If any of the arguments to the function are in the VMap,
+/// the arguments are deleted from the resultant function. The VMap is
/// updated to include mappings from all of the instructions and basicblocks in
/// the function from their old to new values. The final argument captures
/// information about the cloned code if non-null.
///
Function *CloneFunction(const Function *F,
- DenseMap<const Value*, Value*> &ValueMap,
+ ValueMap<const Value*, Value*> &VMap,
ClonedCodeInfo *CodeInfo = 0);
-/// CloneFunction - Version of the function that doesn't need the ValueMap.
+/// CloneFunction - Version of the function that doesn't need the VMap.
///
inline Function *CloneFunction(const Function *F, ClonedCodeInfo *CodeInfo = 0){
- DenseMap<const Value*, Value*> ValueMap;
- return CloneFunction(F, ValueMap, CodeInfo);
+ ValueMap<const Value*, Value*> VMap;
+ return CloneFunction(F, VMap, CodeInfo);
}
/// Clone OldFunc into NewFunc, transforming the old arguments into references
@@ -139,7 +139,7 @@ inline Function *CloneFunction(const Function *F, ClonedCodeInfo *CodeInfo = 0){
/// specified suffix to all values cloned.
///
void CloneFunctionInto(Function *NewFunc, const Function *OldFunc,
- DenseMap<const Value*, Value*> &ValueMap,
+ ValueMap<const Value*, Value*> &VMap,
SmallVectorImpl<ReturnInst*> &Returns,
const char *NameSuffix = "",
ClonedCodeInfo *CodeInfo = 0);
@@ -152,7 +152,7 @@ void CloneFunctionInto(Function *NewFunc, const Function *OldFunc,
/// dead. Since this doesn't produce an exact copy of the input, it can't be
/// used for things like CloneFunction or CloneModule.
void CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc,
- DenseMap<const Value*, Value*> &ValueMap,
+ ValueMap<const Value*, Value*> &VMap,
SmallVectorImpl<ReturnInst*> &Returns,
const char *NameSuffix = "",
ClonedCodeInfo *CodeInfo = 0,
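For reference, a short sketch of the cloning API after the DenseMap-to-ValueMap switch, assuming a Function *F to clone; the wrapper function name is hypothetical:
    #include "llvm/Transforms/Utils/Cloning.h"
    using namespace llvm;

    Function *cloneWithMap(const Function *F) {
      ValueMap<const Value*, Value*> VMap;
      Function *NewF = CloneFunction(F, VMap);
      // VMap now maps each basic block and instruction of F to its clone.
      return NewF;
    }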
diff --git a/include/llvm/Transforms/Utils/Local.h b/include/llvm/Transforms/Utils/Local.h
index bb6fd564d8f5..b2779707276c 100644
--- a/include/llvm/Transforms/Utils/Local.h
+++ b/include/llvm/Transforms/Utils/Local.h
@@ -31,17 +31,6 @@ class TargetData;
template<typename T> class SmallVectorImpl;
//===----------------------------------------------------------------------===//
-// Local analysis.
-//
-
-/// isSafeToLoadUnconditionally - Return true if we know that executing a load
-/// from this value cannot trap. If it is not obviously safe to load from the
-/// specified pointer, we do a quick local scan of the basic block containing
-/// ScanFrom, to determine if the address is already accessed.
-bool isSafeToLoadUnconditionally(Value *V, Instruction *ScanFrom,
- unsigned Align, const TargetData *TD = 0);
-
-//===----------------------------------------------------------------------===//
// Local constant propagation.
//
diff --git a/include/llvm/Type.h b/include/llvm/Type.h
index 52229acb3238..617ef69de467 100644
--- a/include/llvm/Type.h
+++ b/include/llvm/Type.h
@@ -504,19 +504,19 @@ inline void PATypeHandle::removeUser() {
/// reference to the type.
///
inline Type* PATypeHolder::get() const {
+ if (Ty == 0) return 0;
const Type *NewTy = Ty->getForwardedType();
if (!NewTy) return const_cast<Type*>(Ty);
return *const_cast<PATypeHolder*>(this) = NewTy;
}
inline void PATypeHolder::addRef() {
- assert(Ty && "Type Holder has a null type!");
- if (Ty->isAbstract())
+ if (Ty && Ty->isAbstract())
Ty->addRef();
}
inline void PATypeHolder::dropRef() {
- if (Ty->isAbstract())
+ if (Ty && Ty->isAbstract())
Ty->dropRef();
}
diff --git a/include/llvm/Use.h b/include/llvm/Use.h
index 970f69b9da88..2759338f42aa 100644
--- a/include/llvm/Use.h
+++ b/include/llvm/Use.h
@@ -27,6 +27,7 @@
#include "llvm/Support/Casting.h"
#include "llvm/ADT/PointerIntPair.h"
+#include <cstddef>
#include <iterator>
namespace llvm {
diff --git a/include/llvm/Value.h b/include/llvm/Value.h
index bc25a0f40144..16b620778673 100644
--- a/include/llvm/Value.h
+++ b/include/llvm/Value.h
@@ -93,8 +93,8 @@ protected:
/// printing behavior.
virtual void printCustom(raw_ostream &O) const;
-public:
Value(const Type *Ty, unsigned scid);
+public:
virtual ~Value();
/// dump - Support for debugging, callable in GDB: V->dump()
@@ -210,7 +210,7 @@ public:
UndefValueVal, // This is an instance of UndefValue
BlockAddressVal, // This is an instance of BlockAddress
ConstantExprVal, // This is an instance of ConstantExpr
- ConstantAggregateZeroVal, // This is an instance of ConstantAggregateNull
+ ConstantAggregateZeroVal, // This is an instance of ConstantAggregateZero
ConstantIntVal, // This is an instance of ConstantInt
ConstantFPVal, // This is an instance of ConstantFP
ConstantArrayVal, // This is an instance of ConstantArray