Diffstat (limited to 'contrib/llvm/lib/Transforms/Instrumentation')
-rw-r--r--  contrib/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp     1036
-rw-r--r--  contrib/llvm/lib/Transforms/Instrumentation/BlackList.cpp              58
-rw-r--r--  contrib/llvm/lib/Transforms/Instrumentation/BlackList.h                57
-rw-r--r--  contrib/llvm/lib/Transforms/Instrumentation/BoundsChecking.cpp         19
-rw-r--r--  contrib/llvm/lib/Transforms/Instrumentation/EdgeProfiling.cpp           6
-rw-r--r--  contrib/llvm/lib/Transforms/Instrumentation/GCOVProfiling.cpp         299
-rw-r--r--  contrib/llvm/lib/Transforms/Instrumentation/Instrumentation.cpp         2
-rw-r--r--  contrib/llvm/lib/Transforms/Instrumentation/MaximumSpanningTree.h       4
-rw-r--r--  contrib/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp      1985
-rw-r--r--  contrib/llvm/lib/Transforms/Instrumentation/OptimalEdgeProfiling.cpp   16
-rw-r--r--  contrib/llvm/lib/Transforms/Instrumentation/PathProfiling.cpp          19
-rw-r--r--  contrib/llvm/lib/Transforms/Instrumentation/ProfilingUtils.cpp         10
-rw-r--r--  contrib/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp       160
13 files changed, 3050 insertions(+), 621 deletions(-)
diff --git a/contrib/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/contrib/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
index b7be4625ca8d..623c4705061e 100644
--- a/contrib/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
+++ b/contrib/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
@@ -15,41 +15,47 @@
#define DEBUG_TYPE "asan"
-#include "BlackList.h"
-#include "llvm/Function.h"
-#include "llvm/IRBuilder.h"
-#include "llvm/InlineAsm.h"
-#include "llvm/IntrinsicInst.h"
-#include "llvm/LLVMContext.h"
-#include "llvm/Module.h"
-#include "llvm/Type.h"
+#include "llvm/Transforms/Instrumentation.h"
#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Triple.h"
+#include "llvm/DIBuilder.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/InlineAsm.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/Type.h"
+#include "llvm/InstVisitor.h"
+#include "llvm/Support/CallSite.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/system_error.h"
-#include "llvm/DataLayout.h"
#include "llvm/Target/TargetMachine.h"
-#include "llvm/Transforms/Instrumentation.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
+#include "llvm/Transforms/Utils/BlackList.h"
+#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"
-
-#include <string>
#include <algorithm>
+#include <string>
using namespace llvm;
static const uint64_t kDefaultShadowScale = 3;
static const uint64_t kDefaultShadowOffset32 = 1ULL << 29;
static const uint64_t kDefaultShadowOffset64 = 1ULL << 44;
-static const uint64_t kDefaultShadowOffsetAndroid = 0;
+static const uint64_t kDefaultShort64bitShadowOffset = 0x7FFF8000; // < 2G.
+static const uint64_t kPPC64_ShadowOffset64 = 1ULL << 41;
static const size_t kMaxStackMallocSize = 1 << 16; // 64K
static const uintptr_t kCurrentStackFrameMagic = 0x41B58AB3;
@@ -59,16 +65,22 @@ static const char *kAsanModuleCtorName = "asan.module_ctor";
static const char *kAsanModuleDtorName = "asan.module_dtor";
static const int kAsanCtorAndCtorPriority = 1;
static const char *kAsanReportErrorTemplate = "__asan_report_";
+static const char *kAsanReportLoadN = "__asan_report_load_n";
+static const char *kAsanReportStoreN = "__asan_report_store_n";
static const char *kAsanRegisterGlobalsName = "__asan_register_globals";
static const char *kAsanUnregisterGlobalsName = "__asan_unregister_globals";
static const char *kAsanPoisonGlobalsName = "__asan_before_dynamic_init";
static const char *kAsanUnpoisonGlobalsName = "__asan_after_dynamic_init";
-static const char *kAsanInitName = "__asan_init";
+static const char *kAsanInitName = "__asan_init_v3";
static const char *kAsanHandleNoReturnName = "__asan_handle_no_return";
static const char *kAsanMappingOffsetName = "__asan_mapping_offset";
static const char *kAsanMappingScaleName = "__asan_mapping_scale";
static const char *kAsanStackMallocName = "__asan_stack_malloc";
static const char *kAsanStackFreeName = "__asan_stack_free";
+static const char *kAsanGenPrefix = "__asan_gen_";
+static const char *kAsanPoisonStackMemoryName = "__asan_poison_stack_memory";
+static const char *kAsanUnpoisonStackMemoryName =
+ "__asan_unpoison_stack_memory";
static const int kAsanStackLeftRedzoneMagic = 0xf1;
static const int kAsanStackMidRedzoneMagic = 0xf2;
@@ -112,9 +124,10 @@ static cl::opt<bool> ClInitializers("asan-initialization-order",
cl::desc("Handle C++ initializer order"), cl::Hidden, cl::init(false));
static cl::opt<bool> ClMemIntrin("asan-memintrin",
cl::desc("Handle memset/memcpy/memmove"), cl::Hidden, cl::init(true));
-// This flag may need to be replaced with -fasan-blacklist.
-static cl::opt<std::string> ClBlackListFile("asan-blacklist",
- cl::desc("File containing the list of functions to ignore "
+static cl::opt<bool> ClRealignStack("asan-realign-stack",
+ cl::desc("Realign stack to 32"), cl::Hidden, cl::init(true));
+static cl::opt<std::string> ClBlacklistFile("asan-blacklist",
+ cl::desc("File containing the list of objects to ignore "
"during instrumentation"), cl::Hidden);
// These flags allow to change the shadow mapping.
@@ -124,6 +137,9 @@ static cl::opt<int> ClMappingScale("asan-mapping-scale",
cl::desc("scale of asan shadow mapping"), cl::Hidden, cl::init(0));
static cl::opt<int> ClMappingOffsetLog("asan-mapping-offset-log",
cl::desc("offset of asan shadow mapping"), cl::Hidden, cl::init(-1));
+static cl::opt<bool> ClShort64BitOffset("asan-short-64bit-mapping-offset",
+ cl::desc("Use short immediate constant as the mapping offset for 64bit"),
+ cl::Hidden, cl::init(true));
// Optimization flags. Not user visible, used mostly for testing
// and benchmarking the tool.
@@ -135,6 +151,10 @@ static cl::opt<bool> ClOptSameTemp("asan-opt-same-temp",
static cl::opt<bool> ClOptGlobals("asan-opt-globals",
cl::desc("Don't instrument scalar globals"), cl::Hidden, cl::init(true));
+static cl::opt<bool> ClCheckLifetime("asan-check-lifetime",
+ cl::desc("Use llvm.lifetime intrinsics to insert extra checks"),
+ cl::Hidden, cl::init(false));
+
// Debug flags.
static cl::opt<int> ClDebug("asan-debug", cl::desc("debug"), cl::Hidden,
cl::init(0));
@@ -148,74 +168,332 @@ static cl::opt<int> ClDebugMax("asan-debug-max", cl::desc("Debug man inst"),
cl::Hidden, cl::init(-1));
namespace {
+/// A set of dynamically initialized globals extracted from metadata.
+class SetOfDynamicallyInitializedGlobals {
+ public:
+ void Init(Module& M) {
+ // Clang generates metadata identifying all dynamically initialized globals.
+ NamedMDNode *DynamicGlobals =
+ M.getNamedMetadata("llvm.asan.dynamically_initialized_globals");
+ if (!DynamicGlobals)
+ return;
+ for (int i = 0, n = DynamicGlobals->getNumOperands(); i < n; ++i) {
+ MDNode *MDN = DynamicGlobals->getOperand(i);
+ assert(MDN->getNumOperands() == 1);
+ Value *VG = MDN->getOperand(0);
+ // The optimizer may optimize away a global entirely, in which case we
+ // cannot instrument access to it.
+ if (!VG)
+ continue;
+ DynInitGlobals.insert(cast<GlobalVariable>(VG));
+ }
+ }
+ bool Contains(GlobalVariable *G) { return DynInitGlobals.count(G) != 0; }
+ private:
+ SmallSet<GlobalValue*, 32> DynInitGlobals;
+};
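// A minimal usage sketch of the helper above (illustrative, not part of
// the patch): initialize once per module, then query per global.
//
//   SetOfDynamicallyInitializedGlobals DynInit;
//   DynInit.Init(M);  // reads llvm.asan.dynamically_initialized_globals
//   if (DynInit.Contains(G))
//     ;  // G needs init-order checking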
+
+/// This struct defines the shadow mapping using the rule:
+/// shadow = (mem >> Scale) ADD-or-OR Offset.
+struct ShadowMapping {
+ int Scale;
+ uint64_t Offset;
+ bool OrShadowOffset;
+};
+
+static ShadowMapping getShadowMapping(const Module &M, int LongSize,
+ bool ZeroBaseShadow) {
+ llvm::Triple TargetTriple(M.getTargetTriple());
+ bool IsAndroid = TargetTriple.getEnvironment() == llvm::Triple::Android;
+ bool IsMacOSX = TargetTriple.getOS() == llvm::Triple::MacOSX;
+ bool IsPPC64 = TargetTriple.getArch() == llvm::Triple::ppc64;
+ bool IsX86_64 = TargetTriple.getArch() == llvm::Triple::x86_64;
+
+ ShadowMapping Mapping;
+
+ // OR-ing in the shadow offset is more efficient (at least on x86),
+ // but on ppc64 we have to use add, since the shadow offset is not
+ // necessarily 1/8-th of the address space.
+ Mapping.OrShadowOffset = !IsPPC64 && !ClShort64BitOffset;
+
+ Mapping.Offset = (IsAndroid || ZeroBaseShadow) ? 0 :
+ (LongSize == 32 ? kDefaultShadowOffset32 :
+ IsPPC64 ? kPPC64_ShadowOffset64 : kDefaultShadowOffset64);
+ if (!ZeroBaseShadow && ClShort64BitOffset && IsX86_64 && !IsMacOSX) {
+ assert(LongSize == 64);
+ Mapping.Offset = kDefaultShort64bitShadowOffset;
+ }
+ if (!ZeroBaseShadow && ClMappingOffsetLog >= 0) {
+ // Zero offset log is the special case.
+ Mapping.Offset = (ClMappingOffsetLog == 0) ? 0 : 1ULL << ClMappingOffsetLog;
+ }
+
+ Mapping.Scale = kDefaultShadowScale;
+ if (ClMappingScale) {
+ Mapping.Scale = ClMappingScale;
+ }
+
+ return Mapping;
+}
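// A worked instance of the mapping above, using this patch's x86-64 Linux
// defaults (Scale == 3, short Offset == 0x7FFF8000): for an application
// address Addr == 0x600000000000,
//   Shadow(Addr) = (Addr >> 3) + 0x7FFF8000
//                = 0xC0000000000 + 0x7FFF8000
//                = 0xC007FFF8000,
// and each shadow byte describes 2^3 == 8 application bytes.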
+
+static size_t RedzoneSizeForScale(int MappingScale) {
+ // Redzone used for stack and globals is at least 32 bytes.
+ // For scales 6 and 7, the redzone has to be 64 and 128 bytes respectively.
+ return std::max(32U, 1U << MappingScale);
+}
+
/// AddressSanitizer: instrument the code in module to find memory bugs.
struct AddressSanitizer : public FunctionPass {
- AddressSanitizer();
- virtual const char *getPassName() const;
+ AddressSanitizer(bool CheckInitOrder = true,
+ bool CheckUseAfterReturn = false,
+ bool CheckLifetime = false,
+ StringRef BlacklistFile = StringRef(),
+ bool ZeroBaseShadow = false)
+ : FunctionPass(ID),
+ CheckInitOrder(CheckInitOrder || ClInitializers),
+ CheckUseAfterReturn(CheckUseAfterReturn || ClUseAfterReturn),
+ CheckLifetime(CheckLifetime || ClCheckLifetime),
+ BlacklistFile(BlacklistFile.empty() ? ClBlacklistFile
+ : BlacklistFile),
+ ZeroBaseShadow(ZeroBaseShadow) {}
+ virtual const char *getPassName() const {
+ return "AddressSanitizerFunctionPass";
+ }
void instrumentMop(Instruction *I);
- void instrumentAddress(Instruction *OrigIns, IRBuilder<> &IRB,
- Value *Addr, uint32_t TypeSize, bool IsWrite);
+ void instrumentAddress(Instruction *OrigIns, Instruction *InsertBefore,
+ Value *Addr, uint32_t TypeSize, bool IsWrite,
+ Value *SizeArgument);
Value *createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong,
Value *ShadowValue, uint32_t TypeSize);
Instruction *generateCrashCode(Instruction *InsertBefore, Value *Addr,
- bool IsWrite, size_t AccessSizeIndex);
+ bool IsWrite, size_t AccessSizeIndex,
+ Value *SizeArgument);
bool instrumentMemIntrinsic(MemIntrinsic *MI);
void instrumentMemIntrinsicParam(Instruction *OrigIns, Value *Addr,
Value *Size,
Instruction *InsertBefore, bool IsWrite);
Value *memToShadow(Value *Shadow, IRBuilder<> &IRB);
bool runOnFunction(Function &F);
- void createInitializerPoisonCalls(Module &M,
- Value *FirstAddr, Value *LastAddr);
bool maybeInsertAsanInitAtFunctionEntry(Function &F);
- bool poisonStackInFunction(Function &F);
+ void emitShadowMapping(Module &M, IRBuilder<> &IRB) const;
virtual bool doInitialization(Module &M);
- virtual bool doFinalization(Module &M);
- bool insertGlobalRedzones(Module &M);
static char ID; // Pass identification, replacement for typeid
private:
- uint64_t getAllocaSizeInBytes(AllocaInst *AI) {
- Type *Ty = AI->getAllocatedType();
- uint64_t SizeInBytes = TD->getTypeAllocSize(Ty);
- return SizeInBytes;
- }
- uint64_t getAlignedSize(uint64_t SizeInBytes) {
- return ((SizeInBytes + RedzoneSize - 1)
- / RedzoneSize) * RedzoneSize;
- }
- uint64_t getAlignedAllocaSize(AllocaInst *AI) {
- uint64_t SizeInBytes = getAllocaSizeInBytes(AI);
- return getAlignedSize(SizeInBytes);
- }
+ void initializeCallbacks(Module &M);
- Function *checkInterfaceFunction(Constant *FuncOrBitcast);
bool ShouldInstrumentGlobal(GlobalVariable *G);
- void PoisonStack(const ArrayRef<AllocaInst*> &AllocaVec, IRBuilder<> IRB,
- Value *ShadowBase, bool DoPoison);
bool LooksLikeCodeInBug11395(Instruction *I);
void FindDynamicInitializers(Module &M);
- bool HasDynamicInitializer(GlobalVariable *G);
+
+ bool CheckInitOrder;
+ bool CheckUseAfterReturn;
+ bool CheckLifetime;
+ SmallString<64> BlacklistFile;
+ bool ZeroBaseShadow;
LLVMContext *C;
DataLayout *TD;
- uint64_t MappingOffset;
- int MappingScale;
- size_t RedzoneSize;
int LongSize;
Type *IntptrTy;
- Type *IntptrPtrTy;
+ ShadowMapping Mapping;
Function *AsanCtorFunction;
Function *AsanInitFunction;
- Function *AsanStackMallocFunc, *AsanStackFreeFunc;
Function *AsanHandleNoReturnFunc;
- Instruction *CtorInsertBefore;
OwningPtr<BlackList> BL;
// This array is indexed by AccessIsWrite and log2(AccessSize).
Function *AsanErrorCallback[2][kNumberOfAccessSizes];
+ // This array is indexed by AccessIsWrite.
+ Function *AsanErrorCallbackSized[2];
InlineAsm *EmptyAsm;
- SmallSet<GlobalValue*, 32> DynamicallyInitializedGlobals;
- SmallSet<GlobalValue*, 32> GlobalsCreatedByAsan;
+ SetOfDynamicallyInitializedGlobals DynamicallyInitializedGlobals;
+
+ friend struct FunctionStackPoisoner;
+};
+
+class AddressSanitizerModule : public ModulePass {
+ public:
+ AddressSanitizerModule(bool CheckInitOrder = true,
+ StringRef BlacklistFile = StringRef(),
+ bool ZeroBaseShadow = false)
+ : ModulePass(ID),
+ CheckInitOrder(CheckInitOrder || ClInitializers),
+ BlacklistFile(BlacklistFile.empty() ? ClBlacklistFile
+ : BlacklistFile),
+ ZeroBaseShadow(ZeroBaseShadow) {}
+ bool runOnModule(Module &M);
+ static char ID; // Pass identification, replacement for typeid
+ virtual const char *getPassName() const {
+ return "AddressSanitizerModule";
+ }
+
+ private:
+ void initializeCallbacks(Module &M);
+
+ bool ShouldInstrumentGlobal(GlobalVariable *G);
+ void createInitializerPoisonCalls(Module &M, GlobalValue *ModuleName);
+ size_t RedzoneSize() const {
+ return RedzoneSizeForScale(Mapping.Scale);
+ }
+
+ bool CheckInitOrder;
+ SmallString<64> BlacklistFile;
+ bool ZeroBaseShadow;
+
+ OwningPtr<BlackList> BL;
+ SetOfDynamicallyInitializedGlobals DynamicallyInitializedGlobals;
+ Type *IntptrTy;
+ LLVMContext *C;
+ DataLayout *TD;
+ ShadowMapping Mapping;
+ Function *AsanPoisonGlobals;
+ Function *AsanUnpoisonGlobals;
+ Function *AsanRegisterGlobals;
+ Function *AsanUnregisterGlobals;
+};
+
+// Stack poisoning does not play well with exception handling.
+// When an exception is thrown, we essentially bypass the code
+// that unpoisons the stack. This is why the run-time library has
+// to intercept __cxa_throw (as well as longjmp, etc) and unpoison the entire
+// stack in the interceptor. This however does not work inside the
+// actual function which catches the exception. Most likely because the
+// compiler hoists the load of the shadow value somewhere too high.
+// This causes asan to report a non-existing bug on 453.povray.
+// It sounds like an LLVM bug.
+struct FunctionStackPoisoner : public InstVisitor<FunctionStackPoisoner> {
+ Function &F;
+ AddressSanitizer &ASan;
+ DIBuilder DIB;
+ LLVMContext *C;
+ Type *IntptrTy;
+ Type *IntptrPtrTy;
+ ShadowMapping Mapping;
+
+ SmallVector<AllocaInst*, 16> AllocaVec;
+ SmallVector<Instruction*, 8> RetVec;
+ uint64_t TotalStackSize;
+ unsigned StackAlignment;
+
+ Function *AsanStackMallocFunc, *AsanStackFreeFunc;
+ Function *AsanPoisonStackMemoryFunc, *AsanUnpoisonStackMemoryFunc;
+
+ // Stores a place and arguments of poisoning/unpoisoning call for alloca.
+ struct AllocaPoisonCall {
+ IntrinsicInst *InsBefore;
+ uint64_t Size;
+ bool DoPoison;
+ };
+ SmallVector<AllocaPoisonCall, 8> AllocaPoisonCallVec;
+
+ // Maps Value to an AllocaInst from which the Value is originated.
+ typedef DenseMap<Value*, AllocaInst*> AllocaForValueMapTy;
+ AllocaForValueMapTy AllocaForValue;
+
+ FunctionStackPoisoner(Function &F, AddressSanitizer &ASan)
+ : F(F), ASan(ASan), DIB(*F.getParent()), C(ASan.C),
+ IntptrTy(ASan.IntptrTy), IntptrPtrTy(PointerType::get(IntptrTy, 0)),
+ Mapping(ASan.Mapping),
+ TotalStackSize(0), StackAlignment(1 << Mapping.Scale) {}
+
+ bool runOnFunction() {
+ if (!ClStack) return false;
+ // Collect alloca, ret, lifetime instructions etc.
+ for (df_iterator<BasicBlock*> DI = df_begin(&F.getEntryBlock()),
+ DE = df_end(&F.getEntryBlock()); DI != DE; ++DI) {
+ BasicBlock *BB = *DI;
+ visit(*BB);
+ }
+ if (AllocaVec.empty()) return false;
+
+ initializeCallbacks(*F.getParent());
+
+ poisonStack();
+
+ if (ClDebugStack) {
+ DEBUG(dbgs() << F);
+ }
+ return true;
+ }
+
+ // Finds all static Alloca instructions and puts
+ // poisoned red zones around all of them.
+ // Then unpoison everything back before the function returns.
+ void poisonStack();
+
+ // ----------------------- Visitors.
+ /// \brief Collect all Ret instructions.
+ void visitReturnInst(ReturnInst &RI) {
+ RetVec.push_back(&RI);
+ }
+
+ /// \brief Collect Alloca instructions we want (and can) handle.
+ void visitAllocaInst(AllocaInst &AI) {
+ if (!isInterestingAlloca(AI)) return;
+
+ StackAlignment = std::max(StackAlignment, AI.getAlignment());
+ AllocaVec.push_back(&AI);
+ uint64_t AlignedSize = getAlignedAllocaSize(&AI);
+ TotalStackSize += AlignedSize;
+ }
+
+ /// \brief Collect lifetime intrinsic calls to check for use-after-scope
+ /// errors.
+ void visitIntrinsicInst(IntrinsicInst &II) {
+ if (!ASan.CheckLifetime) return;
+ Intrinsic::ID ID = II.getIntrinsicID();
+ if (ID != Intrinsic::lifetime_start &&
+ ID != Intrinsic::lifetime_end)
+ return;
+ // Found lifetime intrinsic, add ASan instrumentation if necessary.
+ ConstantInt *Size = dyn_cast<ConstantInt>(II.getArgOperand(0));
+ // If size argument is undefined, don't do anything.
+ if (Size->isMinusOne()) return;
+ // Check that size doesn't saturate uint64_t and can
+ // be stored in IntptrTy.
+ const uint64_t SizeValue = Size->getValue().getLimitedValue();
+ if (SizeValue == ~0ULL ||
+ !ConstantInt::isValueValidForType(IntptrTy, SizeValue))
+ return;
+ // Find alloca instruction that corresponds to llvm.lifetime argument.
+ AllocaInst *AI = findAllocaForValue(II.getArgOperand(1));
+ if (!AI) return;
+ bool DoPoison = (ID == Intrinsic::lifetime_end);
+ AllocaPoisonCall APC = {&II, SizeValue, DoPoison};
+ AllocaPoisonCallVec.push_back(APC);
+ }
+
+ // ---------------------- Helpers.
+ void initializeCallbacks(Module &M);
+
+ // Check if we want (and can) handle this alloca.
+ bool isInterestingAlloca(AllocaInst &AI) {
+ return (!AI.isArrayAllocation() &&
+ AI.isStaticAlloca() &&
+ AI.getAllocatedType()->isSized());
+ }
+
+ size_t RedzoneSize() const {
+ return RedzoneSizeForScale(Mapping.Scale);
+ }
+ uint64_t getAllocaSizeInBytes(AllocaInst *AI) {
+ Type *Ty = AI->getAllocatedType();
+ uint64_t SizeInBytes = ASan.TD->getTypeAllocSize(Ty);
+ return SizeInBytes;
+ }
+ uint64_t getAlignedSize(uint64_t SizeInBytes) {
+ size_t RZ = RedzoneSize();
+ return ((SizeInBytes + RZ - 1) / RZ) * RZ;
+ }
+ uint64_t getAlignedAllocaSize(AllocaInst *AI) {
+ uint64_t SizeInBytes = getAllocaSizeInBytes(AI);
+ return getAlignedSize(SizeInBytes);
+ }
+ /// Finds alloca where the value comes from.
+ AllocaInst *findAllocaForValue(Value *V);
+ void poisonRedZones(const ArrayRef<AllocaInst*> &AllocaVec, IRBuilder<> IRB,
+ Value *ShadowBase, bool DoPoison);
+ void poisonAlloca(Value *V, uint64_t Size, IRBuilder<> IRB, bool DoPoison);
};
} // namespace
@@ -224,13 +502,21 @@ char AddressSanitizer::ID = 0;
INITIALIZE_PASS(AddressSanitizer, "asan",
"AddressSanitizer: detects use-after-free and out-of-bounds bugs.",
false, false)
-AddressSanitizer::AddressSanitizer() : FunctionPass(ID) { }
-FunctionPass *llvm::createAddressSanitizerPass() {
- return new AddressSanitizer();
+FunctionPass *llvm::createAddressSanitizerFunctionPass(
+ bool CheckInitOrder, bool CheckUseAfterReturn, bool CheckLifetime,
+ StringRef BlacklistFile, bool ZeroBaseShadow) {
+ return new AddressSanitizer(CheckInitOrder, CheckUseAfterReturn,
+ CheckLifetime, BlacklistFile, ZeroBaseShadow);
}
-const char *AddressSanitizer::getPassName() const {
- return "AddressSanitizer";
+char AddressSanitizerModule::ID = 0;
+INITIALIZE_PASS(AddressSanitizerModule, "asan-module",
+ "AddressSanitizer: detects use-after-free and out-of-bounds bugs."
+ "ModulePass", false, false)
+ModulePass *llvm::createAddressSanitizerModulePass(
+ bool CheckInitOrder, StringRef BlacklistFile, bool ZeroBaseShadow) {
+ return new AddressSanitizerModule(CheckInitOrder, BlacklistFile,
+ ZeroBaseShadow);
}
static size_t TypeSizeToSizeIndex(uint32_t TypeSize) {
@@ -242,38 +528,44 @@ static size_t TypeSizeToSizeIndex(uint32_t TypeSize) {
// Create a constant for Str so that we can pass it to the run-time lib.
static GlobalVariable *createPrivateGlobalForString(Module &M, StringRef Str) {
Constant *StrConst = ConstantDataArray::getString(M.getContext(), Str);
- return new GlobalVariable(M, StrConst->getType(), true,
- GlobalValue::PrivateLinkage, StrConst, "");
+ GlobalVariable *GV = new GlobalVariable(M, StrConst->getType(), true,
+ GlobalValue::PrivateLinkage, StrConst,
+ kAsanGenPrefix);
+ GV->setUnnamedAddr(true); // Ok to merge these.
+ GV->setAlignment(1); // Strings may not be merged w/o setting align 1.
+ return GV;
+}
+
+static bool GlobalWasGeneratedByAsan(GlobalVariable *G) {
+ return G->getName().find(kAsanGenPrefix) == 0;
}
Value *AddressSanitizer::memToShadow(Value *Shadow, IRBuilder<> &IRB) {
// Shadow >> scale
- Shadow = IRB.CreateLShr(Shadow, MappingScale);
- if (MappingOffset == 0)
+ Shadow = IRB.CreateLShr(Shadow, Mapping.Scale);
+ if (Mapping.Offset == 0)
return Shadow;
// (Shadow >> scale) | offset
- return IRB.CreateOr(Shadow, ConstantInt::get(IntptrTy,
- MappingOffset));
+ if (Mapping.OrShadowOffset)
+ return IRB.CreateOr(Shadow, ConstantInt::get(IntptrTy, Mapping.Offset));
+ else
+ return IRB.CreateAdd(Shadow, ConstantInt::get(IntptrTy, Mapping.Offset));
}
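// Why the OR form is legal (a sketch, assuming user addresses stay below
// 2^47 as on x86-64 Linux): with Offset == 1ULL << 44,
// (Addr >> 3) < (1ULL << 44), so the offset bit never collides with the
// shifted address bits and OR equals ADD, e.g.
//   uint64_t S = 0x7f0012345678ULL >> 3;            // < 1ULL << 44
//   assert((S | (1ULL << 44)) == (S + (1ULL << 44)));
// The short 0x7FFF8000 offset does overlap low shadow bits, hence ADD.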
void AddressSanitizer::instrumentMemIntrinsicParam(
Instruction *OrigIns,
Value *Addr, Value *Size, Instruction *InsertBefore, bool IsWrite) {
+ IRBuilder<> IRB(InsertBefore);
+ if (Size->getType() != IntptrTy)
+ Size = IRB.CreateIntCast(Size, IntptrTy, false);
// Check the first byte.
- {
- IRBuilder<> IRB(InsertBefore);
- instrumentAddress(OrigIns, IRB, Addr, 8, IsWrite);
- }
+ instrumentAddress(OrigIns, InsertBefore, Addr, 8, IsWrite, Size);
// Check the last byte.
- {
- IRBuilder<> IRB(InsertBefore);
- Value *SizeMinusOne = IRB.CreateSub(
- Size, ConstantInt::get(Size->getType(), 1));
- SizeMinusOne = IRB.CreateIntCast(SizeMinusOne, IntptrTy, false);
- Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
- Value *AddrPlusSizeMinisOne = IRB.CreateAdd(AddrLong, SizeMinusOne);
- instrumentAddress(OrigIns, IRB, AddrPlusSizeMinisOne, 8, IsWrite);
- }
+ IRB.SetInsertPoint(InsertBefore);
+ Value *SizeMinusOne = IRB.CreateSub(Size, ConstantInt::get(IntptrTy, 1));
+ Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
+ Value *AddrLast = IRB.CreateAdd(AddrLong, SizeMinusOne);
+ instrumentAddress(OrigIns, InsertBefore, AddrLast, 8, IsWrite, Size);
}
// Instrument memset/memmove/memcpy
@@ -328,30 +620,6 @@ static Value *isInterestingMemoryAccess(Instruction *I, bool *IsWrite) {
return NULL;
}
-void AddressSanitizer::FindDynamicInitializers(Module& M) {
- // Clang generates metadata identifying all dynamically initialized globals.
- NamedMDNode *DynamicGlobals =
- M.getNamedMetadata("llvm.asan.dynamically_initialized_globals");
- if (!DynamicGlobals)
- return;
- for (int i = 0, n = DynamicGlobals->getNumOperands(); i < n; ++i) {
- MDNode *MDN = DynamicGlobals->getOperand(i);
- assert(MDN->getNumOperands() == 1);
- Value *VG = MDN->getOperand(0);
- // The optimizer may optimize away a global entirely, in which case we
- // cannot instrument access to it.
- if (!VG)
- continue;
-
- GlobalVariable *G = cast<GlobalVariable>(VG);
- DynamicallyInitializedGlobals.insert(G);
- }
-}
-// Returns true if a global variable is initialized dynamically in this TU.
-bool AddressSanitizer::HasDynamicInitializer(GlobalVariable *G) {
- return DynamicallyInitializedGlobals.count(G);
-}
-
void AddressSanitizer::instrumentMop(Instruction *I) {
bool IsWrite = false;
Value *Addr = isInterestingMemoryAccess(I, &IsWrite);
@@ -360,14 +628,12 @@ void AddressSanitizer::instrumentMop(Instruction *I) {
if (GlobalVariable *G = dyn_cast<GlobalVariable>(Addr)) {
// If initialization order checking is disabled, a simple access to a
// dynamically initialized global is always valid.
- if (!ClInitializers)
+ if (!CheckInitOrder)
return;
// If a global variable does not have dynamic initialization we don't
- // have to instrument it. However, if a global has external linkage, we
- // assume it has dynamic initialization, as it may have an initializer
- // in a different TU.
- if (G->getLinkage() != GlobalVariable::ExternalLinkage &&
- !HasDynamicInitializer(G))
+ // have to instrument it. However, if a global does not have an initializer
+ // at all, we assume it has a dynamic initializer (in another TU).
+ if (G->hasInitializer() && !DynamicallyInitializedGlobals.Contains(G))
return;
}
}
@@ -378,21 +644,31 @@ void AddressSanitizer::instrumentMop(Instruction *I) {
assert(OrigTy->isSized());
uint32_t TypeSize = TD->getTypeStoreSizeInBits(OrigTy);
- if (TypeSize != 8 && TypeSize != 16 &&
- TypeSize != 32 && TypeSize != 64 && TypeSize != 128) {
- // Ignore all unusual sizes.
- return;
- }
+ assert((TypeSize % 8) == 0);
+ // Instrument a 1-, 2-, 4-, 8-, or 16-byte access with one check.
+ if (TypeSize == 8 || TypeSize == 16 ||
+ TypeSize == 32 || TypeSize == 64 || TypeSize == 128)
+ return instrumentAddress(I, I, Addr, TypeSize, IsWrite, 0);
+ // Instrument an unusual size (but still a multiple of 8).
+ // We cannot do it with a single check, so we do a 1-byte check for the
+ // first and the last bytes. We call __asan_report_*_n(addr, real_size) to
+ // be able to report the actual access size.
IRBuilder<> IRB(I);
- instrumentAddress(I, IRB, Addr, TypeSize, IsWrite);
+ Value *LastByte = IRB.CreateIntToPtr(
+ IRB.CreateAdd(IRB.CreatePointerCast(Addr, IntptrTy),
+ ConstantInt::get(IntptrTy, TypeSize / 8 - 1)),
+ OrigPtrTy);
+ Value *Size = ConstantInt::get(IntptrTy, TypeSize / 8);
+ instrumentAddress(I, I, Addr, 8, IsWrite, Size);
+ instrumentAddress(I, I, LastByte, 8, IsWrite, Size);
}
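// Shape of the unusual-size path above (sketch): a 10-byte (80-bit) store
// to Addr becomes two 1-byte checks carrying the real size,
//   instrumentAddress(I, I, Addr,     8, /*IsWrite*/true, /*Size*/10);
//   instrumentAddress(I, I, Addr + 9, 8, /*IsWrite*/true, /*Size*/10);
// so a failure is reported via __asan_report_store_n(addr, 10) rather
// than one of the fixed-size __asan_report_store* callbacks.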
// Validate the result of Module::getOrInsertFunction called for an interface
// function of AddressSanitizer. If the instrumented module defines a function
// with the same name, their prototypes must match, otherwise
// getOrInsertFunction returns a bitcast.
-Function *AddressSanitizer::checkInterfaceFunction(Constant *FuncOrBitcast) {
+static Function *checkInterfaceFunction(Constant *FuncOrBitcast) {
if (isa<Function>(FuncOrBitcast)) return cast<Function>(FuncOrBitcast);
FuncOrBitcast->dump();
report_fatal_error("trying to redefine an AddressSanitizer "
@@ -401,10 +677,12 @@ Function *AddressSanitizer::checkInterfaceFunction(Constant *FuncOrBitcast) {
Instruction *AddressSanitizer::generateCrashCode(
Instruction *InsertBefore, Value *Addr,
- bool IsWrite, size_t AccessSizeIndex) {
+ bool IsWrite, size_t AccessSizeIndex, Value *SizeArgument) {
IRBuilder<> IRB(InsertBefore);
- CallInst *Call = IRB.CreateCall(AsanErrorCallback[IsWrite][AccessSizeIndex],
- Addr);
+ CallInst *Call = SizeArgument
+ ? IRB.CreateCall2(AsanErrorCallbackSized[IsWrite], Addr, SizeArgument)
+ : IRB.CreateCall(AsanErrorCallback[IsWrite][AccessSizeIndex], Addr);
+
// We don't do Call->setDoesNotReturn() because the BB already has
// UnreachableInst at the end.
// This EmptyAsm is required to avoid callback merge.
@@ -415,7 +693,7 @@ Instruction *AddressSanitizer::generateCrashCode(
Value *AddressSanitizer::createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong,
Value *ShadowValue,
uint32_t TypeSize) {
- size_t Granularity = 1 << MappingScale;
+ size_t Granularity = 1 << Mapping.Scale;
// Addr & (Granularity - 1)
Value *LastAccessedByte = IRB.CreateAnd(
AddrLong, ConstantInt::get(IntptrTy, Granularity - 1));
@@ -431,12 +709,14 @@ Value *AddressSanitizer::createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong,
}
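// The slow path encodes the usual ASan partial-granule rule: with a shadow
// value k in [1, Granularity - 1] meaning "only the first k bytes of this
// granule are addressable", the access is bad iff
//   (Addr & (Granularity - 1)) + AccessSize - 1 >= k.
// For example, a 4-byte load at Addr % 8 == 5 against k == 6 fails,
// since 5 + 4 - 1 == 8 >= 6.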
void AddressSanitizer::instrumentAddress(Instruction *OrigIns,
- IRBuilder<> &IRB, Value *Addr,
- uint32_t TypeSize, bool IsWrite) {
+ Instruction *InsertBefore,
+ Value *Addr, uint32_t TypeSize,
+ bool IsWrite, Value *SizeArgument) {
+ IRBuilder<> IRB(InsertBefore);
Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
Type *ShadowTy = IntegerType::get(
- *C, std::max(8U, TypeSize >> MappingScale));
+ *C, std::max(8U, TypeSize >> Mapping.Scale));
Type *ShadowPtrTy = PointerType::get(ShadowTy, 0);
Value *ShadowPtr = memToShadow(AddrLong, IRB);
Value *CmpVal = Constant::getNullValue(ShadowTy);
@@ -445,7 +725,7 @@ void AddressSanitizer::instrumentAddress(Instruction *OrigIns,
Value *Cmp = IRB.CreateICmpNE(ShadowValue, CmpVal);
size_t AccessSizeIndex = TypeSizeToSizeIndex(TypeSize);
- size_t Granularity = 1 << MappingScale;
+ size_t Granularity = 1 << Mapping.Scale;
TerminatorInst *CrashTerm = 0;
if (ClAlwaysSlowPath || (TypeSize < 8 * Granularity)) {
@@ -464,14 +744,13 @@ void AddressSanitizer::instrumentAddress(Instruction *OrigIns,
CrashTerm = SplitBlockAndInsertIfThen(cast<Instruction>(Cmp), true);
}
- Instruction *Crash =
- generateCrashCode(CrashTerm, AddrLong, IsWrite, AccessSizeIndex);
+ Instruction *Crash = generateCrashCode(
+ CrashTerm, AddrLong, IsWrite, AccessSizeIndex, SizeArgument);
Crash->setDebugLoc(OrigIns->getDebugLoc());
}
-void AddressSanitizer::createInitializerPoisonCalls(Module &M,
- Value *FirstAddr,
- Value *LastAddr) {
+void AddressSanitizerModule::createInitializerPoisonCalls(
+ Module &M, GlobalValue *ModuleName) {
// We do all of our poisoning and unpoisoning within _GLOBAL__I_a.
Function *GlobalInit = M.getFunction("_GLOBAL__I_a");
// If that function is not present, this TU contains no globals, or they have
@@ -482,16 +761,9 @@ void AddressSanitizer::createInitializerPoisonCalls(Module &M,
// Set up the arguments to our poison/unpoison functions.
IRBuilder<> IRB(GlobalInit->begin()->getFirstInsertionPt());
- // Declare our poisoning and unpoisoning functions.
- Function *AsanPoisonGlobals = checkInterfaceFunction(M.getOrInsertFunction(
- kAsanPoisonGlobalsName, IRB.getVoidTy(), IntptrTy, IntptrTy, NULL));
- AsanPoisonGlobals->setLinkage(Function::ExternalLinkage);
- Function *AsanUnpoisonGlobals = checkInterfaceFunction(M.getOrInsertFunction(
- kAsanUnpoisonGlobalsName, IRB.getVoidTy(), NULL));
- AsanUnpoisonGlobals->setLinkage(Function::ExternalLinkage);
-
// Add a call to poison all external globals before the given function starts.
- IRB.CreateCall2(AsanPoisonGlobals, FirstAddr, LastAddr);
+ Value *ModuleNameAddr = ConstantExpr::getPointerCast(ModuleName, IntptrTy);
+ IRB.CreateCall(AsanPoisonGlobals, ModuleNameAddr);
// Add calls to unpoison all globals before each return instruction.
for (Function::iterator I = GlobalInit->begin(), E = GlobalInit->end();
@@ -502,14 +774,14 @@ void AddressSanitizer::createInitializerPoisonCalls(Module &M,
}
}
-bool AddressSanitizer::ShouldInstrumentGlobal(GlobalVariable *G) {
+bool AddressSanitizerModule::ShouldInstrumentGlobal(GlobalVariable *G) {
Type *Ty = cast<PointerType>(G->getType())->getElementType();
DEBUG(dbgs() << "GLOBAL: " << *G << "\n");
if (BL->isIn(*G)) return false;
if (!Ty->isSized()) return false;
if (!G->hasInitializer()) return false;
- if (GlobalsCreatedByAsan.count(G)) return false; // Our own global.
+ if (GlobalWasGeneratedByAsan(G)) return false; // Our own global.
// Touch only those globals that will not be defined in other modules.
// Don't handle ODR type linkages since other modules may be built w/o asan.
if (G->getLinkage() != GlobalVariable::ExternalLinkage &&
@@ -522,7 +794,7 @@ bool AddressSanitizer::ShouldInstrumentGlobal(GlobalVariable *G) {
if (G->isThreadLocal())
return false;
// For now, just ignore this Alloca if the alignment is large.
- if (G->getAlignment() > RedzoneSize) return false;
+ if (G->getAlignment() > RedzoneSize()) return false;
// Ignore all the globals with the names starting with "\01L_OBJC_".
// Many of those are put into the .cstring section. The linker compresses
@@ -561,10 +833,43 @@ bool AddressSanitizer::ShouldInstrumentGlobal(GlobalVariable *G) {
return true;
}
+void AddressSanitizerModule::initializeCallbacks(Module &M) {
+ IRBuilder<> IRB(*C);
+ // Declare our poisoning and unpoisoning functions.
+ AsanPoisonGlobals = checkInterfaceFunction(M.getOrInsertFunction(
+ kAsanPoisonGlobalsName, IRB.getVoidTy(), IntptrTy, NULL));
+ AsanPoisonGlobals->setLinkage(Function::ExternalLinkage);
+ AsanUnpoisonGlobals = checkInterfaceFunction(M.getOrInsertFunction(
+ kAsanUnpoisonGlobalsName, IRB.getVoidTy(), NULL));
+ AsanUnpoisonGlobals->setLinkage(Function::ExternalLinkage);
+ // Declare functions that register/unregister globals.
+ AsanRegisterGlobals = checkInterfaceFunction(M.getOrInsertFunction(
+ kAsanRegisterGlobalsName, IRB.getVoidTy(),
+ IntptrTy, IntptrTy, NULL));
+ AsanRegisterGlobals->setLinkage(Function::ExternalLinkage);
+ AsanUnregisterGlobals = checkInterfaceFunction(M.getOrInsertFunction(
+ kAsanUnregisterGlobalsName,
+ IRB.getVoidTy(), IntptrTy, IntptrTy, NULL));
+ AsanUnregisterGlobals->setLinkage(Function::ExternalLinkage);
+}
+
// This function replaces all global variables with new variables that have
// trailing redzones. It also creates a function that poisons
// redzones and inserts this function into llvm.global_ctors.
-bool AddressSanitizer::insertGlobalRedzones(Module &M) {
+bool AddressSanitizerModule::runOnModule(Module &M) {
+ if (!ClGlobals) return false;
+ TD = getAnalysisIfAvailable<DataLayout>();
+ if (!TD)
+ return false;
+ BL.reset(new BlackList(BlacklistFile));
+ if (BL->isIn(M)) return false;
+ C = &(M.getContext());
+ int LongSize = TD->getPointerSizeInBits();
+ IntptrTy = Type::getIntNTy(*C, LongSize);
+ Mapping = getShadowMapping(M, LongSize, ZeroBaseShadow);
+ initializeCallbacks(M);
+ DynamicallyInitializedGlobals.Init(M);
+
SmallVector<GlobalVariable *, 16> GlobalsToChange;
for (Module::GlobalListType::iterator G = M.global_begin(),
@@ -581,32 +886,48 @@ bool AddressSanitizer::insertGlobalRedzones(Module &M) {
// size_t size;
// size_t size_with_redzone;
// const char *name;
+ // const char *module_name;
// size_t has_dynamic_init;
// We initialize an array of such structures and pass it to a run-time call.
StructType *GlobalStructTy = StructType::get(IntptrTy, IntptrTy,
IntptrTy, IntptrTy,
- IntptrTy, NULL);
+ IntptrTy, IntptrTy, NULL);
SmallVector<Constant *, 16> Initializers(n), DynamicInit;
- IRBuilder<> IRB(CtorInsertBefore);
- if (ClInitializers)
- FindDynamicInitializers(M);
+ Function *CtorFunc = M.getFunction(kAsanModuleCtorName);
+ assert(CtorFunc);
+ IRBuilder<> IRB(CtorFunc->getEntryBlock().getTerminator());
+
+ bool HasDynamicallyInitializedGlobals = false;
- // The addresses of the first and last dynamically initialized globals in
- // this TU. Used in initialization order checking.
- Value *FirstDynamic = 0, *LastDynamic = 0;
+ GlobalVariable *ModuleName = createPrivateGlobalForString(
+ M, M.getModuleIdentifier());
+ // We must not merge identical module names, as this string serves as a
+ // unique module ID at run time.
+ ModuleName->setUnnamedAddr(false);
for (size_t i = 0; i < n; i++) {
+ static const uint64_t kMaxGlobalRedzone = 1 << 18;
GlobalVariable *G = GlobalsToChange[i];
PointerType *PtrTy = cast<PointerType>(G->getType());
Type *Ty = PtrTy->getElementType();
uint64_t SizeInBytes = TD->getTypeAllocSize(Ty);
- uint64_t RightRedzoneSize = RedzoneSize +
- (RedzoneSize - (SizeInBytes % RedzoneSize));
+ uint64_t MinRZ = RedzoneSize();
+ // MinRZ <= RZ <= kMaxGlobalRedzone,
+ // aiming to make RZ roughly 1/4 of SizeInBytes.
+ uint64_t RZ = std::max(MinRZ,
+ std::min(kMaxGlobalRedzone,
+ (SizeInBytes / MinRZ / 4) * MinRZ));
+ uint64_t RightRedzoneSize = RZ;
+ // Round up to MinRZ
+ if (SizeInBytes % MinRZ)
+ RightRedzoneSize += MinRZ - (SizeInBytes % MinRZ);
+ assert(((RightRedzoneSize + SizeInBytes) % MinRZ) == 0);
Type *RightRedZoneTy = ArrayType::get(IRB.getInt8Ty(), RightRedzoneSize);
// Determine whether this global should be poisoned in initialization.
- bool GlobalHasDynamicInitializer = HasDynamicInitializer(G);
+ bool GlobalHasDynamicInitializer =
+ DynamicallyInitializedGlobals.Contains(G);
// Don't check initialization order if this global is blacklisted.
GlobalHasDynamicInitializer &= !BL->isInInit(*G);
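// Concrete instance of the sizing rule above: for a 1 MiB global with
// MinRZ == 32,
//   RZ = max(32, min(1 << 18, (1048576 / 32 / 4) * 32))
//      = max(32, min(262144, 262144)) = 262144 bytes (256 KiB),
// i.e. a quarter of the object, capped at kMaxGlobalRedzone.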
@@ -615,18 +936,14 @@ bool AddressSanitizer::insertGlobalRedzones(Module &M) {
NewTy, G->getInitializer(),
Constant::getNullValue(RightRedZoneTy), NULL);
- SmallString<2048> DescriptionOfGlobal = G->getName();
- DescriptionOfGlobal += " (";
- DescriptionOfGlobal += M.getModuleIdentifier();
- DescriptionOfGlobal += ")";
- GlobalVariable *Name = createPrivateGlobalForString(M, DescriptionOfGlobal);
+ GlobalVariable *Name = createPrivateGlobalForString(M, G->getName());
// Create a new global variable with enough space for a redzone.
GlobalVariable *NewGlobal = new GlobalVariable(
M, NewTy, G->isConstant(), G->getLinkage(),
NewInitializer, "", G, G->getThreadLocalMode());
NewGlobal->copyAttributesFrom(G);
- NewGlobal->setAlignment(RedzoneSize);
+ NewGlobal->setAlignment(MinRZ);
Value *Indices2[2];
Indices2[0] = IRB.getInt32(0);
@@ -643,15 +960,13 @@ bool AddressSanitizer::insertGlobalRedzones(Module &M) {
ConstantInt::get(IntptrTy, SizeInBytes),
ConstantInt::get(IntptrTy, SizeInBytes + RightRedzoneSize),
ConstantExpr::getPointerCast(Name, IntptrTy),
+ ConstantExpr::getPointerCast(ModuleName, IntptrTy),
ConstantInt::get(IntptrTy, GlobalHasDynamicInitializer),
NULL);
// Populate the first and last globals declared in this TU.
- if (ClInitializers && GlobalHasDynamicInitializer) {
- LastDynamic = ConstantExpr::getPointerCast(NewGlobal, IntptrTy);
- if (FirstDynamic == 0)
- FirstDynamic = LastDynamic;
- }
+ if (CheckInitOrder && GlobalHasDynamicInitializer)
+ HasDynamicallyInitializedGlobals = true;
DEBUG(dbgs() << "NEW GLOBAL: " << *NewGlobal << "\n");
}
@@ -662,14 +977,8 @@ bool AddressSanitizer::insertGlobalRedzones(Module &M) {
ConstantArray::get(ArrayOfGlobalStructTy, Initializers), "");
// Create calls for poisoning before initializers run and unpoisoning after.
- if (ClInitializers && FirstDynamic && LastDynamic)
- createInitializerPoisonCalls(M, FirstDynamic, LastDynamic);
-
- Function *AsanRegisterGlobals = checkInterfaceFunction(M.getOrInsertFunction(
- kAsanRegisterGlobalsName, IRB.getVoidTy(),
- IntptrTy, IntptrTy, NULL));
- AsanRegisterGlobals->setLinkage(Function::ExternalLinkage);
-
+ if (CheckInitOrder && HasDynamicallyInitializedGlobals)
+ createInitializerPoisonCalls(M, ModuleName);
IRB.CreateCall2(AsanRegisterGlobals,
IRB.CreatePointerCast(AllGlobals, IntptrTy),
ConstantInt::get(IntptrTy, n));
@@ -681,12 +990,6 @@ bool AddressSanitizer::insertGlobalRedzones(Module &M) {
GlobalValue::InternalLinkage, kAsanModuleDtorName, &M);
BasicBlock *AsanDtorBB = BasicBlock::Create(*C, "", AsanDtorFunction);
IRBuilder<> IRB_Dtor(ReturnInst::Create(*C, AsanDtorBB));
- Function *AsanUnregisterGlobals =
- checkInterfaceFunction(M.getOrInsertFunction(
- kAsanUnregisterGlobalsName,
- IRB.getVoidTy(), IntptrTy, IntptrTy, NULL));
- AsanUnregisterGlobals->setLinkage(Function::ExternalLinkage);
-
IRB_Dtor.CreateCall2(AsanUnregisterGlobals,
IRB.CreatePointerCast(AllGlobals, IntptrTy),
ConstantInt::get(IntptrTy, n));
@@ -696,33 +999,8 @@ bool AddressSanitizer::insertGlobalRedzones(Module &M) {
return true;
}
-// virtual
-bool AddressSanitizer::doInitialization(Module &M) {
- // Initialize the private fields. No one has accessed them before.
- TD = getAnalysisIfAvailable<DataLayout>();
-
- if (!TD)
- return false;
- BL.reset(new BlackList(ClBlackListFile));
-
- C = &(M.getContext());
- LongSize = TD->getPointerSizeInBits();
- IntptrTy = Type::getIntNTy(*C, LongSize);
- IntptrPtrTy = PointerType::get(IntptrTy, 0);
-
- AsanCtorFunction = Function::Create(
- FunctionType::get(Type::getVoidTy(*C), false),
- GlobalValue::InternalLinkage, kAsanModuleCtorName, &M);
- BasicBlock *AsanCtorBB = BasicBlock::Create(*C, "", AsanCtorFunction);
- CtorInsertBefore = ReturnInst::Create(*C, AsanCtorBB);
-
- // call __asan_init in the module ctor.
- IRBuilder<> IRB(CtorInsertBefore);
- AsanInitFunction = checkInterfaceFunction(
- M.getOrInsertFunction(kAsanInitName, IRB.getVoidTy(), NULL));
- AsanInitFunction->setLinkage(Function::ExternalLinkage);
- IRB.CreateCall(AsanInitFunction);
-
+void AddressSanitizer::initializeCallbacks(Module &M) {
+ IRBuilder<> IRB(*C);
// Create __asan_report* callbacks.
for (size_t AccessIsWrite = 0; AccessIsWrite <= 1; AccessIsWrite++) {
for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes;
@@ -736,74 +1014,68 @@ bool AddressSanitizer::doInitialization(Module &M) {
FunctionName, IRB.getVoidTy(), IntptrTy, NULL));
}
}
+ AsanErrorCallbackSized[0] = checkInterfaceFunction(M.getOrInsertFunction(
+ kAsanReportLoadN, IRB.getVoidTy(), IntptrTy, IntptrTy, NULL));
+ AsanErrorCallbackSized[1] = checkInterfaceFunction(M.getOrInsertFunction(
+ kAsanReportStoreN, IRB.getVoidTy(), IntptrTy, IntptrTy, NULL));
- AsanStackMallocFunc = checkInterfaceFunction(M.getOrInsertFunction(
- kAsanStackMallocName, IntptrTy, IntptrTy, IntptrTy, NULL));
- AsanStackFreeFunc = checkInterfaceFunction(M.getOrInsertFunction(
- kAsanStackFreeName, IRB.getVoidTy(),
- IntptrTy, IntptrTy, IntptrTy, NULL));
AsanHandleNoReturnFunc = checkInterfaceFunction(M.getOrInsertFunction(
kAsanHandleNoReturnName, IRB.getVoidTy(), NULL));
-
// We insert an empty inline asm after __asan_report* to avoid callback merge.
EmptyAsm = InlineAsm::get(FunctionType::get(IRB.getVoidTy(), false),
StringRef(""), StringRef(""),
/*hasSideEffects=*/true);
+}
- llvm::Triple targetTriple(M.getTargetTriple());
- bool isAndroid = targetTriple.getEnvironment() == llvm::Triple::Android;
+void AddressSanitizer::emitShadowMapping(Module &M, IRBuilder<> &IRB) const {
+ // Tell the values of mapping offset and scale to the run-time.
+ GlobalValue *asan_mapping_offset =
+ new GlobalVariable(M, IntptrTy, true, GlobalValue::LinkOnceODRLinkage,
+ ConstantInt::get(IntptrTy, Mapping.Offset),
+ kAsanMappingOffsetName);
+ // Read the global, otherwise it may be optimized away.
+ IRB.CreateLoad(asan_mapping_offset, true);
+
+ GlobalValue *asan_mapping_scale =
+ new GlobalVariable(M, IntptrTy, true, GlobalValue::LinkOnceODRLinkage,
+ ConstantInt::get(IntptrTy, Mapping.Scale),
+ kAsanMappingScaleName);
+ // Read the global, otherwise it may be optimized away.
+ IRB.CreateLoad(asan_mapping_scale, true);
+}
- MappingOffset = isAndroid ? kDefaultShadowOffsetAndroid :
- (LongSize == 32 ? kDefaultShadowOffset32 : kDefaultShadowOffset64);
- if (ClMappingOffsetLog >= 0) {
- if (ClMappingOffsetLog == 0) {
- // special case
- MappingOffset = 0;
- } else {
- MappingOffset = 1ULL << ClMappingOffsetLog;
- }
- }
- MappingScale = kDefaultShadowScale;
- if (ClMappingScale) {
- MappingScale = ClMappingScale;
- }
- // Redzone used for stack and globals is at least 32 bytes.
- // For scales 6 and 7, the redzone has to be 64 and 128 bytes respectively.
- RedzoneSize = std::max(32, (int)(1 << MappingScale));
+// virtual
+bool AddressSanitizer::doInitialization(Module &M) {
+ // Initialize the private fields. No one has accessed them before.
+ TD = getAnalysisIfAvailable<DataLayout>();
+ if (!TD)
+ return false;
+ BL.reset(new BlackList(BlacklistFile));
+ DynamicallyInitializedGlobals.Init(M);
- if (ClMappingOffsetLog >= 0) {
- // Tell the run-time the current values of mapping offset and scale.
- GlobalValue *asan_mapping_offset =
- new GlobalVariable(M, IntptrTy, true, GlobalValue::LinkOnceODRLinkage,
- ConstantInt::get(IntptrTy, MappingOffset),
- kAsanMappingOffsetName);
- // Read the global, otherwise it may be optimized away.
- IRB.CreateLoad(asan_mapping_offset, true);
- }
- if (ClMappingScale) {
- GlobalValue *asan_mapping_scale =
- new GlobalVariable(M, IntptrTy, true, GlobalValue::LinkOnceODRLinkage,
- ConstantInt::get(IntptrTy, MappingScale),
- kAsanMappingScaleName);
- // Read the global, otherwise it may be optimized away.
- IRB.CreateLoad(asan_mapping_scale, true);
- }
+ C = &(M.getContext());
+ LongSize = TD->getPointerSizeInBits();
+ IntptrTy = Type::getIntNTy(*C, LongSize);
- appendToGlobalCtors(M, AsanCtorFunction, kAsanCtorAndCtorPriority);
+ AsanCtorFunction = Function::Create(
+ FunctionType::get(Type::getVoidTy(*C), false),
+ GlobalValue::InternalLinkage, kAsanModuleCtorName, &M);
+ BasicBlock *AsanCtorBB = BasicBlock::Create(*C, "", AsanCtorFunction);
+ // call __asan_init in the module ctor.
+ IRBuilder<> IRB(ReturnInst::Create(*C, AsanCtorBB));
+ AsanInitFunction = checkInterfaceFunction(
+ M.getOrInsertFunction(kAsanInitName, IRB.getVoidTy(), NULL));
+ AsanInitFunction->setLinkage(Function::ExternalLinkage);
+ IRB.CreateCall(AsanInitFunction);
- return true;
-}
+ Mapping = getShadowMapping(M, LongSize, ZeroBaseShadow);
+ emitShadowMapping(M, IRB);
-bool AddressSanitizer::doFinalization(Module &M) {
- // We transform the globals at the very end so that the optimization analysis
- // works on the original globals.
- if (ClGlobals)
- return insertGlobalRedzones(M);
- return false;
+ appendToGlobalCtors(M, AsanCtorFunction, kAsanCtorAndCtorPriority);
+ return true;
}
-
bool AddressSanitizer::maybeInsertAsanInitAtFunctionEntry(Function &F) {
// For each NSObject descendant having a +load method, this method is invoked
// by the ObjC runtime before any of the static constructors is called.
@@ -823,12 +1095,15 @@ bool AddressSanitizer::maybeInsertAsanInitAtFunctionEntry(Function &F) {
bool AddressSanitizer::runOnFunction(Function &F) {
if (BL->isIn(F)) return false;
if (&F == AsanCtorFunction) return false;
+ if (F.getLinkage() == GlobalValue::AvailableExternallyLinkage) return false;
DEBUG(dbgs() << "ASAN instrumenting:\n" << F << "\n");
+ initializeCallbacks(*F.getParent());
- // If needed, insert __asan_init before checking for AddressSafety attr.
+ // If needed, insert __asan_init before checking for SanitizeAddress attr.
maybeInsertAsanInitAtFunctionEntry(F);
- if (!F.getFnAttributes().hasAttribute(Attributes::AddressSafety))
+ if (!F.getAttributes().hasAttribute(AttributeSet::FunctionIndex,
+ Attribute::SanitizeAddress))
return false;
if (!ClDebugFunc.empty() && ClDebugFunc != F.getName())
@@ -857,12 +1132,12 @@ bool AddressSanitizer::runOnFunction(Function &F) {
} else if (isa<MemIntrinsic>(BI) && ClMemIntrin) {
// ok, take it.
} else {
- if (CallInst *CI = dyn_cast<CallInst>(BI)) {
+ CallSite CS(BI);
+ if (CS) {
// A call inside BB.
TempsToInstrument.clear();
- if (CI->doesNotReturn()) {
- NoReturnCalls.push_back(CI);
- }
+ if (CS.doesNotReturn())
+ NoReturnCalls.push_back(CS.getInstruction());
}
continue;
}
@@ -887,7 +1162,8 @@ bool AddressSanitizer::runOnFunction(Function &F) {
NumInstrumented++;
}
- bool ChangedStack = poisonStackInFunction(F);
+ FunctionStackPoisoner FSP(F, *this);
+ bool ChangedStack = FSP.runOnFunction();
// We must unpoison the stack before every NoReturn call (throw, _exit, etc).
// See e.g. http://code.google.com/p/address-sanitizer/issues/detail?id=37
@@ -912,10 +1188,10 @@ static uint64_t ValueForPoison(uint64_t PoisonByte, size_t ShadowRedzoneSize) {
static void PoisonShadowPartialRightRedzone(uint8_t *Shadow,
size_t Size,
- size_t RedzoneSize,
+ size_t RZSize,
size_t ShadowGranularity,
uint8_t Magic) {
- for (size_t i = 0; i < RedzoneSize;
+ for (size_t i = 0; i < RZSize;
i+= ShadowGranularity, Shadow++) {
if (i + ShadowGranularity <= Size) {
*Shadow = 0; // fully addressable
@@ -927,10 +1203,35 @@ static void PoisonShadowPartialRightRedzone(uint8_t *Shadow,
}
}
-void AddressSanitizer::PoisonStack(const ArrayRef<AllocaInst*> &AllocaVec,
- IRBuilder<> IRB,
- Value *ShadowBase, bool DoPoison) {
- size_t ShadowRZSize = RedzoneSize >> MappingScale;
+// Workaround for bug 11395: we don't want to instrument the stack in
+// functions with large assembly blobs (32-bit only); reg alloc may crash.
+// FIXME: remove once the bug 11395 is fixed.
+bool AddressSanitizer::LooksLikeCodeInBug11395(Instruction *I) {
+ if (LongSize != 32) return false;
+ CallInst *CI = dyn_cast<CallInst>(I);
+ if (!CI || !CI->isInlineAsm()) return false;
+ if (CI->getNumArgOperands() <= 5) return false;
+ // We have inline assembly with quite a few arguments.
+ return true;
+}
+
+void FunctionStackPoisoner::initializeCallbacks(Module &M) {
+ IRBuilder<> IRB(*C);
+ AsanStackMallocFunc = checkInterfaceFunction(M.getOrInsertFunction(
+ kAsanStackMallocName, IntptrTy, IntptrTy, IntptrTy, NULL));
+ AsanStackFreeFunc = checkInterfaceFunction(M.getOrInsertFunction(
+ kAsanStackFreeName, IRB.getVoidTy(),
+ IntptrTy, IntptrTy, IntptrTy, NULL));
+ AsanPoisonStackMemoryFunc = checkInterfaceFunction(M.getOrInsertFunction(
+ kAsanPoisonStackMemoryName, IRB.getVoidTy(), IntptrTy, IntptrTy, NULL));
+ AsanUnpoisonStackMemoryFunc = checkInterfaceFunction(M.getOrInsertFunction(
+ kAsanUnpoisonStackMemoryName, IRB.getVoidTy(), IntptrTy, IntptrTy, NULL));
+}
+
+void FunctionStackPoisoner::poisonRedZones(
+ const ArrayRef<AllocaInst*> &AllocaVec, IRBuilder<> IRB, Value *ShadowBase,
+ bool DoPoison) {
+ size_t ShadowRZSize = RedzoneSize() >> Mapping.Scale;
assert(ShadowRZSize >= 1 && ShadowRZSize <= 4);
Type *RZTy = Type::getIntNTy(*C, ShadowRZSize * 8);
Type *RZPtrTy = PointerType::get(RZTy, 0);
@@ -946,12 +1247,12 @@ void AddressSanitizer::PoisonStack(const ArrayRef<AllocaInst*> &AllocaVec,
IRB.CreateStore(PoisonLeft, IRB.CreateIntToPtr(ShadowBase, RZPtrTy));
// poison all other red zones.
- uint64_t Pos = RedzoneSize;
+ uint64_t Pos = RedzoneSize();
for (size_t i = 0, n = AllocaVec.size(); i < n; i++) {
AllocaInst *AI = AllocaVec[i];
uint64_t SizeInBytes = getAllocaSizeInBytes(AI);
uint64_t AlignedSize = getAlignedAllocaSize(AI);
- assert(AlignedSize - SizeInBytes < RedzoneSize);
+ assert(AlignedSize - SizeInBytes < RedzoneSize());
Value *Ptr = NULL;
Pos += AlignedSize;
@@ -961,13 +1262,13 @@ void AddressSanitizer::PoisonStack(const ArrayRef<AllocaInst*> &AllocaVec,
// Poison the partial redzone at right
Ptr = IRB.CreateAdd(
ShadowBase, ConstantInt::get(IntptrTy,
- (Pos >> MappingScale) - ShadowRZSize));
- size_t AddressableBytes = RedzoneSize - (AlignedSize - SizeInBytes);
+ (Pos >> Mapping.Scale) - ShadowRZSize));
+ size_t AddressableBytes = RedzoneSize() - (AlignedSize - SizeInBytes);
uint32_t Poison = 0;
if (DoPoison) {
PoisonShadowPartialRightRedzone((uint8_t*)&Poison, AddressableBytes,
- RedzoneSize,
- 1ULL << MappingScale,
+ RedzoneSize(),
+ 1ULL << Mapping.Scale,
kAsanStackPartialRedzoneMagic);
}
Value *PartialPoison = ConstantInt::get(RZTy, Poison);
@@ -976,76 +1277,23 @@ void AddressSanitizer::PoisonStack(const ArrayRef<AllocaInst*> &AllocaVec,
// Poison the full redzone at right.
Ptr = IRB.CreateAdd(ShadowBase,
- ConstantInt::get(IntptrTy, Pos >> MappingScale));
- Value *Poison = i == AllocaVec.size() - 1 ? PoisonRight : PoisonMid;
+ ConstantInt::get(IntptrTy, Pos >> Mapping.Scale));
+ bool LastAlloca = (i == AllocaVec.size() - 1);
+ Value *Poison = LastAlloca ? PoisonRight : PoisonMid;
IRB.CreateStore(Poison, IRB.CreateIntToPtr(Ptr, RZPtrTy));
- Pos += RedzoneSize;
+ Pos += RedzoneSize();
}
}
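// Resulting shadow for a frame with two 8-byte locals at Scale == 3
// (RedzoneSize() == 32, so ShadowRZSize == 4; frame is [RZ][a][RZ][b][RZ],
// 160 bytes), using the kAsanStack* magic values from the top of this file:
//   f1 f1 f1 f1   left redzone
//   00 f8 f8 f8   a: 8 addressable bytes, then partial redzone
//   f2 f2 f2 f2   middle redzone
//   00 f8 f8 f8   b
//   f3 f3 f3 f3   right redzone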
-// Workaround for bug 11395: we don't want to instrument stack in functions
-// with large assembly blobs (32-bit only), otherwise reg alloc may crash.
-// FIXME: remove once the bug 11395 is fixed.
-bool AddressSanitizer::LooksLikeCodeInBug11395(Instruction *I) {
- if (LongSize != 32) return false;
- CallInst *CI = dyn_cast<CallInst>(I);
- if (!CI || !CI->isInlineAsm()) return false;
- if (CI->getNumArgOperands() <= 5) return false;
- // We have inline assembly with quite a few arguments.
- return true;
-}
-
-// Find all static Alloca instructions and put
-// poisoned red zones around all of them.
-// Then unpoison everything back before the function returns.
-//
-// Stack poisoning does not play well with exception handling.
-// When an exception is thrown, we essentially bypass the code
-// that unpoisones the stack. This is why the run-time library has
-// to intercept __cxa_throw (as well as longjmp, etc) and unpoison the entire
-// stack in the interceptor. This however does not work inside the
-// actual function which catches the exception. Most likely because the
-// compiler hoists the load of the shadow value somewhere too high.
-// This causes asan to report a non-existing bug on 453.povray.
-// It sounds like an LLVM bug.
-bool AddressSanitizer::poisonStackInFunction(Function &F) {
- if (!ClStack) return false;
- SmallVector<AllocaInst*, 16> AllocaVec;
- SmallVector<Instruction*, 8> RetVec;
- uint64_t TotalSize = 0;
+void FunctionStackPoisoner::poisonStack() {
+ uint64_t LocalStackSize = TotalStackSize +
+ (AllocaVec.size() + 1) * RedzoneSize();
- // Filter out Alloca instructions we want (and can) handle.
- // Collect Ret instructions.
- for (Function::iterator FI = F.begin(), FE = F.end();
- FI != FE; ++FI) {
- BasicBlock &BB = *FI;
- for (BasicBlock::iterator BI = BB.begin(), BE = BB.end();
- BI != BE; ++BI) {
- if (isa<ReturnInst>(BI)) {
- RetVec.push_back(BI);
- continue;
- }
-
- AllocaInst *AI = dyn_cast<AllocaInst>(BI);
- if (!AI) continue;
- if (AI->isArrayAllocation()) continue;
- if (!AI->isStaticAlloca()) continue;
- if (!AI->getAllocatedType()->isSized()) continue;
- if (AI->getAlignment() > RedzoneSize) continue;
- AllocaVec.push_back(AI);
- uint64_t AlignedSize = getAlignedAllocaSize(AI);
- TotalSize += AlignedSize;
- }
- }
-
- if (AllocaVec.empty()) return false;
-
- uint64_t LocalStackSize = TotalSize + (AllocaVec.size() + 1) * RedzoneSize;
-
- bool DoStackMalloc = ClUseAfterReturn
+ bool DoStackMalloc = ASan.CheckUseAfterReturn
&& LocalStackSize <= kMaxStackMallocSize;
+ assert(AllocaVec.size() > 0);
Instruction *InsBefore = AllocaVec[0];
IRBuilder<> IRB(InsBefore);
@@ -1053,7 +1301,9 @@ bool AddressSanitizer::poisonStackInFunction(Function &F) {
Type *ByteArrayTy = ArrayType::get(IRB.getInt8Ty(), LocalStackSize);
AllocaInst *MyAlloca =
new AllocaInst(ByteArrayTy, "MyAlloca", InsBefore);
- MyAlloca->setAlignment(RedzoneSize);
+ if (ClRealignStack && StackAlignment < RedzoneSize())
+ StackAlignment = RedzoneSize();
+ MyAlloca->setAlignment(StackAlignment);
assert(MyAlloca->isStaticAlloca());
Value *OrigStackBase = IRB.CreatePointerCast(MyAlloca, IntptrTy);
Value *LocalStackBase = OrigStackBase;
@@ -1063,12 +1313,24 @@ bool AddressSanitizer::poisonStackInFunction(Function &F) {
ConstantInt::get(IntptrTy, LocalStackSize), OrigStackBase);
}
- // This string will be parsed by the run-time (DescribeStackAddress).
+ // This string will be parsed by the run-time (DescribeAddressIfStack).
SmallString<2048> StackDescriptionStorage;
raw_svector_ostream StackDescription(StackDescriptionStorage);
- StackDescription << F.getName() << " " << AllocaVec.size() << " ";
+ StackDescription << AllocaVec.size() << " ";
+
+ // Insert poison calls for lifetime intrinsics for alloca.
+ bool HavePoisonedAllocas = false;
+ for (size_t i = 0, n = AllocaPoisonCallVec.size(); i < n; i++) {
+ const AllocaPoisonCall &APC = AllocaPoisonCallVec[i];
+ IntrinsicInst *II = APC.InsBefore;
+ AllocaInst *AI = findAllocaForValue(II->getArgOperand(1));
+ assert(AI);
+ IRBuilder<> IRB(II);
+ poisonAlloca(AI, APC.Size, IRB, APC.DoPoison);
+ HavePoisonedAllocas |= APC.DoPoison;
+ }
- uint64_t Pos = RedzoneSize;
+ uint64_t Pos = RedzoneSize();
// Replace Alloca instructions with base+offset.
for (size_t i = 0, n = AllocaVec.size(); i < n; i++) {
AllocaInst *AI = AllocaVec[i];
@@ -1077,57 +1339,115 @@ bool AddressSanitizer::poisonStackInFunction(Function &F) {
StackDescription << Pos << " " << SizeInBytes << " "
<< Name.size() << " " << Name << " ";
uint64_t AlignedSize = getAlignedAllocaSize(AI);
- assert((AlignedSize % RedzoneSize) == 0);
- AI->replaceAllUsesWith(
- IRB.CreateIntToPtr(
+ assert((AlignedSize % RedzoneSize()) == 0);
+ Value *NewAllocaPtr = IRB.CreateIntToPtr(
IRB.CreateAdd(LocalStackBase, ConstantInt::get(IntptrTy, Pos)),
- AI->getType()));
- Pos += AlignedSize + RedzoneSize;
+ AI->getType());
+ replaceDbgDeclareForAlloca(AI, NewAllocaPtr, DIB);
+ AI->replaceAllUsesWith(NewAllocaPtr);
+ Pos += AlignedSize + RedzoneSize();
}
assert(Pos == LocalStackSize);
- // Write the Magic value and the frame description constant to the redzone.
+ // The left-most redzone has enough space for at least 4 pointers.
+ // Write the Magic value to redzone[0].
Value *BasePlus0 = IRB.CreateIntToPtr(LocalStackBase, IntptrPtrTy);
IRB.CreateStore(ConstantInt::get(IntptrTy, kCurrentStackFrameMagic),
BasePlus0);
- Value *BasePlus1 = IRB.CreateAdd(LocalStackBase,
- ConstantInt::get(IntptrTy, LongSize/8));
- BasePlus1 = IRB.CreateIntToPtr(BasePlus1, IntptrPtrTy);
+ // Write the frame description constant to redzone[1].
+ Value *BasePlus1 = IRB.CreateIntToPtr(
+ IRB.CreateAdd(LocalStackBase, ConstantInt::get(IntptrTy, ASan.LongSize/8)),
+ IntptrPtrTy);
GlobalVariable *StackDescriptionGlobal =
createPrivateGlobalForString(*F.getParent(), StackDescription.str());
- GlobalsCreatedByAsan.insert(StackDescriptionGlobal);
- Value *Description = IRB.CreatePointerCast(StackDescriptionGlobal, IntptrTy);
+ Value *Description = IRB.CreatePointerCast(StackDescriptionGlobal,
+ IntptrTy);
IRB.CreateStore(Description, BasePlus1);
+ // Write the PC to redzone[2].
+ Value *BasePlus2 = IRB.CreateIntToPtr(
+ IRB.CreateAdd(LocalStackBase, ConstantInt::get(IntptrTy,
+ 2 * ASan.LongSize/8)),
+ IntptrPtrTy);
+ IRB.CreateStore(IRB.CreatePointerCast(&F, IntptrTy), BasePlus2);
// Poison the stack redzones at the entry.
- Value *ShadowBase = memToShadow(LocalStackBase, IRB);
- PoisonStack(ArrayRef<AllocaInst*>(AllocaVec), IRB, ShadowBase, true);
+ Value *ShadowBase = ASan.memToShadow(LocalStackBase, IRB);
+ poisonRedZones(AllocaVec, IRB, ShadowBase, true);
// Unpoison the stack before all ret instructions.
for (size_t i = 0, n = RetVec.size(); i < n; i++) {
Instruction *Ret = RetVec[i];
IRBuilder<> IRBRet(Ret);
-
// Mark the current frame as retired.
IRBRet.CreateStore(ConstantInt::get(IntptrTy, kRetiredStackFrameMagic),
BasePlus0);
// Unpoison the stack.
- PoisonStack(ArrayRef<AllocaInst*>(AllocaVec), IRBRet, ShadowBase, false);
-
+ poisonRedZones(AllocaVec, IRBRet, ShadowBase, false);
if (DoStackMalloc) {
+ // In use-after-return mode, mark the whole stack frame unaddressable.
IRBRet.CreateCall3(AsanStackFreeFunc, LocalStackBase,
ConstantInt::get(IntptrTy, LocalStackSize),
OrigStackBase);
+ } else if (HavePoisonedAllocas) {
+ // If we poisoned some allocas in llvm.lifetime analysis,
+      // unpoison the whole stack frame now.
+ assert(LocalStackBase == OrigStackBase);
+ poisonAlloca(LocalStackBase, LocalStackSize, IRBRet, false);
}
}
// We are done. Remove the old unused alloca instructions.
for (size_t i = 0, n = AllocaVec.size(); i < n; i++)
AllocaVec[i]->eraseFromParent();
+}
- if (ClDebugStack) {
- DEBUG(dbgs() << F);
- }
+void FunctionStackPoisoner::poisonAlloca(Value *V, uint64_t Size,
+ IRBuilder<> IRB, bool DoPoison) {
+  // For now, just insert a call to the ASan runtime.
+ Value *AddrArg = IRB.CreatePointerCast(V, IntptrTy);
+ Value *SizeArg = ConstantInt::get(IntptrTy, Size);
+ IRB.CreateCall2(DoPoison ? AsanPoisonStackMemoryFunc
+ : AsanUnpoisonStackMemoryFunc,
+ AddrArg, SizeArg);
+}
- return true;
+// Handling llvm.lifetime intrinsics for a given %alloca:
+// (1) collect all llvm.lifetime.xxx(%size, %value) describing the alloca.
+// (2) if %size is constant, poison memory for llvm.lifetime.end (to detect
+// invalid accesses) and unpoison it for llvm.lifetime.start (the memory
+// could be poisoned by previous llvm.lifetime.end instruction, as the
+// variable may go in and out of scope several times, e.g. in loops).
+// (3) if we poisoned at least one %alloca in a function,
+// unpoison the whole stack frame at function exit.
+
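
As a rough sketch of step (1) above (not the patch's code), the marker collection could be written against the LLVM API like this; collectLifetimeMarkers and DoPoison are illustrative names, and modern range-for iteration is assumed:

    #include "llvm/IR/Constants.h"
    #include "llvm/IR/Function.h"
    #include "llvm/IR/IntrinsicInst.h"
    using namespace llvm;

    // Sketch: collect llvm.lifetime.start/end calls with a constant size.
    static void collectLifetimeMarkers(Function &F) {
      for (BasicBlock &BB : F)
        for (Instruction &I : BB) {
          IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I);
          if (!II) continue;
          Intrinsic::ID ID = II->getIntrinsicID();
          if (ID != Intrinsic::lifetime_start && ID != Intrinsic::lifetime_end)
            continue;
          // Arg 0 is the size; unknown (-1) sizes are skipped, per step (2).
          ConstantInt *Size = dyn_cast<ConstantInt>(II->getArgOperand(0));
          if (!Size || Size->isMinusOne()) continue;
          // Arg 1 points (possibly through casts) at the alloca;
          // lifetime.end means poison, lifetime.start means unpoison.
          bool DoPoison = (ID == Intrinsic::lifetime_end);
          (void)DoPoison; // a real pass would record an AllocaPoisonCall here
        }
    }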
+AllocaInst *FunctionStackPoisoner::findAllocaForValue(Value *V) {
+ if (AllocaInst *AI = dyn_cast<AllocaInst>(V))
+    // We're interested only in allocas we can handle.
+ return isInterestingAlloca(*AI) ? AI : 0;
+ // See if we've already calculated (or started to calculate) alloca for a
+ // given value.
+ AllocaForValueMapTy::iterator I = AllocaForValue.find(V);
+ if (I != AllocaForValue.end())
+ return I->second;
+ // Store 0 while we're calculating alloca for value V to avoid
+ // infinite recursion if the value references itself.
+ AllocaForValue[V] = 0;
+ AllocaInst *Res = 0;
+ if (CastInst *CI = dyn_cast<CastInst>(V))
+ Res = findAllocaForValue(CI->getOperand(0));
+ else if (PHINode *PN = dyn_cast<PHINode>(V)) {
+ for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
+ Value *IncValue = PN->getIncomingValue(i);
+ // Allow self-referencing phi-nodes.
+ if (IncValue == PN) continue;
+ AllocaInst *IncValueAI = findAllocaForValue(IncValue);
+ // AI for incoming values should exist and should all be equal.
+ if (IncValueAI == 0 || (Res != 0 && IncValueAI != Res))
+ return 0;
+ Res = IncValueAI;
+ }
+ }
+ if (Res != 0)
+ AllocaForValue[V] = Res;
+ return Res;
}
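
Putting the redzone writes above together, the fake frame that FunctionStackPoisoner builds inside MyAlloca is laid out roughly as follows (an illustrative sketch only; exact sizes depend on LongSize and RedzoneSize()):

    LocalStackBase + 0               kCurrentStackFrameMagic (redzone[0]),
                                     overwritten with kRetiredStackFrameMagic on return
    LocalStackBase + LongSize/8      pointer to the frame description string (redzone[1])
    LocalStackBase + 2*LongSize/8    PC of the function (redzone[2])
    ... remainder of the left redzone ...
    LocalStackBase + RedzoneSize()   first alloca, then a redzone, then the next alloca, ...
    LocalStackBase + LocalStackSize  end of frame (Pos == LocalStackSize)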
diff --git a/contrib/llvm/lib/Transforms/Instrumentation/BlackList.cpp b/contrib/llvm/lib/Transforms/Instrumentation/BlackList.cpp
index ef34b8a56d88..927982d2af47 100644
--- a/contrib/llvm/lib/Transforms/Instrumentation/BlackList.cpp
+++ b/contrib/llvm/lib/Transforms/Instrumentation/BlackList.cpp
@@ -13,26 +13,26 @@
//
//===----------------------------------------------------------------------===//
-#include <utility>
-#include <string>
-
-#include "BlackList.h"
+#include "llvm/Transforms/Utils/BlackList.h"
#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
-#include "llvm/Function.h"
-#include "llvm/GlobalVariable.h"
-#include "llvm/Module.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/GlobalVariable.h"
+#include "llvm/IR/Module.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Regex.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/system_error.h"
+#include <string>
+#include <utility>
namespace llvm {
BlackList::BlackList(const StringRef Path) {
// Validate and open blacklist file.
- if (!Path.size()) return;
+ if (Path.empty()) return;
OwningPtr<MemoryBuffer> File;
if (error_code EC = MemoryBuffer::getFile(Path, File)) {
report_fatal_error("Can't open blacklist file: " + Path + ": " +
@@ -52,6 +52,10 @@ BlackList::BlackList(const StringRef Path) {
std::pair<StringRef, StringRef> SplitLine = I->split(":");
StringRef Prefix = SplitLine.first;
std::string Regexp = SplitLine.second;
+ if (Regexp.empty()) {
+ // Missing ':' in the line.
+ report_fatal_error("malformed blacklist line: " + SplitLine.first);
+ }
// Replace * with .*
for (size_t pos = 0; (pos = Regexp.find("*", pos)) != std::string::npos;
@@ -68,38 +72,54 @@ BlackList::BlackList(const StringRef Path) {
}
// Add this regexp into the proper group by its prefix.
- if (Regexps[Prefix].size())
+ if (!Regexps[Prefix].empty())
Regexps[Prefix] += "|";
Regexps[Prefix] += Regexp;
}
// Iterate through each of the prefixes, and create Regexs for them.
- for (StringMap<std::string>::iterator I = Regexps.begin(), E = Regexps.end();
- I != E; ++I) {
+ for (StringMap<std::string>::const_iterator I = Regexps.begin(),
+ E = Regexps.end(); I != E; ++I) {
Entries[I->getKey()] = new Regex(I->getValue());
}
}
-bool BlackList::isIn(const Function &F) {
+bool BlackList::isIn(const Function &F) const {
return isIn(*F.getParent()) || inSection("fun", F.getName());
}
-bool BlackList::isIn(const GlobalVariable &G) {
+bool BlackList::isIn(const GlobalVariable &G) const {
return isIn(*G.getParent()) || inSection("global", G.getName());
}
-bool BlackList::isIn(const Module &M) {
+bool BlackList::isIn(const Module &M) const {
return inSection("src", M.getModuleIdentifier());
}
-bool BlackList::isInInit(const GlobalVariable &G) {
- return isIn(*G.getParent()) || inSection("global-init", G.getName());
+static StringRef GetGVTypeString(const GlobalVariable &G) {
+ // Types of GlobalVariables are always pointer types.
+ Type *GType = G.getType()->getElementType();
+ // For now we support blacklisting struct types only.
+ if (StructType *SGType = dyn_cast<StructType>(GType)) {
+ if (!SGType->isLiteral())
+ return SGType->getName();
+ }
+ return "<unknown type>";
+}
+
+bool BlackList::isInInit(const GlobalVariable &G) const {
+ return (isIn(*G.getParent()) ||
+ inSection("global-init", G.getName()) ||
+ inSection("global-init-type", GetGVTypeString(G)));
}
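
For illustration, a blacklist exercising the new prefix might look like this (hypothetical entries; the other prefixes are documented in the removed BlackList.h header below):

    # Disable init-order checking for every global whose struct type matches:
    global-init-type:*ExpectedStructName*
    # Pre-existing prefixes continue to work:
    global-init:*global_with_initialization_issues*
    fun:*_ZN4base6subtle*
    src:file_with_tricky_code.cc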
bool BlackList::inSection(const StringRef Section,
- const StringRef Query) {
- Regex *FunctionRegex = Entries[Section];
- return FunctionRegex ? FunctionRegex->match(Query) : false;
+ const StringRef Query) const {
+ StringMap<Regex*>::const_iterator I = Entries.find(Section);
+ if (I == Entries.end()) return false;
+
+ Regex *FunctionRegex = I->getValue();
+ return FunctionRegex->match(Query);
}
} // namespace llvm
diff --git a/contrib/llvm/lib/Transforms/Instrumentation/BlackList.h b/contrib/llvm/lib/Transforms/Instrumentation/BlackList.h
deleted file mode 100644
index f3c05a5058cc..000000000000
--- a/contrib/llvm/lib/Transforms/Instrumentation/BlackList.h
+++ /dev/null
@@ -1,57 +0,0 @@
-//===-- BlackList.h - blacklist for sanitizers ------------------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//===----------------------------------------------------------------------===//
-//
-// This is a utility class for instrumentation passes (like AddressSanitizer
-// or ThreadSanitizer) to avoid instrumenting some functions or global
-// variables based on a user-supplied blacklist.
-//
-// The blacklist disables instrumentation of various functions and global
-// variables. Each line contains a prefix, followed by a wild card expression.
-// Empty lines and lines starting with "#" are ignored.
-// ---
-// # Blacklisted items:
-// fun:*_ZN4base6subtle*
-// global:*global_with_bad_access_or_initialization*
-// global-init:*global_with_initialization_issues*
-// src:file_with_tricky_code.cc
-// ---
-// Note that the wild card is in fact an llvm::Regex, but * is automatically
-// replaced with .*
-// This is similar to the "ignore" feature of ThreadSanitizer.
-// http://code.google.com/p/data-race-test/wiki/ThreadSanitizerIgnores
-//
-//===----------------------------------------------------------------------===//
-//
-
-#include "llvm/ADT/StringMap.h"
-
-namespace llvm {
-class Function;
-class GlobalVariable;
-class Module;
-class Regex;
-class StringRef;
-
-class BlackList {
- public:
- BlackList(const StringRef Path);
- // Returns whether either this function or it's source file are blacklisted.
- bool isIn(const Function &F);
- // Returns whether either this global or it's source file are blacklisted.
- bool isIn(const GlobalVariable &G);
- // Returns whether this module is blacklisted by filename.
- bool isIn(const Module &M);
- // Returns whether a global should be excluded from initialization checking.
- bool isInInit(const GlobalVariable &G);
- private:
- StringMap<Regex*> Entries;
-
- bool inSection(const StringRef Section, const StringRef Query);
-};
-
-} // namespace llvm
diff --git a/contrib/llvm/lib/Transforms/Instrumentation/BoundsChecking.cpp b/contrib/llvm/lib/Transforms/Instrumentation/BoundsChecking.cpp
index 7810b1b8a3ef..b094d42568f0 100644
--- a/contrib/llvm/lib/Transforms/Instrumentation/BoundsChecking.cpp
+++ b/contrib/llvm/lib/Transforms/Instrumentation/BoundsChecking.cpp
@@ -13,19 +13,19 @@
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "bounds-checking"
-#include "llvm/IRBuilder.h"
-#include "llvm/Intrinsics.h"
-#include "llvm/Pass.h"
+#include "llvm/Transforms/Instrumentation.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/MemoryBuiltins.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/InstIterator.h"
#include "llvm/Support/TargetFolder.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/DataLayout.h"
#include "llvm/Target/TargetLibraryInfo.h"
-#include "llvm/Transforms/Instrumentation.h"
using namespace llvm;
static cl::opt<bool> SingleTrapBB("bounds-checking-single-trap",
@@ -41,7 +41,7 @@ namespace {
struct BoundsChecking : public FunctionPass {
static char ID;
- BoundsChecking(unsigned _Penalty = 5) : FunctionPass(ID), Penalty(_Penalty){
+ BoundsChecking() : FunctionPass(ID) {
initializeBoundsCheckingPass(*PassRegistry::getPassRegistry());
}
@@ -59,7 +59,6 @@ namespace {
BuilderTy *Builder;
Instruction *Inst;
BasicBlock *TrapBB;
- unsigned Penalty;
BasicBlock *getTrapBB();
void emitBranchToTrap(Value *Cmp = 0);
@@ -109,6 +108,7 @@ void BoundsChecking::emitBranchToTrap(Value *Cmp) {
else
Cmp = 0; // unconditional branch
}
+ ++ChecksAdded;
Instruction *Inst = Builder->GetInsertPoint();
BasicBlock *OldBB = Inst->getParent();
@@ -163,7 +163,6 @@ bool BoundsChecking::instrument(Value *Ptr, Value *InstVal) {
}
emitBranchToTrap(Or);
- ++ChecksAdded;
return true;
}
@@ -208,6 +207,6 @@ bool BoundsChecking::runOnFunction(Function &F) {
return MadeChange;
}
-FunctionPass *llvm::createBoundsCheckingPass(unsigned Penalty) {
- return new BoundsChecking(Penalty);
+FunctionPass *llvm::createBoundsCheckingPass() {
+ return new BoundsChecking();
}
diff --git a/contrib/llvm/lib/Transforms/Instrumentation/EdgeProfiling.cpp b/contrib/llvm/lib/Transforms/Instrumentation/EdgeProfiling.cpp
index e8ef2654d256..a2459fbafe18 100644
--- a/contrib/llvm/lib/Transforms/Instrumentation/EdgeProfiling.cpp
+++ b/contrib/llvm/lib/Transforms/Instrumentation/EdgeProfiling.cpp
@@ -18,13 +18,13 @@
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "insert-edge-profiling"
+#include "llvm/Transforms/Instrumentation.h"
#include "ProfilingUtils.h"
-#include "llvm/Module.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/IR/Module.h"
#include "llvm/Pass.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
-#include "llvm/Transforms/Instrumentation.h"
-#include "llvm/ADT/Statistic.h"
#include <set>
using namespace llvm;
diff --git a/contrib/llvm/lib/Transforms/Instrumentation/GCOVProfiling.cpp b/contrib/llvm/lib/Transforms/Instrumentation/GCOVProfiling.cpp
index e9192e5cdd52..2edd151869e0 100644
--- a/contrib/llvm/lib/Transforms/Instrumentation/GCOVProfiling.cpp
+++ b/contrib/llvm/lib/Transforms/Instrumentation/GCOVProfiling.cpp
@@ -16,21 +16,23 @@
#define DEBUG_TYPE "insert-gcov-profiling"
-#include "ProfilingUtils.h"
#include "llvm/Transforms/Instrumentation.h"
-#include "llvm/DebugInfo.h"
-#include "llvm/IRBuilder.h"
-#include "llvm/Instructions.h"
-#include "llvm/Module.h"
-#include "llvm/Pass.h"
+#include "ProfilingUtils.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/UniqueVector.h"
+#include "llvm/DebugInfo.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/DebugLoc.h"
+#include "llvm/Support/FileSystem.h"
#include "llvm/Support/InstIterator.h"
#include "llvm/Support/PathV2.h"
#include "llvm/Support/raw_ostream.h"
@@ -39,30 +41,57 @@
#include <utility>
using namespace llvm;
+static cl::opt<std::string>
+DefaultGCOVVersion("default-gcov-version", cl::init("402*"), cl::Hidden,
+ cl::ValueRequired);
+
+GCOVOptions GCOVOptions::getDefault() {
+ GCOVOptions Options;
+ Options.EmitNotes = true;
+ Options.EmitData = true;
+ Options.UseCfgChecksum = false;
+ Options.NoRedZone = false;
+ Options.FunctionNamesInData = true;
+
+ if (DefaultGCOVVersion.size() != 4) {
+ llvm::report_fatal_error(std::string("Invalid -default-gcov-version: ") +
+ DefaultGCOVVersion);
+ }
+ memcpy(Options.Version, DefaultGCOVVersion.c_str(), 4);
+ return Options;
+}
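
To make the byte order concrete, here is a small standalone sketch (illustrative, not part of the patch) of the reversal that the constructors below perform on Options.Version:

    #include <cstdio>

    int main() {
      const char Version[5] = "402*";  // the -default-gcov-version default
      char Reversed[5];
      Reversed[0] = Version[3];
      Reversed[1] = Version[2];
      Reversed[2] = Version[1];
      Reversed[3] = Version[0];
      Reversed[4] = '\0';
      // emitProfileNotes() writes "oncg", then the reversed version, then
      // "MVLL", so a default .gcno file begins with the bytes printed here.
      std::printf("oncg%sMVLL\n", Reversed);  // prints: oncg*204MVLL
      return 0;
    }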
+
namespace {
class GCOVProfiler : public ModulePass {
public:
static char ID;
- GCOVProfiler()
- : ModulePass(ID), EmitNotes(true), EmitData(true), Use402Format(false),
- UseExtraChecksum(false) {
+ GCOVProfiler() : ModulePass(ID), Options(GCOVOptions::getDefault()) {
+ ReversedVersion[0] = Options.Version[3];
+ ReversedVersion[1] = Options.Version[2];
+ ReversedVersion[2] = Options.Version[1];
+ ReversedVersion[3] = Options.Version[0];
+ ReversedVersion[4] = '\0';
initializeGCOVProfilerPass(*PassRegistry::getPassRegistry());
}
- GCOVProfiler(bool EmitNotes, bool EmitData, bool use402Format = false,
- bool useExtraChecksum = false)
- : ModulePass(ID), EmitNotes(EmitNotes), EmitData(EmitData),
- Use402Format(use402Format), UseExtraChecksum(useExtraChecksum) {
- assert((EmitNotes || EmitData) && "GCOVProfiler asked to do nothing?");
+ GCOVProfiler(const GCOVOptions &Options) : ModulePass(ID), Options(Options){
+ assert((Options.EmitNotes || Options.EmitData) &&
+ "GCOVProfiler asked to do nothing?");
+ ReversedVersion[0] = Options.Version[3];
+ ReversedVersion[1] = Options.Version[2];
+ ReversedVersion[2] = Options.Version[1];
+ ReversedVersion[3] = Options.Version[0];
+ ReversedVersion[4] = '\0';
initializeGCOVProfilerPass(*PassRegistry::getPassRegistry());
}
virtual const char *getPassName() const {
return "GCOV Profiler";
}
+
private:
bool runOnModule(Module &M);
- // Create the GCNO files for the Module based on DebugInfo.
- void emitGCNO();
+ // Create the .gcno files for the Module based on DebugInfo.
+ void emitProfileNotes();
// Modify the program to track transitions along edges and call into the
// profiling runtime to emit .gcda files when run.
@@ -73,6 +102,8 @@ namespace {
Constant *getIncrementIndirectCounterFunc();
Constant *getEmitFunctionFunc();
Constant *getEmitArcsFunc();
+ Constant *getDeleteWriteoutFunctionListFunc();
+ Constant *getDeleteFlushFunctionListFunc();
Constant *getEndFileFunc();
// Create or retrieve an i32 state value that is used to represent the
@@ -83,21 +114,22 @@ namespace {
// block number.
GlobalVariable *buildEdgeLookupTable(Function *F,
GlobalVariable *Counter,
- const UniqueVector<BasicBlock *> &Preds,
- const UniqueVector<BasicBlock *> &Succs);
+ const UniqueVector<BasicBlock *>&Preds,
+ const UniqueVector<BasicBlock*>&Succs);
// Add the function to write out all our counters to the global destructor
// list.
- void insertCounterWriteout(ArrayRef<std::pair<GlobalVariable*, MDNode*> >);
+ Function *insertCounterWriteout(ArrayRef<std::pair<GlobalVariable*,
+ MDNode*> >);
+ Function *insertFlush(ArrayRef<std::pair<GlobalVariable*, MDNode*> >);
void insertIndirectCounterIncrement();
- void insertFlush(ArrayRef<std::pair<GlobalVariable*, MDNode*> >);
std::string mangleName(DICompileUnit CU, const char *NewStem);
- bool EmitNotes;
- bool EmitData;
- bool Use402Format;
- bool UseExtraChecksum;
+ GCOVOptions Options;
+
+ // Reversed, NUL-terminated copy of Options.Version.
+ char ReversedVersion[5];
Module *M;
LLVMContext *Ctx;
@@ -108,10 +140,14 @@ char GCOVProfiler::ID = 0;
INITIALIZE_PASS(GCOVProfiler, "insert-gcov-profiling",
"Insert instrumentation for GCOV profiling", false, false)
-ModulePass *llvm::createGCOVProfilerPass(bool EmitNotes, bool EmitData,
- bool Use402Format,
- bool UseExtraChecksum) {
- return new GCOVProfiler(EmitNotes, EmitData, Use402Format, UseExtraChecksum);
+ModulePass *llvm::createGCOVProfilerPass(const GCOVOptions &Options) {
+ return new GCOVProfiler(Options);
+}
+
+static std::string getFunctionName(DISubprogram SP) {
+ if (!SP.getLinkageName().empty())
+ return SP.getLinkageName();
+ return SP.getName();
}
namespace {
@@ -249,8 +285,8 @@ namespace {
// object users can construct, the blocks and lines will be rooted here.
class GCOVFunction : public GCOVRecord {
public:
- GCOVFunction(DISubprogram SP, raw_ostream *os,
- bool Use402Format, bool UseExtraChecksum) {
+ GCOVFunction(DISubprogram SP, raw_ostream *os, uint32_t Ident,
+ bool UseCfgChecksum) {
this->os = os;
Function *F = SP.getFunction();
@@ -262,17 +298,16 @@ namespace {
ReturnBlock = new GCOVBlock(i++, os);
writeBytes(FunctionTag, 4);
- uint32_t BlockLen = 1 + 1 + 1 + lengthOfGCOVString(SP.getName()) +
+ uint32_t BlockLen = 1 + 1 + 1 + lengthOfGCOVString(getFunctionName(SP)) +
1 + lengthOfGCOVString(SP.getFilename()) + 1;
- if (UseExtraChecksum)
+ if (UseCfgChecksum)
++BlockLen;
write(BlockLen);
- uint32_t Ident = reinterpret_cast<intptr_t>((MDNode*)SP);
write(Ident);
write(0); // lineno checksum
- if (UseExtraChecksum)
+ if (UseCfgChecksum)
write(0); // cfg checksum
- writeGCOVString(SP.getName());
+ writeGCOVString(getFunctionName(SP));
writeGCOVString(SP.getFilename());
write(SP.getLineNumber());
}
@@ -347,19 +382,23 @@ std::string GCOVProfiler::mangleName(DICompileUnit CU, const char *NewStem) {
SmallString<128> Filename = CU.getFilename();
sys::path::replace_extension(Filename, NewStem);
- return sys::path::filename(Filename.str());
+ StringRef FName = sys::path::filename(Filename);
+ SmallString<128> CurPath;
+ if (sys::fs::current_path(CurPath)) return FName;
+ sys::path::append(CurPath, FName.str());
+ return CurPath.str();
}
bool GCOVProfiler::runOnModule(Module &M) {
this->M = &M;
Ctx = &M.getContext();
- if (EmitNotes) emitGCNO();
- if (EmitData) return emitProfileArcs();
+ if (Options.EmitNotes) emitProfileNotes();
+ if (Options.EmitData) return emitProfileArcs();
return false;
}
-void GCOVProfiler::emitGCNO() {
+void GCOVProfiler::emitProfileNotes() {
NamedMDNode *CU_Nodes = M->getNamedMetadata("llvm.dbg.cu");
if (!CU_Nodes) return;
@@ -372,10 +411,9 @@ void GCOVProfiler::emitGCNO() {
std::string ErrorInfo;
raw_fd_ostream out(mangleName(CU, "gcno").c_str(), ErrorInfo,
raw_fd_ostream::F_Binary);
- if (!Use402Format)
- out.write("oncg*404MVLL", 12);
- else
- out.write("oncg*204MVLL", 12);
+ out.write("oncg", 4);
+ out.write(ReversedVersion, 4);
+ out.write("MVLL", 4);
DIArray SPs = CU.getSubprograms();
for (unsigned i = 0, e = SPs.getNumElements(); i != e; ++i) {
@@ -384,7 +422,7 @@ void GCOVProfiler::emitGCNO() {
Function *F = SP.getFunction();
if (!F) continue;
- GCOVFunction Func(SP, &out, Use402Format, UseExtraChecksum);
+ GCOVFunction Func(SP, &out, i, Options.UseCfgChecksum);
for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB) {
GCOVBlock &Block = Func.getBlock(BB);
@@ -465,21 +503,18 @@ bool GCOVProfiler::emitProfileArcs() {
Value *Counter = Builder.CreateConstInBoundsGEP2_64(Counters, 0,
Edge);
Value *Count = Builder.CreateLoad(Counter);
- Count = Builder.CreateAdd(Count,
- ConstantInt::get(Type::getInt64Ty(*Ctx),1));
+ Count = Builder.CreateAdd(Count, Builder.getInt64(1));
Builder.CreateStore(Count, Counter);
} else if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
- Value *Sel = Builder.CreateSelect(
- BI->getCondition(),
- ConstantInt::get(Type::getInt64Ty(*Ctx), Edge),
- ConstantInt::get(Type::getInt64Ty(*Ctx), Edge + 1));
+ Value *Sel = Builder.CreateSelect(BI->getCondition(),
+ Builder.getInt64(Edge),
+ Builder.getInt64(Edge + 1));
SmallVector<Value *, 2> Idx;
- Idx.push_back(Constant::getNullValue(Type::getInt64Ty(*Ctx)));
+ Idx.push_back(Builder.getInt64(0));
Idx.push_back(Sel);
Value *Counter = Builder.CreateInBoundsGEP(Counters, Idx);
Value *Count = Builder.CreateLoad(Counter);
- Count = Builder.CreateAdd(Count,
- ConstantInt::get(Type::getInt64Ty(*Ctx),1));
+ Count = Builder.CreateAdd(Count, Builder.getInt64(1));
Builder.CreateStore(Count, Counter);
} else {
ComplexEdgePreds.insert(BB);
@@ -496,10 +531,9 @@ bool GCOVProfiler::emitProfileArcs() {
ComplexEdgePreds, ComplexEdgeSuccs);
GlobalVariable *EdgeState = getEdgeStateValue();
- Type *Int32Ty = Type::getInt32Ty(*Ctx);
for (int i = 0, e = ComplexEdgePreds.size(); i != e; ++i) {
IRBuilder<> Builder(ComplexEdgePreds[i+1]->getTerminator());
- Builder.CreateStore(ConstantInt::get(Int32Ty, i), EdgeState);
+ Builder.CreateStore(Builder.getInt32(i), EdgeState);
}
for (int i = 0, e = ComplexEdgeSuccs.size(); i != e; ++i) {
// call runtime to perform increment
@@ -518,8 +552,38 @@ bool GCOVProfiler::emitProfileArcs() {
}
}
- insertCounterWriteout(CountersBySP);
- insertFlush(CountersBySP);
+ Function *WriteoutF = insertCounterWriteout(CountersBySP);
+ Function *FlushF = insertFlush(CountersBySP);
+
+ // Create a small bit of code that registers the "__llvm_gcov_writeout" to
+ // be executed at exit and the "__llvm_gcov_flush" function to be executed
+ // when "__gcov_flush" is called.
+ FunctionType *FTy = FunctionType::get(Type::getVoidTy(*Ctx), false);
+ Function *F = Function::Create(FTy, GlobalValue::InternalLinkage,
+ "__llvm_gcov_init", M);
+ F->setUnnamedAddr(true);
+ F->setLinkage(GlobalValue::InternalLinkage);
+ F->addFnAttr(Attribute::NoInline);
+ if (Options.NoRedZone)
+ F->addFnAttr(Attribute::NoRedZone);
+
+ BasicBlock *BB = BasicBlock::Create(*Ctx, "entry", F);
+ IRBuilder<> Builder(BB);
+
+ FTy = FunctionType::get(Type::getVoidTy(*Ctx), false);
+ Type *Params[] = {
+ PointerType::get(FTy, 0),
+ PointerType::get(FTy, 0)
+ };
+ FTy = FunctionType::get(Builder.getVoidTy(), Params, false);
+
+      // Initialize the environment and register the local writeout and flush
+ // functions.
+ Constant *GCOVInit = M->getOrInsertFunction("llvm_gcov_init", FTy);
+ Builder.CreateCall2(GCOVInit, WriteoutF, FlushF);
+ Builder.CreateRetVoid();
+
+ appendToGlobalCtors(*M, F, 0);
}
if (InsertIndCounterIncrCode)
@@ -540,13 +604,13 @@ GlobalVariable *GCOVProfiler::buildEdgeLookupTable(
// read it. Threads and invoke make this untrue.
// emit [(succs * preds) x i64*], logically [succ x [pred x i64*]].
+ size_t TableSize = Succs.size() * Preds.size();
Type *Int64PtrTy = Type::getInt64PtrTy(*Ctx);
- ArrayType *EdgeTableTy = ArrayType::get(
- Int64PtrTy, Succs.size() * Preds.size());
+ ArrayType *EdgeTableTy = ArrayType::get(Int64PtrTy, TableSize);
- Constant **EdgeTable = new Constant*[Succs.size() * Preds.size()];
+ OwningArrayPtr<Constant *> EdgeTable(new Constant*[TableSize]);
Constant *NullValue = Constant::getNullValue(Int64PtrTy);
- for (int i = 0, ie = Succs.size() * Preds.size(); i != ie; ++i)
+ for (size_t i = 0; i != TableSize; ++i)
EdgeTable[i] = NullValue;
unsigned Edge = 0;
@@ -556,8 +620,8 @@ GlobalVariable *GCOVProfiler::buildEdgeLookupTable(
if (Successors > 1 && !isa<BranchInst>(TI) && !isa<ReturnInst>(TI)) {
for (int i = 0; i != Successors; ++i) {
BasicBlock *Succ = TI->getSuccessor(i);
- IRBuilder<> builder(Succ);
- Value *Counter = builder.CreateConstInBoundsGEP2_64(Counters, 0,
+ IRBuilder<> Builder(Succ);
+ Value *Counter = Builder.CreateConstInBoundsGEP2_64(Counters, 0,
Edge + i);
EdgeTable[((Succs.idFor(Succ)-1) * Preds.size()) +
(Preds.idFor(BB)-1)] = cast<Constant>(Counter);
@@ -566,7 +630,7 @@ GlobalVariable *GCOVProfiler::buildEdgeLookupTable(
Edge += Successors;
}
- ArrayRef<Constant*> V(&EdgeTable[0], Succs.size() * Preds.size());
+ ArrayRef<Constant*> V(&EdgeTable[0], TableSize);
GlobalVariable *EdgeTableGV =
new GlobalVariable(
*M, EdgeTableTy, true, GlobalValue::InternalLinkage,
@@ -577,8 +641,11 @@ GlobalVariable *GCOVProfiler::buildEdgeLookupTable(
}
Constant *GCOVProfiler::getStartFileFunc() {
- FunctionType *FTy = FunctionType::get(Type::getVoidTy(*Ctx),
- Type::getInt8PtrTy(*Ctx), false);
+ Type *Args[] = {
+ Type::getInt8PtrTy(*Ctx), // const char *orig_filename
+ Type::getInt8PtrTy(*Ctx), // const char version[4]
+ };
+ FunctionType *FTy = FunctionType::get(Type::getVoidTy(*Ctx), Args, false);
return M->getOrInsertFunction("llvm_gcda_start_file", FTy);
}
@@ -594,9 +661,10 @@ Constant *GCOVProfiler::getIncrementIndirectCounterFunc() {
}
Constant *GCOVProfiler::getEmitFunctionFunc() {
- Type *Args[2] = {
+ Type *Args[3] = {
Type::getInt32Ty(*Ctx), // uint32_t ident
Type::getInt8PtrTy(*Ctx), // const char *function_name
+ Type::getInt8Ty(*Ctx), // uint8_t use_extra_checksum
};
FunctionType *FTy = FunctionType::get(Type::getVoidTy(*Ctx), Args, false);
return M->getOrInsertFunction("llvm_gcda_emit_function", FTy);
@@ -607,11 +675,20 @@ Constant *GCOVProfiler::getEmitArcsFunc() {
Type::getInt32Ty(*Ctx), // uint32_t num_counters
Type::getInt64PtrTy(*Ctx), // uint64_t *counters
};
- FunctionType *FTy = FunctionType::get(Type::getVoidTy(*Ctx),
- Args, false);
+ FunctionType *FTy = FunctionType::get(Type::getVoidTy(*Ctx), Args, false);
return M->getOrInsertFunction("llvm_gcda_emit_arcs", FTy);
}
+Constant *GCOVProfiler::getDeleteWriteoutFunctionListFunc() {
+ FunctionType *FTy = FunctionType::get(Type::getVoidTy(*Ctx), false);
+ return M->getOrInsertFunction("llvm_delete_writeout_function_list", FTy);
+}
+
+Constant *GCOVProfiler::getDeleteFlushFunctionListFunc() {
+ FunctionType *FTy = FunctionType::get(Type::getVoidTy(*Ctx), false);
+ return M->getOrInsertFunction("llvm_delete_flush_function_list", FTy);
+}
+
Constant *GCOVProfiler::getEndFileFunc() {
FunctionType *FTy = FunctionType::get(Type::getVoidTy(*Ctx), false);
return M->getOrInsertFunction("llvm_gcda_end_file", FTy);
@@ -630,7 +707,7 @@ GlobalVariable *GCOVProfiler::getEdgeStateValue() {
return GV;
}
-void GCOVProfiler::insertCounterWriteout(
+Function *GCOVProfiler::insertCounterWriteout(
ArrayRef<std::pair<GlobalVariable *, MDNode *> > CountersBySP) {
FunctionType *WriteoutFTy = FunctionType::get(Type::getVoidTy(*Ctx), false);
Function *WriteoutF = M->getFunction("__llvm_gcov_writeout");
@@ -638,6 +715,9 @@ void GCOVProfiler::insertCounterWriteout(
WriteoutF = Function::Create(WriteoutFTy, GlobalValue::InternalLinkage,
"__llvm_gcov_writeout", M);
WriteoutF->setUnnamedAddr(true);
+ WriteoutF->addFnAttr(Attribute::NoInline);
+ if (Options.NoRedZone)
+ WriteoutF->addFnAttr(Attribute::NoRedZone);
BasicBlock *BB = BasicBlock::Create(*Ctx, "entry", WriteoutF);
IRBuilder<> Builder(BB);
@@ -652,48 +732,31 @@ void GCOVProfiler::insertCounterWriteout(
for (unsigned i = 0, e = CU_Nodes->getNumOperands(); i != e; ++i) {
DICompileUnit CU(CU_Nodes->getOperand(i));
std::string FilenameGcda = mangleName(CU, "gcda");
- Builder.CreateCall(StartFile,
- Builder.CreateGlobalStringPtr(FilenameGcda));
- for (ArrayRef<std::pair<GlobalVariable *, MDNode *> >::iterator
- I = CountersBySP.begin(), E = CountersBySP.end();
- I != E; ++I) {
- DISubprogram SP(I->second);
- intptr_t ident = reinterpret_cast<intptr_t>(I->second);
- Builder.CreateCall2(EmitFunction,
- ConstantInt::get(Type::getInt32Ty(*Ctx), ident),
- Builder.CreateGlobalStringPtr(SP.getName()));
-
- GlobalVariable *GV = I->first;
+ Builder.CreateCall2(StartFile,
+ Builder.CreateGlobalStringPtr(FilenameGcda),
+ Builder.CreateGlobalStringPtr(ReversedVersion));
+ for (unsigned j = 0, e = CountersBySP.size(); j != e; ++j) {
+ DISubprogram SP(CountersBySP[j].second);
+ Builder.CreateCall3(
+ EmitFunction, Builder.getInt32(j),
+ Options.FunctionNamesInData ?
+ Builder.CreateGlobalStringPtr(getFunctionName(SP)) :
+ Constant::getNullValue(Builder.getInt8PtrTy()),
+ Builder.getInt8(Options.UseCfgChecksum));
+
+ GlobalVariable *GV = CountersBySP[j].first;
unsigned Arcs =
cast<ArrayType>(GV->getType()->getElementType())->getNumElements();
Builder.CreateCall2(EmitArcs,
- ConstantInt::get(Type::getInt32Ty(*Ctx), Arcs),
+ Builder.getInt32(Arcs),
Builder.CreateConstGEP2_64(GV, 0, 0));
}
Builder.CreateCall(EndFile);
}
}
- Builder.CreateRetVoid();
- // Create a small bit of code that registers the "__llvm_gcov_writeout"
- // function to be executed at exit.
- FunctionType *FTy = FunctionType::get(Type::getVoidTy(*Ctx), false);
- Function *F = Function::Create(FTy, GlobalValue::InternalLinkage,
- "__llvm_gcov_init", M);
- F->setUnnamedAddr(true);
- F->setLinkage(GlobalValue::InternalLinkage);
- F->addFnAttr(Attributes::NoInline);
-
- BB = BasicBlock::Create(*Ctx, "entry", F);
- Builder.SetInsertPoint(BB);
-
- FTy = FunctionType::get(Type::getInt32Ty(*Ctx),
- PointerType::get(FTy, 0), false);
- Constant *AtExitFn = M->getOrInsertFunction("atexit", FTy);
- Builder.CreateCall(AtExitFn, WriteoutF);
Builder.CreateRetVoid();
-
- appendToGlobalCtors(*M, F, 0);
+ return WriteoutF;
}
void GCOVProfiler::insertIndirectCounterIncrement() {
@@ -701,11 +764,9 @@ void GCOVProfiler::insertIndirectCounterIncrement() {
cast<Function>(GCOVProfiler::getIncrementIndirectCounterFunc());
Fn->setUnnamedAddr(true);
Fn->setLinkage(GlobalValue::InternalLinkage);
- Fn->addFnAttr(Attributes::NoInline);
-
- Type *Int32Ty = Type::getInt32Ty(*Ctx);
- Type *Int64Ty = Type::getInt64Ty(*Ctx);
- Constant *NegOne = ConstantInt::get(Int32Ty, 0xffffffff);
+ Fn->addFnAttr(Attribute::NoInline);
+ if (Options.NoRedZone)
+ Fn->addFnAttr(Attribute::NoRedZone);
// Create basic blocks for function.
BasicBlock *BB = BasicBlock::Create(*Ctx, "entry", Fn);
@@ -720,26 +781,27 @@ void GCOVProfiler::insertIndirectCounterIncrement() {
Argument *Arg = Fn->arg_begin();
Arg->setName("predecessor");
Value *Pred = Builder.CreateLoad(Arg, "pred");
- Value *Cond = Builder.CreateICmpEQ(Pred, NegOne);
+ Value *Cond = Builder.CreateICmpEQ(Pred, Builder.getInt32(0xffffffff));
BranchInst::Create(Exit, PredNotNegOne, Cond, BB);
Builder.SetInsertPoint(PredNotNegOne);
// uint64_t *counter = counters[pred];
// if (!counter) return;
- Value *ZExtPred = Builder.CreateZExt(Pred, Int64Ty);
+ Value *ZExtPred = Builder.CreateZExt(Pred, Builder.getInt64Ty());
Arg = llvm::next(Fn->arg_begin());
Arg->setName("counters");
Value *GEP = Builder.CreateGEP(Arg, ZExtPred);
Value *Counter = Builder.CreateLoad(GEP, "counter");
Cond = Builder.CreateICmpEQ(Counter,
- Constant::getNullValue(Int64Ty->getPointerTo()));
+ Constant::getNullValue(
+ Builder.getInt64Ty()->getPointerTo()));
Builder.CreateCondBr(Cond, Exit, CounterEnd);
// ++*counter;
Builder.SetInsertPoint(CounterEnd);
Value *Add = Builder.CreateAdd(Builder.CreateLoad(Counter),
- ConstantInt::get(Int64Ty, 1));
+ Builder.getInt64(1));
Builder.CreateStore(Add, Counter);
Builder.CreateBr(Exit);
@@ -748,16 +810,19 @@ void GCOVProfiler::insertIndirectCounterIncrement() {
Builder.CreateRetVoid();
}
-void GCOVProfiler::
+Function *GCOVProfiler::
insertFlush(ArrayRef<std::pair<GlobalVariable*, MDNode*> > CountersBySP) {
FunctionType *FTy = FunctionType::get(Type::getVoidTy(*Ctx), false);
- Function *FlushF = M->getFunction("__gcov_flush");
+ Function *FlushF = M->getFunction("__llvm_gcov_flush");
if (!FlushF)
FlushF = Function::Create(FTy, GlobalValue::InternalLinkage,
- "__gcov_flush", M);
+ "__llvm_gcov_flush", M);
else
FlushF->setLinkage(GlobalValue::InternalLinkage);
FlushF->setUnnamedAddr(true);
+ FlushF->addFnAttr(Attribute::NoInline);
+ if (Options.NoRedZone)
+ FlushF->addFnAttr(Attribute::NoRedZone);
BasicBlock *Entry = BasicBlock::Create(*Ctx, "entry", FlushF);
@@ -781,8 +846,10 @@ insertFlush(ArrayRef<std::pair<GlobalVariable*, MDNode*> > CountersBySP) {
if (RetTy == Type::getVoidTy(*Ctx))
Builder.CreateRetVoid();
else if (RetTy->isIntegerTy())
- // Used if __gcov_flush was implicitly declared.
+ // Used if __llvm_gcov_flush was implicitly declared.
Builder.CreateRet(ConstantInt::get(RetTy, 0));
else
- report_fatal_error("invalid return type for __gcov_flush");
+ report_fatal_error("invalid return type for __llvm_gcov_flush");
+
+ return FlushF;
}
diff --git a/contrib/llvm/lib/Transforms/Instrumentation/Instrumentation.cpp b/contrib/llvm/lib/Transforms/Instrumentation/Instrumentation.cpp
index 1e0b4a348a17..8ba102559bb6 100644
--- a/contrib/llvm/lib/Transforms/Instrumentation/Instrumentation.cpp
+++ b/contrib/llvm/lib/Transforms/Instrumentation/Instrumentation.cpp
@@ -21,11 +21,13 @@ using namespace llvm;
/// library.
void llvm::initializeInstrumentation(PassRegistry &Registry) {
initializeAddressSanitizerPass(Registry);
+ initializeAddressSanitizerModulePass(Registry);
initializeBoundsCheckingPass(Registry);
initializeEdgeProfilerPass(Registry);
initializeGCOVProfilerPass(Registry);
initializeOptimalEdgeProfilerPass(Registry);
initializePathProfilerPass(Registry);
+ initializeMemorySanitizerPass(Registry);
initializeThreadSanitizerPass(Registry);
}
diff --git a/contrib/llvm/lib/Transforms/Instrumentation/MaximumSpanningTree.h b/contrib/llvm/lib/Transforms/Instrumentation/MaximumSpanningTree.h
index a4bb5a66af6d..363539b2886f 100644
--- a/contrib/llvm/lib/Transforms/Instrumentation/MaximumSpanningTree.h
+++ b/contrib/llvm/lib/Transforms/Instrumentation/MaximumSpanningTree.h
@@ -15,10 +15,10 @@
#ifndef LLVM_ANALYSIS_MAXIMUMSPANNINGTREE_H
#define LLVM_ANALYSIS_MAXIMUMSPANNINGTREE_H
-#include "llvm/BasicBlock.h"
#include "llvm/ADT/EquivalenceClasses.h"
-#include <vector>
+#include "llvm/IR/BasicBlock.h"
#include <algorithm>
+#include <vector>
namespace llvm {
diff --git a/contrib/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/contrib/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
new file mode 100644
index 000000000000..4e75904ded4f
--- /dev/null
+++ b/contrib/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -0,0 +1,1985 @@
+//===-- MemorySanitizer.cpp - detector of uninitialized reads -------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file is a part of MemorySanitizer, a detector of uninitialized
+/// reads.
+///
+/// Status: early prototype.
+///
+/// The algorithm of the tool is similar to Memcheck
+/// (http://goo.gl/QKbem). We associate a few shadow bits with every
+/// byte of the application memory, poison the shadow of the malloc-ed
+/// or alloca-ed memory, load the shadow bits on every memory read,
+/// propagate the shadow bits through some of the arithmetic
+/// instructions (including MOV), store the shadow bits on every memory
+/// write, report a bug on some other instructions (e.g. JMP) if the
+/// associated shadow is poisoned.
+///
+/// But there are differences too. The first and the major one:
+/// compiler instrumentation instead of binary instrumentation. This
+/// gives us much better register allocation, possible compiler
+/// optimizations and a fast start-up. But this brings the major issue
+/// as well: msan needs to see all program events, including system
+/// calls and reads/writes in system libraries, so we either need to
+/// compile *everything* with msan or use a binary translation
+/// component (e.g. DynamoRIO) to instrument pre-built libraries.
+/// Another difference from Memcheck is that we use 8 shadow bits per
+/// byte of application memory and use a direct shadow mapping. This
+/// greatly simplifies the instrumentation code and avoids races on
+/// shadow updates (Memcheck is single-threaded so races are not a
+/// concern there. Memcheck uses 2 shadow bits per byte with a slow
+/// path storage that uses 8 bits per byte).
+///
+/// The default value of shadow is 0, which means "clean" (not poisoned).
+///
+/// Every module initializer should call __msan_init to ensure that the
+/// shadow memory is ready. On error, __msan_warning is called. Since
+/// parameters and return values may be passed via registers, we have a
+/// specialized thread-local shadow for return values
+/// (__msan_retval_tls) and parameters (__msan_param_tls).
+///
+/// Origin tracking.
+///
+/// MemorySanitizer can track origins (allocation points) of all uninitialized
+/// values. This behavior is controlled with a flag (msan-track-origins) and is
+/// disabled by default.
+///
+/// Origins are 4-byte values created and interpreted by the runtime library.
+/// They are stored in a second shadow mapping, one 4-byte value for 4 bytes
+/// of application memory. Propagation of origins is basically a bunch of
+/// "select" instructions that pick the origin of a dirty argument, if an
+/// instruction has one.
+///
+/// Every 4 aligned, consecutive bytes of application memory have one origin
+/// value associated with them. If these bytes contain uninitialized data
+/// coming from 2 different allocations, the last store wins. Because of this,
+/// MemorySanitizer reports can show unrelated origins, but this is unlikely in
+/// practice.
+///
+/// Origins are meaningless for fully initialized values, so MemorySanitizer
+/// avoids storing origin to memory when a fully initialized value is stored.
+/// This way it avoids needless overwriting of the origin of the 4-byte
+/// region on a short (i.e. 1-byte) clean store, and it is also good for
+/// performance.
+//===----------------------------------------------------------------------===//
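
Before the code, a standalone sketch of the 64-bit address arithmetic sketched above (illustrative values only; the constants and formulas match kShadowMask64/kOriginOffset64 defined just below and the ShadowAddr/OriginAddr comments later in the file):

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint64_t kShadowMask64   = 1ULL << 46;
      const uint64_t kOriginOffset64 = 1ULL << 45;
      uint64_t App    = 0x7fff12345678ULL;                   // application address
      uint64_t Shadow = App & ~kShadowMask64;                // direct shadow mapping
      uint64_t Origin = (Shadow + kOriginOffset64) & ~3ULL;  // 4-byte granularity
      std::printf("app=0x%llx shadow=0x%llx origin=0x%llx\n",
                  (unsigned long long)App, (unsigned long long)Shadow,
                  (unsigned long long)Origin);
      return 0;
    }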
+
+#define DEBUG_TYPE "msan"
+
+#include "llvm/Transforms/Instrumentation.h"
+#include "llvm/ADT/DepthFirstIterator.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/ValueMap.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/InlineAsm.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/MDBuilder.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/Type.h"
+#include "llvm/InstVisitor.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Transforms/Utils/BasicBlockUtils.h"
+#include "llvm/Transforms/Utils/BlackList.h"
+#include "llvm/Transforms/Utils/Local.h"
+#include "llvm/Transforms/Utils/ModuleUtils.h"
+
+using namespace llvm;
+
+static const uint64_t kShadowMask32 = 1ULL << 31;
+static const uint64_t kShadowMask64 = 1ULL << 46;
+static const uint64_t kOriginOffset32 = 1ULL << 30;
+static const uint64_t kOriginOffset64 = 1ULL << 45;
+static const unsigned kMinOriginAlignment = 4;
+static const unsigned kShadowTLSAlignment = 8;
+
+/// \brief Track origins of uninitialized values.
+///
+/// Adds a section to MemorySanitizer report that points to the allocation
+/// (stack or heap) the uninitialized bits came from originally.
+static cl::opt<bool> ClTrackOrigins("msan-track-origins",
+ cl::desc("Track origins (allocation sites) of poisoned memory"),
+ cl::Hidden, cl::init(false));
+static cl::opt<bool> ClKeepGoing("msan-keep-going",
+ cl::desc("keep going after reporting a UMR"),
+ cl::Hidden, cl::init(false));
+static cl::opt<bool> ClPoisonStack("msan-poison-stack",
+ cl::desc("poison uninitialized stack variables"),
+ cl::Hidden, cl::init(true));
+static cl::opt<bool> ClPoisonStackWithCall("msan-poison-stack-with-call",
+ cl::desc("poison uninitialized stack variables with a call"),
+ cl::Hidden, cl::init(false));
+static cl::opt<int> ClPoisonStackPattern("msan-poison-stack-pattern",
+ cl::desc("poison uninitialized stack variables with the given patter"),
+ cl::Hidden, cl::init(0xff));
+static cl::opt<bool> ClPoisonUndef("msan-poison-undef",
+ cl::desc("poison undef temps"),
+ cl::Hidden, cl::init(true));
+
+static cl::opt<bool> ClHandleICmp("msan-handle-icmp",
+ cl::desc("propagate shadow through ICmpEQ and ICmpNE"),
+ cl::Hidden, cl::init(true));
+
+static cl::opt<bool> ClHandleICmpExact("msan-handle-icmp-exact",
+ cl::desc("exact handling of relational integer ICmp"),
+ cl::Hidden, cl::init(false));
+
+static cl::opt<bool> ClStoreCleanOrigin("msan-store-clean-origin",
+ cl::desc("store origin for clean (fully initialized) values"),
+ cl::Hidden, cl::init(false));
+
+// This flag controls whether we check the shadow of the address
+// operand of load or store. Such bugs are very rare, since a load from
+// a garbage address typically results in SEGV, but they still happen
+// (e.g. only the lower bits of the address are garbage, or the access
+// happens early at program startup, when malloc-ed memory is more likely
+// to be zeroed). As of 2012-08-28 this flag adds 20% slowdown.
+static cl::opt<bool> ClCheckAccessAddress("msan-check-access-address",
+ cl::desc("report accesses through a pointer which has poisoned shadow"),
+ cl::Hidden, cl::init(true));
+
+static cl::opt<bool> ClDumpStrictInstructions("msan-dump-strict-instructions",
+ cl::desc("print out instructions with default strict semantics"),
+ cl::Hidden, cl::init(false));
+
+static cl::opt<std::string> ClBlacklistFile("msan-blacklist",
+ cl::desc("File containing the list of functions where MemorySanitizer "
+ "should not report bugs"), cl::Hidden);
+
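As a usage sketch, these are ordinary cl::opt flags, so with an opt binary that links this pass (registered under the name "msan" further down) an invocation could look like the following (file names are hypothetical):

    opt -msan -msan-track-origins -msan-poison-stack-pattern=0xff \
        -msan-blacklist=ignores.txt input.ll -S -o instrumented.ll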
+namespace {
+
+/// \brief An instrumentation pass implementing detection of uninitialized
+/// reads.
+///
+/// MemorySanitizer: instrument the code in module to find
+/// uninitialized reads.
+class MemorySanitizer : public FunctionPass {
+ public:
+ MemorySanitizer(bool TrackOrigins = false,
+ StringRef BlacklistFile = StringRef())
+ : FunctionPass(ID),
+ TrackOrigins(TrackOrigins || ClTrackOrigins),
+ TD(0),
+ WarningFn(0),
+ BlacklistFile(BlacklistFile.empty() ? ClBlacklistFile
+ : BlacklistFile) { }
+ const char *getPassName() const { return "MemorySanitizer"; }
+ bool runOnFunction(Function &F);
+ bool doInitialization(Module &M);
+ static char ID; // Pass identification, replacement for typeid.
+
+ private:
+ void initializeCallbacks(Module &M);
+
+ /// \brief Track origins (allocation points) of uninitialized values.
+ bool TrackOrigins;
+
+ DataLayout *TD;
+ LLVMContext *C;
+ Type *IntptrTy;
+ Type *OriginTy;
+ /// \brief Thread-local shadow storage for function parameters.
+ GlobalVariable *ParamTLS;
+ /// \brief Thread-local origin storage for function parameters.
+ GlobalVariable *ParamOriginTLS;
+ /// \brief Thread-local shadow storage for function return value.
+ GlobalVariable *RetvalTLS;
+ /// \brief Thread-local origin storage for function return value.
+ GlobalVariable *RetvalOriginTLS;
+ /// \brief Thread-local shadow storage for in-register va_arg function
+ /// parameters (x86_64-specific).
+ GlobalVariable *VAArgTLS;
+ /// \brief Thread-local shadow storage for va_arg overflow area
+ /// (x86_64-specific).
+ GlobalVariable *VAArgOverflowSizeTLS;
+ /// \brief Thread-local space used to pass origin value to the UMR reporting
+ /// function.
+ GlobalVariable *OriginTLS;
+
+ /// \brief The run-time callback to print a warning.
+ Value *WarningFn;
+ /// \brief Run-time helper that copies origin info for a memory range.
+ Value *MsanCopyOriginFn;
+ /// \brief Run-time helper that generates a new origin value for a stack
+ /// allocation.
+ Value *MsanSetAllocaOriginFn;
+ /// \brief Run-time helper that poisons stack on function entry.
+ Value *MsanPoisonStackFn;
+ /// \brief MSan runtime replacements for memmove, memcpy and memset.
+ Value *MemmoveFn, *MemcpyFn, *MemsetFn;
+
+ /// \brief Address mask used in application-to-shadow address calculation.
+ /// ShadowAddr is computed as ApplicationAddr & ~ShadowMask.
+ uint64_t ShadowMask;
+ /// \brief Offset of the origin shadow from the "normal" shadow.
+ /// OriginAddr is computed as (ShadowAddr + OriginOffset) & ~3ULL
+ uint64_t OriginOffset;
+ /// \brief Branch weights for error reporting.
+ MDNode *ColdCallWeights;
+ /// \brief Branch weights for origin store.
+ MDNode *OriginStoreWeights;
+  /// \brief Path to blacklist file.
+ SmallString<64> BlacklistFile;
+ /// \brief The blacklist.
+ OwningPtr<BlackList> BL;
+ /// \brief An empty volatile inline asm that prevents callback merge.
+ InlineAsm *EmptyAsm;
+
+ friend struct MemorySanitizerVisitor;
+ friend struct VarArgAMD64Helper;
+};
+} // namespace
+
+char MemorySanitizer::ID = 0;
+INITIALIZE_PASS(MemorySanitizer, "msan",
+ "MemorySanitizer: detects uninitialized reads.",
+ false, false)
+
+FunctionPass *llvm::createMemorySanitizerPass(bool TrackOrigins,
+ StringRef BlacklistFile) {
+ return new MemorySanitizer(TrackOrigins, BlacklistFile);
+}
+
+/// \brief Create a non-const global initialized with the given string.
+///
+/// Creates a writable global for Str so that we can pass it to the
+/// run-time lib. The runtime uses the first 4 bytes of the string to
+/// store the frame ID, so the string needs to be mutable.
+static GlobalVariable *createPrivateNonConstGlobalForString(Module &M,
+ StringRef Str) {
+ Constant *StrConst = ConstantDataArray::getString(M.getContext(), Str);
+ return new GlobalVariable(M, StrConst->getType(), /*isConstant=*/false,
+ GlobalValue::PrivateLinkage, StrConst, "");
+}
+
+
+/// \brief Insert extern declaration of runtime-provided functions and globals.
+void MemorySanitizer::initializeCallbacks(Module &M) {
+ // Only do this once.
+ if (WarningFn)
+ return;
+
+ IRBuilder<> IRB(*C);
+ // Create the callback.
+ // FIXME: this function should have "Cold" calling conv,
+ // which is not yet implemented.
+ StringRef WarningFnName = ClKeepGoing ? "__msan_warning"
+ : "__msan_warning_noreturn";
+ WarningFn = M.getOrInsertFunction(WarningFnName, IRB.getVoidTy(), NULL);
+
+ MsanCopyOriginFn = M.getOrInsertFunction(
+ "__msan_copy_origin", IRB.getVoidTy(), IRB.getInt8PtrTy(),
+ IRB.getInt8PtrTy(), IntptrTy, NULL);
+ MsanSetAllocaOriginFn = M.getOrInsertFunction(
+ "__msan_set_alloca_origin", IRB.getVoidTy(), IRB.getInt8PtrTy(), IntptrTy,
+ IRB.getInt8PtrTy(), NULL);
+ MsanPoisonStackFn = M.getOrInsertFunction(
+ "__msan_poison_stack", IRB.getVoidTy(), IRB.getInt8PtrTy(), IntptrTy, NULL);
+ MemmoveFn = M.getOrInsertFunction(
+ "__msan_memmove", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
+ IRB.getInt8PtrTy(), IntptrTy, NULL);
+ MemcpyFn = M.getOrInsertFunction(
+ "__msan_memcpy", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
+ IntptrTy, NULL);
+ MemsetFn = M.getOrInsertFunction(
+ "__msan_memset", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IRB.getInt32Ty(),
+ IntptrTy, NULL);
+
+ // Create globals.
+ RetvalTLS = new GlobalVariable(
+ M, ArrayType::get(IRB.getInt64Ty(), 8), false,
+ GlobalVariable::ExternalLinkage, 0, "__msan_retval_tls", 0,
+ GlobalVariable::GeneralDynamicTLSModel);
+ RetvalOriginTLS = new GlobalVariable(
+ M, OriginTy, false, GlobalVariable::ExternalLinkage, 0,
+ "__msan_retval_origin_tls", 0, GlobalVariable::GeneralDynamicTLSModel);
+
+ ParamTLS = new GlobalVariable(
+ M, ArrayType::get(IRB.getInt64Ty(), 1000), false,
+ GlobalVariable::ExternalLinkage, 0, "__msan_param_tls", 0,
+ GlobalVariable::GeneralDynamicTLSModel);
+ ParamOriginTLS = new GlobalVariable(
+ M, ArrayType::get(OriginTy, 1000), false, GlobalVariable::ExternalLinkage,
+ 0, "__msan_param_origin_tls", 0, GlobalVariable::GeneralDynamicTLSModel);
+
+ VAArgTLS = new GlobalVariable(
+ M, ArrayType::get(IRB.getInt64Ty(), 1000), false,
+ GlobalVariable::ExternalLinkage, 0, "__msan_va_arg_tls", 0,
+ GlobalVariable::GeneralDynamicTLSModel);
+ VAArgOverflowSizeTLS = new GlobalVariable(
+ M, IRB.getInt64Ty(), false, GlobalVariable::ExternalLinkage, 0,
+ "__msan_va_arg_overflow_size_tls", 0,
+ GlobalVariable::GeneralDynamicTLSModel);
+ OriginTLS = new GlobalVariable(
+ M, IRB.getInt32Ty(), false, GlobalVariable::ExternalLinkage, 0,
+ "__msan_origin_tls", 0, GlobalVariable::GeneralDynamicTLSModel);
+
+ // We insert an empty inline asm after __msan_report* to avoid callback merge.
+ EmptyAsm = InlineAsm::get(FunctionType::get(IRB.getVoidTy(), false),
+ StringRef(""), StringRef(""),
+ /*hasSideEffects=*/true);
+}
+
+/// \brief Module-level initialization.
+///
+/// Inserts a call to __msan_init into the module's constructor list.
+bool MemorySanitizer::doInitialization(Module &M) {
+ TD = getAnalysisIfAvailable<DataLayout>();
+ if (!TD)
+ return false;
+ BL.reset(new BlackList(BlacklistFile));
+ C = &(M.getContext());
+ unsigned PtrSize = TD->getPointerSizeInBits(/* AddressSpace */0);
+ switch (PtrSize) {
+ case 64:
+ ShadowMask = kShadowMask64;
+ OriginOffset = kOriginOffset64;
+ break;
+ case 32:
+ ShadowMask = kShadowMask32;
+ OriginOffset = kOriginOffset32;
+ break;
+ default:
+ report_fatal_error("unsupported pointer size");
+ break;
+ }
+
+ IRBuilder<> IRB(*C);
+ IntptrTy = IRB.getIntPtrTy(TD);
+ OriginTy = IRB.getInt32Ty();
+
+ ColdCallWeights = MDBuilder(*C).createBranchWeights(1, 1000);
+ OriginStoreWeights = MDBuilder(*C).createBranchWeights(1, 1000);
+
+ // Insert a call to __msan_init/__msan_track_origins into the module's CTORs.
+ appendToGlobalCtors(M, cast<Function>(M.getOrInsertFunction(
+ "__msan_init", IRB.getVoidTy(), NULL)), 0);
+
+ new GlobalVariable(M, IRB.getInt32Ty(), true, GlobalValue::WeakODRLinkage,
+ IRB.getInt32(TrackOrigins), "__msan_track_origins");
+
+ new GlobalVariable(M, IRB.getInt32Ty(), true, GlobalValue::WeakODRLinkage,
+ IRB.getInt32(ClKeepGoing), "__msan_keep_going");
+
+ return true;
+}
+
+namespace {
+
+/// \brief A helper class that handles instrumentation of VarArg
+/// functions on a particular platform.
+///
+/// Implementations are expected to insert the instrumentation
+/// necessary to propagate argument shadow through VarArg function
+/// calls. Visit* methods are called during an InstVisitor pass over
+/// the function, and should avoid creating new basic blocks. A new
+/// instance of this class is created for each instrumented function.
+struct VarArgHelper {
+ /// \brief Visit a CallSite.
+ virtual void visitCallSite(CallSite &CS, IRBuilder<> &IRB) = 0;
+
+ /// \brief Visit a va_start call.
+ virtual void visitVAStartInst(VAStartInst &I) = 0;
+
+ /// \brief Visit a va_copy call.
+ virtual void visitVACopyInst(VACopyInst &I) = 0;
+
+ /// \brief Finalize function instrumentation.
+ ///
+ /// This method is called after visiting all interesting (see above)
+ /// instructions in a function.
+ virtual void finalizeInstrumentation() = 0;
+
+ virtual ~VarArgHelper() {}
+};
+
+struct MemorySanitizerVisitor;
+
+VarArgHelper*
+CreateVarArgHelper(Function &Func, MemorySanitizer &Msan,
+ MemorySanitizerVisitor &Visitor);
+
+/// This class does all the work for a given function. Store and Load
+/// instructions store and load corresponding shadow and origin
+/// values. Most instructions propagate shadow from arguments to their
+/// return values. Certain instructions (most importantly, BranchInst)
+/// test their argument shadow and print reports (with a runtime call) if it's
+/// non-zero.
+struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
+ Function &F;
+ MemorySanitizer &MS;
+ SmallVector<PHINode *, 16> ShadowPHINodes, OriginPHINodes;
+ ValueMap<Value*, Value*> ShadowMap, OriginMap;
+ bool InsertChecks;
+ bool LoadShadow;
+ OwningPtr<VarArgHelper> VAHelper;
+
+ struct ShadowOriginAndInsertPoint {
+ Instruction *Shadow;
+ Instruction *Origin;
+ Instruction *OrigIns;
+ ShadowOriginAndInsertPoint(Instruction *S, Instruction *O, Instruction *I)
+ : Shadow(S), Origin(O), OrigIns(I) { }
+ ShadowOriginAndInsertPoint() : Shadow(0), Origin(0), OrigIns(0) { }
+ };
+ SmallVector<ShadowOriginAndInsertPoint, 16> InstrumentationList;
+ SmallVector<Instruction*, 16> StoreList;
+
+ MemorySanitizerVisitor(Function &F, MemorySanitizer &MS)
+ : F(F), MS(MS), VAHelper(CreateVarArgHelper(F, MS, *this)) {
+ LoadShadow = InsertChecks =
+ !MS.BL->isIn(F) &&
+ F.getAttributes().hasAttribute(AttributeSet::FunctionIndex,
+ Attribute::SanitizeMemory);
+
+ DEBUG(if (!InsertChecks)
+ dbgs() << "MemorySanitizer is not inserting checks into '"
+ << F.getName() << "'\n");
+ }
+
+ void materializeStores() {
+ for (size_t i = 0, n = StoreList.size(); i < n; i++) {
+ StoreInst& I = *dyn_cast<StoreInst>(StoreList[i]);
+
+ IRBuilder<> IRB(&I);
+ Value *Val = I.getValueOperand();
+ Value *Addr = I.getPointerOperand();
+ Value *Shadow = getShadow(Val);
+ Value *ShadowPtr = getShadowPtr(Addr, Shadow->getType(), IRB);
+
+ StoreInst *NewSI =
+ IRB.CreateAlignedStore(Shadow, ShadowPtr, I.getAlignment());
+ DEBUG(dbgs() << " STORE: " << *NewSI << "\n");
+ (void)NewSI;
+
+ if (ClCheckAccessAddress)
+ insertCheck(Addr, &I);
+
+ if (MS.TrackOrigins) {
+ unsigned Alignment = std::max(kMinOriginAlignment, I.getAlignment());
+ if (ClStoreCleanOrigin || isa<StructType>(Shadow->getType())) {
+ IRB.CreateAlignedStore(getOrigin(Val), getOriginPtr(Addr, IRB),
+ Alignment);
+ } else {
+ Value *ConvertedShadow = convertToShadowTyNoVec(Shadow, IRB);
+
+ Constant *Cst = dyn_cast_or_null<Constant>(ConvertedShadow);
+ // TODO(eugenis): handle non-zero constant shadow by inserting an
+          // unconditional check (we cannot simply fail compilation as this
+          // could be in dead code).
+ if (Cst)
+ continue;
+
+ Value *Cmp = IRB.CreateICmpNE(ConvertedShadow,
+ getCleanShadow(ConvertedShadow), "_mscmp");
+ Instruction *CheckTerm =
+ SplitBlockAndInsertIfThen(cast<Instruction>(Cmp), false,
+ MS.OriginStoreWeights);
+ IRBuilder<> IRBNew(CheckTerm);
+ IRBNew.CreateAlignedStore(getOrigin(Val), getOriginPtr(Addr, IRBNew),
+ Alignment);
+ }
+ }
+ }
+ }
+
+ void materializeChecks() {
+ for (size_t i = 0, n = InstrumentationList.size(); i < n; i++) {
+ Instruction *Shadow = InstrumentationList[i].Shadow;
+ Instruction *OrigIns = InstrumentationList[i].OrigIns;
+ IRBuilder<> IRB(OrigIns);
+ DEBUG(dbgs() << " SHAD0 : " << *Shadow << "\n");
+ Value *ConvertedShadow = convertToShadowTyNoVec(Shadow, IRB);
+ DEBUG(dbgs() << " SHAD1 : " << *ConvertedShadow << "\n");
+ Value *Cmp = IRB.CreateICmpNE(ConvertedShadow,
+ getCleanShadow(ConvertedShadow), "_mscmp");
+ Instruction *CheckTerm =
+ SplitBlockAndInsertIfThen(cast<Instruction>(Cmp),
+ /* Unreachable */ !ClKeepGoing,
+ MS.ColdCallWeights);
+
+ IRB.SetInsertPoint(CheckTerm);
+ if (MS.TrackOrigins) {
+ Instruction *Origin = InstrumentationList[i].Origin;
+ IRB.CreateStore(Origin ? (Value*)Origin : (Value*)IRB.getInt32(0),
+ MS.OriginTLS);
+ }
+ CallInst *Call = IRB.CreateCall(MS.WarningFn);
+ Call->setDebugLoc(OrigIns->getDebugLoc());
+ IRB.CreateCall(MS.EmptyAsm);
+ DEBUG(dbgs() << " CHECK: " << *Cmp << "\n");
+ }
+ DEBUG(dbgs() << "DONE:\n" << F);
+ }
+
+ /// \brief Add MemorySanitizer instrumentation to a function.
+ bool runOnFunction() {
+ MS.initializeCallbacks(*F.getParent());
+ if (!MS.TD) return false;
+
+    // In the presence of unreachable blocks, we may see PHI nodes with
+    // incoming values from such blocks. Since InstVisitor skips unreachable
+ // blocks, such nodes will not have any shadow value associated with them.
+ // It's easier to remove unreachable blocks than deal with missing shadow.
+ removeUnreachableBlocks(F);
+
+ // Iterate all BBs in depth-first order and create shadow instructions
+ // for all instructions (where applicable).
+ // For PHI nodes we create dummy shadow PHIs which will be finalized later.
+ for (df_iterator<BasicBlock*> DI = df_begin(&F.getEntryBlock()),
+ DE = df_end(&F.getEntryBlock()); DI != DE; ++DI) {
+ BasicBlock *BB = *DI;
+ visit(*BB);
+ }
+
+ // Finalize PHI nodes.
+ for (size_t i = 0, n = ShadowPHINodes.size(); i < n; i++) {
+ PHINode *PN = ShadowPHINodes[i];
+ PHINode *PNS = cast<PHINode>(getShadow(PN));
+ PHINode *PNO = MS.TrackOrigins ? cast<PHINode>(getOrigin(PN)) : 0;
+ size_t NumValues = PN->getNumIncomingValues();
+ for (size_t v = 0; v < NumValues; v++) {
+ PNS->addIncoming(getShadow(PN, v), PN->getIncomingBlock(v));
+ if (PNO)
+ PNO->addIncoming(getOrigin(PN, v), PN->getIncomingBlock(v));
+ }
+ }
+
+ VAHelper->finalizeInstrumentation();
+
+ // Delayed instrumentation of StoreInst.
+ // This may add new checks to be inserted later.
+ materializeStores();
+
+ // Insert shadow value checks.
+ materializeChecks();
+
+ return true;
+ }
+
+ /// \brief Compute the shadow type that corresponds to a given Value.
+ Type *getShadowTy(Value *V) {
+ return getShadowTy(V->getType());
+ }
+
+ /// \brief Compute the shadow type that corresponds to a given Type.
+ Type *getShadowTy(Type *OrigTy) {
+ if (!OrigTy->isSized()) {
+ return 0;
+ }
+ // For integer type, shadow is the same as the original type.
+ // This may return weird-sized types like i1.
+ if (IntegerType *IT = dyn_cast<IntegerType>(OrigTy))
+ return IT;
+ if (VectorType *VT = dyn_cast<VectorType>(OrigTy)) {
+ uint32_t EltSize = MS.TD->getTypeSizeInBits(VT->getElementType());
+ return VectorType::get(IntegerType::get(*MS.C, EltSize),
+ VT->getNumElements());
+ }
+ if (StructType *ST = dyn_cast<StructType>(OrigTy)) {
+ SmallVector<Type*, 4> Elements;
+ for (unsigned i = 0, n = ST->getNumElements(); i < n; i++)
+ Elements.push_back(getShadowTy(ST->getElementType(i)));
+ StructType *Res = StructType::get(*MS.C, Elements, ST->isPacked());
+ DEBUG(dbgs() << "getShadowTy: " << *ST << " ===> " << *Res << "\n");
+ return Res;
+ }
+ uint32_t TypeSize = MS.TD->getTypeSizeInBits(OrigTy);
+ return IntegerType::get(*MS.C, TypeSize);
+ }
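+
+  // For example (illustrative): i32 keeps its type, <4 x float> maps to
+  // <4 x i32>, and {i32, double} maps to {i32, i64}; unsized types have no
+  // shadow, and getShadowTy returns null for them.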
+
+ /// \brief Flatten a vector type.
+ Type *getShadowTyNoVec(Type *ty) {
+ if (VectorType *vt = dyn_cast<VectorType>(ty))
+ return IntegerType::get(*MS.C, vt->getBitWidth());
+ return ty;
+ }
+
+  /// \brief Convert a shadow value to its flattened variant.
+ Value *convertToShadowTyNoVec(Value *V, IRBuilder<> &IRB) {
+ Type *Ty = V->getType();
+ Type *NoVecTy = getShadowTyNoVec(Ty);
+ if (Ty == NoVecTy) return V;
+ return IRB.CreateBitCast(V, NoVecTy);
+ }
+
+ /// \brief Compute the shadow address that corresponds to a given application
+ /// address.
+ ///
+ /// Shadow = Addr & ~ShadowMask.
+ Value *getShadowPtr(Value *Addr, Type *ShadowTy,
+ IRBuilder<> &IRB) {
+ Value *ShadowLong =
+ IRB.CreateAnd(IRB.CreatePointerCast(Addr, MS.IntptrTy),
+ ConstantInt::get(MS.IntptrTy, ~MS.ShadowMask));
+ return IRB.CreateIntToPtr(ShadowLong, PointerType::get(ShadowTy, 0));
+ }
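+
+  // For example (illustrative only; the real mask is target-specific): with
+  // a hypothetical ShadowMask of 1ULL << 46, the application address
+  // 0x00007fff12345678 maps to the shadow address 0x00003fff12345678.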
+
+ /// \brief Compute the origin address that corresponds to a given application
+ /// address.
+ ///
+ /// OriginAddr = (ShadowAddr + OriginOffset) & ~3ULL
+ Value *getOriginPtr(Value *Addr, IRBuilder<> &IRB) {
+ Value *ShadowLong =
+ IRB.CreateAnd(IRB.CreatePointerCast(Addr, MS.IntptrTy),
+ ConstantInt::get(MS.IntptrTy, ~MS.ShadowMask));
+ Value *Add =
+ IRB.CreateAdd(ShadowLong,
+ ConstantInt::get(MS.IntptrTy, MS.OriginOffset));
+ Value *SecondAnd =
+ IRB.CreateAnd(Add, ConstantInt::get(MS.IntptrTy, ~3ULL));
+ return IRB.CreateIntToPtr(SecondAnd, PointerType::get(IRB.getInt32Ty(), 0));
+ }
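+
+  // Continuing the sketch above (illustrative): with a hypothetical
+  // OriginOffset of 0x2000000000, shadow address 0x00003fff12345678 yields
+  // (0x00003fff12345678 + 0x2000000000) & ~3ULL = 0x00003fff32345678.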
+
+ /// \brief Compute the shadow address for a given function argument.
+ ///
+ /// Shadow = ParamTLS+ArgOffset.
+ Value *getShadowPtrForArgument(Value *A, IRBuilder<> &IRB,
+ int ArgOffset) {
+ Value *Base = IRB.CreatePointerCast(MS.ParamTLS, MS.IntptrTy);
+ Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
+ return IRB.CreateIntToPtr(Base, PointerType::get(getShadowTy(A), 0),
+ "_msarg");
+ }
+
+ /// \brief Compute the origin address for a given function argument.
+ Value *getOriginPtrForArgument(Value *A, IRBuilder<> &IRB,
+ int ArgOffset) {
+ if (!MS.TrackOrigins) return 0;
+ Value *Base = IRB.CreatePointerCast(MS.ParamOriginTLS, MS.IntptrTy);
+ Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
+ return IRB.CreateIntToPtr(Base, PointerType::get(MS.OriginTy, 0),
+ "_msarg_o");
+ }
+
+ /// \brief Compute the shadow address for a retval.
+ Value *getShadowPtrForRetval(Value *A, IRBuilder<> &IRB) {
+ Value *Base = IRB.CreatePointerCast(MS.RetvalTLS, MS.IntptrTy);
+ return IRB.CreateIntToPtr(Base, PointerType::get(getShadowTy(A), 0),
+ "_msret");
+ }
+
+ /// \brief Compute the origin address for a retval.
+ Value *getOriginPtrForRetval(IRBuilder<> &IRB) {
+ // We keep a single origin for the entire retval. Might be too optimistic.
+ return MS.RetvalOriginTLS;
+ }
+
+ /// \brief Set SV to be the shadow value for V.
+ void setShadow(Value *V, Value *SV) {
+ assert(!ShadowMap.count(V) && "Values may only have one shadow");
+ ShadowMap[V] = SV;
+ }
+
+ /// \brief Set Origin to be the origin value for V.
+ void setOrigin(Value *V, Value *Origin) {
+ if (!MS.TrackOrigins) return;
+ assert(!OriginMap.count(V) && "Values may only have one origin");
+ DEBUG(dbgs() << "ORIGIN: " << *V << " ==> " << *Origin << "\n");
+ OriginMap[V] = Origin;
+ }
+
+ /// \brief Create a clean shadow value for a given value.
+ ///
+ /// Clean shadow (all zeroes) means all bits of the value are defined
+ /// (initialized).
+ Constant *getCleanShadow(Value *V) {
+ Type *ShadowTy = getShadowTy(V);
+ if (!ShadowTy)
+ return 0;
+ return Constant::getNullValue(ShadowTy);
+ }
+
+ /// \brief Create a dirty shadow of a given shadow type.
+ Constant *getPoisonedShadow(Type *ShadowTy) {
+ assert(ShadowTy);
+ if (isa<IntegerType>(ShadowTy) || isa<VectorType>(ShadowTy))
+ return Constant::getAllOnesValue(ShadowTy);
+ StructType *ST = cast<StructType>(ShadowTy);
+ SmallVector<Constant *, 4> Vals;
+ for (unsigned i = 0, n = ST->getNumElements(); i < n; i++)
+ Vals.push_back(getPoisonedShadow(ST->getElementType(i)));
+ return ConstantStruct::get(ST, Vals);
+ }
+
+ /// \brief Create a dirty shadow for a given value.
+ Constant *getPoisonedShadow(Value *V) {
+ Type *ShadowTy = getShadowTy(V);
+ if (!ShadowTy)
+ return 0;
+ return getPoisonedShadow(ShadowTy);
+ }
+
+ /// \brief Create a clean (zero) origin.
+ Value *getCleanOrigin() {
+ return Constant::getNullValue(MS.OriginTy);
+ }
+
+ /// \brief Get the shadow value for a given Value.
+ ///
+ /// This function either returns the value set earlier with setShadow,
+  /// or extracts it from ParamTLS (for function arguments).
+ Value *getShadow(Value *V) {
+ if (Instruction *I = dyn_cast<Instruction>(V)) {
+ // For instructions the shadow is already stored in the map.
+ Value *Shadow = ShadowMap[V];
+ if (!Shadow) {
+ DEBUG(dbgs() << "No shadow: " << *V << "\n" << *(I->getParent()));
+ (void)I;
+ assert(Shadow && "No shadow for a value");
+ }
+ return Shadow;
+ }
+ if (UndefValue *U = dyn_cast<UndefValue>(V)) {
+ Value *AllOnes = ClPoisonUndef ? getPoisonedShadow(V) : getCleanShadow(V);
+ DEBUG(dbgs() << "Undef: " << *U << " ==> " << *AllOnes << "\n");
+ (void)U;
+ return AllOnes;
+ }
+ if (Argument *A = dyn_cast<Argument>(V)) {
+ // For arguments we compute the shadow on demand and store it in the map.
+ Value **ShadowPtr = &ShadowMap[V];
+ if (*ShadowPtr)
+ return *ShadowPtr;
+ Function *F = A->getParent();
+ IRBuilder<> EntryIRB(F->getEntryBlock().getFirstNonPHI());
+ unsigned ArgOffset = 0;
+ for (Function::arg_iterator AI = F->arg_begin(), AE = F->arg_end();
+ AI != AE; ++AI) {
+ if (!AI->getType()->isSized()) {
+ DEBUG(dbgs() << "Arg is not sized\n");
+ continue;
+ }
+ unsigned Size = AI->hasByValAttr()
+ ? MS.TD->getTypeAllocSize(AI->getType()->getPointerElementType())
+ : MS.TD->getTypeAllocSize(AI->getType());
+ if (A == AI) {
+ Value *Base = getShadowPtrForArgument(AI, EntryIRB, ArgOffset);
+ if (AI->hasByValAttr()) {
+          // The ByVal pointer itself has a clean shadow. We copy the actual
+ // argument shadow to the underlying memory.
+ Value *Cpy = EntryIRB.CreateMemCpy(
+ getShadowPtr(V, EntryIRB.getInt8Ty(), EntryIRB),
+ Base, Size, AI->getParamAlignment());
+ DEBUG(dbgs() << " ByValCpy: " << *Cpy << "\n");
+ (void)Cpy;
+ *ShadowPtr = getCleanShadow(V);
+ } else {
+ *ShadowPtr = EntryIRB.CreateLoad(Base);
+ }
+ DEBUG(dbgs() << " ARG: " << *AI << " ==> " <<
+ **ShadowPtr << "\n");
+ if (MS.TrackOrigins) {
+ Value* OriginPtr = getOriginPtrForArgument(AI, EntryIRB, ArgOffset);
+ setOrigin(A, EntryIRB.CreateLoad(OriginPtr));
+ }
+ }
+ ArgOffset += DataLayout::RoundUpAlignment(Size, 8);
+ }
+ assert(*ShadowPtr && "Could not find shadow for an argument");
+ return *ShadowPtr;
+ }
+ // For everything else the shadow is zero.
+ return getCleanShadow(V);
+ }
+
+ /// \brief Get the shadow for i-th argument of the instruction I.
+ Value *getShadow(Instruction *I, int i) {
+ return getShadow(I->getOperand(i));
+ }
+
+ /// \brief Get the origin for a value.
+ Value *getOrigin(Value *V) {
+ if (!MS.TrackOrigins) return 0;
+ if (isa<Instruction>(V) || isa<Argument>(V)) {
+ Value *Origin = OriginMap[V];
+ if (!Origin) {
+ DEBUG(dbgs() << "NO ORIGIN: " << *V << "\n");
+ Origin = getCleanOrigin();
+ }
+ return Origin;
+ }
+ return getCleanOrigin();
+ }
+
+ /// \brief Get the origin for i-th argument of the instruction I.
+ Value *getOrigin(Instruction *I, int i) {
+ return getOrigin(I->getOperand(i));
+ }
+
+ /// \brief Remember the place where a shadow check should be inserted.
+ ///
+  /// This location will later be instrumented with a check that will print a
+  /// UMR warning at run-time if the value is not fully defined.
+ void insertCheck(Value *Val, Instruction *OrigIns) {
+ assert(Val);
+ if (!InsertChecks) return;
+ Instruction *Shadow = dyn_cast_or_null<Instruction>(getShadow(Val));
+ if (!Shadow) return;
+#ifndef NDEBUG
+ Type *ShadowTy = Shadow->getType();
+ assert((isa<IntegerType>(ShadowTy) || isa<VectorType>(ShadowTy)) &&
+ "Can only insert checks for integer and vector shadow types");
+#endif
+ Instruction *Origin = dyn_cast_or_null<Instruction>(getOrigin(Val));
+ InstrumentationList.push_back(
+ ShadowOriginAndInsertPoint(Shadow, Origin, OrigIns));
+ }
+
+ // ------------------- Visitors.
+
+ /// \brief Instrument LoadInst
+ ///
+ /// Loads the corresponding shadow and (optionally) origin.
+ /// Optionally, checks that the load address is fully defined.
+ void visitLoadInst(LoadInst &I) {
+ assert(I.getType()->isSized() && "Load type must have size");
+ IRBuilder<> IRB(&I);
+ Type *ShadowTy = getShadowTy(&I);
+ Value *Addr = I.getPointerOperand();
+ if (LoadShadow) {
+ Value *ShadowPtr = getShadowPtr(Addr, ShadowTy, IRB);
+ setShadow(&I,
+ IRB.CreateAlignedLoad(ShadowPtr, I.getAlignment(), "_msld"));
+ } else {
+ setShadow(&I, getCleanShadow(&I));
+ }
+
+ if (ClCheckAccessAddress)
+ insertCheck(I.getPointerOperand(), &I);
+
+ if (MS.TrackOrigins) {
+ if (LoadShadow) {
+ unsigned Alignment = std::max(kMinOriginAlignment, I.getAlignment());
+ setOrigin(&I,
+ IRB.CreateAlignedLoad(getOriginPtr(Addr, IRB), Alignment));
+ } else {
+ setOrigin(&I, getCleanOrigin());
+ }
+ }
+ }
+
+ /// \brief Instrument StoreInst
+ ///
+ /// Stores the corresponding shadow and (optionally) origin.
+ /// Optionally, checks that the store address is fully defined.
+ void visitStoreInst(StoreInst &I) {
+ StoreList.push_back(&I);
+ }
+
+ // Vector manipulation.
+ void visitExtractElementInst(ExtractElementInst &I) {
+ insertCheck(I.getOperand(1), &I);
+ IRBuilder<> IRB(&I);
+ setShadow(&I, IRB.CreateExtractElement(getShadow(&I, 0), I.getOperand(1),
+ "_msprop"));
+ setOrigin(&I, getOrigin(&I, 0));
+ }
+
+ void visitInsertElementInst(InsertElementInst &I) {
+ insertCheck(I.getOperand(2), &I);
+ IRBuilder<> IRB(&I);
+ setShadow(&I, IRB.CreateInsertElement(getShadow(&I, 0), getShadow(&I, 1),
+ I.getOperand(2), "_msprop"));
+ setOriginForNaryOp(I);
+ }
+
+ void visitShuffleVectorInst(ShuffleVectorInst &I) {
+ insertCheck(I.getOperand(2), &I);
+ IRBuilder<> IRB(&I);
+ setShadow(&I, IRB.CreateShuffleVector(getShadow(&I, 0), getShadow(&I, 1),
+ I.getOperand(2), "_msprop"));
+ setOriginForNaryOp(I);
+ }
+
+ // Casts.
+ void visitSExtInst(SExtInst &I) {
+ IRBuilder<> IRB(&I);
+ setShadow(&I, IRB.CreateSExt(getShadow(&I, 0), I.getType(), "_msprop"));
+ setOrigin(&I, getOrigin(&I, 0));
+ }
+
+ void visitZExtInst(ZExtInst &I) {
+ IRBuilder<> IRB(&I);
+ setShadow(&I, IRB.CreateZExt(getShadow(&I, 0), I.getType(), "_msprop"));
+ setOrigin(&I, getOrigin(&I, 0));
+ }
+
+ void visitTruncInst(TruncInst &I) {
+ IRBuilder<> IRB(&I);
+ setShadow(&I, IRB.CreateTrunc(getShadow(&I, 0), I.getType(), "_msprop"));
+ setOrigin(&I, getOrigin(&I, 0));
+ }
+
+ void visitBitCastInst(BitCastInst &I) {
+ IRBuilder<> IRB(&I);
+ setShadow(&I, IRB.CreateBitCast(getShadow(&I, 0), getShadowTy(&I)));
+ setOrigin(&I, getOrigin(&I, 0));
+ }
+
+ void visitPtrToIntInst(PtrToIntInst &I) {
+ IRBuilder<> IRB(&I);
+ setShadow(&I, IRB.CreateIntCast(getShadow(&I, 0), getShadowTy(&I), false,
+ "_msprop_ptrtoint"));
+ setOrigin(&I, getOrigin(&I, 0));
+ }
+
+ void visitIntToPtrInst(IntToPtrInst &I) {
+ IRBuilder<> IRB(&I);
+ setShadow(&I, IRB.CreateIntCast(getShadow(&I, 0), getShadowTy(&I), false,
+ "_msprop_inttoptr"));
+ setOrigin(&I, getOrigin(&I, 0));
+ }
+
+ void visitFPToSIInst(CastInst& I) { handleShadowOr(I); }
+ void visitFPToUIInst(CastInst& I) { handleShadowOr(I); }
+ void visitSIToFPInst(CastInst& I) { handleShadowOr(I); }
+ void visitUIToFPInst(CastInst& I) { handleShadowOr(I); }
+ void visitFPExtInst(CastInst& I) { handleShadowOr(I); }
+ void visitFPTruncInst(CastInst& I) { handleShadowOr(I); }
+
+ /// \brief Propagate shadow for bitwise AND.
+ ///
+  /// This code is exact, i.e. if, for example, a bit in the left argument
+  /// is defined and 0, then neither the value nor the definedness of the
+  /// corresponding bit in the right argument affects the resulting shadow.
+ void visitAnd(BinaryOperator &I) {
+ IRBuilder<> IRB(&I);
+    // "And" of 0 and a poisoned value results in an unpoisoned value.
+ // 1&1 => 1; 0&1 => 0; p&1 => p;
+ // 1&0 => 0; 0&0 => 0; p&0 => 0;
+ // 1&p => p; 0&p => 0; p&p => p;
+ // S = (S1 & S2) | (V1 & S2) | (S1 & V2)
+ Value *S1 = getShadow(&I, 0);
+ Value *S2 = getShadow(&I, 1);
+ Value *V1 = I.getOperand(0);
+ Value *V2 = I.getOperand(1);
+ if (V1->getType() != S1->getType()) {
+ V1 = IRB.CreateIntCast(V1, S1->getType(), false);
+ V2 = IRB.CreateIntCast(V2, S2->getType(), false);
+ }
+ Value *S1S2 = IRB.CreateAnd(S1, S2);
+ Value *V1S2 = IRB.CreateAnd(V1, S2);
+ Value *S1V2 = IRB.CreateAnd(S1, V2);
+ setShadow(&I, IRB.CreateOr(S1S2, IRB.CreateOr(V1S2, S1V2)));
+ setOriginForNaryOp(I);
+ }
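+
+  // A worked example of the rule above (illustrative, 4-bit): let V1 = 1010
+  // with S1 = 0100 (bit 2 undefined), and V2 = 0110 with S2 = 0000. Then
+  // S = (S1 & S2) | (V1 & S2) | (S1 & V2) = 0000 | 0000 | 0100: the
+  // undefined bit survives only where the other operand has a defined 1;
+  // bits ANDed with a defined 0 become defined.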
+
+ void visitOr(BinaryOperator &I) {
+ IRBuilder<> IRB(&I);
+    // "Or" of 1 and a poisoned value results in an unpoisoned value.
+ // 1|1 => 1; 0|1 => 1; p|1 => 1;
+ // 1|0 => 1; 0|0 => 0; p|0 => p;
+ // 1|p => 1; 0|p => p; p|p => p;
+ // S = (S1 & S2) | (~V1 & S2) | (S1 & ~V2)
+ Value *S1 = getShadow(&I, 0);
+ Value *S2 = getShadow(&I, 1);
+ Value *V1 = IRB.CreateNot(I.getOperand(0));
+ Value *V2 = IRB.CreateNot(I.getOperand(1));
+ if (V1->getType() != S1->getType()) {
+ V1 = IRB.CreateIntCast(V1, S1->getType(), false);
+ V2 = IRB.CreateIntCast(V2, S2->getType(), false);
+ }
+ Value *S1S2 = IRB.CreateAnd(S1, S2);
+ Value *V1S2 = IRB.CreateAnd(V1, S2);
+ Value *S1V2 = IRB.CreateAnd(S1, V2);
+ setShadow(&I, IRB.CreateOr(S1S2, IRB.CreateOr(V1S2, S1V2)));
+ setOriginForNaryOp(I);
+ }
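+
+  // The same example for OR (illustrative, 4-bit): with V1 = 1010, S1 = 0100
+  // and V2 = 0110, S2 = 0000, S = (S1 & S2) | (~V1 & S2) | (S1 & ~V2) =
+  // 0000 | 0000 | (0100 & 1001) = 0000: the undefined bit of V1 is ORed
+  // with a defined 1 in V2, so the result bit is defined.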
+
+ /// \brief Default propagation of shadow and/or origin.
+ ///
+ /// This class implements the general case of shadow propagation, used in all
+ /// cases where we don't know and/or don't care about what the operation
+ /// actually does. It converts all input shadow values to a common type
+ /// (extending or truncating as necessary), and bitwise OR's them.
+ ///
+ /// This is much cheaper than inserting checks (i.e. requiring inputs to be
+ /// fully initialized), and less prone to false positives.
+ ///
+  /// This class also implements the general case of origin propagation. For
+  /// an N-ary operation, the result origin is set to the origin of an
+  /// argument that is not entirely initialized. If there is more than one
+  /// such argument, the rightmost of them is picked. It does not matter
+  /// which one is picked if all arguments are initialized.
+ template <bool CombineShadow>
+ class Combiner {
+ Value *Shadow;
+ Value *Origin;
+ IRBuilder<> &IRB;
+ MemorySanitizerVisitor *MSV;
+
+ public:
+ Combiner(MemorySanitizerVisitor *MSV, IRBuilder<> &IRB) :
+ Shadow(0), Origin(0), IRB(IRB), MSV(MSV) {}
+
+ /// \brief Add a pair of shadow and origin values to the mix.
+ Combiner &Add(Value *OpShadow, Value *OpOrigin) {
+ if (CombineShadow) {
+ assert(OpShadow);
+ if (!Shadow)
+ Shadow = OpShadow;
+ else {
+ OpShadow = MSV->CreateShadowCast(IRB, OpShadow, Shadow->getType());
+ Shadow = IRB.CreateOr(Shadow, OpShadow, "_msprop");
+ }
+ }
+
+ if (MSV->MS.TrackOrigins) {
+ assert(OpOrigin);
+ if (!Origin) {
+ Origin = OpOrigin;
+ } else {
+ Value *FlatShadow = MSV->convertToShadowTyNoVec(OpShadow, IRB);
+ Value *Cond = IRB.CreateICmpNE(FlatShadow,
+ MSV->getCleanShadow(FlatShadow));
+ Origin = IRB.CreateSelect(Cond, OpOrigin, Origin);
+ }
+ }
+ return *this;
+ }
+
+ /// \brief Add an application value to the mix.
+ Combiner &Add(Value *V) {
+ Value *OpShadow = MSV->getShadow(V);
+ Value *OpOrigin = MSV->MS.TrackOrigins ? MSV->getOrigin(V) : 0;
+ return Add(OpShadow, OpOrigin);
+ }
+
+ /// \brief Set the current combined values as the given instruction's shadow
+ /// and origin.
+ void Done(Instruction *I) {
+ if (CombineShadow) {
+ assert(Shadow);
+ Shadow = MSV->CreateShadowCast(IRB, Shadow, MSV->getShadowTy(I));
+ MSV->setShadow(I, Shadow);
+ }
+ if (MSV->MS.TrackOrigins) {
+ assert(Origin);
+ MSV->setOrigin(I, Origin);
+ }
+ }
+ };
+
+ typedef Combiner<true> ShadowAndOriginCombiner;
+ typedef Combiner<false> OriginCombiner;
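+
+  // A sketch of the default rule (illustrative): for R = op(A, B) with
+  // shadows Sa and Sb, the combiner computes Sr = cast(Sa) | cast(Sb), so
+  // any undefined input bit may poison every bit of the result; the origin
+  // of R becomes the origin of the rightmost operand with non-zero shadow.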
+
+ /// \brief Propagate origin for arbitrary operation.
+ void setOriginForNaryOp(Instruction &I) {
+ if (!MS.TrackOrigins) return;
+ IRBuilder<> IRB(&I);
+ OriginCombiner OC(this, IRB);
+ for (Instruction::op_iterator OI = I.op_begin(); OI != I.op_end(); ++OI)
+ OC.Add(OI->get());
+ OC.Done(&I);
+ }
+
+ size_t VectorOrPrimitiveTypeSizeInBits(Type *Ty) {
+ assert(!(Ty->isVectorTy() && Ty->getScalarType()->isPointerTy()) &&
+ "Vector of pointers is not a valid shadow type");
+ return Ty->isVectorTy() ?
+ Ty->getVectorNumElements() * Ty->getScalarSizeInBits() :
+ Ty->getPrimitiveSizeInBits();
+ }
+
+ /// \brief Cast between two shadow types, extending or truncating as
+ /// necessary.
+ Value *CreateShadowCast(IRBuilder<> &IRB, Value *V, Type *dstTy) {
+ Type *srcTy = V->getType();
+ if (dstTy->isIntegerTy() && srcTy->isIntegerTy())
+ return IRB.CreateIntCast(V, dstTy, false);
+ if (dstTy->isVectorTy() && srcTy->isVectorTy() &&
+ dstTy->getVectorNumElements() == srcTy->getVectorNumElements())
+ return IRB.CreateIntCast(V, dstTy, false);
+ size_t srcSizeInBits = VectorOrPrimitiveTypeSizeInBits(srcTy);
+ size_t dstSizeInBits = VectorOrPrimitiveTypeSizeInBits(dstTy);
+ Value *V1 = IRB.CreateBitCast(V, Type::getIntNTy(*MS.C, srcSizeInBits));
+ Value *V2 =
+ IRB.CreateIntCast(V1, Type::getIntNTy(*MS.C, dstSizeInBits), false);
+ return IRB.CreateBitCast(V2, dstTy);
+ // TODO: handle struct types.
+ }
+
+ /// \brief Propagate shadow for arbitrary operation.
+ void handleShadowOr(Instruction &I) {
+ IRBuilder<> IRB(&I);
+ ShadowAndOriginCombiner SC(this, IRB);
+ for (Instruction::op_iterator OI = I.op_begin(); OI != I.op_end(); ++OI)
+ SC.Add(OI->get());
+ SC.Done(&I);
+ }
+
+ void visitFAdd(BinaryOperator &I) { handleShadowOr(I); }
+ void visitFSub(BinaryOperator &I) { handleShadowOr(I); }
+ void visitFMul(BinaryOperator &I) { handleShadowOr(I); }
+ void visitAdd(BinaryOperator &I) { handleShadowOr(I); }
+ void visitSub(BinaryOperator &I) { handleShadowOr(I); }
+ void visitXor(BinaryOperator &I) { handleShadowOr(I); }
+ void visitMul(BinaryOperator &I) { handleShadowOr(I); }
+
+ void handleDiv(Instruction &I) {
+ IRBuilder<> IRB(&I);
+ // Strict on the second argument.
+ insertCheck(I.getOperand(1), &I);
+ setShadow(&I, getShadow(&I, 0));
+ setOrigin(&I, getOrigin(&I, 0));
+ }
+
+ void visitUDiv(BinaryOperator &I) { handleDiv(I); }
+ void visitSDiv(BinaryOperator &I) { handleDiv(I); }
+ void visitFDiv(BinaryOperator &I) { handleDiv(I); }
+ void visitURem(BinaryOperator &I) { handleDiv(I); }
+ void visitSRem(BinaryOperator &I) { handleDiv(I); }
+ void visitFRem(BinaryOperator &I) { handleDiv(I); }
+
+ /// \brief Instrument == and != comparisons.
+ ///
+ /// Sometimes the comparison result is known even if some of the bits of the
+ /// arguments are not.
+ void handleEqualityComparison(ICmpInst &I) {
+ IRBuilder<> IRB(&I);
+ Value *A = I.getOperand(0);
+ Value *B = I.getOperand(1);
+ Value *Sa = getShadow(A);
+ Value *Sb = getShadow(B);
+
+ // Get rid of pointers and vectors of pointers.
+ // For ints (and vectors of ints), types of A and Sa match,
+ // and this is a no-op.
+ A = IRB.CreatePointerCast(A, Sa->getType());
+ B = IRB.CreatePointerCast(B, Sb->getType());
+
+ // A == B <==> (C = A^B) == 0
+ // A != B <==> (C = A^B) != 0
+ // Sc = Sa | Sb
+ Value *C = IRB.CreateXor(A, B);
+ Value *Sc = IRB.CreateOr(Sa, Sb);
+    // Now we deal with the i = (C == 0) comparison (C != 0 is analogous).
+ // Result is defined if one of the following is true
+ // * there is a defined 1 bit in C
+ // * C is fully defined
+ // Si = !(C & ~Sc) && Sc
+ Value *Zero = Constant::getNullValue(Sc->getType());
+ Value *MinusOne = Constant::getAllOnesValue(Sc->getType());
+ Value *Si =
+ IRB.CreateAnd(IRB.CreateICmpNE(Sc, Zero),
+ IRB.CreateICmpEQ(
+ IRB.CreateAnd(IRB.CreateXor(Sc, MinusOne), C), Zero));
+ Si->setName("_msprop_icmp");
+ setShadow(&I, Si);
+ setOriginForNaryOp(I);
+ }
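+
+  // A worked example (illustrative, 4-bit): A = 1010 with Sa = 0100 and a
+  // fully defined B = 0010. Then C = 1000, Sc = 0100, and C & ~Sc = 1000 is
+  // non-zero: a defined 1 bit in C proves A != B no matter what the
+  // undefined bit holds, so Si = (Sc != 0) && ((C & ~Sc) == 0) evaluates to
+  // false and the comparison result is fully defined.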
+
+ /// \brief Build the lowest possible value of V, taking into account V's
+ /// uninitialized bits.
+ Value *getLowestPossibleValue(IRBuilder<> &IRB, Value *A, Value *Sa,
+ bool isSigned) {
+ if (isSigned) {
+ // Split shadow into sign bit and other bits.
+ Value *SaOtherBits = IRB.CreateLShr(IRB.CreateShl(Sa, 1), 1);
+ Value *SaSignBit = IRB.CreateXor(Sa, SaOtherBits);
+      // Set the undefined sign bit, clear the other undefined bits.
+ return
+ IRB.CreateOr(IRB.CreateAnd(A, IRB.CreateNot(SaOtherBits)), SaSignBit);
+ } else {
+ // Minimize undefined bits.
+ return IRB.CreateAnd(A, IRB.CreateNot(Sa));
+ }
+ }
+
+ /// \brief Build the highest possible value of V, taking into account V's
+ /// uninitialized bits.
+ Value *getHighestPossibleValue(IRBuilder<> &IRB, Value *A, Value *Sa,
+ bool isSigned) {
+ if (isSigned) {
+ // Split shadow into sign bit and other bits.
+ Value *SaOtherBits = IRB.CreateLShr(IRB.CreateShl(Sa, 1), 1);
+ Value *SaSignBit = IRB.CreateXor(Sa, SaOtherBits);
+      // Clear the undefined sign bit, set the other undefined bits.
+ return
+ IRB.CreateOr(IRB.CreateAnd(A, IRB.CreateNot(SaSignBit)), SaOtherBits);
+ } else {
+ // Maximize undefined bits.
+ return IRB.CreateOr(A, Sa);
+ }
+ }
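+
+  // For example (illustrative, unsigned 4-bit): A = 1101 with Sa = 0100
+  // (bit 2 undefined) spans [A & ~Sa, A | Sa] = [1001, 1101] = [9, 13].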
+
+ /// \brief Instrument relational comparisons.
+ ///
+ /// This function does exact shadow propagation for all relational
+ /// comparisons of integers, pointers and vectors of those.
+ /// FIXME: output seems suboptimal when one of the operands is a constant
+ void handleRelationalComparisonExact(ICmpInst &I) {
+ IRBuilder<> IRB(&I);
+ Value *A = I.getOperand(0);
+ Value *B = I.getOperand(1);
+ Value *Sa = getShadow(A);
+ Value *Sb = getShadow(B);
+
+ // Get rid of pointers and vectors of pointers.
+ // For ints (and vectors of ints), types of A and Sa match,
+ // and this is a no-op.
+ A = IRB.CreatePointerCast(A, Sa->getType());
+ B = IRB.CreatePointerCast(B, Sb->getType());
+
+ // Let [a0, a1] be the interval of possible values of A, taking into account
+ // its undefined bits. Let [b0, b1] be the interval of possible values of B.
+ // Then (A cmp B) is defined iff (a0 cmp b1) == (a1 cmp b0).
+ bool IsSigned = I.isSigned();
+ Value *S1 = IRB.CreateICmp(I.getPredicate(),
+ getLowestPossibleValue(IRB, A, Sa, IsSigned),
+ getHighestPossibleValue(IRB, B, Sb, IsSigned));
+ Value *S2 = IRB.CreateICmp(I.getPredicate(),
+ getHighestPossibleValue(IRB, A, Sa, IsSigned),
+ getLowestPossibleValue(IRB, B, Sb, IsSigned));
+ Value *Si = IRB.CreateXor(S1, S2);
+ setShadow(&I, Si);
+ setOriginForNaryOp(I);
+ }
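+
+  // Continuing the example above (illustrative): for unsigned A in [9, 13]
+  // and a fully defined B = 7, both 9 > 7 and 13 > 7 hold, so S1 == S2 and
+  // Si = S1 ^ S2 is false: A > B is defined. Against B = 11 the two bounds
+  // disagree and the comparison result is poisoned.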
+
+ /// \brief Instrument signed relational comparisons.
+ ///
+ /// Handle (x<0) and (x>=0) comparisons (essentially, sign bit tests) by
+ /// propagating the highest bit of the shadow. Everything else is delegated
+ /// to handleShadowOr().
+ void handleSignedRelationalComparison(ICmpInst &I) {
+ Constant *constOp0 = dyn_cast<Constant>(I.getOperand(0));
+ Constant *constOp1 = dyn_cast<Constant>(I.getOperand(1));
+    Value *op = 0;
+ CmpInst::Predicate pre = I.getPredicate();
+ if (constOp0 && constOp0->isNullValue() &&
+ (pre == CmpInst::ICMP_SGT || pre == CmpInst::ICMP_SLE)) {
+ op = I.getOperand(1);
+ } else if (constOp1 && constOp1->isNullValue() &&
+ (pre == CmpInst::ICMP_SLT || pre == CmpInst::ICMP_SGE)) {
+ op = I.getOperand(0);
+ }
+ if (op) {
+ IRBuilder<> IRB(&I);
+      Value *Shadow =
+ IRB.CreateICmpSLT(getShadow(op), getCleanShadow(op), "_msprop_icmpslt");
+ setShadow(&I, Shadow);
+ setOrigin(&I, getOrigin(op));
+ } else {
+ handleShadowOr(I);
+ }
+ }
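+
+  // For example (illustrative): (x < 0) depends only on the sign bit of x,
+  // so the result shadow is the sign bit of x's shadow, which is exactly
+  // what the CreateICmpSLT(Sx, 0) above computes.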
+
+ void visitICmpInst(ICmpInst &I) {
+ if (!ClHandleICmp) {
+ handleShadowOr(I);
+ return;
+ }
+ if (I.isEquality()) {
+ handleEqualityComparison(I);
+ return;
+ }
+
+ assert(I.isRelational());
+ if (ClHandleICmpExact) {
+ handleRelationalComparisonExact(I);
+ return;
+ }
+ if (I.isSigned()) {
+ handleSignedRelationalComparison(I);
+ return;
+ }
+
+ assert(I.isUnsigned());
+ if ((isa<Constant>(I.getOperand(0)) || isa<Constant>(I.getOperand(1)))) {
+ handleRelationalComparisonExact(I);
+ return;
+ }
+
+ handleShadowOr(I);
+ }
+
+ void visitFCmpInst(FCmpInst &I) {
+ handleShadowOr(I);
+ }
+
+ void handleShift(BinaryOperator &I) {
+ IRBuilder<> IRB(&I);
+ // If any of the S2 bits are poisoned, the whole thing is poisoned.
+ // Otherwise perform the same shift on S1.
+ Value *S1 = getShadow(&I, 0);
+ Value *S2 = getShadow(&I, 1);
+ Value *S2Conv = IRB.CreateSExt(IRB.CreateICmpNE(S2, getCleanShadow(S2)),
+ S2->getType());
+ Value *V2 = I.getOperand(1);
+ Value *Shift = IRB.CreateBinOp(I.getOpcode(), S1, V2);
+ setShadow(&I, IRB.CreateOr(Shift, S2Conv));
+ setOriginForNaryOp(I);
+ }
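+
+  // A worked example (illustrative, 8-bit): for V1 << V2 with S2 == 0, the
+  // shadow is simply S1 << V2, e.g. S1 = 00000100 shifted by V2 = 2 gives
+  // 00010000. If any bit of S2 is set, S2Conv sign-extends (S2 != 0) to
+  // all-ones and the entire result is poisoned.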
+
+ void visitShl(BinaryOperator &I) { handleShift(I); }
+ void visitAShr(BinaryOperator &I) { handleShift(I); }
+ void visitLShr(BinaryOperator &I) { handleShift(I); }
+
+ /// \brief Instrument llvm.memmove
+ ///
+ /// At this point we don't know if llvm.memmove will be inlined or not.
+ /// If we don't instrument it and it gets inlined,
+ /// our interceptor will not kick in and we will lose the memmove.
+ /// If we instrument the call here, but it does not get inlined,
+  /// we will memmove the shadow twice, which is bad in the case
+ /// of overlapping regions. So, we simply lower the intrinsic to a call.
+ ///
+ /// Similar situation exists for memcpy and memset.
+ void visitMemMoveInst(MemMoveInst &I) {
+ IRBuilder<> IRB(&I);
+ IRB.CreateCall3(
+ MS.MemmoveFn,
+ IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
+ IRB.CreatePointerCast(I.getArgOperand(1), IRB.getInt8PtrTy()),
+ IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false));
+ I.eraseFromParent();
+ }
+
+ // Similar to memmove: avoid copying shadow twice.
+  // This is somewhat unfortunate as it may slow down small constant memcpys.
+ // FIXME: consider doing manual inline for small constant sizes and proper
+ // alignment.
+ void visitMemCpyInst(MemCpyInst &I) {
+ IRBuilder<> IRB(&I);
+ IRB.CreateCall3(
+ MS.MemcpyFn,
+ IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
+ IRB.CreatePointerCast(I.getArgOperand(1), IRB.getInt8PtrTy()),
+ IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false));
+ I.eraseFromParent();
+ }
+
+ // Same as memcpy.
+ void visitMemSetInst(MemSetInst &I) {
+ IRBuilder<> IRB(&I);
+ IRB.CreateCall3(
+ MS.MemsetFn,
+ IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
+ IRB.CreateIntCast(I.getArgOperand(1), IRB.getInt32Ty(), false),
+ IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false));
+ I.eraseFromParent();
+ }
+
+ void visitVAStartInst(VAStartInst &I) {
+ VAHelper->visitVAStartInst(I);
+ }
+
+ void visitVACopyInst(VACopyInst &I) {
+ VAHelper->visitVACopyInst(I);
+ }
+
+ enum IntrinsicKind {
+ IK_DoesNotAccessMemory,
+ IK_OnlyReadsMemory,
+ IK_WritesMemory
+ };
+
+ static IntrinsicKind getIntrinsicKind(Intrinsic::ID iid) {
+ const int DoesNotAccessMemory = IK_DoesNotAccessMemory;
+ const int OnlyReadsArgumentPointees = IK_OnlyReadsMemory;
+ const int OnlyReadsMemory = IK_OnlyReadsMemory;
+ const int OnlyAccessesArgumentPointees = IK_WritesMemory;
+ const int UnknownModRefBehavior = IK_WritesMemory;
+#define GET_INTRINSIC_MODREF_BEHAVIOR
+#define ModRefBehavior IntrinsicKind
+#include "llvm/IR/Intrinsics.gen"
+#undef ModRefBehavior
+#undef GET_INTRINSIC_MODREF_BEHAVIOR
+ }
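+
+  // The include above expands into a switch over all intrinsic IDs whose
+  // cases are written in terms of ModRefBehavior constants; the #defines map
+  // those constants onto the local IntrinsicKind values declared above.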
+
+ /// \brief Handle vector store-like intrinsics.
+ ///
+ /// Instrument intrinsics that look like a simple SIMD store: writes memory,
+ /// has 1 pointer argument and 1 vector argument, returns void.
+ bool handleVectorStoreIntrinsic(IntrinsicInst &I) {
+ IRBuilder<> IRB(&I);
+ Value* Addr = I.getArgOperand(0);
+ Value *Shadow = getShadow(&I, 1);
+ Value *ShadowPtr = getShadowPtr(Addr, Shadow->getType(), IRB);
+
+    // We don't know the pointer alignment (this could be an unaligned SSE
+    // store!), so we have to assume the worst case.
+ IRB.CreateAlignedStore(Shadow, ShadowPtr, 1);
+
+ if (ClCheckAccessAddress)
+ insertCheck(Addr, &I);
+
+ // FIXME: use ClStoreCleanOrigin
+ // FIXME: factor out common code from materializeStores
+ if (MS.TrackOrigins)
+ IRB.CreateStore(getOrigin(&I, 1), getOriginPtr(Addr, IRB));
+ return true;
+ }
+
+ /// \brief Handle vector load-like intrinsics.
+ ///
+ /// Instrument intrinsics that look like a simple SIMD load: reads memory,
+ /// has 1 pointer argument, returns a vector.
+ bool handleVectorLoadIntrinsic(IntrinsicInst &I) {
+ IRBuilder<> IRB(&I);
+ Value *Addr = I.getArgOperand(0);
+
+ Type *ShadowTy = getShadowTy(&I);
+ if (LoadShadow) {
+ Value *ShadowPtr = getShadowPtr(Addr, ShadowTy, IRB);
+      // We don't know the pointer alignment (this could be an unaligned SSE
+      // load!), so we have to assume the worst case.
+ setShadow(&I, IRB.CreateAlignedLoad(ShadowPtr, 1, "_msld"));
+ } else {
+ setShadow(&I, getCleanShadow(&I));
+ }
+
+ if (ClCheckAccessAddress)
+ insertCheck(Addr, &I);
+
+ if (MS.TrackOrigins) {
+ if (LoadShadow)
+ setOrigin(&I, IRB.CreateLoad(getOriginPtr(Addr, IRB)));
+ else
+ setOrigin(&I, getCleanOrigin());
+ }
+ return true;
+ }
+
+ /// \brief Handle (SIMD arithmetic)-like intrinsics.
+ ///
+ /// Instrument intrinsics with any number of arguments of the same type,
+ /// equal to the return type. The type should be simple (no aggregates or
+ /// pointers; vectors are fine).
+ /// Caller guarantees that this intrinsic does not access memory.
+ bool maybeHandleSimpleNomemIntrinsic(IntrinsicInst &I) {
+ Type *RetTy = I.getType();
+ if (!(RetTy->isIntOrIntVectorTy() ||
+ RetTy->isFPOrFPVectorTy() ||
+ RetTy->isX86_MMXTy()))
+ return false;
+
+ unsigned NumArgOperands = I.getNumArgOperands();
+
+ for (unsigned i = 0; i < NumArgOperands; ++i) {
+ Type *Ty = I.getArgOperand(i)->getType();
+ if (Ty != RetTy)
+ return false;
+ }
+
+ IRBuilder<> IRB(&I);
+ ShadowAndOriginCombiner SC(this, IRB);
+ for (unsigned i = 0; i < NumArgOperands; ++i)
+ SC.Add(I.getArgOperand(i));
+ SC.Done(&I);
+
+ return true;
+ }
+
+ /// \brief Heuristically instrument unknown intrinsics.
+ ///
+ /// The main purpose of this code is to do something reasonable with all
+  /// random intrinsics we might encounter, most importantly SIMD intrinsics.
+ /// We recognize several classes of intrinsics by their argument types and
+  /// ModRefBehavior and apply special instrumentation when we are reasonably
+ /// sure that we know what the intrinsic does.
+ ///
+ /// We special-case intrinsics where this approach fails. See llvm.bswap
+ /// handling as an example of that.
+ bool handleUnknownIntrinsic(IntrinsicInst &I) {
+ unsigned NumArgOperands = I.getNumArgOperands();
+ if (NumArgOperands == 0)
+ return false;
+
+ Intrinsic::ID iid = I.getIntrinsicID();
+ IntrinsicKind IK = getIntrinsicKind(iid);
+ bool OnlyReadsMemory = IK == IK_OnlyReadsMemory;
+ bool WritesMemory = IK == IK_WritesMemory;
+ assert(!(OnlyReadsMemory && WritesMemory));
+
+ if (NumArgOperands == 2 &&
+ I.getArgOperand(0)->getType()->isPointerTy() &&
+ I.getArgOperand(1)->getType()->isVectorTy() &&
+ I.getType()->isVoidTy() &&
+ WritesMemory) {
+ // This looks like a vector store.
+ return handleVectorStoreIntrinsic(I);
+ }
+
+ if (NumArgOperands == 1 &&
+ I.getArgOperand(0)->getType()->isPointerTy() &&
+ I.getType()->isVectorTy() &&
+ OnlyReadsMemory) {
+ // This looks like a vector load.
+ return handleVectorLoadIntrinsic(I);
+ }
+
+ if (!OnlyReadsMemory && !WritesMemory)
+ if (maybeHandleSimpleNomemIntrinsic(I))
+ return true;
+
+ // FIXME: detect and handle SSE maskstore/maskload
+ return false;
+ }
+
+ void handleBswap(IntrinsicInst &I) {
+ IRBuilder<> IRB(&I);
+ Value *Op = I.getArgOperand(0);
+ Type *OpType = Op->getType();
+ Function *BswapFunc = Intrinsic::getDeclaration(
+ F.getParent(), Intrinsic::bswap, ArrayRef<Type*>(&OpType, 1));
+ setShadow(&I, IRB.CreateCall(BswapFunc, getShadow(Op)));
+ setOrigin(&I, getOrigin(Op));
+ }
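+
+  // For example (illustrative): for a 32-bit bswap, a value whose low byte
+  // is undefined (shadow 0x000000FF) yields a result whose high byte is
+  // undefined (shadow 0xFF000000), so bswapping the shadow is exact.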
+
+ void visitIntrinsicInst(IntrinsicInst &I) {
+ switch (I.getIntrinsicID()) {
+ case llvm::Intrinsic::bswap:
+ handleBswap(I);
+ break;
+ default:
+ if (!handleUnknownIntrinsic(I))
+ visitInstruction(I);
+ break;
+ }
+ }
+
+ void visitCallSite(CallSite CS) {
+ Instruction &I = *CS.getInstruction();
+ assert((CS.isCall() || CS.isInvoke()) && "Unknown type of CallSite");
+ if (CS.isCall()) {
+ CallInst *Call = cast<CallInst>(&I);
+
+ // For inline asm, do the usual thing: check argument shadow and mark all
+ // outputs as clean. Note that any side effects of the inline asm that are
+ // not immediately visible in its constraints are not handled.
+ if (Call->isInlineAsm()) {
+ visitInstruction(I);
+ return;
+ }
+
+ // Allow only tail calls with the same types, otherwise
+ // we may have a false positive: shadow for a non-void RetVal
+ // will get propagated to a void RetVal.
+      if (Call->isTailCall() &&
+          Call->getType() != Call->getParent()->getParent()->getReturnType())
+        Call->setTailCall(false);
+
+ assert(!isa<IntrinsicInst>(&I) && "intrinsics are handled elsewhere");
+
+ // We are going to insert code that relies on the fact that the callee
+ // will become a non-readonly function after it is instrumented by us. To
+ // prevent this code from being optimized out, mark that function
+ // non-readonly in advance.
+ if (Function *Func = Call->getCalledFunction()) {
+ // Clear out readonly/readnone attributes.
+ AttrBuilder B;
+ B.addAttribute(Attribute::ReadOnly)
+ .addAttribute(Attribute::ReadNone);
+ Func->removeAttributes(AttributeSet::FunctionIndex,
+ AttributeSet::get(Func->getContext(),
+ AttributeSet::FunctionIndex,
+ B));
+ }
+ }
+ IRBuilder<> IRB(&I);
+ unsigned ArgOffset = 0;
+ DEBUG(dbgs() << " CallSite: " << I << "\n");
+ for (CallSite::arg_iterator ArgIt = CS.arg_begin(), End = CS.arg_end();
+ ArgIt != End; ++ArgIt) {
+ Value *A = *ArgIt;
+ unsigned i = ArgIt - CS.arg_begin();
+ if (!A->getType()->isSized()) {
+ DEBUG(dbgs() << "Arg " << i << " is not sized: " << I << "\n");
+ continue;
+ }
+ unsigned Size = 0;
+ Value *Store = 0;
+ // Compute the Shadow for arg even if it is ByVal, because
+ // in that case getShadow() will copy the actual arg shadow to
+ // __msan_param_tls.
+ Value *ArgShadow = getShadow(A);
+ Value *ArgShadowBase = getShadowPtrForArgument(A, IRB, ArgOffset);
+ DEBUG(dbgs() << " Arg#" << i << ": " << *A <<
+ " Shadow: " << *ArgShadow << "\n");
+ if (CS.paramHasAttr(i + 1, Attribute::ByVal)) {
+ assert(A->getType()->isPointerTy() &&
+ "ByVal argument is not a pointer!");
+ Size = MS.TD->getTypeAllocSize(A->getType()->getPointerElementType());
+ unsigned Alignment = CS.getParamAlignment(i + 1);
+ Store = IRB.CreateMemCpy(ArgShadowBase,
+ getShadowPtr(A, Type::getInt8Ty(*MS.C), IRB),
+ Size, Alignment);
+ } else {
+ Size = MS.TD->getTypeAllocSize(A->getType());
+ Store = IRB.CreateAlignedStore(ArgShadow, ArgShadowBase,
+ kShadowTLSAlignment);
+ }
+ if (MS.TrackOrigins)
+ IRB.CreateStore(getOrigin(A),
+ getOriginPtrForArgument(A, IRB, ArgOffset));
+ (void)Store;
+ assert(Size != 0 && Store != 0);
+ DEBUG(dbgs() << " Param:" << *Store << "\n");
+ ArgOffset += DataLayout::RoundUpAlignment(Size, 8);
+ }
+ DEBUG(dbgs() << " done with call args\n");
+
+ FunctionType *FT =
+      cast<FunctionType>(CS.getCalledValue()->getType()->getContainedType(0));
+ if (FT->isVarArg()) {
+ VAHelper->visitCallSite(CS, IRB);
+ }
+
+ // Now, get the shadow for the RetVal.
+ if (!I.getType()->isSized()) return;
+ IRBuilder<> IRBBefore(&I);
+    // Until we have full dynamic coverage, make sure the retval shadow is 0.
+ Value *Base = getShadowPtrForRetval(&I, IRBBefore);
+ IRBBefore.CreateAlignedStore(getCleanShadow(&I), Base, kShadowTLSAlignment);
+ Instruction *NextInsn = 0;
+ if (CS.isCall()) {
+ NextInsn = I.getNextNode();
+ } else {
+ BasicBlock *NormalDest = cast<InvokeInst>(&I)->getNormalDest();
+ if (!NormalDest->getSinglePredecessor()) {
+ // FIXME: this case is tricky, so we are just conservative here.
+ // Perhaps we need to split the edge between this BB and NormalDest,
+ // but a naive attempt to use SplitEdge leads to a crash.
+ setShadow(&I, getCleanShadow(&I));
+ setOrigin(&I, getCleanOrigin());
+ return;
+ }
+ NextInsn = NormalDest->getFirstInsertionPt();
+ assert(NextInsn &&
+ "Could not find insertion point for retval shadow load");
+ }
+ IRBuilder<> IRBAfter(NextInsn);
+ Value *RetvalShadow =
+ IRBAfter.CreateAlignedLoad(getShadowPtrForRetval(&I, IRBAfter),
+ kShadowTLSAlignment, "_msret");
+ setShadow(&I, RetvalShadow);
+ if (MS.TrackOrigins)
+ setOrigin(&I, IRBAfter.CreateLoad(getOriginPtrForRetval(IRBAfter)));
+ }
+
+ void visitReturnInst(ReturnInst &I) {
+ IRBuilder<> IRB(&I);
+ if (Value *RetVal = I.getReturnValue()) {
+ // Set the shadow for the RetVal.
+ Value *Shadow = getShadow(RetVal);
+ Value *ShadowPtr = getShadowPtrForRetval(RetVal, IRB);
+ DEBUG(dbgs() << "Return: " << *Shadow << "\n" << *ShadowPtr << "\n");
+ IRB.CreateAlignedStore(Shadow, ShadowPtr, kShadowTLSAlignment);
+ if (MS.TrackOrigins)
+ IRB.CreateStore(getOrigin(RetVal), getOriginPtrForRetval(IRB));
+ }
+ }
+
+ void visitPHINode(PHINode &I) {
+ IRBuilder<> IRB(&I);
+ ShadowPHINodes.push_back(&I);
+ setShadow(&I, IRB.CreatePHI(getShadowTy(&I), I.getNumIncomingValues(),
+ "_msphi_s"));
+ if (MS.TrackOrigins)
+ setOrigin(&I, IRB.CreatePHI(MS.OriginTy, I.getNumIncomingValues(),
+ "_msphi_o"));
+ }
+
+ void visitAllocaInst(AllocaInst &I) {
+ setShadow(&I, getCleanShadow(&I));
+ if (!ClPoisonStack) return;
+ IRBuilder<> IRB(I.getNextNode());
+ uint64_t Size = MS.TD->getTypeAllocSize(I.getAllocatedType());
+ if (ClPoisonStackWithCall) {
+ IRB.CreateCall2(MS.MsanPoisonStackFn,
+ IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()),
+ ConstantInt::get(MS.IntptrTy, Size));
+ } else {
+ Value *ShadowBase = getShadowPtr(&I, Type::getInt8PtrTy(*MS.C), IRB);
+ IRB.CreateMemSet(ShadowBase, IRB.getInt8(ClPoisonStackPattern),
+ Size, I.getAlignment());
+ }
+
+ if (MS.TrackOrigins) {
+ setOrigin(&I, getCleanOrigin());
+ SmallString<2048> StackDescriptionStorage;
+ raw_svector_ostream StackDescription(StackDescriptionStorage);
+ // We create a string with a description of the stack allocation and
+ // pass it into __msan_set_alloca_origin.
+      // It will be printed by the run-time if a stack-originated UMR is found.
+      // The first 4 bytes of the string are set to '----' and will be
+      // replaced with the origin id by the run-time at the first call.
+ StackDescription << "----" << I.getName() << "@" << F.getName();
+ Value *Descr =
+ createPrivateNonConstGlobalForString(*F.getParent(),
+ StackDescription.str());
+ IRB.CreateCall3(MS.MsanSetAllocaOriginFn,
+ IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()),
+ ConstantInt::get(MS.IntptrTy, Size),
+ IRB.CreatePointerCast(Descr, IRB.getInt8PtrTy()));
+ }
+ }
+
+  void visitSelectInst(SelectInst &I) {
+ IRBuilder<> IRB(&I);
+ setShadow(&I, IRB.CreateSelect(I.getCondition(),
+ getShadow(I.getTrueValue()), getShadow(I.getFalseValue()),
+ "_msprop"));
+ if (MS.TrackOrigins) {
+ // Origins are always i32, so any vector conditions must be flattened.
+ // FIXME: consider tracking vector origins for app vectors?
+ Value *Cond = I.getCondition();
+ if (Cond->getType()->isVectorTy()) {
+ Value *ConvertedShadow = convertToShadowTyNoVec(Cond, IRB);
+ Cond = IRB.CreateICmpNE(ConvertedShadow,
+ getCleanShadow(ConvertedShadow), "_mso_select");
+ }
+ setOrigin(&I, IRB.CreateSelect(Cond,
+ getOrigin(I.getTrueValue()), getOrigin(I.getFalseValue())));
+ }
+ }
+
+ void visitLandingPadInst(LandingPadInst &I) {
+ // Do nothing.
+ // See http://code.google.com/p/memory-sanitizer/issues/detail?id=1
+ setShadow(&I, getCleanShadow(&I));
+ setOrigin(&I, getCleanOrigin());
+ }
+
+ void visitGetElementPtrInst(GetElementPtrInst &I) {
+ handleShadowOr(I);
+ }
+
+ void visitExtractValueInst(ExtractValueInst &I) {
+ IRBuilder<> IRB(&I);
+ Value *Agg = I.getAggregateOperand();
+ DEBUG(dbgs() << "ExtractValue: " << I << "\n");
+ Value *AggShadow = getShadow(Agg);
+ DEBUG(dbgs() << " AggShadow: " << *AggShadow << "\n");
+ Value *ResShadow = IRB.CreateExtractValue(AggShadow, I.getIndices());
+ DEBUG(dbgs() << " ResShadow: " << *ResShadow << "\n");
+ setShadow(&I, ResShadow);
+ setOrigin(&I, getCleanOrigin());
+ }
+
+ void visitInsertValueInst(InsertValueInst &I) {
+ IRBuilder<> IRB(&I);
+ DEBUG(dbgs() << "InsertValue: " << I << "\n");
+ Value *AggShadow = getShadow(I.getAggregateOperand());
+ Value *InsShadow = getShadow(I.getInsertedValueOperand());
+ DEBUG(dbgs() << " AggShadow: " << *AggShadow << "\n");
+ DEBUG(dbgs() << " InsShadow: " << *InsShadow << "\n");
+ Value *Res = IRB.CreateInsertValue(AggShadow, InsShadow, I.getIndices());
+ DEBUG(dbgs() << " Res: " << *Res << "\n");
+ setShadow(&I, Res);
+ setOrigin(&I, getCleanOrigin());
+ }
+
+ void dumpInst(Instruction &I) {
+    if (CallInst *CI = dyn_cast<CallInst>(&I)) {
+      Function *Callee = CI->getCalledFunction();
+      errs() << "ZZZ call "
+             << (Callee ? Callee->getName() : StringRef("<indirect>")) << "\n";
+ } else {
+ errs() << "ZZZ " << I.getOpcodeName() << "\n";
+ }
+ errs() << "QQQ " << I << "\n";
+ }
+
+ void visitResumeInst(ResumeInst &I) {
+ DEBUG(dbgs() << "Resume: " << I << "\n");
+ // Nothing to do here.
+ }
+
+ void visitInstruction(Instruction &I) {
+ // Everything else: stop propagating and check for poisoned shadow.
+ if (ClDumpStrictInstructions)
+ dumpInst(I);
+ DEBUG(dbgs() << "DEFAULT: " << I << "\n");
+ for (size_t i = 0, n = I.getNumOperands(); i < n; i++)
+ insertCheck(I.getOperand(i), &I);
+ setShadow(&I, getCleanShadow(&I));
+ setOrigin(&I, getCleanOrigin());
+ }
+};
+
+/// \brief AMD64-specific implementation of VarArgHelper.
+struct VarArgAMD64Helper : public VarArgHelper {
+ // An unfortunate workaround for asymmetric lowering of va_arg stuff.
+ // See a comment in visitCallSite for more details.
+ static const unsigned AMD64GpEndOffset = 48; // AMD64 ABI Draft 0.99.6 p3.5.7
+ static const unsigned AMD64FpEndOffset = 176;
+
+ Function &F;
+ MemorySanitizer &MS;
+ MemorySanitizerVisitor &MSV;
+ Value *VAArgTLSCopy;
+ Value *VAArgOverflowSize;
+
+ SmallVector<CallInst*, 16> VAStartInstrumentationList;
+
+ VarArgAMD64Helper(Function &F, MemorySanitizer &MS,
+ MemorySanitizerVisitor &MSV)
+ : F(F), MS(MS), MSV(MSV), VAArgTLSCopy(0), VAArgOverflowSize(0) { }
+
+ enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory };
+
+ ArgKind classifyArgument(Value* arg) {
+ // A very rough approximation of X86_64 argument classification rules.
+ Type *T = arg->getType();
+ if (T->isFPOrFPVectorTy() || T->isX86_MMXTy())
+ return AK_FloatingPoint;
+ if (T->isIntegerTy() && T->getPrimitiveSizeInBits() <= 64)
+ return AK_GeneralPurpose;
+ if (T->isPointerTy())
+ return AK_GeneralPurpose;
+ return AK_Memory;
+ }
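+
+  // For example (illustrative): double and <4 x float> classify as
+  // AK_FloatingPoint, i32 and i8* as AK_GeneralPurpose, and an aggregate
+  // such as {i64, i64} falls through to AK_Memory.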
+
+ // For VarArg functions, store the argument shadow in an ABI-specific format
+ // that corresponds to va_list layout.
+ // We do this because Clang lowers va_arg in the frontend, and this pass
+ // only sees the low level code that deals with va_list internals.
+ // A much easier alternative (provided that Clang emits va_arg instructions)
+ // would have been to associate each live instance of va_list with a copy of
+ // MSanParamTLS, and extract shadow on va_arg() call in the argument list
+ // order.
+ void visitCallSite(CallSite &CS, IRBuilder<> &IRB) {
+ unsigned GpOffset = 0;
+ unsigned FpOffset = AMD64GpEndOffset;
+ unsigned OverflowOffset = AMD64FpEndOffset;
+ for (CallSite::arg_iterator ArgIt = CS.arg_begin(), End = CS.arg_end();
+ ArgIt != End; ++ArgIt) {
+ Value *A = *ArgIt;
+ ArgKind AK = classifyArgument(A);
+ if (AK == AK_GeneralPurpose && GpOffset >= AMD64GpEndOffset)
+ AK = AK_Memory;
+ if (AK == AK_FloatingPoint && FpOffset >= AMD64FpEndOffset)
+ AK = AK_Memory;
+ Value *Base;
+ switch (AK) {
+ case AK_GeneralPurpose:
+ Base = getShadowPtrForVAArgument(A, IRB, GpOffset);
+ GpOffset += 8;
+ break;
+ case AK_FloatingPoint:
+ Base = getShadowPtrForVAArgument(A, IRB, FpOffset);
+ FpOffset += 16;
+ break;
+ case AK_Memory:
+ uint64_t ArgSize = MS.TD->getTypeAllocSize(A->getType());
+ Base = getShadowPtrForVAArgument(A, IRB, OverflowOffset);
+ OverflowOffset += DataLayout::RoundUpAlignment(ArgSize, 8);
+ }
+ IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
+ }
+ Constant *OverflowSize =
+ ConstantInt::get(IRB.getInt64Ty(), OverflowOffset - AMD64FpEndOffset);
+ IRB.CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
+ }
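+
+  // For example (illustrative): for a variadic call f(i32 a, double b,
+  // i32 c), the shadows of a and c land at GP offsets 0 and 8, the shadow
+  // of b at FP offset 48, and OverflowOffset stays at 176, so the recorded
+  // overflow size is 0.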
+
+ /// \brief Compute the shadow address for a given va_arg.
+ Value *getShadowPtrForVAArgument(Value *A, IRBuilder<> &IRB,
+ int ArgOffset) {
+ Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy);
+ Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
+ return IRB.CreateIntToPtr(Base, PointerType::get(MSV.getShadowTy(A), 0),
+ "_msarg");
+ }
+
+ void visitVAStartInst(VAStartInst &I) {
+ IRBuilder<> IRB(&I);
+ VAStartInstrumentationList.push_back(&I);
+ Value *VAListTag = I.getArgOperand(0);
+ Value *ShadowPtr = MSV.getShadowPtr(VAListTag, IRB.getInt8Ty(), IRB);
+
+ // Unpoison the whole __va_list_tag.
+ // FIXME: magic ABI constants.
+ IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
+ /* size */24, /* alignment */8, false);
+ }
+
+ void visitVACopyInst(VACopyInst &I) {
+ IRBuilder<> IRB(&I);
+ Value *VAListTag = I.getArgOperand(0);
+ Value *ShadowPtr = MSV.getShadowPtr(VAListTag, IRB.getInt8Ty(), IRB);
+
+ // Unpoison the whole __va_list_tag.
+ // FIXME: magic ABI constants.
+ IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
+ /* size */24, /* alignment */8, false);
+ }
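+
+  // The magic constants above and below match the AMD64 __va_list_tag
+  // layout: { i32 gp_offset; i32 fp_offset; i8 *overflow_arg_area;
+  // i8 *reg_save_area }, 24 bytes in total, with the overflow area pointer
+  // at offset 8 and the register save area pointer at offset 16.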
+
+ void finalizeInstrumentation() {
+ assert(!VAArgOverflowSize && !VAArgTLSCopy &&
+ "finalizeInstrumentation called twice");
+ if (!VAStartInstrumentationList.empty()) {
+ // If there is a va_start in this function, make a backup copy of
+ // va_arg_tls somewhere in the function entry block.
+ IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI());
+ VAArgOverflowSize = IRB.CreateLoad(MS.VAArgOverflowSizeTLS);
+ Value *CopySize =
+ IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, AMD64FpEndOffset),
+ VAArgOverflowSize);
+ VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
+ IRB.CreateMemCpy(VAArgTLSCopy, MS.VAArgTLS, CopySize, 8);
+ }
+
+ // Instrument va_start.
+ // Copy va_list shadow from the backup copy of the TLS contents.
+ for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) {
+ CallInst *OrigInst = VAStartInstrumentationList[i];
+ IRBuilder<> IRB(OrigInst->getNextNode());
+ Value *VAListTag = OrigInst->getArgOperand(0);
+
+ Value *RegSaveAreaPtrPtr =
+ IRB.CreateIntToPtr(
+ IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
+ ConstantInt::get(MS.IntptrTy, 16)),
+ Type::getInt64PtrTy(*MS.C));
+ Value *RegSaveAreaPtr = IRB.CreateLoad(RegSaveAreaPtrPtr);
+ Value *RegSaveAreaShadowPtr =
+ MSV.getShadowPtr(RegSaveAreaPtr, IRB.getInt8Ty(), IRB);
+ IRB.CreateMemCpy(RegSaveAreaShadowPtr, VAArgTLSCopy,
+ AMD64FpEndOffset, 16);
+
+ Value *OverflowArgAreaPtrPtr =
+ IRB.CreateIntToPtr(
+ IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
+ ConstantInt::get(MS.IntptrTy, 8)),
+ Type::getInt64PtrTy(*MS.C));
+ Value *OverflowArgAreaPtr = IRB.CreateLoad(OverflowArgAreaPtrPtr);
+ Value *OverflowArgAreaShadowPtr =
+ MSV.getShadowPtr(OverflowArgAreaPtr, IRB.getInt8Ty(), IRB);
+ Value *SrcPtr =
+ getShadowPtrForVAArgument(VAArgTLSCopy, IRB, AMD64FpEndOffset);
+ IRB.CreateMemCpy(OverflowArgAreaShadowPtr, SrcPtr, VAArgOverflowSize, 16);
+ }
+ }
+};
+
+VarArgHelper* CreateVarArgHelper(Function &Func, MemorySanitizer &Msan,
+ MemorySanitizerVisitor &Visitor) {
+ return new VarArgAMD64Helper(Func, Msan, Visitor);
+}
+
+} // namespace
+
+bool MemorySanitizer::runOnFunction(Function &F) {
+ MemorySanitizerVisitor Visitor(F, *this);
+
+ // Clear out readonly/readnone attributes.
+ AttrBuilder B;
+ B.addAttribute(Attribute::ReadOnly)
+ .addAttribute(Attribute::ReadNone);
+ F.removeAttributes(AttributeSet::FunctionIndex,
+ AttributeSet::get(F.getContext(),
+ AttributeSet::FunctionIndex, B));
+
+ return Visitor.runOnFunction();
+}
diff --git a/contrib/llvm/lib/Transforms/Instrumentation/OptimalEdgeProfiling.cpp b/contrib/llvm/lib/Transforms/Instrumentation/OptimalEdgeProfiling.cpp
index 1fe12545d294..b45aef65bc76 100644
--- a/contrib/llvm/lib/Transforms/Instrumentation/OptimalEdgeProfiling.cpp
+++ b/contrib/llvm/lib/Transforms/Instrumentation/OptimalEdgeProfiling.cpp
@@ -13,20 +13,20 @@
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "insert-optimal-edge-profiling"
+#include "llvm/Transforms/Instrumentation.h"
+#include "MaximumSpanningTree.h"
#include "ProfilingUtils.h"
-#include "llvm/Constants.h"
-#include "llvm/Module.h"
-#include "llvm/Pass.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/Passes.h"
#include "llvm/Analysis/ProfileInfo.h"
#include "llvm/Analysis/ProfileInfoLoader.h"
-#include "llvm/Support/raw_ostream.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
-#include "llvm/Transforms/Instrumentation.h"
-#include "llvm/ADT/DenseSet.h"
-#include "llvm/ADT/Statistic.h"
-#include "MaximumSpanningTree.h"
using namespace llvm;
STATISTIC(NumEdgesInserted, "The # of edges inserted.");
diff --git a/contrib/llvm/lib/Transforms/Instrumentation/PathProfiling.cpp b/contrib/llvm/lib/Transforms/Instrumentation/PathProfiling.cpp
index cc27146ebcf0..7de73269cf2b 100644
--- a/contrib/llvm/lib/Transforms/Instrumentation/PathProfiling.cpp
+++ b/contrib/llvm/lib/Transforms/Instrumentation/PathProfiling.cpp
@@ -45,24 +45,23 @@
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "insert-path-profiling"
-#include "llvm/DerivedTypes.h"
+#include "llvm/Transforms/Instrumentation.h"
#include "ProfilingUtils.h"
#include "llvm/Analysis/PathNumbering.h"
-#include "llvm/Constants.h"
-#include "llvm/DerivedTypes.h"
-#include "llvm/InstrTypes.h"
-#include "llvm/Instructions.h"
-#include "llvm/LLVMContext.h"
-#include "llvm/Module.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/InstrTypes.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/TypeBuilder.h"
#include "llvm/Pass.h"
-#include "llvm/TypeBuilder.h"
-#include "llvm/Support/Compiler.h"
#include "llvm/Support/CFG.h"
#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
-#include "llvm/Transforms/Instrumentation.h"
#include <vector>
#define HASH_THRESHHOLD 100000
diff --git a/contrib/llvm/lib/Transforms/Instrumentation/ProfilingUtils.cpp b/contrib/llvm/lib/Transforms/Instrumentation/ProfilingUtils.cpp
index de57cd173483..4b3de6d7fc38 100644
--- a/contrib/llvm/lib/Transforms/Instrumentation/ProfilingUtils.cpp
+++ b/contrib/llvm/lib/Transforms/Instrumentation/ProfilingUtils.cpp
@@ -15,11 +15,11 @@
//===----------------------------------------------------------------------===//
#include "ProfilingUtils.h"
-#include "llvm/Constants.h"
-#include "llvm/DerivedTypes.h"
-#include "llvm/Instructions.h"
-#include "llvm/LLVMContext.h"
-#include "llvm/Module.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Module.h"
void llvm::InsertProfilingInitCall(Function *MainFn, const char *FnName,
GlobalValue *Array,
diff --git a/contrib/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp b/contrib/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
index 9e10fc4416de..299060a42fe8 100644
--- a/contrib/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
+++ b/contrib/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
@@ -21,31 +21,32 @@
#define DEBUG_TYPE "tsan"
-#include "BlackList.h"
-#include "llvm/Function.h"
-#include "llvm/IRBuilder.h"
-#include "llvm/Intrinsics.h"
-#include "llvm/LLVMContext.h"
-#include "llvm/Metadata.h"
-#include "llvm/Module.h"
-#include "llvm/Type.h"
+#include "llvm/Transforms/Instrumentation.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Metadata.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/Type.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/DataLayout.h"
-#include "llvm/Transforms/Instrumentation.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
+#include "llvm/Transforms/Utils/BlackList.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"
using namespace llvm;
-static cl::opt<std::string> ClBlackListFile("tsan-blacklist",
+static cl::opt<std::string> ClBlacklistFile("tsan-blacklist",
cl::desc("Blacklist file"), cl::Hidden);
static cl::opt<bool> ClInstrumentMemoryAccesses(
"tsan-instrument-memory-accesses", cl::init(true),
@@ -56,6 +57,9 @@ static cl::opt<bool> ClInstrumentFuncEntryExit(
static cl::opt<bool> ClInstrumentAtomics(
"tsan-instrument-atomics", cl::init(true),
cl::desc("Instrument atomics"), cl::Hidden);
+static cl::opt<bool> ClInstrumentMemIntrinsics(
+ "tsan-instrument-memintrinsics", cl::init(true),
+ cl::desc("Instrument memintrinsics (memset/memcpy/memmove)"), cl::Hidden);
STATISTIC(NumInstrumentedReads, "Number of instrumented reads");
STATISTIC(NumInstrumentedWrites, "Number of instrumented writes");
@@ -63,6 +67,7 @@ STATISTIC(NumOmittedReadsBeforeWrite,
"Number of reads ignored due to following writes");
STATISTIC(NumAccessesWithBadSize, "Number of accesses with bad size");
STATISTIC(NumInstrumentedVtableWrites, "Number of vtable ptr writes");
+STATISTIC(NumInstrumentedVtableReads, "Number of vtable ptr reads");
STATISTIC(NumOmittedReadsFromConstantGlobals,
"Number of reads from constant globals");
STATISTIC(NumOmittedReadsFromVtable, "Number of vtable reads");
@@ -71,21 +76,29 @@ namespace {
/// ThreadSanitizer: instrument the code in module to find races.
struct ThreadSanitizer : public FunctionPass {
- ThreadSanitizer();
+ ThreadSanitizer(StringRef BlacklistFile = StringRef())
+ : FunctionPass(ID),
+ TD(0),
+ BlacklistFile(BlacklistFile.empty() ? ClBlacklistFile
+ : BlacklistFile) { }
const char *getPassName() const;
bool runOnFunction(Function &F);
bool doInitialization(Module &M);
static char ID; // Pass identification, replacement for typeid.
private:
+ void initializeCallbacks(Module &M);
bool instrumentLoadOrStore(Instruction *I);
bool instrumentAtomic(Instruction *I);
+ bool instrumentMemIntrinsic(Instruction *I);
void chooseInstructionsToInstrument(SmallVectorImpl<Instruction*> &Local,
SmallVectorImpl<Instruction*> &All);
bool addrPointsToConstantData(Value *Addr);
int getMemoryAccessFuncIndex(Value *Addr);
DataLayout *TD;
+ Type *IntptrTy;
+ SmallString<64> BlacklistFile;
OwningPtr<BlackList> BL;
IntegerType *OrdTy;
// Callbacks to run-time library are computed in doInitialization.
@@ -102,6 +115,8 @@ struct ThreadSanitizer : public FunctionPass {
Function *TsanAtomicThreadFence;
Function *TsanAtomicSignalFence;
Function *TsanVptrUpdate;
+ Function *TsanVptrLoad;
+ Function *MemmoveFn, *MemcpyFn, *MemsetFn;
};
} // namespace
@@ -114,13 +129,8 @@ const char *ThreadSanitizer::getPassName() const {
return "ThreadSanitizer";
}
-ThreadSanitizer::ThreadSanitizer()
- : FunctionPass(ID),
- TD(NULL) {
-}
-
-FunctionPass *llvm::createThreadSanitizerPass() {
- return new ThreadSanitizer();
+FunctionPass *llvm::createThreadSanitizerPass(StringRef BlacklistFile) {
+ return new ThreadSanitizer(BlacklistFile);
}
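With this change a client can pass a blacklist path directly instead of
relying on the -tsan-blacklist flag. A minimal caller-side sketch
(PassManager-era API; "races.txt" is a hypothetical path, not from the patch):

    #include "llvm/PassManager.h"
    #include "llvm/Transforms/Instrumentation.h"

    static void addTsan(llvm::PassManagerBase &PM) {
      // An empty StringRef falls back to the -tsan-blacklist option.
      PM.add(llvm::createThreadSanitizerPass("races.txt"));
    }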
static Function *checkInterfaceFunction(Constant *FuncOrBitcast) {
@@ -130,18 +140,8 @@ static Function *checkInterfaceFunction(Constant *FuncOrBitcast) {
report_fatal_error("ThreadSanitizer interface function redefined");
}
-bool ThreadSanitizer::doInitialization(Module &M) {
- TD = getAnalysisIfAvailable<DataLayout>();
- if (!TD)
- return false;
- BL.reset(new BlackList(ClBlackListFile));
-
- // Always insert a call to __tsan_init into the module's CTORs.
+void ThreadSanitizer::initializeCallbacks(Module &M) {
IRBuilder<> IRB(M.getContext());
- Value *TsanInit = M.getOrInsertFunction("__tsan_init",
- IRB.getVoidTy(), NULL);
- appendToGlobalCtors(M, cast<Function>(TsanInit), 0);
-
// Initialize the callbacks.
TsanFuncEntry = checkInterfaceFunction(M.getOrInsertFunction(
"__tsan_func_entry", IRB.getVoidTy(), IRB.getInt8PtrTy(), NULL));
@@ -188,6 +188,8 @@ bool ThreadSanitizer::doInitialization(Module &M) {
NamePart = "_fetch_or";
else if (op == AtomicRMWInst::Xor)
NamePart = "_fetch_xor";
+ else if (op == AtomicRMWInst::Nand)
+ NamePart = "_fetch_nand";
else
continue;
SmallString<32> RMWName("__tsan_atomic" + itostr(BitSize) + NamePart);
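With Nand handled, an "atomicrmw nand" is now forwarded to the runtime instead
of being skipped. A caller-side sketch (the prototype is an assumption modeled
on the __tsan_atomicN_fetch_* naming built above; the ordering constant
follows createOrdering()):

    // Assumed prototype, not taken from the patch.
    extern "C" int __tsan_atomic32_fetch_nand(volatile int *a, int v, int mo);

    int fetch_nand_seq_cst(volatile int *a, int v) {
      // Returns the old value; the runtime stores ~(old & v).
      return __tsan_atomic32_fetch_nand(a, v, /*mo=seq_cst*/ 5);
    }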
@@ -198,15 +200,42 @@ bool ThreadSanitizer::doInitialization(Module &M) {
SmallString<32> AtomicCASName("__tsan_atomic" + itostr(BitSize) +
"_compare_exchange_val");
TsanAtomicCAS[i] = checkInterfaceFunction(M.getOrInsertFunction(
- AtomicCASName, Ty, PtrTy, Ty, Ty, OrdTy, NULL));
+ AtomicCASName, Ty, PtrTy, Ty, Ty, OrdTy, OrdTy, NULL));
}
TsanVptrUpdate = checkInterfaceFunction(M.getOrInsertFunction(
"__tsan_vptr_update", IRB.getVoidTy(), IRB.getInt8PtrTy(),
IRB.getInt8PtrTy(), NULL));
+ TsanVptrLoad = checkInterfaceFunction(M.getOrInsertFunction(
+ "__tsan_vptr_read", IRB.getVoidTy(), IRB.getInt8PtrTy(), NULL));
TsanAtomicThreadFence = checkInterfaceFunction(M.getOrInsertFunction(
"__tsan_atomic_thread_fence", IRB.getVoidTy(), OrdTy, NULL));
TsanAtomicSignalFence = checkInterfaceFunction(M.getOrInsertFunction(
"__tsan_atomic_signal_fence", IRB.getVoidTy(), OrdTy, NULL));
+
+ MemmoveFn = checkInterfaceFunction(M.getOrInsertFunction(
+ "memmove", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
+ IRB.getInt8PtrTy(), IntptrTy, NULL));
+ MemcpyFn = checkInterfaceFunction(M.getOrInsertFunction(
+ "memcpy", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
+ IntptrTy, NULL));
+ MemsetFn = checkInterfaceFunction(M.getOrInsertFunction(
+ "memset", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IRB.getInt32Ty(),
+ IntptrTy, NULL));
+}
+
+bool ThreadSanitizer::doInitialization(Module &M) {
+ TD = getAnalysisIfAvailable<DataLayout>();
+ if (!TD)
+ return false;
+ BL.reset(new BlackList(BlacklistFile));
+
+ // Always insert a call to __tsan_init into the module's CTORs.
+ IRBuilder<> IRB(M.getContext());
+ IntptrTy = IRB.getIntPtrTy(TD);
+ Value *TsanInit = M.getOrInsertFunction("__tsan_init",
+ IRB.getVoidTy(), NULL);
+ appendToGlobalCtors(M, cast<Function>(TsanInit), 0);
+
return true;
}
@@ -297,10 +326,12 @@ static bool isAtomic(Instruction *I) {
bool ThreadSanitizer::runOnFunction(Function &F) {
if (!TD) return false;
if (BL->isIn(F)) return false;
+ initializeCallbacks(*F.getParent());
SmallVector<Instruction*, 8> RetVec;
SmallVector<Instruction*, 8> AllLoadsAndStores;
SmallVector<Instruction*, 8> LocalLoadsAndStores;
SmallVector<Instruction*, 8> AtomicAccesses;
+ SmallVector<Instruction*, 8> MemIntrinCalls;
bool Res = false;
bool HasCalls = false;
@@ -317,6 +348,8 @@ bool ThreadSanitizer::runOnFunction(Function &F) {
else if (isa<ReturnInst>(BI))
RetVec.push_back(BI);
else if (isa<CallInst>(BI) || isa<InvokeInst>(BI)) {
+ if (isa<MemIntrinsic>(BI))
+ MemIntrinCalls.push_back(BI);
HasCalls = true;
chooseInstructionsToInstrument(LocalLoadsAndStores, AllLoadsAndStores);
}
@@ -340,6 +373,11 @@ bool ThreadSanitizer::runOnFunction(Function &F) {
Res |= instrumentAtomic(AtomicAccesses[i]);
}
+ if (ClInstrumentMemIntrinsics)
+ for (size_t i = 0, n = MemIntrinCalls.size(); i < n; ++i) {
+ Res |= instrumentMemIntrinsic(MemIntrinCalls[i]);
+ }
+
// Instrument function entry/exit points if there were instrumented accesses.
if ((Res || HasCalls) && ClInstrumentFuncEntryExit) {
IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI());
@@ -378,6 +416,12 @@ bool ThreadSanitizer::instrumentLoadOrStore(Instruction *I) {
NumInstrumentedVtableWrites++;
return true;
}
+ if (!IsWrite && isVtableAccess(I)) {
+ IRB.CreateCall(TsanVptrLoad,
+ IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()));
+ NumInstrumentedVtableReads++;
+ return true;
+ }
Value *OnAccessFunc = IsWrite ? TsanWrite[Idx] : TsanRead[Idx];
IRB.CreateCall(OnAccessFunc, IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()));
if (IsWrite) NumInstrumentedWrites++;
@@ -391,7 +435,7 @@ static ConstantInt *createOrdering(IRBuilder<> *IRB, AtomicOrdering ord) {
case NotAtomic: assert(false);
case Unordered: // Fall-through.
case Monotonic: v = 0; break;
- // case Consume: v = 1; break; // Not specified yet.
+ // case Consume: v = 1; break; // Not specified yet.
case Acquire: v = 2; break;
case Release: v = 3; break;
case AcquireRelease: v = 4; break;
@@ -400,6 +444,55 @@ static ConstantInt *createOrdering(IRBuilder<> *IRB, AtomicOrdering ord) {
return IRB->getInt32(v);
}
+static ConstantInt *createFailOrdering(IRBuilder<> *IRB, AtomicOrdering ord) {
+ uint32_t v = 0;
+ switch (ord) {
+ case NotAtomic: assert(false);
+ case Unordered: // Fall-through.
+ case Monotonic: v = 0; break;
+ // case Consume: v = 1; break; // Not specified yet.
+ case Acquire: v = 2; break;
+ case Release: v = 0; break;
+ case AcquireRelease: v = 2; break;
+ case SequentiallyConsistent: v = 5; break;
+ }
+ return IRB->getInt32(v);
+}
+
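To make the extra argument concrete: C++11 requires that a compare-exchange
failure ordering be no stronger than the success ordering and contain no
release semantics, which is what the mapping above encodes. A caller-side
sketch of the resulting runtime call (the prototype is assumed from the
_compare_exchange_val callbacks declared in initializeCallbacks()):

    // Assumed prototype, not taken from the patch.
    extern "C" int __tsan_atomic32_compare_exchange_val(
        volatile int *a, int cmp, int xchg, int mo_success, int mo_failure);

    int cas_acq_rel(volatile int *a, int cmp, int xchg) {
      // success = acq_rel (4); the failure side drops release -> acquire (2).
      return __tsan_atomic32_compare_exchange_val(a, cmp, xchg, 4, 2);
    }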
+// If a memset intrinsic gets inlined by the code gen, we will miss races on it.
+// So, we either need to ensure the intrinsic is not inlined, or instrument it.
+// We do not instrument memset/memmove/memcpy intrinsics (too complicated);
+// instead we simply replace them with regular function calls, which are then
+// intercepted by the run-time.
+// Since tsan is running after everyone else, the calls should not be
+// replaced back with intrinsics. If that becomes wrong at some point,
+// we will need to call e.g. __tsan_memset to avoid the intrinsics.
+bool ThreadSanitizer::instrumentMemIntrinsic(Instruction *I) {
+ IRBuilder<> IRB(I);
+ if (MemSetInst *M = dyn_cast<MemSetInst>(I)) {
+ IRB.CreateCall3(MemsetFn,
+ IRB.CreatePointerCast(M->getArgOperand(0), IRB.getInt8PtrTy()),
+ IRB.CreateIntCast(M->getArgOperand(1), IRB.getInt32Ty(), false),
+ IRB.CreateIntCast(M->getArgOperand(2), IntptrTy, false));
+ I->eraseFromParent();
+ } else if (MemTransferInst *M = dyn_cast<MemTransferInst>(I)) {
+ IRB.CreateCall3(isa<MemCpyInst>(M) ? MemcpyFn : MemmoveFn,
+ IRB.CreatePointerCast(M->getArgOperand(0), IRB.getInt8PtrTy()),
+ IRB.CreatePointerCast(M->getArgOperand(1), IRB.getInt8PtrTy()),
+ IRB.CreateIntCast(M->getArgOperand(2), IntptrTy, false));
+ I->eraseFromParent();
+ }
+ return false;
+}
+
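In source-level terms the effect of instrumentMemIntrinsic() is roughly the
following (a sketch, not part of the patch): a memset that would otherwise
lower to llvm.memset, and possibly to inline stores invisible to the runtime,
is re-expressed as an out-of-line libc call that the tsan interceptors can see.

    #include <cstring>

    void clear_shared(char *p, size_t n) {
      // Before instrumentation: may become llvm.memset and inline stores.
      // After instrumentation: a genuine call to memset(), intercepted by
      // the tsan run-time.
      memset(p, 0, n);
    }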
+// Both llvm and ThreadSanitizer atomic operations are based on C++11/C1x
+// standards. For background, see the C++11 standard. A slightly older,
+// publicly available draft of the standard (not entirely up-to-date, but
+// close enough for casual browsing) is available here:
+// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2011/n3242.pdf
+// The following page contains more background information:
+// http://www.hpl.hp.com/personal/Hans_Boehm/c++mm/
+
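As one concrete instance of that mapping (a sketch; the prototype is an
assumption mirroring the TsanAtomicLoad callback): an acquire load of a
32-bit atomic becomes a runtime call carrying the C++11 ordering constant
produced by createOrdering().

    // Assumed prototype, not taken from the patch.
    extern "C" int __tsan_atomic32_load(const volatile int *a, int mo);

    int load_acquire(const volatile int *a) {
      // "load atomic i32, acquire" maps to mo = 2.
      return __tsan_atomic32_load(a, 2);
    }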
bool ThreadSanitizer::instrumentAtomic(Instruction *I) {
IRBuilder<> IRB(I);
if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
@@ -461,7 +554,8 @@ bool ThreadSanitizer::instrumentAtomic(Instruction *I) {
Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
IRB.CreateIntCast(CASI->getCompareOperand(), Ty, false),
IRB.CreateIntCast(CASI->getNewValOperand(), Ty, false),
- createOrdering(&IRB, CASI->getOrdering())};
+ createOrdering(&IRB, CASI->getOrdering()),
+ createFailOrdering(&IRB, CASI->getOrdering())};
CallInst *C = CallInst::Create(TsanAtomicCAS[Idx], ArrayRef<Value*>(Args));
ReplaceInstWithInst(I, C);
} else if (FenceInst *FI = dyn_cast<FenceInst>(I)) {