#include "llvm/ProfileData/MemProf.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Function.h"
#include "llvm/ProfileData/InstrProf.h"
#include "llvm/ProfileData/SampleProf.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/EndianStream.h"

namespace llvm {
namespace memprof {

// Serialize this record in the on-disk format: the allocation sites (each a
// call stack of frame ids followed by the MemInfoBlock fields selected by
// Schema), then the related call-site contexts.
void IndexedMemProfRecord::serialize(const MemProfSchema &Schema,
                                     raw_ostream &OS) {
  using namespace support;

  endian::Writer LE(OS, llvm::endianness::little);

  // Write counts with an explicit uint64_t width so the on-disk layout does
  // not depend on the host's size_t.
  LE.write<uint64_t>(AllocSites.size());
  for (const IndexedAllocationInfo &N : AllocSites) {
    LE.write<uint64_t>(N.CallStack.size());
    for (const FrameId &Id : N.CallStack)
      LE.write<FrameId>(Id);
    N.Info.serialize(Schema, OS);
  }

  // Related contexts.
  LE.write<uint64_t>(CallSites.size());
  for (const auto &Frames : CallSites) {
    LE.write<uint64_t>(Frames.size());
    for (const FrameId &Id : Frames)
      LE.write<FrameId>(Id);
  }
}

// Deserialize a record previously written by serialize(). Ptr must point at
// data produced with the same Schema; no bounds checking is performed here.
IndexedMemProfRecord
IndexedMemProfRecord::deserialize(const MemProfSchema &Schema,
                                  const unsigned char *Ptr) {
  using namespace support;

  IndexedMemProfRecord Record;

  // Read the meminfo nodes.
  const uint64_t NumNodes =
      endian::readNext<uint64_t, llvm::endianness::little, unaligned>(Ptr);
  for (uint64_t I = 0; I < NumNodes; I++) {
    IndexedAllocationInfo Node;
    const uint64_t NumFrames =
        endian::readNext<uint64_t, llvm::endianness::little, unaligned>(Ptr);
    for (uint64_t J = 0; J < NumFrames; J++) {
      const FrameId Id =
          endian::readNext<FrameId, llvm::endianness::little, unaligned>(Ptr);
      Node.CallStack.push_back(Id);
    }
    Node.Info.deserialize(Schema, Ptr);
    // deserialize() does not advance Ptr itself; skip the fixed-size block.
    Ptr += PortableMemInfoBlock::serializedSize();
    Record.AllocSites.push_back(Node);
  }

  // Read the callsite information.
  const uint64_t NumCtxs =
      endian::readNext<uint64_t, llvm::endianness::little, unaligned>(Ptr);
  for (uint64_t J = 0; J < NumCtxs; J++) {
    const uint64_t NumFrames =
        endian::readNext<uint64_t, llvm::endianness::little, unaligned>(Ptr);
    llvm::SmallVector<FrameId> Frames;
    Frames.reserve(NumFrames);
    for (uint64_t K = 0; K < NumFrames; K++) {
      const FrameId Id =
          endian::readNext<FrameId, llvm::endianness::little, unaligned>(Ptr);
      Frames.push_back(Id);
    }
    Record.CallSites.push_back(Frames);
  }

  return Record;
}

GlobalValue::GUID IndexedMemProfRecord::getGUID(const StringRef FunctionName) {
  // Canonicalize the function name to drop suffixes such as ".llvm.". Note
  // we do not drop any ".__uniq." suffixes, as getCanonicalFnName does not
  // drop those by default. This is by design to differentiate internal
  // linkage functions during matching. By dropping the other suffixes we can
  // then match functions in the profile use phase prior to their addition.
  // Note that this applies to both instrumented and sampled function names.
  StringRef CanonicalName =
      sampleprof::FunctionSamples::getCanonicalFnName(FunctionName);

  // We use the function guid which we expect to be a uint64_t. At
  // this time, it is the lower 64 bits of the md5 of the canonical
  // function name.
  return Function::getGUID(CanonicalName);
}

// Read the schema (the ordered list of MemInfoBlock fields present in the
// profile) from Buffer. On success, Buffer is advanced to one past the
// schema; on a malformed schema an InstrProfError is returned and Buffer is
// left untouched.
Expected<MemProfSchema> readMemProfSchema(const unsigned char *&Buffer) {
  using namespace support;

  const unsigned char *Ptr = Buffer;
  const uint64_t NumSchemaIds =
      endian::readNext<uint64_t, llvm::endianness::little, unaligned>(Ptr);
  if (NumSchemaIds > static_cast<uint64_t>(Meta::Size)) {
    return make_error<InstrProfError>(instrprof_error::malformed,
                                      "memprof schema invalid");
  }

  MemProfSchema Result;
  for (size_t I = 0; I < NumSchemaIds; I++) {
    const uint64_t Tag =
        endian::readNext<uint64_t, llvm::endianness::little, unaligned>(Ptr);
    if (Tag >= static_cast<uint64_t>(Meta::Size)) {
      return make_error<InstrProfError>(instrprof_error::malformed,
                                        "memprof schema invalid");
    }
    Result.push_back(static_cast<Meta>(Tag));
  }
  // Advance the buffer to one past the schema if we succeeded.
  Buffer = Ptr;
  return Result;
}

} // namespace memprof
} // namespace llvm