[ORC] Add support for emulated TLS to ORCv2.
This commit adds a ManglingOptions struct to IRMaterializationUnit, and replaces
IRCompileLayer::CompileFunction with a new IRCompileLayer::IRCompiler class. The
ManglingOptions struct records the emulated-TLS state (via a bool member,
EmulatedTLS, which is true if emulated TLS is enabled and false otherwise). The
IRCompileLayer::IRCompiler class wraps an IR compiler (the same way that the
CompileFunction typedef used to), but adds a method to return the
IRMaterializationUnit::ManglingOptions that the compiler will use.

These changes allow us to correctly determine the symbols that will be produced
when a thread local global variable defined at the IR level is compiled with or
without emulated TLS. This is required for ORCv2, where MaterializationUnits
must declare their interface up-front.
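
To make the interface difference concrete (an illustrative sketch, not part of
this patch): when emulated TLS is enabled, the emulated-TLS lowering does not
define the thread-local global's own symbol; it defines __emutls_v.<name> (and,
for a non-zero initializer, __emutls_t.<name>) instead, with accesses routed
through __emutls_get_address. The hypothetical helper below spells out the
difference in the defined-symbol set that ManglingOptions has to account for.

  // Illustration only (hypothetical helper, not an LLVM API): the defined
  // symbols that a thread-local global named GVName is expected to contribute
  // to the resulting object file.
  #include <string>
  #include <vector>

  std::vector<std::string> expectedTLSSymbols(const std::string &GVName,
                                              bool EmulatedTLS,
                                              bool HasNonZeroInitializer) {
    if (!EmulatedTLS)
      return {GVName};                                      // e.g. "tls_var"
    std::vector<std::string> Syms{"__emutls_v." + GVName};  // control variable
    if (HasNonZeroInitializer)
      Syms.push_back("__emutls_t." + GVName);               // initializer template
    return Syms;
  }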

Most ORCv2 clients should not require any changes. Clients writing custom IR
compilers will need to wrap their compiler in an IRCompileLayer::IRCompiler
rather than an IRCompileLayer::CompileFunction; this should be a straightforward
change (see the modifications to CompileUtils.* in this patch for an example).
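
As a concrete illustration of that migration (a minimal sketch, not code from
this patch; MyCompileToObject is a hypothetical stand-in for whatever the
client's existing compile callable did):

  #include "llvm/ExecutionEngine/Orc/CompileUtils.h"
  #include "llvm/ExecutionEngine/Orc/IRCompileLayer.h"
  #include "llvm/Support/MemoryBuffer.h"
  #include "llvm/Target/TargetMachine.h"

  using namespace llvm;
  using namespace llvm::orc;

  // Hypothetical client function that turns a Module into an object buffer.
  Expected<std::unique_ptr<MemoryBuffer>> MyCompileToObject(TargetMachine &TM,
                                                            Module &M);

  class MyIRCompiler : public IRCompileLayer::IRCompiler {
  public:
    MyIRCompiler(TargetMachine &TM)
        : IRCompiler(irManglingOptionsFromTargetOptions(TM.Options)), TM(TM) {}

    // The body that previously lived in the CompileFunction goes here.
    Expected<std::unique_ptr<MemoryBuffer>> operator()(Module &M) override {
      return MyCompileToObject(TM, M);
    }

  private:
    TargetMachine &TM;
  };

  // The layer now takes the compiler by unique_ptr rather than as a
  // std::function, e.g.:
  //   IRCompileLayer CompileLayer(ES, ObjLayer,
  //                               std::make_unique<MyIRCompiler>(TM));
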
lhames committed Jan 22, 2020
1 parent dac7cda commit ce2207a
Showing 18 changed files with 244 additions and 122 deletions.
19 changes: 14 additions & 5 deletions llvm/include/llvm/ExecutionEngine/Orc/CompileUtils.h
@@ -13,7 +13,9 @@
 #ifndef LLVM_EXECUTIONENGINE_ORC_COMPILEUTILS_H
 #define LLVM_EXECUTIONENGINE_ORC_COMPILEUTILS_H

+#include "llvm/ExecutionEngine/Orc/IRCompileLayer.h"
 #include "llvm/ExecutionEngine/Orc/JITTargetMachineBuilder.h"
+#include "llvm/ExecutionEngine/Orc/Layer.h"
 #include <memory>

 namespace llvm {
@@ -28,24 +30,31 @@ namespace orc {

 class JITTargetMachineBuilder;

+IRMaterializationUnit::ManglingOptions
+irManglingOptionsFromTargetOptions(const TargetOptions &Opts);
+
 /// Simple compile functor: Takes a single IR module and returns an ObjectFile.
 /// This compiler supports a single compilation thread and LLVMContext only.
 /// For multithreaded compilation, use ConcurrentIRCompiler below.
-class SimpleCompiler {
+class SimpleCompiler : public IRCompileLayer::IRCompiler {
 public:
   using CompileResult = std::unique_ptr<MemoryBuffer>;

   /// Construct a simple compile functor with the given target.
   SimpleCompiler(TargetMachine &TM, ObjectCache *ObjCache = nullptr)
-      : TM(TM), ObjCache(ObjCache) {}
+      : IRCompiler(irManglingOptionsFromTargetOptions(TM.Options)), TM(TM),
+        ObjCache(ObjCache) {}

   /// Set an ObjectCache to query before compiling.
   void setObjectCache(ObjectCache *NewCache) { ObjCache = NewCache; }

   /// Compile a Module to an ObjectFile.
-  CompileResult operator()(Module &M);
+  Expected<CompileResult> operator()(Module &M) override;

 private:
+  IRMaterializationUnit::ManglingOptions
+  manglingOptionsForTargetMachine(const TargetMachine &TM);
+
   CompileResult tryToLoadFromObjectCache(const Module &M);
   void notifyObjectCompiled(const Module &M, const MemoryBuffer &ObjBuffer);

@@ -73,14 +82,14 @@ class TMOwningSimpleCompiler : public SimpleCompiler {
 ///
 /// This class creates a new TargetMachine and SimpleCompiler instance for each
 /// compile.
-class ConcurrentIRCompiler {
+class ConcurrentIRCompiler : public IRCompileLayer::IRCompiler {
 public:
   ConcurrentIRCompiler(JITTargetMachineBuilder JTMB,
                        ObjectCache *ObjCache = nullptr);

   void setObjectCache(ObjectCache *ObjCache) { this->ObjCache = ObjCache; }

-  std::unique_ptr<MemoryBuffer> operator()(Module &M);
+  Expected<std::unique_ptr<MemoryBuffer>> operator()(Module &M) override;

 private:
   JITTargetMachineBuilder JTMB;
29 changes: 24 additions & 5 deletions llvm/include/llvm/ExecutionEngine/Orc/IRCompileLayer.h
@@ -29,14 +29,29 @@ namespace orc {

 class IRCompileLayer : public IRLayer {
 public:
-  using CompileFunction =
-      std::function<Expected<std::unique_ptr<MemoryBuffer>>(Module &)>;
+  class IRCompiler {
+  public:
+    IRCompiler(IRMaterializationUnit::ManglingOptions MO) : MO(std::move(MO)) {}
+    virtual ~IRCompiler();
+    const IRMaterializationUnit::ManglingOptions &getManglingOptions() const {
+      return MO;
+    }
+    virtual Expected<std::unique_ptr<MemoryBuffer>> operator()(Module &M) = 0;
+
+  protected:
+    IRMaterializationUnit::ManglingOptions &manglingOptions() { return MO; }
+
+  private:
+    IRMaterializationUnit::ManglingOptions MO;
+  };

   using NotifyCompiledFunction =
       std::function<void(VModuleKey K, ThreadSafeModule TSM)>;

   IRCompileLayer(ExecutionSession &ES, ObjectLayer &BaseLayer,
-                 CompileFunction Compile);
+                 std::unique_ptr<IRCompiler> Compile);
+
+  IRCompiler &getCompiler() { return *Compile; }

   void setNotifyCompiled(NotifyCompiledFunction NotifyCompiled);

@@ -45,7 +60,8 @@ class IRCompileLayer : public IRLayer {
 private:
   mutable std::mutex IRLayerMutex;
   ObjectLayer &BaseLayer;
-  CompileFunction Compile;
+  std::unique_ptr<IRCompiler> Compile;
+  const IRMaterializationUnit::ManglingOptions *ManglingOpts;
   NotifyCompiledFunction NotifyCompiled = NotifyCompiledFunction();
 };

@@ -90,7 +106,10 @@ class LegacyIRCompileLayer {
   /// Compile the module, and add the resulting object to the base layer
   /// along with the given memory manager and symbol resolver.
   Error addModule(VModuleKey K, std::unique_ptr<Module> M) {
-    if (auto Err = BaseLayer.addObject(std::move(K), Compile(*M)))
+    auto Obj = Compile(*M);
+    if (!Obj)
+      return Obj.takeError();
+    if (auto Err = BaseLayer.addObject(std::move(K), std::move(*Obj)))
       return Err;
     if (NotifyCompiled)
       NotifyCompiled(std::move(K), std::move(M));
4 changes: 2 additions & 2 deletions llvm/include/llvm/ExecutionEngine/Orc/LLJIT.h
@@ -130,7 +130,7 @@ class LLJIT {
   static std::unique_ptr<ObjectLayer>
   createObjectLinkingLayer(LLJITBuilderState &S, ExecutionSession &ES);

-  static Expected<IRCompileLayer::CompileFunction>
+  static Expected<std::unique_ptr<IRCompileLayer::IRCompiler>>
   createCompileFunction(LLJITBuilderState &S, JITTargetMachineBuilder JTMB);

   /// Create an LLJIT instance with a single compile thread.
@@ -193,7 +193,7 @@ class LLJITBuilderState {
       ExecutionSession &, const Triple &TT)>;

   using CompileFunctionCreator =
-      std::function<Expected<IRCompileLayer::CompileFunction>(
+      std::function<Expected<std::unique_ptr<IRCompileLayer::IRCompiler>>(
           JITTargetMachineBuilder JTMB)>;

   std::unique_ptr<ExecutionSession> ES;
89 changes: 51 additions & 38 deletions llvm/include/llvm/ExecutionEngine/Orc/Layer.h
@@ -21,15 +21,62 @@
 namespace llvm {
 namespace orc {

+/// IRMaterializationUnit is a convenient base class for MaterializationUnits
+/// wrapping LLVM IR. Represents materialization responsibility for all symbols
+/// in the given module. If symbols are overridden by other definitions, then
+/// their linkage is changed to available-externally.
+class IRMaterializationUnit : public MaterializationUnit {
+public:
+  struct ManglingOptions {
+    bool EmulatedTLS = false;
+  };
+
+  using SymbolNameToDefinitionMap = std::map<SymbolStringPtr, GlobalValue *>;
+
+  /// Create an IRMaterializationLayer. Scans the module to build the
+  /// SymbolFlags and SymbolToDefinition maps.
+  IRMaterializationUnit(ExecutionSession &ES, const ManglingOptions &MO,
+                        ThreadSafeModule TSM, VModuleKey K);
+
+  /// Create an IRMaterializationLayer from a module, and pre-existing
+  /// SymbolFlags and SymbolToDefinition maps. The maps must provide
+  /// entries for each definition in M.
+  /// This constructor is useful for delegating work from one
+  /// IRMaterializationUnit to another.
+  IRMaterializationUnit(ThreadSafeModule TSM, VModuleKey K,
+                        SymbolFlagsMap SymbolFlags,
+                        SymbolNameToDefinitionMap SymbolToDefinition);
+
+  /// Return the ModuleIdentifier as the name for this MaterializationUnit.
+  StringRef getName() const override;
+
+  const ThreadSafeModule &getModule() const { return TSM; }
+
+protected:
+  ThreadSafeModule TSM;
+  SymbolNameToDefinitionMap SymbolToDefinition;
+
+private:
+  void discard(const JITDylib &JD, const SymbolStringPtr &Name) override;
+};
+
 /// Interface for layers that accept LLVM IR.
 class IRLayer {
 public:
-  IRLayer(ExecutionSession &ES);
+  IRLayer(ExecutionSession &ES,
+          const IRMaterializationUnit::ManglingOptions *&MO)
+      : ES(ES), MO(MO) {}
+
   virtual ~IRLayer();

   /// Returns the ExecutionSession for this layer.
   ExecutionSession &getExecutionSession() { return ES; }

+  /// Get the mangling options for this layer.
+  const IRMaterializationUnit::ManglingOptions *&getManglingOptions() const {
+    return MO;
+  }
+
   /// Sets the CloneToNewContextOnEmit flag (false by default).
   ///
   /// When set, IR modules added to this layer will be cloned on to a new
@@ -57,49 +104,15 @@ class IRLayer {
 private:
   bool CloneToNewContextOnEmit = false;
   ExecutionSession &ES;
-};
-
-/// IRMaterializationUnit is a convenient base class for MaterializationUnits
-/// wrapping LLVM IR. Represents materialization responsibility for all symbols
-/// in the given module. If symbols are overridden by other definitions, then
-/// their linkage is changed to available-externally.
-class IRMaterializationUnit : public MaterializationUnit {
-public:
-  using SymbolNameToDefinitionMap = std::map<SymbolStringPtr, GlobalValue *>;
-
-  /// Create an IRMaterializationLayer. Scans the module to build the
-  /// SymbolFlags and SymbolToDefinition maps.
-  IRMaterializationUnit(ExecutionSession &ES, ThreadSafeModule TSM,
-                        VModuleKey K);
-
-  /// Create an IRMaterializationLayer from a module, and pre-existing
-  /// SymbolFlags and SymbolToDefinition maps. The maps must provide
-  /// entries for each definition in M.
-  /// This constructor is useful for delegating work from one
-  /// IRMaterializationUnit to another.
-  IRMaterializationUnit(ThreadSafeModule TSM, VModuleKey K,
-                        SymbolFlagsMap SymbolFlags,
-                        SymbolNameToDefinitionMap SymbolToDefinition);
-
-  /// Return the ModuleIdentifier as the name for this MaterializationUnit.
-  StringRef getName() const override;
-
-  const ThreadSafeModule &getModule() const { return TSM; }
-
-protected:
-  ThreadSafeModule TSM;
-  SymbolNameToDefinitionMap SymbolToDefinition;
-
-private:
-  void discard(const JITDylib &JD, const SymbolStringPtr &Name) override;
+  const IRMaterializationUnit::ManglingOptions *&MO;
 };

 /// MaterializationUnit that materializes modules by calling the 'emit' method
 /// on the given IRLayer.
 class BasicIRLayerMaterializationUnit : public IRMaterializationUnit {
 public:
-  BasicIRLayerMaterializationUnit(IRLayer &L, VModuleKey K,
-                                  ThreadSafeModule TSM);
+  BasicIRLayerMaterializationUnit(IRLayer &L, const ManglingOptions &MO,
+                                  ThreadSafeModule TSM, VModuleKey K);

 private:

4 changes: 2 additions & 2 deletions llvm/include/llvm/ExecutionEngine/Orc/Speculation.h
@@ -182,8 +182,8 @@ class IRSpeculationLayer : public IRLayer {
   IRSpeculationLayer(ExecutionSession &ES, IRCompileLayer &BaseLayer,
                      Speculator &Spec, MangleAndInterner &Mangle,
                      ResultEval Interpreter)
-      : IRLayer(ES), NextLayer(BaseLayer), S(Spec), Mangle(Mangle),
-        QueryAnalysis(Interpreter) {}
+      : IRLayer(ES, BaseLayer.getManglingOptions()), NextLayer(BaseLayer),
+        S(Spec), Mangle(Mangle), QueryAnalysis(Interpreter) {}

   void emit(MaterializationResponsibility R, ThreadSafeModule TSM);

39 changes: 19 additions & 20 deletions llvm/lib/ExecutionEngine/Orc/CompileOnDemandLayer.cpp
@@ -67,9 +67,11 @@ namespace orc {

 class PartitioningIRMaterializationUnit : public IRMaterializationUnit {
 public:
-  PartitioningIRMaterializationUnit(ExecutionSession &ES, ThreadSafeModule TSM,
-                                    VModuleKey K, CompileOnDemandLayer &Parent)
-      : IRMaterializationUnit(ES, std::move(TSM), std::move(K)),
+  PartitioningIRMaterializationUnit(ExecutionSession &ES,
+                                    const ManglingOptions &MO,
+                                    ThreadSafeModule TSM, VModuleKey K,
+                                    CompileOnDemandLayer &Parent)
+      : IRMaterializationUnit(ES, MO, std::move(TSM), std::move(K)),
         Parent(Parent) {}

   PartitioningIRMaterializationUnit(
@@ -111,7 +113,8 @@ CompileOnDemandLayer::compileWholeModule(GlobalValueSet Requested) {
 CompileOnDemandLayer::CompileOnDemandLayer(
     ExecutionSession &ES, IRLayer &BaseLayer, LazyCallThroughManager &LCTMgr,
     IndirectStubsManagerBuilder BuildIndirectStubsManager)
-    : IRLayer(ES), BaseLayer(BaseLayer), LCTMgr(LCTMgr),
+    : IRLayer(ES, BaseLayer.getManglingOptions()), BaseLayer(BaseLayer),
+      LCTMgr(LCTMgr),
       BuildIndirectStubsManager(std::move(BuildIndirectStubsManager)) {}

 void CompileOnDemandLayer::setPartitionFunction(PartitionFunction Partition) {
@@ -136,27 +139,23 @@ void CompileOnDemandLayer::emit(MaterializationResponsibility R,
   TSM.withModuleDo([&](Module &M) {
     // First, do some cleanup on the module:
     cleanUpModule(M);
-
-    MangleAndInterner Mangle(ES, M.getDataLayout());
-    for (auto &GV : M.global_values()) {
-      if (GV.isDeclaration() || GV.hasLocalLinkage() ||
-          GV.hasAppendingLinkage())
-        continue;
-
-      auto Name = Mangle(GV.getName());
-      auto Flags = JITSymbolFlags::fromGlobalValue(GV);
-      if (Flags.isCallable())
-        Callables[Name] = SymbolAliasMapEntry(Name, Flags);
-      else
-        NonCallables[Name] = SymbolAliasMapEntry(Name, Flags);
-    }
   });

+  for (auto &KV : R.getSymbols()) {
+    auto &Name = KV.first;
+    auto &Flags = KV.second;
+    if (Flags.isCallable())
+      Callables[Name] = SymbolAliasMapEntry(Name, Flags);
+    else
+      NonCallables[Name] = SymbolAliasMapEntry(Name, Flags);
+  }
+
   // Create a partitioning materialization unit and lodge it with the
   // implementation dylib.
   if (auto Err = PDR.getImplDylib().define(
           std::make_unique<PartitioningIRMaterializationUnit>(
-              ES, std::move(TSM), R.getVModuleKey(), *this))) {
+              ES, *getManglingOptions(), std::move(TSM), R.getVModuleKey(),
+              *this))) {
     ES.reportError(std::move(Err));
     R.failMaterialization();
     return;
@@ -316,7 +315,7 @@ void CompileOnDemandLayer::emitPartition(
   }

   R.replace(std::make_unique<PartitioningIRMaterializationUnit>(
-      ES, std::move(TSM), R.getVModuleKey(), *this));
+      ES, *getManglingOptions(), std::move(TSM), R.getVModuleKey(), *this));
   BaseLayer.emit(std::move(R), std::move(*ExtractedTSM));
 }

31 changes: 20 additions & 11 deletions llvm/lib/ExecutionEngine/Orc/CompileUtils.cpp
@@ -24,8 +24,17 @@
 namespace llvm {
 namespace orc {

+IRMaterializationUnit::ManglingOptions
+irManglingOptionsFromTargetOptions(const TargetOptions &Opts) {
+  IRMaterializationUnit::ManglingOptions MO;
+
+  MO.EmulatedTLS = Opts.EmulatedTLS;
+
+  return MO;
+}
+
 /// Compile a Module to an ObjectFile.
-SimpleCompiler::CompileResult SimpleCompiler::operator()(Module &M) {
+Expected<SimpleCompiler::CompileResult> SimpleCompiler::operator()(Module &M) {
   CompileResult CachedObject = tryToLoadFromObjectCache(M);
   if (CachedObject)
     return CachedObject;
@@ -38,7 +47,8 @@ SimpleCompiler::CompileResult SimpleCompiler::operator()(Module &M) {
   legacy::PassManager PM;
   MCContext *Ctx;
   if (TM.addPassesToEmitMC(PM, Ctx, ObjStream))
-    llvm_unreachable("Target does not support MC emission.");
+    return make_error<StringError>("Target does not support MC emission",
+                                   inconvertibleErrorCode());
   PM.run(M);
   }

@@ -47,14 +57,11 @@ SimpleCompiler::CompileResult SimpleCompiler::operator()(Module &M) {

   auto Obj = object::ObjectFile::createObjectFile(ObjBuffer->getMemBufferRef());

-  if (Obj) {
-    notifyObjectCompiled(M, *ObjBuffer);
-    return std::move(ObjBuffer);
-  }
+  if (!Obj)
+    return Obj.takeError();

-  // TODO: Actually report errors helpfully.
-  consumeError(Obj.takeError());
-  return nullptr;
+  notifyObjectCompiled(M, *ObjBuffer);
+  return std::move(ObjBuffer);
 }

 SimpleCompiler::CompileResult
@@ -73,9 +80,11 @@ void SimpleCompiler::notifyObjectCompiled(const Module &M,

 ConcurrentIRCompiler::ConcurrentIRCompiler(JITTargetMachineBuilder JTMB,
                                            ObjectCache *ObjCache)
-    : JTMB(std::move(JTMB)), ObjCache(ObjCache) {}
+    : IRCompiler(irManglingOptionsFromTargetOptions(JTMB.getOptions())),
+      JTMB(std::move(JTMB)), ObjCache(ObjCache) {}

-std::unique_ptr<MemoryBuffer> ConcurrentIRCompiler::operator()(Module &M) {
+Expected<std::unique_ptr<MemoryBuffer>>
+ConcurrentIRCompiler::operator()(Module &M) {
   auto TM = cantFail(JTMB.createTargetMachine());
   SimpleCompiler C(*TM, ObjCache);
   return C(M);
(Diffs for the remaining 11 changed files are not shown here.)
