Vendor import of llvm-project branch release/10.x

llvmorg-10.0.1-rc1-0-gf79cd71e145.
This commit is contained in:
Dimitry Andric 2020-05-22 16:12:18 +00:00
parent 4c8711804d
commit ec2b0f99f2
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/vendor/llvm-project/release-10.x/; revision=361387
svn path=/vendor/llvm-project/llvmorg-10.0.1-rc1-0-gf79cd71e145/; revision=361388; tag=vendor/llvm-project/llvmorg-10.0.1-rc1-0-gf79cd71e145
50 changed files with 691 additions and 310 deletions

View file

@ -856,14 +856,15 @@ class alignas(8) Decl {
return getParentFunctionOrMethod() == nullptr;
}
/// Returns true if this declaration lexically is inside a function.
/// It recognizes non-defining declarations as well as members of local
/// classes:
/// Returns true if this declaration is lexically inside a function or inside
/// a variable initializer. It recognizes non-defining declarations as well
/// as members of local classes:
/// \code
/// void foo() { void bar(); }
/// void foo2() { class ABC { void bar(); }; }
/// inline int x = [](){ return 0; };
/// \endcode
bool isLexicallyWithinFunctionOrMethod() const;
bool isInLocalScope() const;
/// If this decl is defined inside a function/method/block it returns
/// the corresponding DeclContext, otherwise it returns null.
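
For orientation, a minimal compilable sketch of the three cases the new doc comment lists (the call parentheses on the last line are an addition here so the snippet type-checks; the comment above omits them):

void foo() { void bar(); }                  // non-defining local declaration
void foo2() { class ABC { void bar(); }; }  // member of a local class
inline int x = []() { return 0; }();        // declaration inside a variable initializer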

View file

@ -685,7 +685,7 @@ def XRayLogArgs : InheritableAttr {
def PatchableFunctionEntry
: InheritableAttr,
TargetSpecificAttr<TargetArch<["aarch64", "x86", "x86_64"]>> {
TargetSpecificAttr<TargetArch<["aarch64", "aarch64_be", "x86", "x86_64"]>> {
let Spellings = [GCC<"patchable_function_entry">];
let Subjects = SubjectList<[Function, ObjCMethod]>;
let Args = [UnsignedArgument<"Count">, DefaultIntArgument<"Offset", 0>];
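
As a usage sketch, the attribute this hunk extends to big-endian AArch64 is spelled as follows (the counts are arbitrary; assumes a -target aarch64_be-* invocation):

// Reserve 2 NOPs for runtime patching, 1 of them placed before the entry point.
__attribute__((patchable_function_entry(2, 1)))
void traced(void) {}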

View file

@ -332,13 +332,16 @@ void Decl::setDeclContextsImpl(DeclContext *SemaDC, DeclContext *LexicalDC,
}
}
bool Decl::isLexicallyWithinFunctionOrMethod() const {
bool Decl::isInLocalScope() const {
const DeclContext *LDC = getLexicalDeclContext();
while (true) {
if (LDC->isFunctionOrMethod())
return true;
if (!isa<TagDecl>(LDC))
return false;
if (const auto *CRD = dyn_cast<CXXRecordDecl>(LDC))
if (CRD->isLambda())
return true;
LDC = LDC->getLexicalParent();
}
return false;

View file

@ -8593,6 +8593,10 @@ bool PointerExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
static bool EvaluateArrayNewInitList(EvalInfo &Info, LValue &This,
APValue &Result, const InitListExpr *ILE,
QualType AllocType);
static bool EvaluateArrayNewConstructExpr(EvalInfo &Info, LValue &This,
APValue &Result,
const CXXConstructExpr *CCE,
QualType AllocType);
bool PointerExprEvaluator::VisitCXXNewExpr(const CXXNewExpr *E) {
if (!Info.getLangOpts().CPlusPlus2a)
@ -8642,6 +8646,7 @@ bool PointerExprEvaluator::VisitCXXNewExpr(const CXXNewExpr *E) {
const Expr *Init = E->getInitializer();
const InitListExpr *ResizedArrayILE = nullptr;
const CXXConstructExpr *ResizedArrayCCE = nullptr;
QualType AllocType = E->getAllocatedType();
if (Optional<const Expr*> ArraySize = E->getArraySize()) {
@ -8685,7 +8690,7 @@ bool PointerExprEvaluator::VisitCXXNewExpr(const CXXNewExpr *E) {
// -- the new-initializer is a braced-init-list and the number of
// array elements for which initializers are provided [...]
// exceeds the number of elements to initialize
if (Init) {
if (Init && !isa<CXXConstructExpr>(Init)) {
auto *CAT = Info.Ctx.getAsConstantArrayType(Init->getType());
assert(CAT && "unexpected type for array initializer");
@ -8708,6 +8713,8 @@ bool PointerExprEvaluator::VisitCXXNewExpr(const CXXNewExpr *E) {
// special handling for this case when we initialize.
if (InitBound != AllocBound)
ResizedArrayILE = cast<InitListExpr>(Init);
} else if (Init) {
ResizedArrayCCE = cast<CXXConstructExpr>(Init);
}
AllocType = Info.Ctx.getConstantArrayType(AllocType, ArrayBound, nullptr,
@ -8772,6 +8779,10 @@ bool PointerExprEvaluator::VisitCXXNewExpr(const CXXNewExpr *E) {
if (!EvaluateArrayNewInitList(Info, Result, *Val, ResizedArrayILE,
AllocType))
return false;
} else if (ResizedArrayCCE) {
if (!EvaluateArrayNewConstructExpr(Info, Result, *Val, ResizedArrayCCE,
AllocType))
return false;
} else if (Init) {
if (!EvaluateInPlace(*Val, Info, Result, Init))
return false;
@ -9597,6 +9608,16 @@ static bool EvaluateArrayNewInitList(EvalInfo &Info, LValue &This,
.VisitInitListExpr(ILE, AllocType);
}
static bool EvaluateArrayNewConstructExpr(EvalInfo &Info, LValue &This,
APValue &Result,
const CXXConstructExpr *CCE,
QualType AllocType) {
assert(CCE->isRValue() && CCE->getType()->isArrayType() &&
"not an array rvalue");
return ArrayExprEvaluator(Info, This, Result)
.VisitCXXConstructExpr(CCE, This, &Result, AllocType);
}
// Return true iff the given array filler may depend on the element index.
static bool MaybeElementDependentArrayFiller(const Expr *FillerExpr) {
// For now, just whitelist non-class value-initialization and initialization
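
A hypothetical reduced test for the new path, assuming -std=c++2a constexpr allocation: when the element type has a constructor, the initializer of the resized array is a CXXConstructExpr rather than an InitListExpr:

struct S {
  int v;
  constexpr S() : v(1) {}
};
constexpr int f() {
  int n = 3;
  S *p = new S[n];   // bound not part of the type; initializer is a CXXConstructExpr
  int r = p[0].v;
  delete[] p;
  return r;
}
static_assert(f() == 1);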

View file

@ -430,7 +430,7 @@ std::string RawComment::getFormattedText(const SourceManager &SourceMgr,
};
auto DropTrailingNewLines = [](std::string &Str) {
while (Str.back() == '\n')
while (!Str.empty() && Str.back() == '\n')
Str.pop_back();
};

View file

@ -437,7 +437,8 @@ void CodeGenFunction::EmitMustTailThunk(GlobalDecl GD,
// Finish the function to maintain CodeGenFunction invariants.
// FIXME: Don't emit unreachable code.
EmitBlock(createBasicBlock());
FinishFunction();
FinishThunk();
}
void CodeGenFunction::generateThunk(llvm::Function *Fn,
@ -564,7 +565,7 @@ llvm::Constant *CodeGenVTables::maybeEmitThunk(GlobalDecl GD,
CGM.SetLLVMFunctionAttributesForDefinition(GD.getDecl(), ThunkFn);
// Thunks for variadic methods are special because in general variadic
// arguments cannot be perferctly forwarded. In the general case, clang
// arguments cannot be perfectly forwarded. In the general case, clang
// implements such thunks by cloning the original function body. However, for
// thunks with no return adjustment on targets that support musttail, we can
// use musttail to perfectly forward the variadic arguments.

View file

@ -1847,9 +1847,16 @@ void CodeGenModule::SetFunctionAttributes(GlobalDecl GD, llvm::Function *F,
else if (const auto *SA = FD->getAttr<SectionAttr>())
F->setSection(SA->getName());
// If we plan on emitting this inline builtin, we can't treat it as a builtin.
if (FD->isInlineBuiltinDeclaration()) {
F->addAttribute(llvm::AttributeList::FunctionIndex,
llvm::Attribute::NoBuiltin);
const FunctionDecl *FDBody;
bool HasBody = FD->hasBody(FDBody);
(void)HasBody;
assert(HasBody && "Inline builtin declarations should always have an "
"available body!");
if (shouldEmitFunction(FDBody))
F->addAttribute(llvm::AttributeList::FunctionIndex,
llvm::Attribute::NoBuiltin);
}
if (FD->isReplaceableGlobalAllocationFunction()) {
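
For context, a sketch of the "inline builtin" pattern this code handles (a glibc-style fortified wrapper; illustrative, not the exact code the commit was written against). With this change the NoBuiltin attribute is only applied when the body will actually be emitted:

extern inline __attribute__((gnu_inline, always_inline)) void *
memcpy(void *dst, const void *src, unsigned long n) {
  // The call to the checked builtin here must not be folded back into a
  // plain memcpy call, hence NoBuiltin on the emitted body.
  return __builtin___memcpy_chk(dst, src, n, __builtin_object_size(dst, 0));
}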

View file

@ -1146,6 +1146,7 @@ void Darwin::addProfileRTLibs(const ArgList &Args,
addExportedSymbol(CmdArgs, "___gcov_flush");
addExportedSymbol(CmdArgs, "_flush_fn_list");
addExportedSymbol(CmdArgs, "_writeout_fn_list");
addExportedSymbol(CmdArgs, "_reset_fn_list");
} else {
addExportedSymbol(CmdArgs, "___llvm_profile_filename");
addExportedSymbol(CmdArgs, "___llvm_profile_raw_version");

View file

@ -309,7 +309,7 @@ static const char *getLDMOption(const llvm::Triple &T, const ArgList &Args) {
}
}
static bool getPIE(const ArgList &Args, const toolchains::Linux &ToolChain) {
static bool getPIE(const ArgList &Args, const ToolChain &TC) {
if (Args.hasArg(options::OPT_shared) || Args.hasArg(options::OPT_static) ||
Args.hasArg(options::OPT_r) || Args.hasArg(options::OPT_static_pie))
return false;
@ -317,17 +317,16 @@ static bool getPIE(const ArgList &Args, const toolchains::Linux &ToolChain) {
Arg *A = Args.getLastArg(options::OPT_pie, options::OPT_no_pie,
options::OPT_nopie);
if (!A)
return ToolChain.isPIEDefault();
return TC.isPIEDefault();
return A->getOption().matches(options::OPT_pie);
}
static bool getStaticPIE(const ArgList &Args,
const toolchains::Linux &ToolChain) {
static bool getStaticPIE(const ArgList &Args, const ToolChain &TC) {
bool HasStaticPIE = Args.hasArg(options::OPT_static_pie);
// -no-pie is an alias for -nopie. So, handling -nopie takes care of
// -no-pie as well.
if (HasStaticPIE && Args.hasArg(options::OPT_nopie)) {
const Driver &D = ToolChain.getDriver();
const Driver &D = TC.getDriver();
const llvm::opt::OptTable &Opts = D.getOpts();
const char *StaticPIEName = Opts.getOptionName(options::OPT_static_pie);
const char *NoPIEName = Opts.getOptionName(options::OPT_nopie);
@ -346,8 +345,12 @@ void tools::gnutools::Linker::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfoList &Inputs,
const ArgList &Args,
const char *LinkingOutput) const {
const toolchains::Linux &ToolChain =
static_cast<const toolchains::Linux &>(getToolChain());
// FIXME: The Linker class constructor takes a ToolChain and not a
// Generic_ELF, so the static_cast might return a reference to an invalid
// instance (see PR45061). Ideally, the Linker constructor needs to take a
// Generic_ELF instead.
const toolchains::Generic_ELF &ToolChain =
static_cast<const toolchains::Generic_ELF &>(getToolChain());
const Driver &D = ToolChain.getDriver();
const llvm::Triple &Triple = getToolChain().getEffectiveTriple();
@ -418,8 +421,7 @@ void tools::gnutools::Linker::ConstructJob(Compilation &C, const JobAction &JA,
if (isAndroid)
CmdArgs.push_back("--warn-shared-textrel");
for (const auto &Opt : ToolChain.ExtraOpts)
CmdArgs.push_back(Opt.c_str());
ToolChain.addExtraOpts(CmdArgs);
CmdArgs.push_back("--eh-frame-hdr");

View file

@ -356,6 +356,12 @@ class LLVM_LIBRARY_VISIBILITY Generic_ELF : public Generic_GCC {
void addClangTargetOptions(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args,
Action::OffloadKind DeviceOffloadKind) const override;
virtual std::string getDynamicLinker(const llvm::opt::ArgList &Args) const {
return {};
}
virtual void addExtraOpts(llvm::opt::ArgStringList &CmdArgs) const {}
};
} // end namespace toolchains

View file

@ -61,8 +61,7 @@ static StringRef getOSLibDir(const llvm::Triple &Triple, const ArgList &Args) {
return Triple.isArch32Bit() ? "lib" : "lib64";
}
Hurd::Hurd(const Driver &D, const llvm::Triple &Triple,
const ArgList &Args)
Hurd::Hurd(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
: Generic_ELF(D, Triple, Args) {
std::string SysRoot = computeSysRoot();
path_list &Paths = getFilePaths();
@ -170,3 +169,8 @@ void Hurd::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
addExternCSystemInclude(DriverArgs, CC1Args, SysRoot + "/usr/include");
}
void Hurd::addExtraOpts(llvm::opt::ArgStringList &CmdArgs) const {
for (const auto &Opt : ExtraOpts)
CmdArgs.push_back(Opt.c_str());
}

View file

@ -27,9 +27,11 @@ class LLVM_LIBRARY_VISIBILITY Hurd : public Generic_ELF {
AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override;
virtual std::string computeSysRoot() const;
std::string computeSysRoot() const;
virtual std::string getDynamicLinker(const llvm::opt::ArgList &Args) const;
std::string getDynamicLinker(const llvm::opt::ArgList &Args) const override;
void addExtraOpts(llvm::opt::ArgStringList &CmdArgs) const override;
std::vector<std::string> ExtraOpts;

View file

@ -986,3 +986,8 @@ void Linux::addProfileRTLibs(const llvm::opt::ArgList &Args,
Twine("-u", llvm::getInstrProfRuntimeHookVarName())));
ToolChain::addProfileRTLibs(Args, CmdArgs);
}
void Linux::addExtraOpts(llvm::opt::ArgStringList &CmdArgs) const {
for (const auto &Opt : ExtraOpts)
CmdArgs.push_back(Opt.c_str());
}

View file

@ -42,7 +42,9 @@ class LLVM_LIBRARY_VISIBILITY Linux : public Generic_ELF {
llvm::opt::ArgStringList &CmdArgs) const override;
virtual std::string computeSysRoot() const;
virtual std::string getDynamicLinker(const llvm::opt::ArgList &Args) const;
std::string getDynamicLinker(const llvm::opt::ArgList &Args) const override;
void addExtraOpts(llvm::opt::ArgStringList &CmdArgs) const override;
std::vector<std::string> ExtraOpts;

View file

@ -2176,6 +2176,10 @@ static bool isFunctionDeclarationName(const FormatToken &Current,
Next = Next->Next;
continue;
}
if (Next->is(TT_TemplateOpener) && Next->MatchingParen) {
Next = Next->MatchingParen;
continue;
}
break;
}
@ -2705,20 +2709,40 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
tok::l_square));
if (Right.is(tok::star) && Left.is(tok::l_paren))
return false;
if (Right.isOneOf(tok::star, tok::amp, tok::ampamp) &&
(Left.is(tok::identifier) || Left.isSimpleTypeSpecifier()) &&
// Space between the type and the * in:
// operator void*()
// operator char*()
// operator /*comment*/ const char*()
// operator volatile /*comment*/ char*()
// operator Foo*()
// dependent on PointerAlignment style.
Left.Previous &&
(Left.Previous->endsSequence(tok::kw_operator) ||
Left.Previous->endsSequence(tok::kw_const, tok::kw_operator) ||
Left.Previous->endsSequence(tok::kw_volatile, tok::kw_operator)))
return (Style.PointerAlignment != FormatStyle::PAS_Left);
if (Right.is(tok::star) && Left.is(tok::star))
return false;
if (Right.isOneOf(tok::star, tok::amp, tok::ampamp)) {
const FormatToken *Previous = &Left;
while (Previous && !Previous->is(tok::kw_operator)) {
if (Previous->is(tok::identifier) || Previous->isSimpleTypeSpecifier()) {
Previous = Previous->getPreviousNonComment();
continue;
}
if (Previous->is(TT_TemplateCloser) && Previous->MatchingParen) {
Previous = Previous->MatchingParen->getPreviousNonComment();
continue;
}
if (Previous->is(tok::coloncolon)) {
Previous = Previous->getPreviousNonComment();
continue;
}
break;
}
// Space between the type and the * in:
// operator void*()
// operator char*()
// operator /*comment*/ const char*()
// operator volatile /*comment*/ char*()
// operator Foo*()
// operator C<T>*()
// operator std::Foo*()
// operator C<T>::D<U>*()
// dependent on PointerAlignment style.
if (Previous && (Previous->endsSequence(tok::kw_operator) ||
Previous->endsSequence(tok::kw_const, tok::kw_operator) ||
Previous->endsSequence(tok::kw_volatile, tok::kw_operator)))
return (Style.PointerAlignment != FormatStyle::PAS_Left);
}
const auto SpaceRequiredForArrayInitializerLSquare =
[](const FormatToken &LSquareTok, const FormatStyle &Style) {
return Style.SpacesInContainerLiterals ||
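
An abridged formatting illustration of the newly handled spellings (assuming PointerAlignment: Right; the scoped forms operator std::Foo*() and operator C<T>::D<U>*() follow the same rule):

// clang-format now leaves the space before '*' intact in these:
struct Foo {};
template <typename T> struct C {
  operator Foo *();
  operator C<T> *();
};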

View file

@ -2343,7 +2343,7 @@ ParmVarDecl *Sema::SubstParmVarDecl(ParmVarDecl *OldParm,
UnparsedDefaultArgInstantiations[OldParm].push_back(NewParm);
} else if (Expr *Arg = OldParm->getDefaultArg()) {
FunctionDecl *OwningFunc = cast<FunctionDecl>(OldParm->getDeclContext());
if (OwningFunc->isLexicallyWithinFunctionOrMethod()) {
if (OwningFunc->isInLocalScope()) {
// Instantiate default arguments for methods of local classes (DR1484)
// and non-defining declarations.
Sema::ContextRAII SavedContext(*this, OwningFunc);
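
A small sketch of the situation the comment describes (DR1484: default arguments of a local class's methods are instantiated together with the enclosing function):

template <typename T> void f() {
  struct Local {
    void g(T x = T()) {}  // default argument instantiated per DR1484
  };
  Local().g();
}
template void f<int>();  // forces the instantiation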

View file

@ -4367,7 +4367,7 @@ TemplateDeclInstantiator::InitFunctionInstantiation(FunctionDecl *New,
EPI.ExceptionSpec.Type != EST_None &&
EPI.ExceptionSpec.Type != EST_DynamicNone &&
EPI.ExceptionSpec.Type != EST_BasicNoexcept &&
!Tmpl->isLexicallyWithinFunctionOrMethod()) {
!Tmpl->isInLocalScope()) {
FunctionDecl *ExceptionSpecTemplate = Tmpl;
if (EPI.ExceptionSpec.Type == EST_Uninstantiated)
ExceptionSpecTemplate = EPI.ExceptionSpec.SourceTemplate;

View file

@ -4022,50 +4022,8 @@ template<typename Derived>
void TreeTransform<Derived>::InventTemplateArgumentLoc(
const TemplateArgument &Arg,
TemplateArgumentLoc &Output) {
SourceLocation Loc = getDerived().getBaseLocation();
switch (Arg.getKind()) {
case TemplateArgument::Null:
llvm_unreachable("null template argument in TreeTransform");
break;
case TemplateArgument::Type:
Output = TemplateArgumentLoc(Arg,
SemaRef.Context.getTrivialTypeSourceInfo(Arg.getAsType(), Loc));
break;
case TemplateArgument::Template:
case TemplateArgument::TemplateExpansion: {
NestedNameSpecifierLocBuilder Builder;
TemplateName Template = Arg.getAsTemplateOrTemplatePattern();
if (DependentTemplateName *DTN = Template.getAsDependentTemplateName())
Builder.MakeTrivial(SemaRef.Context, DTN->getQualifier(), Loc);
else if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName())
Builder.MakeTrivial(SemaRef.Context, QTN->getQualifier(), Loc);
if (Arg.getKind() == TemplateArgument::Template)
Output = TemplateArgumentLoc(Arg,
Builder.getWithLocInContext(SemaRef.Context),
Loc);
else
Output = TemplateArgumentLoc(Arg,
Builder.getWithLocInContext(SemaRef.Context),
Loc, Loc);
break;
}
case TemplateArgument::Expression:
Output = TemplateArgumentLoc(Arg, Arg.getAsExpr());
break;
case TemplateArgument::Declaration:
case TemplateArgument::Integral:
case TemplateArgument::Pack:
case TemplateArgument::NullPtr:
Output = TemplateArgumentLoc(Arg, TemplateArgumentLocInfo());
break;
}
Output = getSema().getTrivialTemplateArgumentLoc(
Arg, QualType(), getDerived().getBaseLocation());
}
template<typename Derived>
@ -4075,12 +4033,45 @@ bool TreeTransform<Derived>::TransformTemplateArgument(
const TemplateArgument &Arg = Input.getArgument();
switch (Arg.getKind()) {
case TemplateArgument::Null:
case TemplateArgument::Integral:
case TemplateArgument::Pack:
case TemplateArgument::Declaration:
case TemplateArgument::NullPtr:
llvm_unreachable("Unexpected TemplateArgument");
case TemplateArgument::Integral:
case TemplateArgument::NullPtr:
case TemplateArgument::Declaration: {
// Transform a resolved template argument straight to a resolved template
// argument. We get here when substituting into an already-substituted
// template type argument during concept satisfaction checking.
QualType T = Arg.getNonTypeTemplateArgumentType();
QualType NewT = getDerived().TransformType(T);
if (NewT.isNull())
return true;
ValueDecl *D = Arg.getKind() == TemplateArgument::Declaration
? Arg.getAsDecl()
: nullptr;
ValueDecl *NewD = D ? cast_or_null<ValueDecl>(getDerived().TransformDecl(
getDerived().getBaseLocation(), D))
: nullptr;
if (D && !NewD)
return true;
if (NewT == T && D == NewD)
Output = Input;
else if (Arg.getKind() == TemplateArgument::Integral)
Output = TemplateArgumentLoc(
TemplateArgument(getSema().Context, Arg.getAsIntegral(), NewT),
TemplateArgumentLocInfo());
else if (Arg.getKind() == TemplateArgument::NullPtr)
Output = TemplateArgumentLoc(TemplateArgument(NewT, /*IsNullPtr=*/true),
TemplateArgumentLocInfo());
else
Output = TemplateArgumentLoc(TemplateArgument(NewD, NewT),
TemplateArgumentLocInfo());
return false;
}
case TemplateArgument::Type: {
TypeSourceInfo *DI = Input.getTypeSourceInfo();
if (!DI)
@ -11837,19 +11828,6 @@ TreeTransform<Derived>::TransformLambdaExpr(LambdaExpr *E) {
LSI->CallOperator = NewCallOperator;
for (unsigned I = 0, NumParams = NewCallOperator->getNumParams();
I != NumParams; ++I) {
auto *P = NewCallOperator->getParamDecl(I);
if (P->hasUninstantiatedDefaultArg()) {
EnterExpressionEvaluationContext Eval(
getSema(),
Sema::ExpressionEvaluationContext::PotentiallyEvaluatedIfUsed, P);
ExprResult R = getDerived().TransformExpr(
E->getCallOperator()->getParamDecl(I)->getDefaultArg());
P->setDefaultArg(R.get());
}
}
getDerived().transformAttrs(E->getCallOperator(), NewCallOperator);
getDerived().transformedLocalDecl(E->getCallOperator(), {NewCallOperator});
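
A hypothetical sketch of the kind of code in this path's vicinity: during concept satisfaction checking, substitution can encounter template arguments that are already resolved integrals (whether this exact snippet reaches the new case is an assumption):

template <int N>
concept Positive = (N > 0);

template <typename T, int N> requires Positive<N>
struct Buf { T data[N]; };

Buf<int, 4> b;  // satisfaction checking substitutes the resolved N = 4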

View file

@ -32,8 +32,10 @@
#include <windows.h>
#include "WindowsMMap.h"
#else
#include <sys/mman.h>
#include <sys/file.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <unistd.h>
#endif
#if defined(__FreeBSD__) && defined(__i386__)
@ -119,6 +121,11 @@ struct fn_list writeout_fn_list;
*/
struct fn_list flush_fn_list;
/*
* A list of reset functions, shared between all dynamic objects.
*/
struct fn_list reset_fn_list;
static void fn_list_insert(struct fn_list* list, fn_ptr fn) {
struct fn_node* new_node = malloc(sizeof(struct fn_node));
new_node->fn = fn;
@ -634,7 +641,46 @@ void llvm_delete_flush_function_list(void) {
}
COMPILER_RT_VISIBILITY
void llvm_gcov_init(fn_ptr wfn, fn_ptr ffn) {
void llvm_register_reset_function(fn_ptr fn) {
fn_list_insert(&reset_fn_list, fn);
}
COMPILER_RT_VISIBILITY
void llvm_delete_reset_function_list(void) { fn_list_remove(&reset_fn_list); }
COMPILER_RT_VISIBILITY
void llvm_reset_counters(void) {
struct fn_node *curr = reset_fn_list.head;
while (curr) {
if (curr->id == CURRENT_ID) {
curr->fn();
}
curr = curr->next;
}
}
#if !defined(_WIN32)
COMPILER_RT_VISIBILITY
pid_t __gcov_fork() {
pid_t parent_pid = getpid();
pid_t pid = fork();
if (pid == 0) {
pid_t child_pid = getpid();
if (child_pid != parent_pid) {
// The PID changed, so we are in a fork (a program may provide its own fork
// function). Just reset the counters for this child process.
llvm_reset_counters();
}
}
return pid;
}
#endif
COMPILER_RT_VISIBILITY
void llvm_gcov_init(fn_ptr wfn, fn_ptr ffn, fn_ptr rfn) {
static int atexit_ran = 0;
if (wfn)
@ -643,10 +689,14 @@ void llvm_gcov_init(fn_ptr wfn, fn_ptr ffn) {
if (ffn)
llvm_register_flush_function(ffn);
if (rfn)
llvm_register_reset_function(rfn);
if (atexit_ran == 0) {
atexit_ran = 1;
/* Make sure we write out the data and delete the data structures. */
atexit(llvm_delete_reset_function_list);
atexit(llvm_delete_flush_function_list);
atexit(llvm_delete_writeout_function_list);
atexit(llvm_writeout_files);
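
A usage-level sketch of the effect (the rewriting happens automatically when compiling with --coverage; nothing here needs to be called by hand):

#include <unistd.h>

int main(void) {
  if (fork() == 0) {  // lowered to __gcov_fork(), which resets the child's counters
    // child: execution counts start from zero here
  } else {
    // parent: keeps the counts accumulated so far
  }
  return 0;
}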

View file

@ -28,10 +28,12 @@ void markLive(ArrayRef<Chunk *> chunks) {
// as we push, so sections never appear twice in the list.
SmallVector<SectionChunk *, 256> worklist;
// COMDAT section chunks are dead by default. Add non-COMDAT chunks.
// COMDAT section chunks are dead by default. Add non-COMDAT chunks. Do not
// traverse DWARF sections. They are live, but they should not keep other
// sections alive.
for (Chunk *c : chunks)
if (auto *sc = dyn_cast<SectionChunk>(c))
if (sc->live)
if (sc->live && !sc->isDWARF())
worklist.push_back(sc);
auto enqueue = [&](SectionChunk *c) {

View file

@ -1906,8 +1906,17 @@ template <class ELFT> void LinkerDriver::link(opt::InputArgList &args) {
// We do not want to emit debug sections if --strip-all
// or -strip-debug are given.
return config->strip != StripPolicy::None &&
(s->name.startswith(".debug") || s->name.startswith(".zdebug"));
if (config->strip == StripPolicy::None)
return false;
if (isDebugSection(*s))
return true;
if (auto *isec = dyn_cast<InputSection>(s))
if (InputSectionBase *rel = isec->getRelocatedSection())
if (isDebugSection(*rel))
return true;
return false;
});
// Now that the number of partitions is fixed, save a pointer to the main

View file

@ -441,8 +441,7 @@ void InputSection::copyRelocations(uint8_t *buf, ArrayRef<RelTy> rels) {
// See the comment in maybeReportUndefined for PPC64 .toc .
auto *d = dyn_cast<Defined>(&sym);
if (!d) {
if (!sec->name.startswith(".debug") &&
!sec->name.startswith(".zdebug") && sec->name != ".eh_frame" &&
if (!isDebugSection(*sec) && sec->name != ".eh_frame" &&
sec->name != ".gcc_except_table" && sec->name != ".toc") {
uint32_t secIdx = cast<Undefined>(sym).discardedSecIdx;
Elf_Shdr_Impl<ELFT> sec =

View file

@ -357,6 +357,10 @@ class InputSection : public InputSectionBase {
template <class ELFT> void copyShtGroup(uint8_t *buf);
};
inline bool isDebugSection(const InputSectionBase &sec) {
return sec.name.startswith(".debug") || sec.name.startswith(".zdebug");
}
// The list of all input sections.
extern std::vector<InputSectionBase *> inputSections;

View file

@ -114,8 +114,7 @@ void OutputSection::commitSection(InputSection *isec) {
flags = isec->flags;
} else {
// Otherwise, check if new type or flags are compatible with existing ones.
unsigned mask = SHF_TLS | SHF_LINK_ORDER;
if ((flags & mask) != (isec->flags & mask))
if ((flags ^ isec->flags) & SHF_TLS)
error("incompatible section flags for " + name + "\n>>> " + toString(isec) +
": 0x" + utohexstr(isec->flags) + "\n>>> output section " + name +
": 0x" + utohexstr(flags));
@ -367,8 +366,9 @@ void OutputSection::finalize() {
// all InputSections in the OutputSection have the same dependency.
if (auto *ex = dyn_cast<ARMExidxSyntheticSection>(first))
link = ex->getLinkOrderDep()->getParent()->sectionIndex;
else if (auto *d = first->getLinkOrderDep())
link = d->getParent()->sectionIndex;
else if (first->flags & SHF_LINK_ORDER)
if (auto *d = first->getLinkOrderDep())
link = d->getParent()->sectionIndex;
}
if (type == SHT_GROUP) {

View file

@ -1523,17 +1523,30 @@ template <class ELFT> void Writer<ELFT>::resolveShfLinkOrder() {
// but sort must consider them all at once.
std::vector<InputSection **> scriptSections;
std::vector<InputSection *> sections;
bool started = false, stopped = false;
for (BaseCommand *base : sec->sectionCommands) {
if (auto *isd = dyn_cast<InputSectionDescription>(base)) {
for (InputSection *&isec : isd->sections) {
scriptSections.push_back(&isec);
sections.push_back(isec);
if (!(isec->flags & SHF_LINK_ORDER)) {
if (started)
stopped = true;
} else if (stopped) {
error(toString(isec) + ": SHF_LINK_ORDER sections in " + sec->name +
" are not contiguous");
} else {
started = true;
InputSection *link = isec->getLinkOrderDep();
if (!link->getParent())
error(toString(isec) + ": sh_link points to discarded section " +
toString(link));
scriptSections.push_back(&isec);
sections.push_back(isec);
InputSection *link = isec->getLinkOrderDep();
if (!link->getParent())
error(toString(isec) + ": sh_link points to discarded section " +
toString(link));
}
}
} else if (started) {
stopped = true;
}
}

View file

@ -29,7 +29,7 @@ class ValueLatticeElement {
/// producing instruction is dead. Caution: We use this as the starting
/// state in our local meet rules. In this usage, it's taken to mean
/// "nothing known yet".
undefined,
unknown,
/// This Value has a specific constant value. (For constant integers,
/// constantrange is used instead. Integer typed constantexprs can appear
@ -45,7 +45,12 @@ class ValueLatticeElement {
constantrange,
/// We can not precisely model the dynamic values this value might take.
overdefined
overdefined,
/// This Value is an UndefValue constant or produces undef. Undefined values
/// can be merged with constants (or single element constant ranges),
/// assuming all uses of the result will be replaced.
undef
};
ValueLatticeElementTy Tag;
@ -60,14 +65,15 @@ class ValueLatticeElement {
public:
// Const and Range are initialized on-demand.
ValueLatticeElement() : Tag(undefined) {}
ValueLatticeElement() : Tag(unknown) {}
/// Custom destructor to ensure Range is properly destroyed, when the object
/// is deallocated.
~ValueLatticeElement() {
switch (Tag) {
case overdefined:
case undefined:
case unknown:
case undef:
case constant:
case notconstant:
break;
@ -79,7 +85,7 @@ class ValueLatticeElement {
/// Custom copy constructor, to ensure Range gets initialized when
/// copying a constant range lattice element.
ValueLatticeElement(const ValueLatticeElement &Other) : Tag(undefined) {
ValueLatticeElement(const ValueLatticeElement &Other) : Tag(unknown) {
*this = Other;
}
@ -109,7 +115,8 @@ class ValueLatticeElement {
ConstVal = Other.ConstVal;
break;
case overdefined:
case undefined:
case unknown:
case undef:
break;
}
Tag = Other.Tag;
@ -118,14 +125,16 @@ class ValueLatticeElement {
static ValueLatticeElement get(Constant *C) {
ValueLatticeElement Res;
if (!isa<UndefValue>(C))
if (isa<UndefValue>(C))
Res.markUndef();
else
Res.markConstant(C);
return Res;
}
static ValueLatticeElement getNot(Constant *C) {
ValueLatticeElement Res;
if (!isa<UndefValue>(C))
Res.markNotConstant(C);
assert(!isa<UndefValue>(C) && "!= undef is not supported");
Res.markNotConstant(C);
return Res;
}
static ValueLatticeElement getRange(ConstantRange CR) {
@ -139,7 +148,10 @@ class ValueLatticeElement {
return Res;
}
bool isUndefined() const { return Tag == undefined; }
bool isUndef() const { return Tag == undef; }
bool isUnknown() const { return Tag == unknown; }
bool isUnknownOrUndef() const { return Tag == unknown || Tag == undef; }
bool isUndefined() const { return isUnknownOrUndef(); }
bool isConstant() const { return Tag == constant; }
bool isNotConstant() const { return Tag == notconstant; }
bool isConstantRange() const { return Tag == constantrange; }
@ -170,89 +182,123 @@ class ValueLatticeElement {
return None;
}
private:
void markOverdefined() {
bool markOverdefined() {
if (isOverdefined())
return;
return false;
if (isConstant() || isNotConstant())
ConstVal = nullptr;
if (isConstantRange())
Range.~ConstantRange();
Tag = overdefined;
return true;
}
void markConstant(Constant *V) {
assert(V && "Marking constant with NULL");
if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
markConstantRange(ConstantRange(CI->getValue()));
return;
}
if (isa<UndefValue>(V))
return;
bool markUndef() {
if (isUndef())
return false;
assert((!isConstant() || getConstant() == V) &&
"Marking constant with different value");
assert(isUndefined());
assert(isUnknown());
Tag = undef;
return true;
}
bool markConstant(Constant *V) {
if (isa<UndefValue>(V))
return markUndef();
if (isConstant()) {
assert(getConstant() == V && "Marking constant with different value");
return false;
}
if (ConstantInt *CI = dyn_cast<ConstantInt>(V))
return markConstantRange(ConstantRange(CI->getValue()));
assert(isUnknown() || isUndef());
Tag = constant;
ConstVal = V;
return true;
}
void markNotConstant(Constant *V) {
bool markNotConstant(Constant *V) {
assert(V && "Marking constant with NULL");
if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
markConstantRange(ConstantRange(CI->getValue() + 1, CI->getValue()));
return;
}
if (isa<UndefValue>(V))
return;
if (ConstantInt *CI = dyn_cast<ConstantInt>(V))
return markConstantRange(
ConstantRange(CI->getValue() + 1, CI->getValue()));
assert((!isConstant() || getConstant() != V) &&
"Marking constant !constant with same value");
assert((!isNotConstant() || getNotConstant() == V) &&
"Marking !constant with different value");
assert(isUndefined() || isConstant());
if (isa<UndefValue>(V))
return false;
if (isNotConstant()) {
assert(getNotConstant() == V && "Marking !constant with different value");
return false;
}
assert(isUnknown());
Tag = notconstant;
ConstVal = V;
return true;
}
void markConstantRange(ConstantRange NewR) {
/// Mark the object as constant range with \p NewR. If the object is already a
/// constant range, nothing changes if the existing range is equal to \p
/// NewR. Otherwise \p NewR must be a superset of the existing range or the
/// object must be undef.
bool markConstantRange(ConstantRange NewR) {
if (isConstantRange()) {
if (getConstantRange() == NewR)
return false;
if (NewR.isEmptySet())
markOverdefined();
else {
Range = std::move(NewR);
}
return;
return markOverdefined();
assert(NewR.contains(getConstantRange()) &&
"Existing range must be a subset of NewR");
Range = std::move(NewR);
return true;
}
assert(isUndefined());
assert(isUnknown() || isUndef());
if (NewR.isEmptySet())
markOverdefined();
else {
Tag = constantrange;
new (&Range) ConstantRange(std::move(NewR));
}
return markOverdefined();
Tag = constantrange;
new (&Range) ConstantRange(std::move(NewR));
return true;
}
public:
/// Updates this object to approximate both this object and RHS. Returns
/// true if this object has been changed.
bool mergeIn(const ValueLatticeElement &RHS, const DataLayout &DL) {
if (RHS.isUndefined() || isOverdefined())
if (RHS.isUnknown() || isOverdefined())
return false;
if (RHS.isOverdefined()) {
markOverdefined();
return true;
}
if (isUndefined()) {
if (isUndef()) {
assert(!RHS.isUnknown());
if (RHS.isUndef())
return false;
if (RHS.isConstant())
return markConstant(RHS.getConstant());
if (RHS.isConstantRange() && RHS.getConstantRange().isSingleElement())
return markConstantRange(RHS.getConstantRange());
return markOverdefined();
}
if (isUnknown()) {
assert(!RHS.isUnknown() && "Unknow RHS should be handled earlier");
*this = RHS;
return !RHS.isUndefined();
return true;
}
if (isConstant()) {
if (RHS.isConstant() && getConstant() == RHS.getConstant())
return false;
if (RHS.isUndef())
return false;
markOverdefined();
return true;
}
@ -265,6 +311,9 @@ class ValueLatticeElement {
}
assert(isConstantRange() && "New ValueLattice type?");
if (RHS.isUndef() && getConstantRange().isSingleElement())
return false;
if (!RHS.isConstantRange()) {
// We can get here if we've encountered a constantexpr of integer type
// and merge it with a constantrange.
@ -273,18 +322,11 @@ class ValueLatticeElement {
}
ConstantRange NewR = getConstantRange().unionWith(RHS.getConstantRange());
if (NewR.isFullSet())
markOverdefined();
return markOverdefined();
else if (NewR == getConstantRange())
return false;
else
markConstantRange(std::move(NewR));
return true;
}
ConstantInt *getConstantInt() const {
assert(isConstant() && isa<ConstantInt>(getConstant()) &&
"No integer constant");
return cast<ConstantInt>(getConstant());
return markConstantRange(std::move(NewR));
}
/// Compares this symbolic value with Other using Pred and returns either
@ -292,7 +334,7 @@ class ValueLatticeElement {
/// evaluated.
Constant *getCompare(CmpInst::Predicate Pred, Type *Ty,
const ValueLatticeElement &Other) const {
if (isUndefined() || Other.isUndefined())
if (isUnknownOrUndef() || Other.isUnknownOrUndef())
return UndefValue::get(Ty);
if (isConstant() && Other.isConstant())
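
To make the split concrete, a self-contained analogue of the new merge rules (a simplification for illustration only, not LLVM code; single-element constant ranges are folded into plain constants here):

#include <cassert>

enum class Tag { Unknown, Undef, Constant, Overdefined };

struct Lattice {
  Tag tag = Tag::Unknown;
  int value = 0;  // meaningful only when tag == Tag::Constant

  // Returns true if *this changed, mirroring mergeIn above.
  bool mergeIn(const Lattice &rhs) {
    if (rhs.tag == Tag::Unknown || tag == Tag::Overdefined)
      return false;
    if (rhs.tag == Tag::Overdefined) {
      tag = Tag::Overdefined;
      return true;
    }
    if (tag == Tag::Undef) {
      // undef can still be bent to a single constant, since all uses of the
      // result are assumed to be replaced.
      if (rhs.tag == Tag::Undef)
        return false;
      *this = rhs;
      return true;
    }
    if (tag == Tag::Unknown) {
      *this = rhs;
      return true;
    }
    // tag == Tag::Constant from here on.
    if (rhs.tag == Tag::Undef || (rhs.tag == Tag::Constant && rhs.value == value))
      return false;
    tag = Tag::Overdefined;
    return true;
  }
};

int main() {
  Lattice a;                          // unknown: identity for merges
  Lattice c{Tag::Constant, 7};
  assert(a.mergeIn(c) && a.tag == Tag::Constant);
  Lattice u{Tag::Undef};
  assert(!a.mergeIn(u));              // constant merged with undef stays constant
}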

View file

@ -71,6 +71,11 @@ class GlobalTypeTableBuilder : public TypeCollection {
template <typename CreateFunc>
TypeIndex insertRecordAs(GloballyHashedType Hash, size_t RecordSize,
CreateFunc Create) {
assert(RecordSize < UINT32_MAX && "Record too big");
assert(RecordSize % 4 == 0 &&
"RecordSize is not a multiple of 4 bytes which will cause "
"misalignment in the output TPI stream!");
auto Result = HashedRecords.try_emplace(Hash, nextTypeIndex());
if (LLVM_UNLIKELY(Result.second /*inserted*/ ||

View file

@ -96,9 +96,9 @@ static ValueLatticeElement intersect(const ValueLatticeElement &A,
const ValueLatticeElement &B) {
// Undefined is the strongest state. It means the value is known to be along
// an unreachable path.
if (A.isUndefined())
if (A.isUnknown())
return A;
if (B.isUndefined())
if (B.isUnknown())
return B;
// If we gave up for one, but got a useable fact from the other, use it.
@ -1203,7 +1203,7 @@ static ValueLatticeElement getValueFromICmpCondition(Value *Val, ICmpInst *ICI,
// false SETNE.
if (isTrueDest == (Predicate == ICmpInst::ICMP_EQ))
return ValueLatticeElement::get(cast<Constant>(RHS));
else
else if (!isa<UndefValue>(RHS))
return ValueLatticeElement::getNot(cast<Constant>(RHS));
}
}
@ -1722,7 +1722,7 @@ ConstantRange LazyValueInfo::getConstantRange(Value *V, BasicBlock *BB,
const DataLayout &DL = BB->getModule()->getDataLayout();
ValueLatticeElement Result =
getImpl(PImpl, AC, &DL, DT).getValueInBlock(V, BB, CxtI);
if (Result.isUndefined())
if (Result.isUnknown())
return ConstantRange::getEmpty(Width);
if (Result.isConstantRange())
return Result.getConstantRange();
@ -1761,7 +1761,7 @@ ConstantRange LazyValueInfo::getConstantRangeOnEdge(Value *V,
ValueLatticeElement Result =
getImpl(PImpl, AC, &DL, DT).getValueOnEdge(V, FromBB, ToBB, CxtI);
if (Result.isUndefined())
if (Result.isUnknown())
return ConstantRange::getEmpty(Width);
if (Result.isConstantRange())
return Result.getConstantRange();
@ -1991,7 +1991,7 @@ void LazyValueInfoAnnotatedWriter::emitBasicBlockStartAnnot(
for (auto &Arg : F->args()) {
ValueLatticeElement Result = LVIImpl->getValueInBlock(
const_cast<Argument *>(&Arg), const_cast<BasicBlock *>(BB));
if (Result.isUndefined())
if (Result.isUnknown())
continue;
OS << "; LatticeVal for: '" << Arg << "' is: " << Result << "\n";
}

View file

@ -10,8 +10,10 @@
namespace llvm {
raw_ostream &operator<<(raw_ostream &OS, const ValueLatticeElement &Val) {
if (Val.isUndefined())
return OS << "undefined";
if (Val.isUnknown())
return OS << "unknown";
if (Val.isUndef())
return OS << "undef";
if (Val.isOverdefined())
return OS << "overdefined";

View file

@ -269,30 +269,26 @@ MachineSinking::AllUsesDominatedByBlock(unsigned Reg,
// into and they are all PHI nodes. In this case, machine-sink must break
// the critical edge first. e.g.
//
// %bb.1: derived from LLVM BB %bb4.preheader
// %bb.1:
// Predecessors according to CFG: %bb.0
// ...
// %reg16385 = DEC64_32r %reg16437, implicit-def dead %eflags
// %def = DEC64_32r %x, implicit-def dead %eflags
// ...
// JE_4 <%bb.37>, implicit %eflags
// Successors according to CFG: %bb.37 %bb.2
//
// %bb.2: derived from LLVM BB %bb.nph
// Predecessors according to CFG: %bb.0 %bb.1
// %reg16386 = PHI %reg16434, %bb.0, %reg16385, %bb.1
BreakPHIEdge = true;
for (MachineOperand &MO : MRI->use_nodbg_operands(Reg)) {
MachineInstr *UseInst = MO.getParent();
unsigned OpNo = &MO - &UseInst->getOperand(0);
MachineBasicBlock *UseBlock = UseInst->getParent();
if (!(UseBlock == MBB && UseInst->isPHI() &&
UseInst->getOperand(OpNo+1).getMBB() == DefMBB)) {
BreakPHIEdge = false;
break;
}
}
if (BreakPHIEdge)
// %bb.2:
// %p = PHI %y, %bb.0, %def, %bb.1
if (llvm::all_of(MRI->use_nodbg_operands(Reg), [&](MachineOperand &MO) {
MachineInstr *UseInst = MO.getParent();
unsigned OpNo = UseInst->getOperandNo(&MO);
MachineBasicBlock *UseBlock = UseInst->getParent();
return UseBlock == MBB && UseInst->isPHI() &&
UseInst->getOperand(OpNo + 1).getMBB() == DefMBB;
})) {
BreakPHIEdge = true;
return true;
}
for (MachineOperand &MO : MRI->use_nodbg_operands(Reg)) {
// Determine the block of the use.

View file

@ -886,6 +886,13 @@ static bool isAnyConstantBuildVector(SDValue V, bool NoOpaques = false) {
ISD::isBuildVectorOfConstantFPSDNodes(V.getNode());
}
// Determine if this is an indexed load with an opaque target constant index.
static bool canSplitIdx(LoadSDNode *LD) {
return MaySplitLoadIndex &&
(LD->getOperand(2).getOpcode() != ISD::TargetConstant ||
!cast<ConstantSDNode>(LD->getOperand(2))->isOpaque());
}
bool DAGCombiner::reassociationCanBreakAddressingModePattern(unsigned Opc,
const SDLoc &DL,
SDValue N0,
@ -14222,11 +14229,11 @@ SDValue DAGCombiner::ForwardStoreValueToDirectLoad(LoadSDNode *LD) {
auto ReplaceLd = [&](LoadSDNode *LD, SDValue Val, SDValue Chain) -> SDValue {
if (LD->isIndexed()) {
bool IsSub = (LD->getAddressingMode() == ISD::PRE_DEC ||
LD->getAddressingMode() == ISD::POST_DEC);
unsigned Opc = IsSub ? ISD::SUB : ISD::ADD;
SDValue Idx = DAG.getNode(Opc, SDLoc(LD), LD->getOperand(1).getValueType(),
LD->getOperand(1), LD->getOperand(2));
// Cannot handle opaque target constants and we must respect the user's
// request not to split indexes from loads.
if (!canSplitIdx(LD))
return SDValue();
SDValue Idx = SplitIndexingFromLoad(LD);
SDValue Ops[] = {Val, Idx, Chain};
return CombineTo(LD, Ops, 3);
}
@ -14322,14 +14329,12 @@ SDValue DAGCombiner::visitLOAD(SDNode *N) {
// the indexing into an add/sub directly (that TargetConstant may not be
// valid for a different type of node, and we cannot convert an opaque
// target constant into a regular constant).
bool HasOTCInc = LD->getOperand(2).getOpcode() == ISD::TargetConstant &&
cast<ConstantSDNode>(LD->getOperand(2))->isOpaque();
bool CanSplitIdx = canSplitIdx(LD);
if (!N->hasAnyUseOfValue(0) &&
((MaySplitLoadIndex && !HasOTCInc) || !N->hasAnyUseOfValue(1))) {
if (!N->hasAnyUseOfValue(0) && (CanSplitIdx || !N->hasAnyUseOfValue(1))) {
SDValue Undef = DAG.getUNDEF(N->getValueType(0));
SDValue Index;
if (N->hasAnyUseOfValue(1) && MaySplitLoadIndex && !HasOTCInc) {
if (N->hasAnyUseOfValue(1) && CanSplitIdx) {
Index = SplitIndexingFromLoad(LD);
// Try to fold the base pointer arithmetic into subsequent loads and
// stores.

View file

@ -225,6 +225,21 @@ static bool isRegUsedByPhiNodes(unsigned DefReg,
return false;
}
static bool isTerminatingEHLabel(MachineBasicBlock *MBB, MachineInstr &MI) {
// Ignore non-EH labels.
if (!MI.isEHLabel())
return false;
// Any EH label outside a landing pad must be for an invoke. Consider it a
// terminator.
if (!MBB->isEHPad())
return true;
// If this is a landingpad, the first non-phi instruction will be an EH_LABEL.
// Don't consider that label to be a terminator.
return MI.getIterator() != MBB->getFirstNonPHI();
}
/// Build a map of instruction orders. Return the first terminator and its
/// order. Consider EH_LABEL instructions to be terminators as well, since local
/// values for phis after invokes must be materialized before the call.
@ -233,7 +248,7 @@ void FastISel::InstOrderMap::initialize(
unsigned Order = 0;
for (MachineInstr &I : *MBB) {
if (!FirstTerminator &&
(I.isTerminator() || (I.isEHLabel() && &I != &MBB->front()))) {
(I.isTerminator() || isTerminatingEHLabel(MBB, I))) {
FirstTerminator = &I;
FirstTerminatorOrder = Order;
}

View file

@ -90,7 +90,9 @@ static inline ArrayRef<uint8_t> stabilize(BumpPtrAllocator &Alloc,
TypeIndex MergingTypeTableBuilder::insertRecordAs(hash_code Hash,
ArrayRef<uint8_t> &Record) {
assert(Record.size() < UINT32_MAX && "Record too big");
assert(Record.size() % 4 == 0 && "Record is not aligned to 4 bytes!");
assert(Record.size() % 4 == 0 &&
"The type record size is not a multiple of 4 bytes which will cause "
"misalignment in the output TPI stream!");
LocallyHashedType WeakHash{Hash, Record};
auto Result = HashedRecords.try_emplace(WeakHash, nextTypeIndex());

View file

@ -360,16 +360,18 @@ Error TypeStreamMerger::remapType(const CVType &Type) {
[this, Type](MutableArrayRef<uint8_t> Storage) -> ArrayRef<uint8_t> {
return remapIndices(Type, Storage);
};
unsigned AlignedSize = alignTo(Type.RecordData.size(), 4);
if (LLVM_LIKELY(UseGlobalHashes)) {
GlobalTypeTableBuilder &Dest =
isIdRecord(Type.kind()) ? *DestGlobalIdStream : *DestGlobalTypeStream;
GloballyHashedType H = GlobalHashes[CurIndex.toArrayIndex()];
DestIdx = Dest.insertRecordAs(H, Type.RecordData.size(), DoSerialize);
DestIdx = Dest.insertRecordAs(H, AlignedSize, DoSerialize);
} else {
MergingTypeTableBuilder &Dest =
isIdRecord(Type.kind()) ? *DestIdStream : *DestTypeStream;
RemapStorage.resize(Type.RecordData.size());
RemapStorage.resize(AlignedSize);
ArrayRef<uint8_t> Result = DoSerialize(RemapStorage);
if (!Result.empty())
DestIdx = Dest.insertRecordBytes(Result);
@ -386,9 +388,15 @@ Error TypeStreamMerger::remapType(const CVType &Type) {
ArrayRef<uint8_t>
TypeStreamMerger::remapIndices(const CVType &OriginalType,
MutableArrayRef<uint8_t> Storage) {
unsigned Align = OriginalType.RecordData.size() & 3;
unsigned AlignedSize = alignTo(OriginalType.RecordData.size(), 4);
assert(Storage.size() == AlignedSize &&
"The storage buffer size is not a multiple of 4 bytes which will "
"cause misalignment in the output TPI stream!");
SmallVector<TiReference, 4> Refs;
discoverTypeIndices(OriginalType.RecordData, Refs);
if (Refs.empty())
if (Refs.empty() && Align == 0)
return OriginalType.RecordData;
::memcpy(Storage.data(), OriginalType.RecordData.data(),
@ -408,6 +416,16 @@ TypeStreamMerger::remapIndices(const CVType &OriginalType,
return {};
}
}
if (Align > 0) {
RecordPrefix *StorageHeader =
reinterpret_cast<RecordPrefix *>(Storage.data());
StorageHeader->RecordLen += 4 - Align;
DestContent = Storage.data() + OriginalType.RecordData.size();
for (; Align < 4; ++Align)
*DestContent++ = LF_PAD4 - Align;
}
return Storage;
}
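
A tiny standalone sketch of the padding arithmetic used above (assuming the CodeView encoding LF_PADn == 0xF0 + n, so LF_PAD4 is 0xF4):

#include <cstdio>

int main() {
  unsigned Size = 18;                       // record size, not a multiple of 4
  unsigned Align = Size & 3;                // 2
  unsigned AlignedSize = (Size + 3) & ~3u;  // 20
  std::printf("grow %u -> %u\n", Size, AlignedSize);
  for (; Align < 4; ++Align)
    std::printf("pad byte 0x%X\n", 0xF4 - Align);  // prints LF_PAD2, LF_PAD1
  return 0;
}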

View file

@ -44,6 +44,9 @@ void TpiStreamBuilder::setVersionHeader(PdbRaw_TpiVer Version) {
void TpiStreamBuilder::addTypeRecord(ArrayRef<uint8_t> Record,
Optional<uint32_t> Hash) {
// If we just crossed an 8KB threshold, add a type index offset.
assert(((Record.size() & 3) == 0) &&
"The type record's size is not a multiple of 4 bytes which will "
"cause misalignment in the output TPI stream!");
size_t NewSize = TypeRecordBytes + Record.size();
constexpr size_t EightKB = 8 * 1024;
if (NewSize / EightKB > TypeRecordBytes / EightKB || TypeRecords.empty()) {
@ -153,8 +156,11 @@ Error TpiStreamBuilder::commit(const msf::MSFLayout &Layout,
return EC;
for (auto Rec : TypeRecords) {
assert(!Rec.empty()); // An empty record will not write anything, but it
// would shift all offsets from here on.
assert(!Rec.empty() && "Attempting to write an empty type record shifts "
"all offsets in the TPI stream!");
assert(((Rec.size() & 3) == 0) &&
"The type record's size is not a multiple of 4 bytes which will "
"cause misalignment in the output TPI stream!");
if (auto EC = Writer.writeBytes(Rec))
return EC;
}

View file

@ -761,7 +761,6 @@ void MCObjectFileInfo::initWasmMCObjectFileInfo(const Triple &T) {
Ctx->getWasmSection(".debug_ranges", SectionKind::getMetadata());
DwarfMacinfoSection =
Ctx->getWasmSection(".debug_macinfo", SectionKind::getMetadata());
DwarfAddrSection = Ctx->getWasmSection(".debug_addr", SectionKind::getMetadata());
DwarfCUIndexSection = Ctx->getWasmSection(".debug_cu_index", SectionKind::getMetadata());
DwarfTUIndexSection = Ctx->getWasmSection(".debug_tu_index", SectionKind::getMetadata());
DwarfInfoSection =
@ -770,6 +769,17 @@ void MCObjectFileInfo::initWasmMCObjectFileInfo(const Triple &T) {
DwarfPubNamesSection = Ctx->getWasmSection(".debug_pubnames", SectionKind::getMetadata());
DwarfPubTypesSection = Ctx->getWasmSection(".debug_pubtypes", SectionKind::getMetadata());
DwarfDebugNamesSection =
Ctx->getWasmSection(".debug_names", SectionKind::getMetadata());
DwarfStrOffSection =
Ctx->getWasmSection(".debug_str_offsets", SectionKind::getMetadata());
DwarfAddrSection =
Ctx->getWasmSection(".debug_addr", SectionKind::getMetadata());
DwarfRnglistsSection =
Ctx->getWasmSection(".debug_rnglists", SectionKind::getMetadata());
DwarfLoclistsSection =
Ctx->getWasmSection(".debug_loclists", SectionKind::getMetadata());
// Wasm uses the data section for LSDA.
// TODO Consider putting each function's exception table in a separate
// section, as in -function-sections, to facilitate lld's --gc-sections.

View file

@ -4549,7 +4549,7 @@ SDValue ARMTargetLowering::LowerUnsignedALUO(SDValue Op,
static SDValue LowerSADDSUBSAT(SDValue Op, SelectionDAG &DAG,
const ARMSubtarget *Subtarget) {
EVT VT = Op.getValueType();
if (!Subtarget->hasDSP())
if (!Subtarget->hasV6Ops() || !Subtarget->hasDSP())
return SDValue();
if (!VT.isSimple())
return SDValue();

View file

@ -116,26 +116,22 @@ const MCFixupKindInfo &ARMAsmBackend::getFixupKindInfo(MCFixupKind Kind) const {
// ARMFixupKinds.h.
//
// Name Offset (bits) Size (bits) Flags
{"fixup_arm_ldst_pcrel_12", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
{"fixup_arm_ldst_pcrel_12", 0, 32, IsPCRelConstant},
{"fixup_t2_ldst_pcrel_12", 0, 32,
MCFixupKindInfo::FKF_IsPCRel |
MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
{"fixup_arm_pcrel_10_unscaled", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
{"fixup_arm_pcrel_10", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
IsPCRelConstant | MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
{"fixup_arm_pcrel_10_unscaled", 0, 32, IsPCRelConstant},
{"fixup_arm_pcrel_10", 0, 32, IsPCRelConstant},
{"fixup_t2_pcrel_10", 0, 32,
MCFixupKindInfo::FKF_IsPCRel |
MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
{"fixup_arm_pcrel_9", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
{"fixup_t2_pcrel_9", 0, 32,
MCFixupKindInfo::FKF_IsPCRel |
MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
IsPCRelConstant | MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
{"fixup_thumb_adr_pcrel_10", 8, 8,
MCFixupKindInfo::FKF_IsPCRel |
MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
{"fixup_arm_adr_pcrel_12", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
IsPCRelConstant | MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
{"fixup_arm_adr_pcrel_12", 0, 32, IsPCRelConstant},
{"fixup_t2_adr_pcrel_12", 0, 32,
MCFixupKindInfo::FKF_IsPCRel |
MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
IsPCRelConstant | MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
{"fixup_arm_condbranch", 8, 24, MCFixupKindInfo::FKF_IsPCRel},
{"fixup_arm_uncondbranch", 8, 24, MCFixupKindInfo::FKF_IsPCRel},
{"fixup_t2_condbranch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},

View file

@ -116,11 +116,22 @@ void BPFMISimplifyPatchable::checkADDrr(MachineRegisterInfo *MRI,
else
continue;
// It must be a form of %1 = *(type *)(%2 + 0) or *(type *)(%2 + 0) = %1.
// It must be a form of %2 = *(type *)(%1 + 0) or *(type *)(%1 + 0) = %2.
const MachineOperand &ImmOp = DefInst->getOperand(2);
if (!ImmOp.isImm() || ImmOp.getImm() != 0)
continue;
// Reject the form:
// %1 = ADD_rr %2, %3
// *(type *)(%2 + 0) = %1
if (Opcode == BPF::STB || Opcode == BPF::STH || Opcode == BPF::STW ||
Opcode == BPF::STD || Opcode == BPF::STB32 || Opcode == BPF::STH32 ||
Opcode == BPF::STW32) {
const MachineOperand &Opnd = DefInst->getOperand(0);
if (Opnd.isReg() && Opnd.getReg() == I->getReg())
continue;
}
BuildMI(*DefInst->getParent(), *DefInst, DefInst->getDebugLoc(), TII->get(COREOp))
.add(DefInst->getOperand(0)).addImm(Opcode).add(*BaseOp)
.addGlobalAddress(GVal);

View file

@ -7720,15 +7720,17 @@ void PPCTargetLowering::LowerFP_TO_INTForReuse(SDValue Op, ReuseLoadInfo &RLI,
// Emit a store to the stack slot.
SDValue Chain;
unsigned Alignment = DAG.getEVTAlignment(Tmp.getValueType());
if (i32Stack) {
MachineFunction &MF = DAG.getMachineFunction();
Alignment = 4;
MachineMemOperand *MMO =
MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, 4);
MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, Alignment);
SDValue Ops[] = { DAG.getEntryNode(), Tmp, FIPtr };
Chain = DAG.getMemIntrinsicNode(PPCISD::STFIWX, dl,
DAG.getVTList(MVT::Other), Ops, MVT::i32, MMO);
} else
Chain = DAG.getStore(DAG.getEntryNode(), dl, Tmp, FIPtr, MPI);
Chain = DAG.getStore(DAG.getEntryNode(), dl, Tmp, FIPtr, MPI, Alignment);
// Result is a load from the stack slot. If loading 4 bytes, make sure to
// add in a bias on big endian.
@ -7741,6 +7743,7 @@ void PPCTargetLowering::LowerFP_TO_INTForReuse(SDValue Op, ReuseLoadInfo &RLI,
RLI.Chain = Chain;
RLI.Ptr = FIPtr;
RLI.MPI = MPI;
RLI.Alignment = Alignment;
}
/// Custom lowers floating point to integer conversions to use
@ -7848,9 +7851,10 @@ bool PPCTargetLowering::canReuseLoadAddress(SDValue Op, EVT MemVT,
SelectionDAG &DAG,
ISD::LoadExtType ET) const {
SDLoc dl(Op);
bool ValidFPToUint = Op.getOpcode() == ISD::FP_TO_UINT &&
(Subtarget.hasFPCVT() || Op.getValueType() == MVT::i32);
if (ET == ISD::NON_EXTLOAD &&
(Op.getOpcode() == ISD::FP_TO_UINT ||
Op.getOpcode() == ISD::FP_TO_SINT) &&
(ValidFPToUint || Op.getOpcode() == ISD::FP_TO_SINT) &&
isOperationLegalOrCustom(Op.getOpcode(),
Op.getOperand(0).getValueType())) {
@ -13588,7 +13592,7 @@ SDValue PPCTargetLowering::combineStoreFPToInt(SDNode *N,
(Op1VT == MVT::i32 || Op1VT == MVT::i64 ||
(Subtarget.hasP9Vector() && (Op1VT == MVT::i16 || Op1VT == MVT::i8)));
if (ResVT == MVT::ppcf128 || !Subtarget.hasP8Altivec() ||
if (ResVT == MVT::ppcf128 || !Subtarget.hasP8Vector() ||
cast<StoreSDNode>(N)->isTruncatingStore() || !ValidTypeForStoreFltAsInt)
return SDValue();

View file

@ -33998,6 +33998,7 @@ static SDValue combineX86ShufflesConstants(ArrayRef<SDValue> Ops,
return SDValue();
// Shuffle the constant bits according to the mask.
SDLoc DL(Root);
APInt UndefElts(NumMaskElts, 0);
APInt ZeroElts(NumMaskElts, 0);
APInt ConstantElts(NumMaskElts, 0);
@ -34035,6 +34036,10 @@ static SDValue combineX86ShufflesConstants(ArrayRef<SDValue> Ops,
}
assert((UndefElts | ZeroElts | ConstantElts).isAllOnesValue());
// Attempt to create a zero vector.
if ((UndefElts | ZeroElts).isAllOnesValue())
return getZeroVector(Root.getSimpleValueType(), Subtarget, DAG, DL);
// Create the constant data.
MVT MaskSVT;
if (VT.isFloatingPoint() && (MaskSizeInBits == 32 || MaskSizeInBits == 64))
@ -34043,8 +34048,9 @@ static SDValue combineX86ShufflesConstants(ArrayRef<SDValue> Ops,
MaskSVT = MVT::getIntegerVT(MaskSizeInBits);
MVT MaskVT = MVT::getVectorVT(MaskSVT, NumMaskElts);
if (!DAG.getTargetLoweringInfo().isTypeLegal(MaskVT))
return SDValue();
SDLoc DL(Root);
SDValue CstOp = getConstVector(ConstantBitData, UndefElts, MaskVT, DAG, DL);
return DAG.getBitcast(VT, CstOp);
}

View file

@ -18,6 +18,7 @@
#include "X86.h"
#include "X86InstrInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
@ -102,7 +103,16 @@ bool X86IndirectBranchTrackingPass::runOnMachineFunction(MachineFunction &MF) {
// Check that the cf-protection-branch is enabled.
Metadata *isCFProtectionSupported =
MF.getMMI().getModule()->getModuleFlag("cf-protection-branch");
if (!isCFProtectionSupported && !IndirectBranchTracking)
// NB: We need to enable IBT in jitted code if the JIT compiler is
// CET-enabled.
const X86TargetMachine *TM =
static_cast<const X86TargetMachine *>(&MF.getTarget());
#ifdef __CET__
bool isJITwithCET = TM->isJIT();
#else
bool isJITwithCET = false;
#endif
if (!isCFProtectionSupported && !IndirectBranchTracking && !isJITwithCET)
return false;
// True if the current MF was changed and false otherwise.
@ -111,10 +121,11 @@ bool X86IndirectBranchTrackingPass::runOnMachineFunction(MachineFunction &MF) {
TII = SubTarget.getInstrInfo();
EndbrOpcode = SubTarget.is64Bit() ? X86::ENDBR64 : X86::ENDBR32;
// Non-internal function or function whose address was taken, can be
// accessed through indirect calls. Mark the first BB with ENDBR instruction
// unless nocf_check attribute is used.
if ((MF.getFunction().hasAddressTaken() ||
// Large code model, non-internal function or function whose address
// was taken, can be accessed through indirect calls. Mark the first
// BB with ENDBR instruction unless nocf_check attribute is used.
if ((TM->getCodeModel() == CodeModel::Large ||
MF.getFunction().hasAddressTaken() ||
!MF.getFunction().hasLocalLinkage()) &&
!MF.getFunction().doesNoCfCheck()) {
auto MBB = MF.begin();
@ -127,11 +138,18 @@ bool X86IndirectBranchTrackingPass::runOnMachineFunction(MachineFunction &MF) {
if (MBB.hasAddressTaken())
Changed |= addENDBR(MBB, MBB.begin());
// Exception handling may indirectly jump to a catch pad, so we should add
// ENDBR before catch pad instructions.
bool EHPadIBTNeeded = MBB.isEHPad();
for (MachineBasicBlock::iterator I = MBB.begin(); I != MBB.end(); ++I) {
if (!I->isCall())
continue;
if (IsCallReturnTwice(I->getOperand(0)))
if (I->isCall() && IsCallReturnTwice(I->getOperand(0)))
Changed |= addENDBR(MBB, std::next(I));
if (EHPadIBTNeeded && I->isEHLabel()) {
Changed |= addENDBR(MBB, std::next(I));
EHPadIBTNeeded = false;
}
}
}
return Changed;
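
A rough illustration of the entry points that receive an ENDBR (hedged; exact codegen depends on -fcf-protection=branch being enabled and on the code model):

// On x86-64 with -fcf-protection=branch, 'callee' gets an endbr64 at entry
// because its address escapes; with the large code model, or in a
// CET-enabled JIT after this change, functions are marked regardless.
static void callee() {}
void (*fp)() = callee;

int main() {
  fp();  // indirect call must land on an ENDBR instruction
  return 0;
}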

View file

@ -222,7 +222,7 @@ X86TargetMachine::X86TargetMachine(const Target &T, const Triple &TT,
getEffectiveRelocModel(TT, JIT, RM),
getEffectiveX86CodeModel(CM, JIT, TT.getArch() == Triple::x86_64),
OL),
TLOF(createTLOF(getTargetTriple())) {
TLOF(createTLOF(getTargetTriple())), IsJIT(JIT) {
// On PS4, the "return address" of a 'noreturn' call must still be within
// the calling function, and TrapUnreachable is an easy way to get that.
if (TT.isPS4() || TT.isOSBinFormatMachO()) {

View file

@ -30,6 +30,8 @@ class TargetTransformInfo;
class X86TargetMachine final : public LLVMTargetMachine {
std::unique_ptr<TargetLoweringObjectFile> TLOF;
mutable StringMap<std::unique_ptr<X86Subtarget>> SubtargetMap;
// True if this is used in JIT.
bool IsJIT;
public:
X86TargetMachine(const Target &T, const Triple &TT, StringRef CPU,
@ -52,6 +54,8 @@ class X86TargetMachine final : public LLVMTargetMachine {
TargetLoweringObjectFile *getObjFileLowering() const override {
return TLOF.get();
}
bool isJIT() const { return IsJIT; }
};
} // end namespace llvm

View file

@ -1155,7 +1155,10 @@ static void simplifySuspendPoints(coro::Shape &Shape) {
if (N == 0)
return;
while (true) {
if (simplifySuspendPoint(cast<CoroSuspendInst>(S[I]), Shape.CoroBegin)) {
auto SI = cast<CoroSuspendInst>(S[I]);
// Leave final.suspend to handleFinalSuspend since it is undefined behavior
// to resume a coroutine suspended at the final suspend point.
if (!SI->isFinal() && simplifySuspendPoint(SI, Shape.CoroBegin)) {
if (--N == I)
break;
std::swap(S[I], S[N]);
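
A minimal coroutine sketch of the "final suspend point" the comment refers to (assumes C++20 <coroutine>; this era of Clang used <experimental/coroutine>, so treat the header as an assumption):

#include <coroutine>

struct task {
  struct promise_type {
    task get_return_object() { return {}; }
    std::suspend_never initial_suspend() { return {}; }
    // Resuming a coroutine stopped here is undefined behavior, which is why
    // the simplification above must leave the final suspend point alone.
    std::suspend_always final_suspend() noexcept { return {}; }
    void return_void() {}
    void unhandled_exception() {}
  };
};

task coro() { co_return; }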

View file

@ -450,14 +450,19 @@ static bool CanDoGlobalSRA(GlobalVariable *GV) {
/// Copy over the debug info for a variable to its SRA replacements.
static void transferSRADebugInfo(GlobalVariable *GV, GlobalVariable *NGV,
uint64_t FragmentOffsetInBits,
uint64_t FragmentSizeInBits,
unsigned NumElements) {
uint64_t FragmentSizeInBits) {
SmallVector<DIGlobalVariableExpression *, 1> GVs;
GV->getDebugInfo(GVs);
for (auto *GVE : GVs) {
DIVariable *Var = GVE->getVariable();
Optional<uint64_t> VarSize = Var->getSizeInBits();
DIExpression *Expr = GVE->getExpression();
if (NumElements > 1) {
// If the FragmentSize is smaller than the variable, emit a fragment
// expression. If the variable size is unknown, a fragment must be emitted
// to be safe.
if (!VarSize || FragmentSizeInBits < *VarSize) {
if (auto E = DIExpression::createFragmentExpression(
Expr, FragmentOffsetInBits, FragmentSizeInBits))
Expr = *E;
@ -539,8 +544,7 @@ static GlobalVariable *SRAGlobal(GlobalVariable *GV, const DataLayout &DL) {
// Copy over the debug info for the variable.
uint64_t Size = DL.getTypeAllocSizeInBits(NGV->getValueType());
uint64_t FragmentOffsetInBits = Layout.getElementOffsetInBits(ElementIdx);
transferSRADebugInfo(GV, NGV, FragmentOffsetInBits, Size,
STy->getNumElements());
transferSRADebugInfo(GV, NGV, FragmentOffsetInBits, Size);
} else if (SequentialType *STy = dyn_cast<SequentialType>(Ty)) {
uint64_t EltSize = DL.getTypeAllocSize(ElTy);
Align EltAlign(DL.getABITypeAlignment(ElTy));
@ -553,7 +557,7 @@ static GlobalVariable *SRAGlobal(GlobalVariable *GV, const DataLayout &DL) {
if (NewAlign > EltAlign)
NGV->setAlignment(NewAlign);
transferSRADebugInfo(GV, NGV, FragmentSizeInBits * ElementIdx,
FragmentSizeInBits, STy->getNumElements());
FragmentSizeInBits);
}
}
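
The two call sites above now pass only the fragment size; the decision to emit a DW_OP_LLVM_fragment moved into the helper. A standalone restatement of the new predicate, with std::optional standing in for llvm::Optional:

#include <cstdint>
#include <optional>

// Emit a fragment when the piece is strictly smaller than the variable, or
// when the variable's size is unknown and a fragment is required to be safe.
bool needsFragmentExpression(std::optional<uint64_t> VarSizeInBits,
                             uint64_t FragmentSizeInBits) {
  return !VarSizeInBits || FragmentSizeInBits < *VarSizeInBits;
}

int main() {
  bool A = needsFragmentExpression(64, 32);           // struct half: fragment
  bool B = needsFragmentExpression(32, 32);           // whole variable: none
  bool C = needsFragmentExpression(std::nullopt, 32); // unknown size: fragment
  return (A && !B && C) ? 0 : 1;
}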

View file

@ -115,7 +115,8 @@ class GCOVProfiler {
// list.
Function *
insertCounterWriteout(ArrayRef<std::pair<GlobalVariable *, MDNode *>>);
Function *insertFlush(ArrayRef<std::pair<GlobalVariable *, MDNode *>>);
Function *insertReset(ArrayRef<std::pair<GlobalVariable *, MDNode *>>);
Function *insertFlush(Function *ResetF);
void AddFlushBeforeForkAndExec();
@ -630,35 +631,76 @@ static bool shouldKeepInEntry(BasicBlock::iterator It) {
}
void GCOVProfiler::AddFlushBeforeForkAndExec() {
SmallVector<Instruction *, 2> ForkAndExecs;
SmallVector<CallInst *, 2> Forks;
SmallVector<CallInst *, 2> Execs;
for (auto &F : M->functions()) {
auto *TLI = &GetTLI(F);
for (auto &I : instructions(F)) {
if (CallInst *CI = dyn_cast<CallInst>(&I)) {
if (Function *Callee = CI->getCalledFunction()) {
LibFunc LF;
if (TLI->getLibFunc(*Callee, LF) &&
(LF == LibFunc_fork || LF == LibFunc_execl ||
LF == LibFunc_execle || LF == LibFunc_execlp ||
LF == LibFunc_execv || LF == LibFunc_execvp ||
LF == LibFunc_execve || LF == LibFunc_execvpe ||
LF == LibFunc_execvP)) {
ForkAndExecs.push_back(&I);
if (TLI->getLibFunc(*Callee, LF)) {
if (LF == LibFunc_fork) {
#if !defined(_WIN32)
Forks.push_back(CI);
#endif
} else if (LF == LibFunc_execl || LF == LibFunc_execle ||
LF == LibFunc_execlp || LF == LibFunc_execv ||
LF == LibFunc_execvp || LF == LibFunc_execve ||
LF == LibFunc_execvpe || LF == LibFunc_execvP) {
Execs.push_back(CI);
}
}
}
}
}
}
// We need to split the block after the fork/exec call; otherwise the
// counters for the lines after the call would be the same as the ones
// before it.
for (auto I : ForkAndExecs) {
IRBuilder<> Builder(I);
for (auto F : Forks) {
IRBuilder<> Builder(F);
BasicBlock *Parent = F->getParent();
auto NextInst = ++F->getIterator();
// We have a fork, so just reset the counters in the child process.
FunctionType *FTy = FunctionType::get(Builder.getInt32Ty(), {}, false);
FunctionCallee GCOVFork = M->getOrInsertFunction("__gcov_fork", FTy);
F->setCalledFunction(GCOVFork);
// We split just after the fork so the lines after it get their own counter.
// There is a known bug, however:
// void foo() { fork(); }
// void bar() { foo(); blah(); }
// Here "blah();" is executed twice but reported once, because "blah()"
// belongs to the same basic block as "foo();".
Parent->splitBasicBlock(NextInst);
// back() is a br instruction carrying the same debug location as the
// instruction after the fork, so change it to avoid attaching the same
// location to two blocks.
DebugLoc Loc = F->getDebugLoc();
Parent->back().setDebugLoc(Loc);
}
for (auto E : Execs) {
IRBuilder<> Builder(E);
BasicBlock *Parent = E->getParent();
auto NextInst = ++E->getIterator();
// Since the process is replaced by a new one, we need to write out the
// gcda files. There is no need to reset the counters, since they will be
// lost after the exec**.
FunctionType *FTy = FunctionType::get(Builder.getVoidTy(), {}, false);
FunctionCallee GCOVFlush = M->getOrInsertFunction("__gcov_flush", FTy);
Builder.CreateCall(GCOVFlush);
I->getParent()->splitBasicBlock(I);
FunctionCallee WriteoutF =
M->getOrInsertFunction("llvm_writeout_files", FTy);
Builder.CreateCall(WriteoutF);
DebugLoc Loc = E->getDebugLoc();
Builder.SetInsertPoint(&*NextInst);
// If the exec** fails, we must reset the counters, since they have
// already been dumped.
FunctionCallee ResetF = M->getOrInsertFunction("llvm_reset_counters", FTy);
Builder.CreateCall(ResetF)->setDebugLoc(Loc);
Parent->splitBasicBlock(NextInst);
Parent->back().setDebugLoc(Loc);
}
}
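
At the source level, the instrumented program now behaves as sketched below (POSIX assumed; __gcov_fork, llvm_writeout_files and llvm_reset_counters are the runtime hooks named in the pass):

#include <unistd.h>

int main() {
  // Built with --coverage, this call is rewritten into __gcov_fork(),
  // which forks and then zeroes the child's counters so edges executed
  // before the fork are not counted twice.
  pid_t Child = fork();
  if (Child == 0) {
    // Before an exec* the pass inserts llvm_writeout_files() (the image,
    // and with it the counters, is about to be replaced), and right after
    // it llvm_reset_counters(), which only runs if the exec fails.
    execl("/bin/true", "true", (char *)nullptr);
    _exit(127); // exec failed; the counters were already reset above
  }
  return 0;
}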
@ -850,7 +892,8 @@ bool GCOVProfiler::emitProfileArcs() {
}
Function *WriteoutF = insertCounterWriteout(CountersBySP);
Function *FlushF = insertFlush(CountersBySP);
Function *ResetF = insertReset(CountersBySP);
Function *FlushF = insertFlush(ResetF);
// Create a small bit of code that registers the "__llvm_gcov_writeout" to
// be executed at exit and the "__llvm_gcov_flush" function to be executed
@ -868,16 +911,14 @@ bool GCOVProfiler::emitProfileArcs() {
IRBuilder<> Builder(BB);
FTy = FunctionType::get(Type::getVoidTy(*Ctx), false);
Type *Params[] = {
PointerType::get(FTy, 0),
PointerType::get(FTy, 0)
};
Type *Params[] = {PointerType::get(FTy, 0), PointerType::get(FTy, 0),
PointerType::get(FTy, 0)};
FTy = FunctionType::get(Builder.getVoidTy(), Params, false);
// Initialize the environment and register the local writeout and flush
// functions.
// Initialize the environment and register the local writeout, flush and
// reset functions.
FunctionCallee GCOVInit = M->getOrInsertFunction("llvm_gcov_init", FTy);
Builder.CreateCall(GCOVInit, {WriteoutF, FlushF});
Builder.CreateCall(GCOVInit, {WriteoutF, FlushF, ResetF});
Builder.CreateRetVoid();
appendToGlobalCtors(*M, F, 0);
@ -1190,8 +1231,43 @@ Function *GCOVProfiler::insertCounterWriteout(
return WriteoutF;
}
Function *GCOVProfiler::
insertFlush(ArrayRef<std::pair<GlobalVariable*, MDNode*> > CountersBySP) {
Function *GCOVProfiler::insertReset(
ArrayRef<std::pair<GlobalVariable *, MDNode *>> CountersBySP) {
FunctionType *FTy = FunctionType::get(Type::getVoidTy(*Ctx), false);
Function *ResetF = M->getFunction("__llvm_gcov_reset");
if (!ResetF)
ResetF = Function::Create(FTy, GlobalValue::InternalLinkage,
"__llvm_gcov_reset", M);
else
ResetF->setLinkage(GlobalValue::InternalLinkage);
ResetF->setUnnamedAddr(GlobalValue::UnnamedAddr::Global);
ResetF->addFnAttr(Attribute::NoInline);
if (Options.NoRedZone)
ResetF->addFnAttr(Attribute::NoRedZone);
BasicBlock *Entry = BasicBlock::Create(*Ctx, "entry", ResetF);
IRBuilder<> Builder(Entry);
// Zero out the counters.
for (const auto &I : CountersBySP) {
GlobalVariable *GV = I.first;
Constant *Null = Constant::getNullValue(GV->getValueType());
Builder.CreateStore(Null, GV);
}
Type *RetTy = ResetF->getReturnType();
if (RetTy->isVoidTy())
Builder.CreateRetVoid();
else if (RetTy->isIntegerTy())
// Used if __llvm_gcov_reset was implicitly declared.
Builder.CreateRet(ConstantInt::get(RetTy, 0));
else
report_fatal_error("invalid return type for __llvm_gcov_reset");
return ResetF;
}
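
__llvm_gcov_flush (below) now reduces to a writeout followed by a call to this reset function. In C++ terms, the generated __llvm_gcov_reset amounts to one zero-store per counter global (illustrative array names; the real globals come from emitProfileArcs):

#include <cstring>

// Stand-ins for two per-function counter arrays.
static long long CountersFoo[4];
static long long CountersBar[8];

// Rough equivalent of the IR built above: store a null value over every
// counter global, then return.
static void gcovResetEquivalent() {
  std::memset(CountersFoo, 0, sizeof CountersFoo);
  std::memset(CountersBar, 0, sizeof CountersBar);
}

int main() { gcovResetEquivalent(); return 0; }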
Function *GCOVProfiler::insertFlush(Function *ResetF) {
FunctionType *FTy = FunctionType::get(Type::getVoidTy(*Ctx), false);
Function *FlushF = M->getFunction("__llvm_gcov_flush");
if (!FlushF)
@ -1212,16 +1288,10 @@ insertFlush(ArrayRef<std::pair<GlobalVariable*, MDNode*> > CountersBySP) {
IRBuilder<> Builder(Entry);
Builder.CreateCall(WriteoutF, {});
// Zero out the counters.
for (const auto &I : CountersBySP) {
GlobalVariable *GV = I.first;
Constant *Null = Constant::getNullValue(GV->getValueType());
Builder.CreateStore(Null, GV);
}
Builder.CreateCall(ResetF, {});
Type *RetTy = FlushF->getReturnType();
if (RetTy == Type::getVoidTy(*Ctx))
if (RetTy->isVoidTy())
Builder.CreateRetVoid();
else if (RetTy->isIntegerTy())
// Used if __llvm_gcov_flush was implicitly declared.

View file

@ -104,6 +104,21 @@ static bool mergeEmptyReturnBlocks(Function &F) {
continue;
}
// Skip merging if this would result in a CallBr instruction with a
// duplicate destination. FIXME: See note in CodeGenPrepare.cpp.
bool SkipCallBr = false;
for (pred_iterator PI = pred_begin(&BB), E = pred_end(&BB);
PI != E && !SkipCallBr; ++PI) {
if (auto *CBI = dyn_cast<CallBrInst>((*PI)->getTerminator()))
for (unsigned i = 0, e = CBI->getNumSuccessors(); i != e; ++i)
if (RetBlock == CBI->getSuccessor(i)) {
SkipCallBr = true;
break;
}
}
if (SkipCallBr)
continue;
// Otherwise, we found a duplicate return block. Merge the two.
Changed = true;
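
A C++-level reproducer of the case the guard skips (asm goto lowers to callbr; illustrative only):

// Both blocks end in an empty return. Folding them into a single return
// block would leave the callbr generated for "asm goto" with a duplicate
// destination, which the backend cannot yet handle (see the FIXME above).
void f() {
  asm goto("" : : : : out);
  return;
out:
  return;
}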

View file

@ -2254,27 +2254,28 @@ Error BinaryWriter::finalize() {
OrderedSegments.erase(End, std::end(OrderedSegments));
// Compute the section LMA based on its sh_offset and the containing segment's
// p_offset and p_paddr. Also compute the minimum LMA of all sections as
// MinAddr. In the output, the contents between address 0 and MinAddr will be
// skipped.
// p_offset and p_paddr. Also compute the minimum LMA of all non-empty
// sections as MinAddr. In the output, the contents between address 0 and
// MinAddr will be skipped.
uint64_t MinAddr = UINT64_MAX;
for (SectionBase &Sec : Obj.allocSections()) {
if (Sec.ParentSegment != nullptr)
Sec.Addr =
Sec.Offset - Sec.ParentSegment->Offset + Sec.ParentSegment->PAddr;
MinAddr = std::min(MinAddr, Sec.Addr);
if (Sec.Size > 0)
MinAddr = std::min(MinAddr, Sec.Addr);
}
// Now that every section has been laid out we just need to compute the total
// file size. This might not be the same as the offset returned by
// layoutSections, because we want to truncate the last segment to the end of
// its last section, to match GNU objcopy's behaviour.
// its last non-empty section, to match GNU objcopy's behaviour.
TotalSize = 0;
for (SectionBase &Sec : Obj.allocSections()) {
Sec.Offset = Sec.Addr - MinAddr;
if (Sec.Type != SHT_NOBITS)
for (SectionBase &Sec : Obj.allocSections())
if (Sec.Type != SHT_NOBITS && Sec.Size > 0) {
Sec.Offset = Sec.Addr - MinAddr;
TotalSize = std::max(TotalSize, Sec.Offset + Sec.Size);
}
}
if (Error E = Buf.allocate(TotalSize))
return E;
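
A self-contained sketch of the new MinAddr rule: zero-size sections no longer pull the minimum down, so an empty section at address 0 cannot force the image to begin at 0.

#include <algorithm>
#include <cstdint>
#include <vector>

struct Sec { uint64_t Addr, Size; };

uint64_t minAddr(const std::vector<Sec> &Secs) {
  uint64_t MinAddr = UINT64_MAX;
  for (const Sec &S : Secs)
    if (S.Size > 0) // empty sections are ignored, as above
      MinAddr = std::min(MinAddr, S.Addr);
  return MinAddr;
}

int main() {
  // An empty section at 0 plus a loadable section at 0x1000: the output
  // now starts at 0x1000 instead of being padded from address 0.
  return minAddr({{0x0, 0}, {0x1000, 0x200}}) == 0x1000 ? 0 : 1;
}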

View file

@ -322,11 +322,25 @@ enum class ToolType { Objcopy, Strip, InstallNameTool };
int main(int argc, char **argv) {
InitLLVM X(argc, argv);
ToolName = argv[0];
ToolType Tool = StringSwitch<ToolType>(sys::path::stem(ToolName))
.EndsWith("strip", ToolType::Strip)
.EndsWith("install-name-tool", ToolType::InstallNameTool)
.EndsWith("install_name_tool", ToolType::InstallNameTool)
.Default(ToolType::Objcopy);
StringRef Stem = sys::path::stem(ToolName);
auto Is = [=](StringRef Tool) {
// We need to recognize the following filenames:
//
// llvm-objcopy -> objcopy
// strip-10.exe -> strip
// powerpc64-unknown-freebsd13-objcopy -> objcopy
// llvm-install-name-tool -> install-name-tool
auto I = Stem.rfind_lower(Tool);
return I != StringRef::npos &&
(I + Tool.size() == Stem.size() || !isAlnum(Stem[I + Tool.size()]));
};
ToolType Tool = ToolType::Objcopy;
if (Is("strip"))
Tool = ToolType::Strip;
else if (Is("install-name-tool") || Is("install_name_tool"))
Tool = ToolType::InstallNameTool;
// Expand response files.
// TODO: Move these lines, which are copied from lib/Support/CommandLine.cpp,
// into a separate function in the CommandLine library and call that function
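
For illustration, a portable re-implementation of the Is() lambda above (std::string stands in for StringRef; a manual lowercase pass replaces rfind_lower; Tool is assumed to already be lowercase):

#include <algorithm>
#include <cctype>
#include <string>

static bool isTool(std::string Stem, const std::string &Tool) {
  std::transform(Stem.begin(), Stem.end(), Stem.begin(), [](unsigned char C) {
    return static_cast<char>(std::tolower(C));
  });
  std::size_t I = Stem.rfind(Tool);
  // The match must end the stem or be followed by a non-alphanumeric
  // character, so "strip-10" matches "strip" but "stripe" does not.
  return I != std::string::npos &&
         (I + Tool.size() == Stem.size() ||
          !std::isalnum(static_cast<unsigned char>(Stem[I + Tool.size()])));
}

int main() {
  bool OK = isTool("llvm-objcopy", "objcopy") && isTool("strip-10", "strip") &&
            isTool("powerpc64-unknown-freebsd13-objcopy", "objcopy") &&
            !isTool("stripe", "strip");
  return OK ? 0 : 1;
}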