Merge llvm-project release/17.x llvmorg-17.0.0-rc4-10-g0176e8729ea4

This updates llvm, clang, compiler-rt, libc++, libunwind, lld, lldb and
openmp to llvmorg-17.0.0-rc4-10-g0176e8729ea4.

PR:		273753
MFC after:	1 month
This commit is contained in:
Dimitry Andric 2023-09-11 20:37:24 +02:00
commit 8a4dda33d6
249 changed files with 3391 additions and 2246 deletions

View file

@@ -1702,7 +1702,7 @@ class DeclContext {
};
/// Number of non-inherited bits in FunctionDeclBitfields.
enum { NumFunctionDeclBits = 30 };
enum { NumFunctionDeclBits = 31 };
/// Stores the bits used by CXXConstructorDecl. If modified
/// NumCXXConstructorDeclBits and the accessor
@@ -1714,12 +1714,12 @@ class DeclContext {
/// For the bits in FunctionDeclBitfields.
uint64_t : NumFunctionDeclBits;
/// 21 bits to fit in the remaining available space.
/// 20 bits to fit in the remaining available space.
/// Note that this makes CXXConstructorDeclBitfields take
/// exactly 64 bits and thus the width of NumCtorInitializers
/// will need to be shrunk if some bit is added to NumDeclContextBitfields,
/// NumFunctionDeclBitfields or CXXConstructorDeclBitfields.
uint64_t NumCtorInitializers : 18;
uint64_t NumCtorInitializers : 17;
uint64_t IsInheritingConstructor : 1;
/// Whether this constructor has a trail-allocated explicit specifier.

View file

@@ -14,20 +14,21 @@
#ifndef LLVM_CLANG_AST_EXPRCONCEPTS_H
#define LLVM_CLANG_AST_EXPRCONCEPTS_H
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTConcept.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/Expr.h"
#include "clang/AST/NestedNameSpecifier.h"
#include "clang/AST/TemplateBase.h"
#include "clang/AST/Type.h"
#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/STLFunctionalExtras.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TrailingObjects.h"
#include <utility>
#include <string>
#include <utility>
namespace clang {
class ASTStmtReader;
@@ -467,6 +468,13 @@ class NestedRequirement : public Requirement {
}
};
using EntityPrinter = llvm::function_ref<void(llvm::raw_ostream &)>;
/// \brief create a Requirement::SubstitutionDiagnostic with only a
/// SubstitutedEntity and DiagLoc using Sema's allocator.
Requirement::SubstitutionDiagnostic *
createSubstDiagAt(Sema &S, SourceLocation Location, EntityPrinter Printer);
} // namespace concepts
/// C++2a [expr.prim.req]:

View file

@@ -165,7 +165,6 @@ CODEGENOPT(PrepareForThinLTO , 1, 0) ///< Set when -flto=thin is enabled on the
///< compile step.
CODEGENOPT(LTOUnit, 1, 0) ///< Emit IR to support LTO unit features (CFI, whole
///< program vtable opt).
CODEGENOPT(FatLTO, 1, 0) ///< Set when -ffat-lto-objects is enabled.
CODEGENOPT(EnableSplitLTOUnit, 1, 0) ///< Enable LTO unit splitting to support
/// CFI and traditional whole program
/// devirtualization that require whole

View file

@@ -70,6 +70,8 @@ def note_consteval_address_accessible : Note<
"is not a constant expression">;
def note_constexpr_uninitialized : Note<
"subobject %0 is not initialized">;
def note_constexpr_uninitialized_base : Note<
"constructor of base class %0 is not called">;
def note_constexpr_static_local : Note<
"control flows through the definition of a %select{static|thread_local}0 variable">;
def note_constexpr_subobject_declared_here : Note<

View file

@@ -20,6 +20,7 @@ def DeprecatedStaticAnalyzerFlag : DiagGroup<"deprecated-static-analyzer-flag">;
// Empty DiagGroups are recognized by clang but ignored.
def ODR : DiagGroup<"odr">;
def : DiagGroup<"abi">;
def : DiagGroup<"gnu-empty-initializer">; // Now a C extension, not GNU.
def AbsoluteValue : DiagGroup<"absolute-value">;
def MisspelledAssumption : DiagGroup<"misspelled-assumption">;
def UnknownAssumption : DiagGroup<"unknown-assumption">;

View file

@@ -285,6 +285,10 @@ def ext_ms_reserved_user_defined_literal : ExtWarn<
def err_unsupported_string_concat : Error<
"unsupported non-standard concatenation of string literals">;
def warn_unevaluated_string_prefix : Warning<
"encoding prefix '%0' on an unevaluated string literal has no effect"
"%select{| and is incompatible with c++2c}1">,
InGroup<DiagGroup<"invalid-unevaluated-string">>;
def err_unevaluated_string_prefix : Error<
"an unevaluated string literal cannot have an encoding prefix">;
def err_unevaluated_string_udl : Error<

View file

@@ -23,7 +23,11 @@
namespace llvm {
class hash_code;
class Triple;
namespace opt {
class ArgList;
}
} // namespace llvm
namespace clang {

View file

@@ -1414,7 +1414,9 @@ class TargetInfo : public TransferrableTargetInfo,
/// Identify whether this target supports IFuncs.
bool supportsIFunc() const {
return getTriple().isOSBinFormatELF() && !getTriple().isOSFuchsia();
return getTriple().isOSBinFormatELF() &&
((getTriple().isOSLinux() && !getTriple().isMusl()) ||
getTriple().isOSFreeBSD());
}
// Validate the contents of the __builtin_cpu_supports(const char*)

View file

@@ -112,7 +112,7 @@ multiclass RVVIntBinBuiltinSet
multiclass RVVSlideOneBuiltinSet
: RVVOutOp1BuiltinSet<NAME, "csil",
[["vx", "v", "vve"],
["vx", "Uv", "UvUve"]]>;
["vx", "Uv", "UvUvUe"]]>;
multiclass RVVSignedShiftBuiltinSet
: RVVOutOp1BuiltinSet<NAME, "csil",
@@ -990,56 +990,6 @@ multiclass RVVPseudoVNCVTBuiltin<string IR, string MName, string type_range,
}
}
// Define vread_csr&vwrite_csr described in RVV intrinsics doc.
let HeaderCode =
[{
enum RVV_CSR {
RVV_VSTART = 0,
RVV_VXSAT,
RVV_VXRM,
RVV_VCSR,
};
static __inline__ __attribute__((__always_inline__, __nodebug__))
unsigned long __riscv_vread_csr(enum RVV_CSR __csr) {
unsigned long __rv = 0;
switch (__csr) {
case RVV_VSTART:
__asm__ __volatile__ ("csrr\t%0, vstart" : "=r"(__rv) : : "memory");
break;
case RVV_VXSAT:
__asm__ __volatile__ ("csrr\t%0, vxsat" : "=r"(__rv) : : "memory");
break;
case RVV_VXRM:
__asm__ __volatile__ ("csrr\t%0, vxrm" : "=r"(__rv) : : "memory");
break;
case RVV_VCSR:
__asm__ __volatile__ ("csrr\t%0, vcsr" : "=r"(__rv) : : "memory");
break;
}
return __rv;
}
static __inline__ __attribute__((__always_inline__, __nodebug__))
void __riscv_vwrite_csr(enum RVV_CSR __csr, unsigned long __value) {
switch (__csr) {
case RVV_VSTART:
__asm__ __volatile__ ("csrw\tvstart, %z0" : : "rJ"(__value) : "memory");
break;
case RVV_VXSAT:
__asm__ __volatile__ ("csrw\tvxsat, %z0" : : "rJ"(__value) : "memory");
break;
case RVV_VXRM:
__asm__ __volatile__ ("csrw\tvxrm, %z0" : : "rJ"(__value) : "memory");
break;
case RVV_VCSR:
__asm__ __volatile__ ("csrw\tvcsr, %z0" : : "rJ"(__value) : "memory");
break;
}
}
}] in
def vread_vwrite_csr: RVVHeader;
let HeaderCode =
[{
#define __riscv_vlenb() __builtin_rvv_vlenb()

View file

@@ -567,6 +567,10 @@ class CGFunctionInfo final
/// Whether this is a chain call.
unsigned ChainCall : 1;
/// Whether this function is called by forwarding arguments.
/// This doesn't support inalloca or varargs.
unsigned DelegateCall : 1;
/// Whether this function is a CMSE nonsecure call
unsigned CmseNSCall : 1;
@@ -616,14 +620,11 @@ class CGFunctionInfo final
CGFunctionInfo() : Required(RequiredArgs::All) {}
public:
static CGFunctionInfo *create(unsigned llvmCC,
bool instanceMethod,
bool chainCall,
const FunctionType::ExtInfo &extInfo,
ArrayRef<ExtParameterInfo> paramInfos,
CanQualType resultType,
ArrayRef<CanQualType> argTypes,
RequiredArgs required);
static CGFunctionInfo *
create(unsigned llvmCC, bool instanceMethod, bool chainCall,
bool delegateCall, const FunctionType::ExtInfo &extInfo,
ArrayRef<ExtParameterInfo> paramInfos, CanQualType resultType,
ArrayRef<CanQualType> argTypes, RequiredArgs required);
void operator delete(void *p) { ::operator delete(p); }
// Friending class TrailingObjects is apparently not good enough for MSVC,
@@ -663,6 +664,8 @@ class CGFunctionInfo final
bool isChainCall() const { return ChainCall; }
bool isDelegateCall() const { return DelegateCall; }
bool isCmseNSCall() const { return CmseNSCall; }
bool isNoReturn() const { return NoReturn; }
@@ -749,6 +752,7 @@ class CGFunctionInfo final
ID.AddInteger(getASTCallingConvention());
ID.AddBoolean(InstanceMethod);
ID.AddBoolean(ChainCall);
ID.AddBoolean(DelegateCall);
ID.AddBoolean(NoReturn);
ID.AddBoolean(ReturnsRetained);
ID.AddBoolean(NoCallerSavedRegs);
@@ -766,17 +770,16 @@ class CGFunctionInfo final
for (const auto &I : arguments())
I.type.Profile(ID);
}
static void Profile(llvm::FoldingSetNodeID &ID,
bool InstanceMethod,
bool ChainCall,
static void Profile(llvm::FoldingSetNodeID &ID, bool InstanceMethod,
bool ChainCall, bool IsDelegateCall,
const FunctionType::ExtInfo &info,
ArrayRef<ExtParameterInfo> paramInfos,
RequiredArgs required,
CanQualType resultType,
RequiredArgs required, CanQualType resultType,
ArrayRef<CanQualType> argTypes) {
ID.AddInteger(info.getCC());
ID.AddBoolean(InstanceMethod);
ID.AddBoolean(ChainCall);
ID.AddBoolean(IsDelegateCall);
ID.AddBoolean(info.getNoReturn());
ID.AddBoolean(info.getProducesResult());
ID.AddBoolean(info.getNoCallerSavedRegs());

View file

@@ -2375,11 +2375,6 @@ def fthin_link_bitcode_EQ : Joined<["-"], "fthin-link-bitcode=">,
Flags<[CoreOption, CC1Option]>, Group<f_Group>,
HelpText<"Write minimized bitcode to <file> for the ThinLTO thin link only">,
MarshallingInfoString<CodeGenOpts<"ThinLinkBitcodeFile">>;
defm fat_lto_objects : BoolFOption<"fat-lto-objects",
CodeGenOpts<"FatLTO">, DefaultFalse,
PosFlag<SetTrue, [CC1Option], "Enable">,
NegFlag<SetFalse, [CC1Option], "Disable">,
BothFlags<[CC1Option], " fat LTO object support">>;
def fmacro_backtrace_limit_EQ : Joined<["-"], "fmacro-backtrace-limit=">,
Group<f_Group>, Flags<[NoXarchOption, CC1Option, CoreOption]>,
HelpText<"Set the maximum number of entries to print in a macro expansion backtrace (0 = no limit)">,
@@ -5097,6 +5092,10 @@ def mretpoline_external_thunk : Flag<["-"], "mretpoline-external-thunk">, Group<
def mno_retpoline_external_thunk : Flag<["-"], "mno-retpoline-external-thunk">, Group<m_x86_Features_Group>;
def mvzeroupper : Flag<["-"], "mvzeroupper">, Group<m_x86_Features_Group>;
def mno_vzeroupper : Flag<["-"], "mno-vzeroupper">, Group<m_x86_Features_Group>;
def mno_gather : Flag<["-"], "mno-gather">, Group<m_x86_Features_Group>,
HelpText<"Disable generation of gather instructions in auto-vectorization(x86 only)">;
def mno_scatter : Flag<["-"], "mno-scatter">, Group<m_x86_Features_Group>,
HelpText<"Disable generation of scatter instructions in auto-vectorization(x86 only)">;
// These are legacy user-facing driver-level option spellings. They are always
// aliases for options that are spelled using the more common Unix / GNU flag
@@ -5162,6 +5161,7 @@ defm caller_saves : BooleanFFlag<"caller-saves">, Group<clang_ignored_gcc_optimi
defm reorder_blocks : BooleanFFlag<"reorder-blocks">, Group<clang_ignored_gcc_optimization_f_Group>;
defm branch_count_reg : BooleanFFlag<"branch-count-reg">, Group<clang_ignored_gcc_optimization_f_Group>;
defm default_inline : BooleanFFlag<"default-inline">, Group<clang_ignored_gcc_optimization_f_Group>;
defm fat_lto_objects : BooleanFFlag<"fat-lto-objects">, Group<clang_ignored_gcc_optimization_f_Group>;
defm float_store : BooleanFFlag<"float-store">, Group<clang_ignored_gcc_optimization_f_Group>;
defm friend_injection : BooleanFFlag<"friend-injection">, Group<clang_ignored_f_Group>;
defm function_attribute_list : BooleanFFlag<"function-attribute-list">, Group<clang_ignored_f_Group>;
@@ -7152,6 +7152,10 @@ def _SLASH_QIntel_jcc_erratum : CLFlag<"QIntel-jcc-erratum">,
Alias<mbranches_within_32B_boundaries>;
def _SLASH_arm64EC : CLFlag<"arm64EC">,
HelpText<"Set build target to arm64ec">;
def : CLFlag<"Qgather-">, Alias<mno_gather>,
HelpText<"Disable generation of gather instructions in auto-vectorization(x86 only)">;
def : CLFlag<"Qscatter-">, Alias<mno_scatter>,
HelpText<"Disable generation of scatter instructions in auto-vectorization(x86 only)">;
// Non-aliases:

View file

@@ -561,7 +561,7 @@ class ToolChain {
// Return the DWARF version to emit, in the absence of arguments
// to the contrary.
virtual unsigned GetDefaultDwarfVersion() const { return 5; }
virtual unsigned GetDefaultDwarfVersion() const;
// Some toolchains may have different restrictions on the DWARF version and
// may need to adjust it. E.g. NVPTX may need to enforce DWARF2 even when host

View file

@@ -12694,8 +12694,6 @@ class Sema final {
QualType CheckBitwiseOperands( // C99 6.5.[10...12]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
void diagnoseLogicalInsteadOfBitwise(Expr *Op1, Expr *Op2, SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckLogicalOperands( // C99 6.5.[13,14]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);

View file

@@ -9612,9 +9612,8 @@ bool ASTContext::areLaxCompatibleRVVTypes(QualType FirstType,
const LangOptions::LaxVectorConversionKind LVCKind =
getLangOpts().getLaxVectorConversions();
// If __riscv_v_fixed_vlen != N do not allow GNU vector lax conversion.
if (VecTy->getVectorKind() == VectorType::GenericVector &&
getTypeSize(SecondType) != getRVVTypeSize(*this, BT))
// If __riscv_v_fixed_vlen != N do not allow vector lax conversion.
if (getTypeSize(SecondType) != getRVVTypeSize(*this, BT))
return false;
// If -flax-vector-conversions=all is specified, the types are

View file

@@ -2418,9 +2418,16 @@ static bool CheckEvaluationResult(CheckEvaluationResultKind CERK,
if (const CXXRecordDecl *CD = dyn_cast<CXXRecordDecl>(RD)) {
unsigned BaseIndex = 0;
for (const CXXBaseSpecifier &BS : CD->bases()) {
if (!CheckEvaluationResult(CERK, Info, DiagLoc, BS.getType(),
Value.getStructBase(BaseIndex), Kind,
/*SubobjectDecl=*/nullptr, CheckedTemps))
const APValue &BaseValue = Value.getStructBase(BaseIndex);
if (!BaseValue.hasValue()) {
SourceLocation TypeBeginLoc = BS.getBaseTypeLoc();
Info.FFDiag(TypeBeginLoc, diag::note_constexpr_uninitialized_base)
<< BS.getType() << SourceRange(TypeBeginLoc, BS.getEndLoc());
return false;
}
if (!CheckEvaluationResult(CERK, Info, DiagLoc, BS.getType(), BaseValue,
Kind, /*SubobjectDecl=*/nullptr,
CheckedTemps))
return false;
++BaseIndex;
}
@@ -15218,14 +15225,6 @@ static bool FastEvaluateAsRValue(const Expr *Exp, Expr::EvalResult &Result,
return true;
}
// FIXME: Evaluating values of large array and record types can cause
// performance problems. Only do so in C++11 for now.
if (Exp->isPRValue() &&
(Exp->getType()->isArrayType() || Exp->getType()->isRecordType()) &&
!Ctx.getLangOpts().CPlusPlus11) {
IsConst = false;
return true;
}
return false;
}
@@ -15467,12 +15466,6 @@ bool Expr::EvaluateAsInitializer(APValue &Value, const ASTContext &Ctx,
return Name;
});
// FIXME: Evaluating initializers for large array and record types can cause
// performance problems. Only do so in C++11 for now.
if (isPRValue() && (getType()->isArrayType() || getType()->isRecordType()) &&
!Ctx.getLangOpts().CPlusPlus11)
return false;
Expr::EvalStatus EStatus;
EStatus.Diag = &Notes;

View file

@@ -15,7 +15,7 @@
#include "clang/Basic/MacroBuilder.h"
#include "clang/Basic/TargetBuiltins.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/TargetParser/TargetParser.h"
#include "llvm/TargetParser/LoongArchTargetParser.h"
using namespace clang;
using namespace clang::targets;
@@ -198,7 +198,15 @@ void LoongArchTargetInfo::getTargetDefines(const LangOptions &Opts,
else
Builder.defineMacro("__loongarch_frlen", "0");
// TODO: define __loongarch_arch and __loongarch_tune.
// Define __loongarch_arch.
StringRef ArchName = getCPU();
Builder.defineMacro("__loongarch_arch", Twine('"') + ArchName + Twine('"'));
// Define __loongarch_tune.
StringRef TuneCPU = getTargetOpts().TuneCPU;
if (TuneCPU.empty())
TuneCPU = ArchName;
Builder.defineMacro("__loongarch_tune", Twine('"') + TuneCPU + Twine('"'));
StringRef ABI = getABI();
if (ABI == "lp64d" || ABI == "lp64f" || ABI == "lp64s")
@@ -270,3 +278,12 @@ bool LoongArchTargetInfo::handleTargetFeatures(
}
return true;
}
bool LoongArchTargetInfo::isValidCPUName(StringRef Name) const {
return llvm::LoongArch::isValidCPUName(Name);
}
void LoongArchTargetInfo::fillValidCPUList(
SmallVectorImpl<StringRef> &Values) const {
llvm::LoongArch::fillValidCPUList(Values);
}

View file

@@ -24,6 +24,7 @@ namespace targets {
class LLVM_LIBRARY_VISIBILITY LoongArchTargetInfo : public TargetInfo {
protected:
std::string ABI;
std::string CPU;
bool HasFeatureD;
bool HasFeatureF;
@@ -40,6 +41,15 @@ class LLVM_LIBRARY_VISIBILITY LoongArchTargetInfo : public TargetInfo {
WIntType = UnsignedInt;
}
bool setCPU(const std::string &Name) override {
if (!isValidCPUName(Name))
return false;
CPU = Name;
return true;
}
StringRef getCPU() const { return CPU; }
StringRef getABI() const override { return ABI; }
void getTargetDefines(const LangOptions &Opts,
@@ -80,6 +90,9 @@ class LLVM_LIBRARY_VISIBILITY LoongArchTargetInfo : public TargetInfo {
const std::vector<std::string> &FeaturesVec) const override;
bool hasFeature(StringRef Feature) const override;
bool isValidCPUName(StringRef Name) const override;
void fillValidCPUList(SmallVectorImpl<StringRef> &Values) const override;
};
class LLVM_LIBRARY_VISIBILITY LoongArch32TargetInfo

View file

@@ -196,8 +196,8 @@ void RISCVTargetInfo::getTargetDefines(const LangOptions &Opts,
if (ISAInfo->hasExtension("zve32x")) {
Builder.defineMacro("__riscv_vector");
// Currently we support the v0.11 RISC-V V intrinsics.
Builder.defineMacro("__riscv_v_intrinsic", Twine(getVersionValue(0, 11)));
// Currently we support the v0.12 RISC-V V intrinsics.
Builder.defineMacro("__riscv_v_intrinsic", Twine(getVersionValue(0, 12)));
}
auto VScale = getVScaleRange(Opts);

View file

@@ -246,7 +246,7 @@ Address CodeGen::emitMergePHI(CodeGenFunction &CGF, Address Addr1,
}
bool CodeGen::isEmptyField(ASTContext &Context, const FieldDecl *FD,
bool AllowArrays) {
bool AllowArrays, bool AsIfNoUniqueAddr) {
if (FD->isUnnamedBitfield())
return true;
@@ -280,13 +280,14 @@ bool CodeGen::isEmptyField(ASTContext &Context, const FieldDecl *FD,
// not arrays of records, so we must also check whether we stripped off an
// array type above.
if (isa<CXXRecordDecl>(RT->getDecl()) &&
(WasArray || !FD->hasAttr<NoUniqueAddressAttr>()))
(WasArray || (!AsIfNoUniqueAddr && !FD->hasAttr<NoUniqueAddressAttr>())))
return false;
return isEmptyRecord(Context, FT, AllowArrays);
return isEmptyRecord(Context, FT, AllowArrays, AsIfNoUniqueAddr);
}
bool CodeGen::isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays) {
bool CodeGen::isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays,
bool AsIfNoUniqueAddr) {
const RecordType *RT = T->getAs<RecordType>();
if (!RT)
return false;
@@ -297,11 +298,11 @@ bool CodeGen::isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays) {
// If this is a C++ record, check the bases first.
if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
for (const auto &I : CXXRD->bases())
if (!isEmptyRecord(Context, I.getType(), true))
if (!isEmptyRecord(Context, I.getType(), true, AsIfNoUniqueAddr))
return false;
for (const auto *I : RD->fields())
if (!isEmptyField(Context, I, AllowArrays))
if (!isEmptyField(Context, I, AllowArrays, AsIfNoUniqueAddr))
return false;
return true;
}

View file

@@ -122,13 +122,19 @@ Address emitMergePHI(CodeGenFunction &CGF, Address Addr1,
llvm::BasicBlock *Block2, const llvm::Twine &Name = "");
/// isEmptyField - Return true iff a the field is "empty", that is it
/// is an unnamed bit-field or an (array of) empty record(s).
bool isEmptyField(ASTContext &Context, const FieldDecl *FD, bool AllowArrays);
/// is an unnamed bit-field or an (array of) empty record(s). If
/// AsIfNoUniqueAddr is true, then C++ record fields are considered empty if
/// the [[no_unique_address]] attribute would have made them empty.
bool isEmptyField(ASTContext &Context, const FieldDecl *FD, bool AllowArrays,
bool AsIfNoUniqueAddr = false);
/// isEmptyRecord - Return true iff a structure contains only empty
/// fields. Note that a structure with a flexible array member is not
/// considered empty.
bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays);
/// considered empty. If AsIfNoUniqueAddr is true, then C++ record fields are
/// considered empty if the [[no_unique_address]] attribute would have made
/// them empty.
bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays,
bool AsIfNoUniqueAddr = false);
/// isSingleElementStruct - Determine if a structure is a "single
/// element struct", i.e. it has exactly one non-empty field or

View file

@@ -55,7 +55,6 @@
#include "llvm/Target/TargetOptions.h"
#include "llvm/TargetParser/SubtargetFeature.h"
#include "llvm/TargetParser/Triple.h"
#include "llvm/Transforms/IPO/EmbedBitcodePass.h"
#include "llvm/Transforms/IPO/LowerTypeTests.h"
#include "llvm/Transforms/IPO/ThinLTOBitcodeWriter.h"
#include "llvm/Transforms/InstCombine/InstCombine.h"
@@ -1016,12 +1015,7 @@ void EmitAssemblyHelper::RunOptimizationPipeline(
});
}
bool IsThinOrUnifiedLTO = IsThinLTO || (IsLTO && CodeGenOpts.UnifiedLTO);
if (CodeGenOpts.FatLTO) {
MPM = PB.buildFatLTODefaultPipeline(Level, IsThinOrUnifiedLTO,
IsThinOrUnifiedLTO ||
shouldEmitRegularLTOSummary());
} else if (IsThinOrUnifiedLTO) {
if (IsThinLTO || (IsLTO && CodeGenOpts.UnifiedLTO)) {
MPM = PB.buildThinLTOPreLinkDefaultPipeline(Level);
} else if (IsLTO) {
MPM = PB.buildLTOPreLinkDefaultPipeline(Level);
@@ -1077,21 +1071,6 @@ void EmitAssemblyHelper::RunOptimizationPipeline(
EmitLTOSummary));
}
}
if (CodeGenOpts.FatLTO) {
// Set module flags, like EnableSplitLTOUnit and UnifiedLTO, since FatLTO
// uses a different action than Backend_EmitBC or Backend_EmitLL.
bool IsThinOrUnifiedLTO =
CodeGenOpts.PrepareForThinLTO ||
(CodeGenOpts.PrepareForLTO && CodeGenOpts.UnifiedLTO);
if (!TheModule->getModuleFlag("ThinLTO"))
TheModule->addModuleFlag(Module::Error, "ThinLTO",
uint32_t(IsThinOrUnifiedLTO));
if (!TheModule->getModuleFlag("EnableSplitLTOUnit"))
TheModule->addModuleFlag(Module::Error, "EnableSplitLTOUnit",
uint32_t(CodeGenOpts.EnableSplitLTOUnit));
if (CodeGenOpts.UnifiedLTO && !TheModule->getModuleFlag("UnifiedLTO"))
TheModule->addModuleFlag(Module::Error, "UnifiedLTO", uint32_t(1));
}
// Now that we have all of the passes ready, run them.
{

View file

@@ -312,8 +312,7 @@ void CGCXXABI::setCXXDestructorDLLStorage(llvm::GlobalValue *GV,
llvm::GlobalValue::LinkageTypes CGCXXABI::getCXXDestructorLinkage(
GVALinkage Linkage, const CXXDestructorDecl *Dtor, CXXDtorType DT) const {
// Delegate back to CGM by default.
return CGM.getLLVMLinkageForDeclarator(Dtor, Linkage,
/*IsConstantVariable=*/false);
return CGM.getLLVMLinkageForDeclarator(Dtor, Linkage);
}
bool CGCXXABI::NeedsVTTParameter(GlobalDecl GD) {

View file

@@ -13,6 +13,7 @@
#include "CGCall.h"
#include "ABIInfo.h"
#include "ABIInfoImpl.h"
#include "CGBlocks.h"
#include "CGCXXABI.h"
#include "CGCleanup.h"
@@ -112,8 +113,7 @@ CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
// When translating an unprototyped function type, always use a
// variadic type.
return arrangeLLVMFunctionInfo(FTNP->getReturnType().getUnqualifiedType(),
/*instanceMethod=*/false,
/*chainCall=*/false, std::nullopt,
FnInfoOpts::None, std::nullopt,
FTNP->getExtInfo(), {}, RequiredArgs(0));
}
@@ -189,10 +189,10 @@ arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod,
appendParameterTypes(CGT, prefix, paramInfos, FTP);
CanQualType resultType = FTP->getReturnType().getUnqualifiedType();
return CGT.arrangeLLVMFunctionInfo(resultType, instanceMethod,
/*chainCall=*/false, prefix,
FTP->getExtInfo(), paramInfos,
Required);
FnInfoOpts opts =
instanceMethod ? FnInfoOpts::IsInstanceMethod : FnInfoOpts::None;
return CGT.arrangeLLVMFunctionInfo(resultType, opts, prefix,
FTP->getExtInfo(), paramInfos, Required);
}
/// Arrange the argument and result information for a value of the
@@ -271,7 +271,7 @@ CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
argTypes.push_back(DeriveThisType(RD, MD));
return ::arrangeLLVMFunctionInfo(
*this, true, argTypes,
*this, /*instanceMethod=*/true, argTypes,
FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
}
@@ -363,9 +363,8 @@ CodeGenTypes::arrangeCXXStructorDeclaration(GlobalDecl GD) {
: TheCXXABI.hasMostDerivedReturn(GD)
? CGM.getContext().VoidPtrTy
: Context.VoidTy;
return arrangeLLVMFunctionInfo(resultType, /*instanceMethod=*/true,
/*chainCall=*/false, argTypes, extInfo,
paramInfos, required);
return arrangeLLVMFunctionInfo(resultType, FnInfoOpts::IsInstanceMethod,
argTypes, extInfo, paramInfos, required);
}
static SmallVector<CanQualType, 16>
@@ -439,9 +438,9 @@ CodeGenTypes::arrangeCXXConstructorCall(const CallArgList &args,
addExtParameterInfosForCall(ParamInfos, FPT.getTypePtr(), TotalPrefixArgs,
ArgTypes.size());
}
return arrangeLLVMFunctionInfo(ResultType, /*instanceMethod=*/true,
/*chainCall=*/false, ArgTypes, Info,
ParamInfos, Required);
return arrangeLLVMFunctionInfo(ResultType, FnInfoOpts::IsInstanceMethod,
ArgTypes, Info, ParamInfos, Required);
}
/// Arrange the argument and result information for the declaration or
@@ -460,10 +459,9 @@ CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
// When declaring a function without a prototype, always use a
// non-variadic type.
if (CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>()) {
return arrangeLLVMFunctionInfo(
noProto->getReturnType(), /*instanceMethod=*/false,
/*chainCall=*/false, std::nullopt, noProto->getExtInfo(), {},
RequiredArgs::All);
return arrangeLLVMFunctionInfo(noProto->getReturnType(), FnInfoOpts::None,
std::nullopt, noProto->getExtInfo(), {},
RequiredArgs::All);
}
return arrangeFreeFunctionType(FTy.castAs<FunctionProtoType>());
@@ -512,9 +510,9 @@ CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
RequiredArgs required =
(MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);
return arrangeLLVMFunctionInfo(
GetReturnType(MD->getReturnType()), /*instanceMethod=*/false,
/*chainCall=*/false, argTys, einfo, extParamInfos, required);
return arrangeLLVMFunctionInfo(GetReturnType(MD->getReturnType()),
FnInfoOpts::None, argTys, einfo, extParamInfos,
required);
}
const CGFunctionInfo &
@@ -523,9 +521,8 @@ CodeGenTypes::arrangeUnprototypedObjCMessageSend(QualType returnType,
auto argTypes = getArgTypesForCall(Context, args);
FunctionType::ExtInfo einfo;
return arrangeLLVMFunctionInfo(
GetReturnType(returnType), /*instanceMethod=*/false,
/*chainCall=*/false, argTypes, einfo, {}, RequiredArgs::All);
return arrangeLLVMFunctionInfo(GetReturnType(returnType), FnInfoOpts::None,
argTypes, einfo, {}, RequiredArgs::All);
}
const CGFunctionInfo &
@@ -550,8 +547,7 @@ CodeGenTypes::arrangeUnprototypedMustTailThunk(const CXXMethodDecl *MD) {
assert(MD->isVirtual() && "only methods have thunks");
CanQual<FunctionProtoType> FTP = GetFormalType(MD);
CanQualType ArgTys[] = {DeriveThisType(MD->getParent(), MD)};
return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/false,
/*chainCall=*/false, ArgTys,
return arrangeLLVMFunctionInfo(Context.VoidTy, FnInfoOpts::None, ArgTys,
FTP->getExtInfo(), {}, RequiredArgs(1));
}
@@ -570,9 +566,8 @@ CodeGenTypes::arrangeMSCtorClosure(const CXXConstructorDecl *CD,
ArgTys.push_back(Context.IntTy);
CallingConv CC = Context.getDefaultCallingConvention(
/*IsVariadic=*/false, /*IsCXXMethod=*/true);
return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/true,
/*chainCall=*/false, ArgTys,
FunctionType::ExtInfo(CC), {},
return arrangeLLVMFunctionInfo(Context.VoidTy, FnInfoOpts::IsInstanceMethod,
ArgTys, FunctionType::ExtInfo(CC), {},
RequiredArgs::All);
}
@@ -616,10 +611,10 @@ arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
SmallVector<CanQualType, 16> argTypes;
for (const auto &arg : args)
argTypes.push_back(CGT.getContext().getCanonicalParamType(arg.Ty));
FnInfoOpts opts = chainCall ? FnInfoOpts::IsChainCall : FnInfoOpts::None;
return CGT.arrangeLLVMFunctionInfo(GetReturnType(fnType->getReturnType()),
/*instanceMethod=*/false, chainCall,
argTypes, fnType->getExtInfo(), paramInfos,
required);
opts, argTypes, fnType->getExtInfo(),
paramInfos, required);
}
/// Figure out the rules for calling a function with the given formal
@@ -650,8 +645,8 @@ CodeGenTypes::arrangeBlockFunctionDeclaration(const FunctionProtoType *proto,
auto argTypes = getArgTypesForDeclaration(Context, params);
return arrangeLLVMFunctionInfo(GetReturnType(proto->getReturnType()),
/*instanceMethod*/ false, /*chainCall*/ false,
argTypes, proto->getExtInfo(), paramInfos,
FnInfoOpts::None, argTypes,
proto->getExtInfo(), paramInfos,
RequiredArgs::forPrototypePlus(proto, 1));
}
@@ -662,10 +657,9 @@ CodeGenTypes::arrangeBuiltinFunctionCall(QualType resultType,
SmallVector<CanQualType, 16> argTypes;
for (const auto &Arg : args)
argTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
return arrangeLLVMFunctionInfo(
GetReturnType(resultType), /*instanceMethod=*/false,
/*chainCall=*/false, argTypes, FunctionType::ExtInfo(),
/*paramInfos=*/ {}, RequiredArgs::All);
return arrangeLLVMFunctionInfo(GetReturnType(resultType), FnInfoOpts::None,
argTypes, FunctionType::ExtInfo(),
/*paramInfos=*/{}, RequiredArgs::All);
}
const CGFunctionInfo &
@@ -673,17 +667,17 @@ CodeGenTypes::arrangeBuiltinFunctionDeclaration(QualType resultType,
const FunctionArgList &args) {
auto argTypes = getArgTypesForDeclaration(Context, args);
return arrangeLLVMFunctionInfo(
GetReturnType(resultType), /*instanceMethod=*/false, /*chainCall=*/false,
argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
return arrangeLLVMFunctionInfo(GetReturnType(resultType), FnInfoOpts::None,
argTypes, FunctionType::ExtInfo(), {},
RequiredArgs::All);
}
const CGFunctionInfo &
CodeGenTypes::arrangeBuiltinFunctionDeclaration(CanQualType resultType,
ArrayRef<CanQualType> argTypes) {
return arrangeLLVMFunctionInfo(
resultType, /*instanceMethod=*/false, /*chainCall=*/false,
argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
return arrangeLLVMFunctionInfo(resultType, FnInfoOpts::None, argTypes,
FunctionType::ExtInfo(), {},
RequiredArgs::All);
}
/// Arrange a call to a C++ method, passing the given arguments.
@ -706,15 +700,15 @@ CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args,
auto argTypes = getArgTypesForCall(Context, args);
FunctionType::ExtInfo info = proto->getExtInfo();
return arrangeLLVMFunctionInfo(
GetReturnType(proto->getReturnType()), /*instanceMethod=*/true,
/*chainCall=*/false, argTypes, info, paramInfos, required);
return arrangeLLVMFunctionInfo(GetReturnType(proto->getReturnType()),
FnInfoOpts::IsInstanceMethod, argTypes, info,
paramInfos, required);
}
const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
return arrangeLLVMFunctionInfo(
getContext().VoidTy, /*instanceMethod=*/false, /*chainCall=*/false,
std::nullopt, FunctionType::ExtInfo(), {}, RequiredArgs::All);
return arrangeLLVMFunctionInfo(getContext().VoidTy, FnInfoOpts::None,
std::nullopt, FunctionType::ExtInfo(), {},
RequiredArgs::All);
}
const CGFunctionInfo &
@ -734,12 +728,15 @@ CodeGenTypes::arrangeCall(const CGFunctionInfo &signature,
auto argTypes = getArgTypesForCall(Context, args);
assert(signature.getRequiredArgs().allowsOptionalArgs());
return arrangeLLVMFunctionInfo(signature.getReturnType(),
signature.isInstanceMethod(),
signature.isChainCall(),
argTypes,
signature.getExtInfo(),
paramInfos,
FnInfoOpts opts = FnInfoOpts::None;
if (signature.isInstanceMethod())
opts |= FnInfoOpts::IsInstanceMethod;
if (signature.isChainCall())
opts |= FnInfoOpts::IsChainCall;
if (signature.isDelegateCall())
opts |= FnInfoOpts::IsDelegateCall;
return arrangeLLVMFunctionInfo(signature.getReturnType(), opts, argTypes,
signature.getExtInfo(), paramInfos,
signature.getRequiredArgs());
}
@@ -752,21 +749,24 @@ void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI);
/// Arrange the argument and result information for an abstract value
/// of a given function type. This is the method which all of the
/// above functions ultimately defer to.
const CGFunctionInfo &
CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
bool instanceMethod,
bool chainCall,
ArrayRef<CanQualType> argTypes,
FunctionType::ExtInfo info,
ArrayRef<FunctionProtoType::ExtParameterInfo> paramInfos,
RequiredArgs required) {
const CGFunctionInfo &CodeGenTypes::arrangeLLVMFunctionInfo(
CanQualType resultType, FnInfoOpts opts, ArrayRef<CanQualType> argTypes,
FunctionType::ExtInfo info,
ArrayRef<FunctionProtoType::ExtParameterInfo> paramInfos,
RequiredArgs required) {
assert(llvm::all_of(argTypes,
[](CanQualType T) { return T.isCanonicalAsParam(); }));
// Lookup or create unique function info.
llvm::FoldingSetNodeID ID;
CGFunctionInfo::Profile(ID, instanceMethod, chainCall, info, paramInfos,
required, resultType, argTypes);
bool isInstanceMethod =
(opts & FnInfoOpts::IsInstanceMethod) == FnInfoOpts::IsInstanceMethod;
bool isChainCall =
(opts & FnInfoOpts::IsChainCall) == FnInfoOpts::IsChainCall;
bool isDelegateCall =
(opts & FnInfoOpts::IsDelegateCall) == FnInfoOpts::IsDelegateCall;
CGFunctionInfo::Profile(ID, isInstanceMethod, isChainCall, isDelegateCall,
info, paramInfos, required, resultType, argTypes);
void *insertPos = nullptr;
CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
@ -776,8 +776,8 @@ CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());
// Construct the function info. We co-allocate the ArgInfos.
FI = CGFunctionInfo::create(CC, instanceMethod, chainCall, info,
paramInfos, resultType, argTypes, required);
FI = CGFunctionInfo::create(CC, isInstanceMethod, isChainCall, isDelegateCall,
info, paramInfos, resultType, argTypes, required);
FunctionInfos.InsertNode(FI, insertPos);
bool inserted = FunctionsBeingProcessed.insert(FI).second;
@ -812,9 +812,8 @@ CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
return *FI;
}
CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
bool instanceMethod,
bool chainCall,
CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC, bool instanceMethod,
bool chainCall, bool delegateCall,
const FunctionType::ExtInfo &info,
ArrayRef<ExtParameterInfo> paramInfos,
CanQualType resultType,
@ -834,6 +833,7 @@ CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
FI->ASTCallingConvention = info.getCC();
FI->InstanceMethod = instanceMethod;
FI->ChainCall = chainCall;
FI->DelegateCall = delegateCall;
FI->CmseNSCall = info.getCmseNSCall();
FI->NoReturn = info.getNoReturn();
FI->ReturnsRetained = info.getProducesResult();
@ -3989,10 +3989,6 @@ void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
QualType type = param->getType();
if (isInAllocaArgument(CGM.getCXXABI(), type)) {
CGM.ErrorUnsupported(param, "forwarded non-trivially copyable parameter");
}
// GetAddrOfLocalVar returns a pointer-to-pointer for references,
// but the argument needs to be the original pointer.
if (type->isReferenceType()) {
@ -5105,7 +5101,6 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
"indirect argument must be in alloca address space");
bool NeedCopy = false;
if (Addr.getAlignment() < Align &&
llvm::getOrEnforceKnownAlignment(V, Align.getAsAlign(), *TD) <
Align.getAsAlign()) {
@ -5244,30 +5239,50 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType());
if (STy && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
llvm::Type *SrcTy = Src.getElementType();
uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy);
uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy);
llvm::TypeSize SrcTypeSize =
CGM.getDataLayout().getTypeAllocSize(SrcTy);
llvm::TypeSize DstTypeSize = CGM.getDataLayout().getTypeAllocSize(STy);
if (SrcTypeSize.isScalable()) {
assert(STy->containsHomogeneousScalableVectorTypes() &&
"ABI only supports structure with homogeneous scalable vector "
"type");
assert(SrcTypeSize == DstTypeSize &&
"Only allow non-fractional movement of structure with "
"homogeneous scalable vector type");
assert(NumIRArgs == STy->getNumElements());
// If the source type is smaller than the destination type of the
// coerce-to logic, copy the source value into a temp alloca the size
// of the destination type to allow loading all of it. The bits past
// the source value are left undef.
if (SrcSize < DstSize) {
Address TempAlloca
= CreateTempAlloca(STy, Src.getAlignment(),
Src.getName() + ".coerce");
Builder.CreateMemCpy(TempAlloca, Src, SrcSize);
Src = TempAlloca;
llvm::Value *StoredStructValue =
Builder.CreateLoad(Src, Src.getName() + ".tuple");
for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
llvm::Value *Extract = Builder.CreateExtractValue(
StoredStructValue, i, Src.getName() + ".extract" + Twine(i));
IRCallArgs[FirstIRArg + i] = Extract;
}
} else {
Src = Src.withElementType(STy);
}
uint64_t SrcSize = SrcTypeSize.getFixedValue();
uint64_t DstSize = DstTypeSize.getFixedValue();
assert(NumIRArgs == STy->getNumElements());
for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
Address EltPtr = Builder.CreateStructGEP(Src, i);
llvm::Value *LI = Builder.CreateLoad(EltPtr);
if (ArgHasMaybeUndefAttr)
LI = Builder.CreateFreeze(LI);
IRCallArgs[FirstIRArg + i] = LI;
// If the source type is smaller than the destination type of the
// coerce-to logic, copy the source value into a temp alloca the size
// of the destination type to allow loading all of it. The bits past
// the source value are left undef.
if (SrcSize < DstSize) {
Address TempAlloca = CreateTempAlloca(STy, Src.getAlignment(),
Src.getName() + ".coerce");
Builder.CreateMemCpy(TempAlloca, Src, SrcSize);
Src = TempAlloca;
} else {
Src = Src.withElementType(STy);
}
assert(NumIRArgs == STy->getNumElements());
for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
Address EltPtr = Builder.CreateStructGEP(Src, i);
llvm::Value *LI = Builder.CreateLoad(EltPtr);
if (ArgHasMaybeUndefAttr)
LI = Builder.CreateFreeze(LI);
IRCallArgs[FirstIRArg + i] = LI;
}
}
} else {
// In the simple case, just pass the coerced loaded value.
@ -5472,6 +5487,30 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::AlwaysInline);
}
// The await_suspend call performed by co_await is essentially asynchronous
// to the execution of the coroutine. Inlining it normally into an unsplit
// coroutine can cause miscompilation because the coroutine CFG misrepresents
// the true control flow of the program: things that happen in the
// await_suspend are not guaranteed to happen prior to the resumption of the
// coroutine, and things that happen after the resumption of the coroutine
// (including its exit and the potential deallocation of the coroutine frame)
// are not guaranteed to happen only after the end of await_suspend.
//
// The short-term solution to this problem is to mark the call as uninlinable.
// But we don't want to do this if the call is known to be trivial, which is
// very common.
//
// The long-term solution may introduce patterns like:
//
// call @llvm.coro.await_suspend(ptr %awaiter, ptr %handle,
// ptr @awaitSuspendFn)
//
// Then it is much easier to perform the safety analysis in the middle end.
// If it is safe to inline the call to awaitSuspend, we can replace it in the
// CoroEarly pass. Otherwise we could replace it in the CoroSplit pass.
if (inSuspendBlock() && mayCoroHandleEscape())
Attrs = Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::NoInline);
// Disable inlining inside SEH __try blocks.
if (isSEHTryScope()) {
Attrs = Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::NoInline);
@ -5765,9 +5804,14 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
DestIsVolatile = false;
}
// If the value is offset in memory, apply the offset now.
Address StorePtr = emitAddressAtOffset(*this, DestPtr, RetAI);
CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);
// An empty record can overlap other data (if declared with
// no_unique_address); omit the store for such types - as there is no
// actual data to store.
if (!isEmptyRecord(getContext(), RetTy, true)) {
// If the value is offset in memory, apply the offset now.
Address StorePtr = emitAddressAtOffset(*this, DestPtr, RetAI);
CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);
}
return convertTempToRValue(DestPtr, RetTy, SourceLocation());
}

View file

@ -383,6 +383,35 @@ void mergeDefaultFunctionDefinitionAttributes(llvm::Function &F,
const TargetOptions &TargetOpts,
bool WillInternalize);
/// Flags describing how a function's CGFunctionInfo should be arranged
/// (instance method, chain call, delegate call). Combinable as a bitmask.
enum class FnInfoOpts {
  None = 0,
  IsInstanceMethod = 1 << 0,
  IsChainCall = 1 << 1,
  IsDelegateCall = 1 << 2,
};

inline FnInfoOpts operator|(FnInfoOpts A, FnInfoOpts B) {
  return static_cast<FnInfoOpts>(
      static_cast<std::underlying_type_t<FnInfoOpts>>(A) |
      static_cast<std::underlying_type_t<FnInfoOpts>>(B));
}

inline FnInfoOpts operator&(FnInfoOpts A, FnInfoOpts B) {
  return static_cast<FnInfoOpts>(
      static_cast<std::underlying_type_t<FnInfoOpts>>(A) &
      static_cast<std::underlying_type_t<FnInfoOpts>>(B));
}

// The compound-assignment operators must take the left operand by reference
// and return a reference: the previous by-value signatures updated a copy,
// so `opts |= flag` silently left `opts` unchanged at every call site.
inline FnInfoOpts &operator|=(FnInfoOpts &A, FnInfoOpts B) {
  A = A | B;
  return A;
}

inline FnInfoOpts &operator&=(FnInfoOpts &A, FnInfoOpts B) {
  A = A & B;
  return A;
}
} // end namespace CodeGen
} // end namespace clang

View file

@ -2927,14 +2927,16 @@ llvm::Value *CodeGenFunction::EmitVTableTypeCheckedLoad(
}
void CodeGenFunction::EmitForwardingCallToLambda(
const CXXMethodDecl *callOperator,
CallArgList &callArgs) {
const CXXMethodDecl *callOperator, CallArgList &callArgs,
const CGFunctionInfo *calleeFnInfo, llvm::Constant *calleePtr) {
// Get the address of the call operator.
const CGFunctionInfo &calleeFnInfo =
CGM.getTypes().arrangeCXXMethodDeclaration(callOperator);
llvm::Constant *calleePtr =
CGM.GetAddrOfFunction(GlobalDecl(callOperator),
CGM.getTypes().GetFunctionType(calleeFnInfo));
if (!calleeFnInfo)
calleeFnInfo = &CGM.getTypes().arrangeCXXMethodDeclaration(callOperator);
if (!calleePtr)
calleePtr =
CGM.GetAddrOfFunction(GlobalDecl(callOperator),
CGM.getTypes().GetFunctionType(*calleeFnInfo));
// Prepare the return slot.
const FunctionProtoType *FPT =
@ -2942,8 +2944,8 @@ void CodeGenFunction::EmitForwardingCallToLambda(
QualType resultType = FPT->getReturnType();
ReturnValueSlot returnSlot;
if (!resultType->isVoidType() &&
calleeFnInfo.getReturnInfo().getKind() == ABIArgInfo::Indirect &&
!hasScalarEvaluationKind(calleeFnInfo.getReturnType()))
calleeFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect &&
!hasScalarEvaluationKind(calleeFnInfo->getReturnType()))
returnSlot =
ReturnValueSlot(ReturnValue, resultType.isVolatileQualified(),
/*IsUnused=*/false, /*IsExternallyDestructed=*/true);
@ -2954,7 +2956,7 @@ void CodeGenFunction::EmitForwardingCallToLambda(
// Now emit our call.
auto callee = CGCallee::forDirect(calleePtr, GlobalDecl(callOperator));
RValue RV = EmitCall(calleeFnInfo, callee, returnSlot, callArgs);
RValue RV = EmitCall(*calleeFnInfo, callee, returnSlot, callArgs);
// If necessary, copy the returned value into the slot.
if (!resultType->isVoidType() && returnSlot.isNull()) {
@ -2996,7 +2998,15 @@ void CodeGenFunction::EmitLambdaBlockInvokeBody() {
EmitForwardingCallToLambda(CallOp, CallArgs);
}
void CodeGenFunction::EmitLambdaDelegatingInvokeBody(const CXXMethodDecl *MD) {
void CodeGenFunction::EmitLambdaStaticInvokeBody(const CXXMethodDecl *MD) {
if (MD->isVariadic()) {
// FIXME: Making this work correctly is nasty because it requires either
// cloning the body of the call operator or making the call operator
// forward.
CGM.ErrorUnsupported(MD, "lambda conversion to variadic function");
return;
}
const CXXRecordDecl *Lambda = MD->getParent();
// Start building arguments for forwarding call
@ -3007,10 +3017,16 @@ void CodeGenFunction::EmitLambdaDelegatingInvokeBody(const CXXMethodDecl *MD) {
Address ThisPtr = CreateMemTemp(LambdaType, "unused.capture");
CallArgs.add(RValue::get(ThisPtr.getPointer()), ThisType);
// Add the rest of the parameters.
EmitLambdaDelegatingInvokeBody(MD, CallArgs);
}
void CodeGenFunction::EmitLambdaDelegatingInvokeBody(const CXXMethodDecl *MD,
CallArgList &CallArgs) {
// Add the rest of the forwarded parameters.
for (auto *Param : MD->parameters())
EmitDelegateCallArg(CallArgs, Param, Param->getBeginLoc());
const CXXRecordDecl *Lambda = MD->getParent();
const CXXMethodDecl *CallOp = Lambda->getLambdaCallOperator();
// For a generic lambda, find the corresponding call operator specialization
// to which the call to the static-invoker shall be forwarded.
@ -3024,10 +3040,21 @@ void CodeGenFunction::EmitLambdaDelegatingInvokeBody(const CXXMethodDecl *MD) {
assert(CorrespondingCallOpSpecialization);
CallOp = cast<CXXMethodDecl>(CorrespondingCallOpSpecialization);
}
// Special lambda forwarding when there are inalloca parameters.
if (hasInAllocaArg(MD)) {
const CGFunctionInfo *ImplFnInfo = nullptr;
llvm::Function *ImplFn = nullptr;
EmitLambdaInAllocaImplFn(CallOp, &ImplFnInfo, &ImplFn);
EmitForwardingCallToLambda(CallOp, CallArgs, ImplFnInfo, ImplFn);
return;
}
EmitForwardingCallToLambda(CallOp, CallArgs);
}
void CodeGenFunction::EmitLambdaStaticInvokeBody(const CXXMethodDecl *MD) {
void CodeGenFunction::EmitLambdaInAllocaCallOpBody(const CXXMethodDecl *MD) {
if (MD->isVariadic()) {
// FIXME: Making this work correctly is nasty because it requires either
// cloning the body of the call operator or making the call operator forward.
@ -3035,5 +3062,56 @@ void CodeGenFunction::EmitLambdaStaticInvokeBody(const CXXMethodDecl *MD) {
return;
}
EmitLambdaDelegatingInvokeBody(MD);
// Forward %this argument.
CallArgList CallArgs;
QualType LambdaType = getContext().getRecordType(MD->getParent());
QualType ThisType = getContext().getPointerType(LambdaType);
llvm::Value *ThisArg = CurFn->getArg(0);
CallArgs.add(RValue::get(ThisArg), ThisType);
EmitLambdaDelegatingInvokeBody(MD, CallArgs);
}
/// Emit (or look up a previously emitted) internal "__impl" function holding
/// the original body of a lambda call operator whose parameters involve
/// inalloca passing (MSVC x86 ABI). The static invoker's forwarding path
/// delegates to this function. On return, *ImplFnInfo points at the arranged
/// CGFunctionInfo and *ImplFn at the LLVM function.
void CodeGenFunction::EmitLambdaInAllocaImplFn(
    const CXXMethodDecl *CallOp, const CGFunctionInfo **ImplFnInfo,
    llvm::Function **ImplFn) {
  const CGFunctionInfo &FnInfo =
      CGM.getTypes().arrangeCXXMethodDeclaration(CallOp);
  llvm::Function *CallOpFn =
      cast<llvm::Function>(CGM.GetAddrOfFunction(GlobalDecl(CallOp)));

  // Emit function containing the original call op body. __invoke will delegate
  // to this function.
  SmallVector<CanQualType, 4> ArgTypes;
  for (auto I = FnInfo.arg_begin(); I != FnInfo.arg_end(); ++I)
    ArgTypes.push_back(I->type);

  // Arrange with IsDelegateCall so the argument pack of the call operator is
  // forwarded as-is instead of being re-packed for the new function.
  *ImplFnInfo = &CGM.getTypes().arrangeLLVMFunctionInfo(
      FnInfo.getReturnType(), FnInfoOpts::IsDelegateCall, ArgTypes,
      FnInfo.getExtInfo(), {}, FnInfo.getRequiredArgs());

  // Create mangled name as if this was a method named __impl. If for some
  // reason the name doesn't look as expected then just tack __impl to the
  // front.
  // TODO: Use the name mangler to produce the right name instead of using
  // string replacement.
  // NOTE(review): find_first_of matches ANY character of "<lambda" (not the
  // substring), and StringRef::npos is nonzero/truthy, so the else branch is
  // only reached when the first character matches — confirm this is intended.
  StringRef CallOpName = CallOpFn->getName();
  std::string ImplName;
  if (size_t Pos = CallOpName.find_first_of("<lambda"))
    ImplName = ("?__impl@" + CallOpName.drop_front(Pos)).str();
  else
    ImplName = ("__impl" + CallOpName).str();

  // Reuse an __impl function already present in the module; otherwise create
  // and emit it now with internal linkage.
  llvm::Function *Fn = CallOpFn->getParent()->getFunction(ImplName);
  if (!Fn) {
    Fn = llvm::Function::Create(CGM.getTypes().GetFunctionType(**ImplFnInfo),
                                llvm::GlobalValue::InternalLinkage, ImplName,
                                CGM.getModule());
    CGM.SetInternalFunctionAttributes(CallOp, Fn, **ImplFnInfo);

    const GlobalDecl &GD = GlobalDecl(CallOp);
    const auto *D = cast<FunctionDecl>(GD.getDecl());
    // Generate the body on a fresh CodeGenFunction so the current function's
    // state is not disturbed.
    CodeGenFunction(CGM).GenerateCode(GD, Fn, **ImplFnInfo);
    CGM.SetLLVMFunctionAttributesForDefinition(D, Fn);
  }
  *ImplFn = Fn;
}

View file

@ -139,6 +139,36 @@ static bool memberCallExpressionCanThrow(const Expr *E) {
return true;
}
/// Decide whether the coroutine handle may escape from the await-suspend
/// (`awaiter.await_suspend(std::coroutine_handle)` expression).
///
/// Returning true is always safe (conservative); returning false is only
/// allowed when the handle certainly cannot leak, and enables better
/// optimization.
///
/// See https://github.com/llvm/llvm-project/issues/56301 and
/// https://reviews.llvm.org/D157070 for the example and the full discussion.
///
/// FIXME: It will be much better to perform such analysis in the middle end.
/// See the comments in `CodeGenFunction::EmitCall` for example.
static bool MayCoroHandleEscape(CoroutineSuspendExpr const &S) {
  QualType AwaiterTy = S.getCommonExpr()->getType().getNonReferenceType();
  const CXXRecordDecl *AwaiterDecl = AwaiterTy->getAsCXXRecordDecl();

  // For a non-record awaiter type we know nothing; assume the handle escapes.
  if (!AwaiterDecl)
    return true;

  // A fieldless awaiter has no state in which to stash the handle, so the
  // suspend cannot leak it; any field makes us assume escape.
  //
  // TODO: We can improve this by looking into the implementation of
  // await-suspend and see if the coroutine handle is passed to foreign
  // functions.
  return !AwaiterDecl->field_empty();
}
// Emit suspend expression which roughly looks like:
//
// auto && x = CommonExpr();
@ -199,8 +229,11 @@ static LValueOrRValue emitSuspendExpression(CodeGenFunction &CGF, CGCoroData &Co
auto *SaveCall = Builder.CreateCall(CoroSave, {NullPtr});
CGF.CurCoro.InSuspendBlock = true;
CGF.CurCoro.MayCoroHandleEscape = MayCoroHandleEscape(S);
auto *SuspendRet = CGF.EmitScalarExpr(S.getSuspendExpr());
CGF.CurCoro.InSuspendBlock = false;
CGF.CurCoro.MayCoroHandleEscape = false;
if (SuspendRet != nullptr && SuspendRet->getType()->isIntegerTy(1)) {
// Veto suspension if requested by bool returning await_suspend.
BasicBlock *RealSuspendBlock =

View file

@ -391,12 +391,14 @@ llvm::DIFile *CGDebugInfo::getOrCreateFile(SourceLocation Loc) {
SourceManager &SM = CGM.getContext().getSourceManager();
StringRef FileName;
FileID FID;
std::optional<llvm::DIFile::ChecksumInfo<StringRef>> CSInfo;
if (Loc.isInvalid()) {
// The DIFile used by the CU is distinct from the main source file. Call
// createFile() below for canonicalization if the source file was specified
// with an absolute path.
FileName = TheCU->getFile()->getFilename();
CSInfo = TheCU->getFile()->getChecksum();
} else {
PresumedLoc PLoc = SM.getPresumedLoc(Loc);
FileName = PLoc.getFilename();
@ -417,13 +419,14 @@ llvm::DIFile *CGDebugInfo::getOrCreateFile(SourceLocation Loc) {
return cast<llvm::DIFile>(V);
}
// Put Checksum at a scope where it will persist past the createFile call.
SmallString<64> Checksum;
std::optional<llvm::DIFile::ChecksumKind> CSKind =
if (!CSInfo) {
std::optional<llvm::DIFile::ChecksumKind> CSKind =
computeChecksum(FID, Checksum);
std::optional<llvm::DIFile::ChecksumInfo<StringRef>> CSInfo;
if (CSKind)
CSInfo.emplace(*CSKind, Checksum);
if (CSKind)
CSInfo.emplace(*CSKind, Checksum);
}
return createFile(FileName, CSInfo, getSource(SM, SM.getFileID(Loc)));
}

View file

@ -148,7 +148,7 @@ class CGDebugInfo {
llvm::BumpPtrAllocator DebugInfoNames;
StringRef CWDName;
llvm::StringMap<llvm::TrackingMDRef> DIFileCache;
llvm::DenseMap<const char *, llvm::TrackingMDRef> DIFileCache;
llvm::DenseMap<const FunctionDecl *, llvm::TrackingMDRef> SPCache;
/// Cache declarations relevant to DW_TAG_imported_declarations (C++
/// using declarations and global alias variables) that aren't covered

View file

@ -202,7 +202,7 @@ void CodeGenFunction::EmitVarDecl(const VarDecl &D) {
return;
llvm::GlobalValue::LinkageTypes Linkage =
CGM.getLLVMLinkageVarDefinition(&D, /*IsConstant=*/false);
CGM.getLLVMLinkageVarDefinition(&D);
// FIXME: We need to force the emission/use of a guard variable for
// some variables even if we can constant-evaluate them because

View file

@ -279,8 +279,8 @@ llvm::Function *CodeGenFunction::createTLSAtExitStub(
}
const CGFunctionInfo &FI = CGM.getTypes().arrangeLLVMFunctionInfo(
getContext().IntTy, /*instanceMethod=*/false, /*chainCall=*/false,
{getContext().IntTy}, FunctionType::ExtInfo(), {}, RequiredArgs::All);
getContext().IntTy, FnInfoOpts::None, {getContext().IntTy},
FunctionType::ExtInfo(), {}, RequiredArgs::All);
// Get the stub function type, int(*)(int,...).
llvm::FunctionType *StubTy =

View file

@ -2692,8 +2692,7 @@ static LValue EmitGlobalNamedRegister(const VarDecl *VD, CodeGenModule &CGM) {
/// this context.
static bool canEmitSpuriousReferenceToVariable(CodeGenFunction &CGF,
const DeclRefExpr *E,
const VarDecl *VD,
bool IsConstant) {
const VarDecl *VD) {
// For a variable declared in an enclosing scope, do not emit a spurious
// reference even if we have a capture, as that will emit an unwarranted
// reference to our capture state, and will likely generate worse code than
@ -2726,7 +2725,7 @@ static bool canEmitSpuriousReferenceToVariable(CodeGenFunction &CGF,
// We can emit a spurious reference only if the linkage implies that we'll
// be emitting a non-interposable symbol that will be retained until link
// time.
switch (CGF.CGM.getLLVMLinkageVarDefinition(VD, IsConstant)) {
switch (CGF.CGM.getLLVMLinkageVarDefinition(VD)) {
case llvm::GlobalValue::ExternalLinkage:
case llvm::GlobalValue::LinkOnceODRLinkage:
case llvm::GlobalValue::WeakODRLinkage:
@ -2757,7 +2756,7 @@ LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
// constant value directly instead.
if (E->isNonOdrUse() == NOUR_Constant &&
(VD->getType()->isReferenceType() ||
!canEmitSpuriousReferenceToVariable(*this, E, VD, true))) {
!canEmitSpuriousReferenceToVariable(*this, E, VD))) {
VD->getAnyInitializer(VD);
llvm::Constant *Val = ConstantEmitter(*this).emitAbstract(
E->getLocation(), *VD->evaluateValue(), VD->getType());
@ -2859,7 +2858,7 @@ LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
// some reason; most likely, because it's in an outer function.
} else if (VD->isStaticLocal()) {
llvm::Constant *var = CGM.getOrCreateStaticVarDecl(
*VD, CGM.getLLVMLinkageVarDefinition(VD, /*IsConstant=*/false));
*VD, CGM.getLLVMLinkageVarDefinition(VD));
addr = Address(
var, ConvertTypeForMem(VD->getType()), getContext().getDeclAlign(VD));

View file

@ -1918,7 +1918,7 @@ ConstantLValueEmitter::tryEmitBase(const APValue::LValueBase &base) {
if (VD->isLocalVarDecl()) {
return CGM.getOrCreateStaticVarDecl(
*VD, CGM.getLLVMLinkageVarDefinition(VD, /*IsConstant=*/false));
*VD, CGM.getLLVMLinkageVarDefinition(VD));
}
}
}

View file

@ -1667,7 +1667,7 @@ Address CGOpenMPRuntime::getAddrOfDeclareTargetVar(const VarDecl *VD) {
auto AddrOfGlobal = [&VD, this]() { return CGM.GetAddrOfGlobal(VD); };
auto LinkageForVariable = [&VD, this]() {
return CGM.getLLVMLinkageVarDefinition(VD, /*IsConstant=*/false);
return CGM.getLLVMLinkageVarDefinition(VD);
};
std::vector<llvm::GlobalVariable *> GeneratedRefs;
@ -10151,6 +10151,13 @@ void CGOpenMPRuntime::registerTargetGlobalVariable(const VarDecl *VD,
std::optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
// If this is an 'extern' declaration we defer to the canonical definition and
// do not emit an offloading entry.
if (Res && *Res != OMPDeclareTargetDeclAttr::MT_Link &&
VD->hasExternalStorage())
return;
if (!Res) {
if (CGM.getLangOpts().OpenMPIsTargetDevice) {
// Register non-target variables being emitted in device code (debug info
@ -10163,7 +10170,7 @@ void CGOpenMPRuntime::registerTargetGlobalVariable(const VarDecl *VD,
auto AddrOfGlobal = [&VD, this]() { return CGM.GetAddrOfGlobal(VD); };
auto LinkageForVariable = [&VD, this]() {
return CGM.getLLVMLinkageVarDefinition(VD, /*IsConstant=*/false);
return CGM.getLLVMLinkageVarDefinition(VD);
};
std::vector<llvm::GlobalVariable *> GeneratedRefs;

View file

@ -65,9 +65,8 @@ CodeGen::arrangeFreeFunctionCall(CodeGenModule &CGM,
ArrayRef<CanQualType> argTypes,
FunctionType::ExtInfo info,
RequiredArgs args) {
return CGM.getTypes().arrangeLLVMFunctionInfo(
returnType, /*instanceMethod=*/false, /*chainCall=*/false, argTypes,
info, {}, args);
return CGM.getTypes().arrangeLLVMFunctionInfo(returnType, FnInfoOpts::None,
argTypes, info, {}, args);
}
ImplicitCXXConstructorArgs

View file

@ -572,7 +572,7 @@ llvm::ConstantInt *
CodeGenFunction::getUBSanFunctionTypeHash(QualType Ty) const {
// Remove any (C++17) exception specifications, to allow calling e.g. a
// noexcept function through a non-noexcept pointer.
if (!isa<FunctionNoProtoType>(Ty))
if (!Ty->isFunctionNoProtoType())
Ty = getContext().getFunctionTypeWithExceptionSpec(Ty, EST_None);
std::string Mangled;
llvm::raw_string_ostream Out(Mangled);
@ -683,6 +683,19 @@ static bool matchesStlAllocatorFn(const Decl *D, const ASTContext &Ctx) {
return true;
}
/// Return true if a value of type \p Ty is passed directly in memory
/// (inalloca) under the given C++ ABI.
bool CodeGenFunction::isInAllocaArgument(CGCXXABI &ABI, QualType Ty) {
  // Only C++ records can be passed inalloca; anything else is never one.
  if (const CXXRecordDecl *Record = Ty->getAsCXXRecordDecl())
    return ABI.getRecordArgABI(Record) == CGCXXABI::RAA_DirectInMemory;
  return false;
}
bool CodeGenFunction::hasInAllocaArg(const CXXMethodDecl *MD) {
return getTarget().getTriple().getArch() == llvm::Triple::x86 &&
getTarget().getCXXABI().isMicrosoft() &&
llvm::any_of(MD->parameters(), [&](ParmVarDecl *P) {
return isInAllocaArgument(CGM.getCXXABI(), P->getType());
});
}
/// Return the UBSan prologue signature for \p FD if one is available.
static llvm::Constant *getPrologueSignature(CodeGenModule &CGM,
const FunctionDecl *FD) {
@ -1447,6 +1460,17 @@ void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
// The lambda static invoker function is special, because it forwards or
// clones the body of the function call operator (but is actually static).
EmitLambdaStaticInvokeBody(cast<CXXMethodDecl>(FD));
} else if (isa<CXXMethodDecl>(FD) &&
isLambdaCallOperator(cast<CXXMethodDecl>(FD)) &&
!FnInfo.isDelegateCall() &&
cast<CXXMethodDecl>(FD)->getParent()->getLambdaStaticInvoker() &&
hasInAllocaArg(cast<CXXMethodDecl>(FD))) {
// If emitting a lambda with static invoker on X86 Windows, change
// the call operator body.
// Make sure that this is a call operator with an inalloca arg and check
// for delegate call to make sure this is the original call op and not the
// new forwarding function for the static invoker.
EmitLambdaInAllocaCallOpBody(cast<CXXMethodDecl>(FD));
} else if (FD->isDefaulted() && isa<CXXMethodDecl>(FD) &&
(cast<CXXMethodDecl>(FD)->isCopyAssignmentOperator() ||
cast<CXXMethodDecl>(FD)->isMoveAssignmentOperator())) {

View file

@ -334,6 +334,7 @@ class CodeGenFunction : public CodeGenTypeCache {
struct CGCoroInfo {
std::unique_ptr<CGCoroData> Data;
bool InSuspendBlock = false;
bool MayCoroHandleEscape = false;
CGCoroInfo();
~CGCoroInfo();
};
@ -347,6 +348,10 @@ class CodeGenFunction : public CodeGenTypeCache {
return isCoroutine() && CurCoro.InSuspendBlock;
}
bool mayCoroHandleEscape() const {
return isCoroutine() && CurCoro.MayCoroHandleEscape;
}
/// CurGD - The GlobalDecl for the current function being compiled.
GlobalDecl CurGD;
@ -1963,6 +1968,9 @@ class CodeGenFunction : public CodeGenTypeCache {
/// Check if the return value of this function requires sanitization.
bool requiresReturnValueCheck() const;
bool isInAllocaArgument(CGCXXABI &ABI, QualType Ty);
bool hasInAllocaArg(const CXXMethodDecl *MD);
llvm::BasicBlock *TerminateLandingPad = nullptr;
llvm::BasicBlock *TerminateHandler = nullptr;
llvm::SmallVector<llvm::BasicBlock *, 2> TrapBBs;
@ -2227,10 +2235,17 @@ class CodeGenFunction : public CodeGenTypeCache {
void EmitBlockWithFallThrough(llvm::BasicBlock *BB, const Stmt *S);
void EmitForwardingCallToLambda(const CXXMethodDecl *LambdaCallOperator,
CallArgList &CallArgs);
CallArgList &CallArgs,
const CGFunctionInfo *CallOpFnInfo = nullptr,
llvm::Constant *CallOpFn = nullptr);
void EmitLambdaBlockInvokeBody();
void EmitLambdaDelegatingInvokeBody(const CXXMethodDecl *MD);
void EmitLambdaStaticInvokeBody(const CXXMethodDecl *MD);
void EmitLambdaDelegatingInvokeBody(const CXXMethodDecl *MD,
CallArgList &CallArgs);
void EmitLambdaInAllocaImplFn(const CXXMethodDecl *CallOp,
const CGFunctionInfo **ImplFnInfo,
llvm::Function **ImplFn);
void EmitLambdaInAllocaCallOpBody(const CXXMethodDecl *MD);
void EmitLambdaVLACapture(const VariableArrayType *VAT, LValue LV) {
EmitStoreThroughLValue(RValue::get(VLASizeMap[VAT->getSizeExpr()]), LV);
}

View file

@ -1974,7 +1974,7 @@ CodeGenModule::getFunctionLinkage(GlobalDecl GD) {
return llvm::GlobalValue::InternalLinkage;
}
return getLLVMLinkageForDeclarator(D, Linkage, /*IsConstantVariable=*/false);
return getLLVMLinkageForDeclarator(D, Linkage);
}
llvm::ConstantInt *CodeGenModule::CreateCrossDsoCfiTypeId(llvm::Metadata *MD) {
@ -2386,7 +2386,7 @@ void CodeGenModule::SetLLVMFunctionAttributesForDefinition(const Decl *D,
// functions. If the current target's C++ ABI requires this and this is a
// member function, set its alignment accordingly.
if (getTarget().getCXXABI().areMemberFunctionsAligned()) {
if (F->getPointerAlignment(getDataLayout()) < 2 && isa<CXXMethodDecl>(D))
if (isa<CXXMethodDecl>(D) && F->getPointerAlignment(getDataLayout()) < 2)
F->setAlignment(std::max(llvm::Align(2), F->getAlign().valueOrOne()));
}
@ -3605,6 +3605,13 @@ void CodeGenModule::EmitGlobal(GlobalDecl GD) {
// Emit declaration of the must-be-emitted declare target variable.
if (std::optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD)) {
// If this variable has external storage and doesn't require special
// link handling we defer to its canonical definition.
if (VD->hasExternalStorage() &&
Res != OMPDeclareTargetDeclAttr::MT_Link)
return;
bool UnifiedMemoryEnabled =
getOpenMPRuntime().hasRequiresUnifiedSharedMemory();
if ((*Res == OMPDeclareTargetDeclAttr::MT_To ||
@ -3638,6 +3645,7 @@ void CodeGenModule::EmitGlobal(GlobalDecl GD) {
if (MustBeEmitted(Global) && MayBeEmittedEagerly(Global)) {
// Emit the definition if it can't be deferred.
EmitGlobalDefinition(GD);
addEmittedDeferredDecl(GD);
return;
}
@ -3657,7 +3665,6 @@ void CodeGenModule::EmitGlobal(GlobalDecl GD) {
// The value must be emitted, but cannot be emitted eagerly.
assert(!MayBeEmittedEagerly(Global));
addDeferredDeclToEmit(GD);
EmittedDeferredDecls[MangledName] = GD;
} else {
// Otherwise, remember that we saw a deferred decl with this name. The
// first use of the mangled name will cause it to move into
@ -4397,7 +4404,6 @@ llvm::Constant *CodeGenModule::GetOrCreateLLVMFunction(
// DeferredDeclsToEmit list, and remove it from DeferredDecls (since we
// don't need it anymore).
addDeferredDeclToEmit(DDI->second);
EmittedDeferredDecls[DDI->first] = DDI->second;
DeferredDecls.erase(DDI);
// Otherwise, there are cases we have to worry about where we're
@ -4678,7 +4684,6 @@ CodeGenModule::GetOrCreateLLVMGlobal(StringRef MangledName, llvm::Type *Ty,
// Move the potentially referenced deferred decl to the DeferredDeclsToEmit
// list, and remove it from DeferredDecls (since we don't need it anymore).
addDeferredDeclToEmit(DDI->second);
EmittedDeferredDecls[DDI->first] = DDI->second;
DeferredDecls.erase(DDI);
}
@ -5221,8 +5226,7 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D,
AddGlobalAnnotations(D, GV);
// Set the llvm linkage type as appropriate.
llvm::GlobalValue::LinkageTypes Linkage =
getLLVMLinkageVarDefinition(D, GV->isConstant());
llvm::GlobalValue::LinkageTypes Linkage = getLLVMLinkageVarDefinition(D);
// CUDA B.2.1 "The __device__ qualifier declares a variable that resides on
// the device. [...]"
@ -5415,8 +5419,9 @@ static bool isVarDeclStrongDefinition(const ASTContext &Context,
return false;
}
llvm::GlobalValue::LinkageTypes CodeGenModule::getLLVMLinkageForDeclarator(
const DeclaratorDecl *D, GVALinkage Linkage, bool IsConstantVariable) {
llvm::GlobalValue::LinkageTypes
CodeGenModule::getLLVMLinkageForDeclarator(const DeclaratorDecl *D,
GVALinkage Linkage) {
if (Linkage == GVA_Internal)
return llvm::Function::InternalLinkage;
@ -5486,10 +5491,10 @@ llvm::GlobalValue::LinkageTypes CodeGenModule::getLLVMLinkageForDeclarator(
return llvm::GlobalVariable::ExternalLinkage;
}
llvm::GlobalValue::LinkageTypes CodeGenModule::getLLVMLinkageVarDefinition(
const VarDecl *VD, bool IsConstant) {
llvm::GlobalValue::LinkageTypes
CodeGenModule::getLLVMLinkageVarDefinition(const VarDecl *VD) {
GVALinkage Linkage = getContext().GetGVALinkageForVariable(VD);
return getLLVMLinkageForDeclarator(VD, Linkage, IsConstant);
return getLLVMLinkageForDeclarator(VD, Linkage);
}
/// Replace the uses of a function that was declared with a non-proto type.
@ -5701,7 +5706,7 @@ void CodeGenModule::EmitAliasDefinition(GlobalDecl GD) {
Aliasee = GetOrCreateLLVMGlobal(AA->getAliasee(), DeclTy, LangAS::Default,
/*D=*/nullptr);
if (const auto *VD = dyn_cast<VarDecl>(GD.getDecl()))
LT = getLLVMLinkageVarDefinition(VD, D->getType().isConstQualified());
LT = getLLVMLinkageVarDefinition(VD);
else
LT = getFunctionLinkage(GD);
}
@ -6332,8 +6337,7 @@ ConstantAddress CodeGenModule::GetAddrOfGlobalTemporary(
}
// Create a global variable for this lifetime-extended temporary.
llvm::GlobalValue::LinkageTypes Linkage =
getLLVMLinkageVarDefinition(VD, Constant);
llvm::GlobalValue::LinkageTypes Linkage = getLLVMLinkageVarDefinition(VD);
if (Linkage == llvm::GlobalVariable::ExternalLinkage) {
const VarDecl *InitVD;
if (VD->isStaticDataMember() && VD->getAnyInitializer(InitVD) &&

View file

@ -361,10 +361,19 @@ class CodeGenModule : public CodeGenTypeCache {
llvm::DenseMap<llvm::StringRef, GlobalDecl> EmittedDeferredDecls;
void addEmittedDeferredDecl(GlobalDecl GD) {
if (!llvm::isa<FunctionDecl>(GD.getDecl()))
// Reemission is only needed in incremental mode.
if (!Context.getLangOpts().IncrementalExtensions)
return;
llvm::GlobalVariable::LinkageTypes L = getFunctionLinkage(GD);
if (llvm::GlobalValue::isLinkOnceLinkage(L) ||
// Assume a linkage by default that does not need reemission.
auto L = llvm::GlobalValue::ExternalLinkage;
if (llvm::isa<FunctionDecl>(GD.getDecl()))
L = getFunctionLinkage(GD);
else if (auto *VD = llvm::dyn_cast<VarDecl>(GD.getDecl()))
L = getLLVMLinkageVarDefinition(VD);
if (llvm::GlobalValue::isInternalLinkage(L) ||
llvm::GlobalValue::isLinkOnceLinkage(L) ||
llvm::GlobalValue::isWeakLinkage(L)) {
EmittedDeferredDecls[getMangledName(GD)] = GD;
}
@ -1321,12 +1330,11 @@ class CodeGenModule : public CodeGenTypeCache {
/// Returns LLVM linkage for a declarator.
llvm::GlobalValue::LinkageTypes
getLLVMLinkageForDeclarator(const DeclaratorDecl *D, GVALinkage Linkage,
bool IsConstantVariable);
getLLVMLinkageForDeclarator(const DeclaratorDecl *D, GVALinkage Linkage);
/// Returns LLVM linkage for a declarator.
llvm::GlobalValue::LinkageTypes
getLLVMLinkageVarDefinition(const VarDecl *VD, bool IsConstant);
getLLVMLinkageVarDefinition(const VarDecl *VD);
/// Emit all the global annotations.
void EmitGlobalAnnotations();

View file

@ -252,13 +252,11 @@ class CodeGenTypes {
/// this.
///
/// \param argTypes - must all actually be canonical as params
const CGFunctionInfo &arrangeLLVMFunctionInfo(CanQualType returnType,
bool instanceMethod,
bool chainCall,
ArrayRef<CanQualType> argTypes,
FunctionType::ExtInfo info,
ArrayRef<FunctionProtoType::ExtParameterInfo> paramInfos,
RequiredArgs args);
const CGFunctionInfo &arrangeLLVMFunctionInfo(
CanQualType returnType, FnInfoOpts opts, ArrayRef<CanQualType> argTypes,
FunctionType::ExtInfo info,
ArrayRef<FunctionProtoType::ExtParameterInfo> paramInfos,
RequiredArgs args);
/// Compute a new LLVM record layout object for the given record.
std::unique_ptr<CGRecordLayout> ComputeRecordLayout(const RecordDecl *D,

View file

@ -2839,7 +2839,7 @@ static bool isThreadWrapperReplaceable(const VarDecl *VD,
static llvm::GlobalValue::LinkageTypes
getThreadLocalWrapperLinkage(const VarDecl *VD, CodeGen::CodeGenModule &CGM) {
llvm::GlobalValue::LinkageTypes VarLinkage =
CGM.getLLVMLinkageVarDefinition(VD, /*IsConstant=*/false);
CGM.getLLVMLinkageVarDefinition(VD);
// For internal linkage variables, we don't need an external or weak wrapper.
if (llvm::GlobalValue::isLocalLinkage(VarLinkage))

View file

@ -1379,8 +1379,7 @@ llvm::GlobalValue::LinkageTypes MicrosoftCXXABI::getCXXDestructorLinkage(
case Dtor_Base:
// The base destructor most closely tracks the user-declared constructor, so
// we delegate back to the normal declarator case.
return CGM.getLLVMLinkageForDeclarator(Dtor, Linkage,
/*IsConstantVariable=*/false);
return CGM.getLLVMLinkageForDeclarator(Dtor, Linkage);
case Dtor_Complete:
// The complete destructor is like an inline function, but it may be
// imported and therefore must be exported as well. This requires changing

View file

@ -148,6 +148,13 @@ bool LoongArchABIInfo::detectFARsEligibleStructHelper(
if (const ConstantArrayType *ATy = getContext().getAsConstantArrayType(Ty)) {
uint64_t ArraySize = ATy->getSize().getZExtValue();
QualType EltTy = ATy->getElementType();
// Non-zero-length arrays of empty records make the struct ineligible to be
// passed via FARs in C++.
if (const auto *RTy = EltTy->getAs<RecordType>()) {
if (ArraySize != 0 && isa<CXXRecordDecl>(RTy->getDecl()) &&
isEmptyRecord(getContext(), EltTy, true, true))
return false;
}
CharUnits EltSize = getContext().getTypeSizeInChars(EltTy);
for (uint64_t i = 0; i < ArraySize; ++i) {
if (!detectFARsEligibleStructHelper(EltTy, CurOff, Field1Ty, Field1Off,
@ -163,7 +170,7 @@ bool LoongArchABIInfo::detectFARsEligibleStructHelper(
// copy constructor are not eligible for the FP calling convention.
if (getRecordArgABI(Ty, CGT.getCXXABI()))
return false;
if (isEmptyRecord(getContext(), Ty, true))
if (isEmptyRecord(getContext(), Ty, true, true))
return true;
const RecordDecl *RD = RTy->getDecl();
// Unions aren't eligible unless they're empty (which is caught above).
@ -222,6 +229,8 @@ bool LoongArchABIInfo::detectFARsEligibleStruct(
if (!detectFARsEligibleStructHelper(Ty, CharUnits::Zero(), Field1Ty,
Field1Off, Field2Ty, Field2Off))
return false;
if (!Field1Ty)
return false;
// Not really a candidate if we have a single int but no float.
if (Field1Ty && !Field2Ty && !Field1Ty->isFloatingPointTy())
return false;

View file

@ -8,7 +8,6 @@
#include "ABIInfoImpl.h"
#include "TargetInfo.h"
#include "llvm/TargetParser/RISCVTargetParser.h"
using namespace clang;
using namespace clang::CodeGen;
@ -152,6 +151,13 @@ bool RISCVABIInfo::detectFPCCEligibleStructHelper(QualType Ty, CharUnits CurOff,
if (const ConstantArrayType *ATy = getContext().getAsConstantArrayType(Ty)) {
uint64_t ArraySize = ATy->getSize().getZExtValue();
QualType EltTy = ATy->getElementType();
// Non-zero-length arrays of empty records make the struct ineligible for
// the FP calling convention in C++.
if (const auto *RTy = EltTy->getAs<RecordType>()) {
if (ArraySize != 0 && isa<CXXRecordDecl>(RTy->getDecl()) &&
isEmptyRecord(getContext(), EltTy, true, true))
return false;
}
CharUnits EltSize = getContext().getTypeSizeInChars(EltTy);
for (uint64_t i = 0; i < ArraySize; ++i) {
bool Ret = detectFPCCEligibleStructHelper(EltTy, CurOff, Field1Ty,
@ -168,7 +174,7 @@ bool RISCVABIInfo::detectFPCCEligibleStructHelper(QualType Ty, CharUnits CurOff,
// copy constructor are not eligible for the FP calling convention.
if (getRecordArgABI(Ty, CGT.getCXXABI()))
return false;
if (isEmptyRecord(getContext(), Ty, true))
if (isEmptyRecord(getContext(), Ty, true, true))
return true;
const RecordDecl *RD = RTy->getDecl();
// Unions aren't eligible unless they're empty (which is caught above).
@ -238,6 +244,8 @@ bool RISCVABIInfo::detectFPCCEligibleStruct(QualType Ty, llvm::Type *&Field1Ty,
NeededArgFPRs = 0;
bool IsCandidate = detectFPCCEligibleStructHelper(
Ty, CharUnits::Zero(), Field1Ty, Field1Off, Field2Ty, Field2Off);
if (!Field1Ty)
return false;
// Not really a candidate if we have a single int but no float.
if (Field1Ty && !Field2Ty && !Field1Ty->isFloatingPointTy())
return false;
@ -315,11 +323,15 @@ ABIArgInfo RISCVABIInfo::coerceVLSVector(QualType Ty) const {
assert(VT->getElementType()->isBuiltinType() && "expected builtin type!");
const auto *BT = VT->getElementType()->castAs<BuiltinType>();
unsigned EltSize = getContext().getTypeSize(BT);
auto VScale =
getContext().getTargetInfo().getVScaleRange(getContext().getLangOpts());
// The MinNumElts is simplified from equation:
// NumElts / VScale =
// (EltSize * NumElts / (VScale * RVVBitsPerBlock))
// * (RVVBitsPerBlock / EltSize)
llvm::ScalableVectorType *ResType =
llvm::ScalableVectorType::get(CGT.ConvertType(VT->getElementType()),
llvm::RISCV::RVVBitsPerBlock / EltSize);
llvm::ScalableVectorType::get(CGT.ConvertType(VT->getElementType()),
VT->getNumElements() / VScale->first);
return ABIArgInfo::getDirect(ResType);
}

View file

@ -140,7 +140,8 @@ class X86_32ABIInfo : public ABIInfo {
Class classify(QualType Ty) const;
ABIArgInfo classifyReturnType(QualType RetTy, CCState &State) const;
ABIArgInfo classifyArgumentType(QualType RetTy, CCState &State) const;
ABIArgInfo classifyArgumentType(QualType RetTy, CCState &State,
bool isDelegateCall) const;
/// Updates the number of available free registers, returns
/// true if any registers were allocated.
@ -737,8 +738,8 @@ void X86_32ABIInfo::runVectorCallFirstPass(CGFunctionInfo &FI, CCState &State) c
}
}
ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
CCState &State) const {
ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty, CCState &State,
bool isDelegateCall) const {
// FIXME: Set alignment on indirect arguments.
bool IsFastCall = State.CC == llvm::CallingConv::X86_FastCall;
bool IsRegCall = State.CC == llvm::CallingConv::X86_RegCall;
@ -753,6 +754,12 @@ ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI());
if (RAA == CGCXXABI::RAA_Indirect) {
return getIndirectResult(Ty, false, State);
} else if (isDelegateCall) {
// Avoid having different alignments on delegate call args by always
// setting the alignment to 4, which is what we do for inallocas.
ABIArgInfo Res = getIndirectResult(Ty, false, State);
Res.setIndirectAlign(CharUnits::fromQuantity(4));
return Res;
} else if (RAA == CGCXXABI::RAA_DirectInMemory) {
// The field index doesn't matter, we'll fix it up later.
return ABIArgInfo::getInAlloca(/*FieldIndex=*/0);
@ -940,7 +947,8 @@ void X86_32ABIInfo::computeInfo(CGFunctionInfo &FI) const {
if (State.IsPreassigned.test(I))
continue;
Args[I].info = classifyArgumentType(Args[I].type, State);
Args[I].info =
classifyArgumentType(Args[I].type, State, FI.isDelegateCall());
UsedInAlloca |= (Args[I].info.getKind() == ABIArgInfo::InAlloca);
}

View file

@ -4741,13 +4741,8 @@ Action *Driver::ConstructPhaseAction(
}
case phases::Backend: {
if (isUsingLTO() && TargetDeviceOffloadKind == Action::OFK_None) {
types::ID Output;
if (Args.hasArg(options::OPT_S))
Output = types::TY_LTO_IR;
else if (Args.hasArg(options::OPT_ffat_lto_objects))
Output = types::TY_PP_Asm;
else
Output = types::TY_LTO_BC;
types::ID Output =
Args.hasArg(options::OPT_S) ? types::TY_LTO_IR : types::TY_LTO_BC;
return C.MakeAction<BackendJobAction>(Input, Output);
}
if (isUsingLTO(/* IsOffload */ true) &&
@ -4941,6 +4936,12 @@ void Driver::BuildJobs(Compilation &C) const {
(void)C.getArgs().hasArg(options::OPT_driver_mode);
(void)C.getArgs().hasArg(options::OPT_rsp_quoting);
bool HasAssembleJob = llvm::any_of(C.getJobs(), [](auto &J) {
// Match ClangAs and other derived assemblers of Tool. ClangAs uses a
// longer ShortName "clang integrated assembler" while other assemblers just
// use "assembler".
return strstr(J.getCreator().getShortName(), "assembler");
});
for (Arg *A : C.getArgs()) {
// FIXME: It would be nice to be able to send the argument to the
// DiagnosticsEngine, so that extra values, position, and so on could be
@ -4970,7 +4971,7 @@ void Driver::BuildJobs(Compilation &C) const {
// already been warned about.
if (!IsCLMode() || !A->getOption().matches(options::OPT_UNKNOWN)) {
if (A->getOption().hasFlag(options::TargetSpecific) &&
!A->isIgnoredTargetSpecific()) {
!A->isIgnoredTargetSpecific() && !HasAssembleJob) {
Diag(diag::err_drv_unsupported_opt_for_target)
<< A->getSpelling() << getTargetTriple();
} else {

View file

@ -37,6 +37,8 @@ static const SanitizerMask NeedsUbsanCxxRt =
SanitizerKind::Vptr | SanitizerKind::CFI;
static const SanitizerMask NotAllowedWithTrap = SanitizerKind::Vptr;
static const SanitizerMask NotAllowedWithMinimalRuntime = SanitizerKind::Vptr;
static const SanitizerMask NotAllowedWithExecuteOnly =
SanitizerKind::Function | SanitizerKind::KCFI;
static const SanitizerMask RequiresPIE =
SanitizerKind::DataFlow | SanitizerKind::Scudo;
static const SanitizerMask NeedsUnwindTables =
@ -141,6 +143,16 @@ static std::string describeSanitizeArg(const llvm::opt::Arg *A,
/// Sanitizers set.
static std::string toString(const clang::SanitizerSet &Sanitizers);
/// Return true if an execute-only target disallows data access to code
/// sections.
static bool isExecuteOnlyTarget(const llvm::Triple &Triple,
const llvm::opt::ArgList &Args) {
if (Triple.isPS5())
return true;
return Args.hasFlagNoClaim(options::OPT_mexecute_only,
options::OPT_mno_execute_only, false);
}
static void validateSpecialCaseListFormat(const Driver &D,
std::vector<std::string> &SCLFiles,
unsigned MalformedSCLErrorDiagID,
@ -395,6 +407,22 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
DiagnosedKinds |= SanitizerKind::Function;
}
}
// -fsanitize=function and -fsanitize=kcfi instrument indirect function
// calls to load a type hash before the function label. Therefore, an
// execute-only target doesn't support the function and kcfi sanitizers.
const llvm::Triple &Triple = TC.getTriple();
if (isExecuteOnlyTarget(Triple, Args)) {
if (SanitizerMask KindsToDiagnose =
Add & NotAllowedWithExecuteOnly & ~DiagnosedKinds) {
if (DiagnoseErrors) {
std::string Desc = describeSanitizeArg(Arg, KindsToDiagnose);
D.Diag(diag::err_drv_argument_not_allowed_with)
<< Desc << Triple.str();
}
DiagnosedKinds |= KindsToDiagnose;
}
Add &= ~NotAllowedWithExecuteOnly;
}
// FIXME: Make CFI on member function calls compatible with cross-DSO CFI.
// There are currently two problems:
@ -457,6 +485,10 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
if (MinimalRuntime) {
Add &= ~NotAllowedWithMinimalRuntime;
}
// NotAllowedWithExecuteOnly is silently discarded on an execute-only
// target if implicitly enabled through group expansion.
if (isExecuteOnlyTarget(Triple, Args))
Add &= ~NotAllowedWithExecuteOnly;
if (CfiCrossDso)
Add &= ~SanitizerKind::CFIMFCall;
Add &= Supported;

View file

@ -427,6 +427,12 @@ ToolChain::getDefaultUnwindTableLevel(const ArgList &Args) const {
return UnwindTableLevel::None;
}
unsigned ToolChain::GetDefaultDwarfVersion() const {
// TODO: Remove the RISC-V special case when R_RISCV_SET_ULEB128 linker
// support becomes more widely available.
return getTriple().isRISCV() ? 4 : 5;
}
Tool *ToolChain::getClang() const {
if (!Clang)
Clang.reset(new tools::Clang(*this, useIntegratedBackend()));

View file

@ -30,6 +30,7 @@ void aix::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfoList &Inputs,
const ArgList &Args,
const char *LinkingOutput) const {
const Driver &D = getToolChain().getDriver();
ArgStringList CmdArgs;
const bool IsArch32Bit = getToolChain().getTriple().isArch32Bit();
@ -38,6 +39,11 @@ void aix::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
if (!IsArch32Bit && !IsArch64Bit)
llvm_unreachable("Unsupported bit width value.");
if (Arg *A = C.getArgs().getLastArg(options::OPT_G)) {
D.Diag(diag::err_drv_unsupported_opt_for_target)
<< A->getSpelling() << D.getTargetTriple();
}
// Specify the mode in which the as(1) command operates.
if (IsArch32Bit) {
CmdArgs.push_back("-a32");

View file

@ -12,6 +12,7 @@
#include "clang/Driver/Driver.h"
#include "clang/Driver/DriverDiagnostic.h"
#include "clang/Driver/Options.h"
#include "llvm/TargetParser/Host.h"
#include "llvm/TargetParser/LoongArchTargetParser.h"
using namespace clang::driver;
@ -126,23 +127,11 @@ void loongarch::getLoongArchTargetFeatures(const Driver &D,
const llvm::Triple &Triple,
const ArgList &Args,
std::vector<StringRef> &Features) {
StringRef ArchName;
if (const Arg *A = Args.getLastArg(options::OPT_march_EQ)) {
if (!llvm::LoongArch::isValidArchName(A->getValue())) {
D.Diag(clang::diag::err_drv_invalid_arch_name) << A->getAsString(Args);
return;
}
std::string ArchName;
if (const Arg *A = Args.getLastArg(options::OPT_march_EQ))
ArchName = A->getValue();
}
// TODO: handle -march=native and -mtune=xx.
// Select a default arch name.
if (ArchName.empty() && Triple.isLoongArch64())
ArchName = "loongarch64";
if (!ArchName.empty())
llvm::LoongArch::getArchFeatures(ArchName, Features);
ArchName = postProcessTargetCPUString(ArchName, Triple);
llvm::LoongArch::getArchFeatures(ArchName, Features);
// Select floating-point features determined by -mdouble-float,
// -msingle-float, -msoft-float and -mfpu.
@ -187,3 +176,25 @@ void loongarch::getLoongArchTargetFeatures(const Driver &D,
if (Arg *A = Args.getLastArgNoClaim(options::OPT_mfpu_EQ))
A->ignoreTargetSpecific();
}
std::string loongarch::postProcessTargetCPUString(const std::string &CPU,
const llvm::Triple &Triple) {
std::string CPUString = CPU;
if (CPUString == "native") {
CPUString = llvm::sys::getHostCPUName();
if (CPUString == "generic")
CPUString = llvm::LoongArch::getDefaultArch(Triple.isLoongArch64());
}
if (CPUString.empty())
CPUString = llvm::LoongArch::getDefaultArch(Triple.isLoongArch64());
return CPUString;
}
std::string loongarch::getLoongArchTargetCPU(const llvm::opt::ArgList &Args,
const llvm::Triple &Triple) {
std::string CPU;
// If we have -march, use that.
if (const Arg *A = Args.getLastArg(options::OPT_march_EQ))
CPU = A->getValue();
return postProcessTargetCPUString(CPU, Triple);
}

View file

@ -23,6 +23,12 @@ void getLoongArchTargetFeatures(const Driver &D, const llvm::Triple &Triple,
StringRef getLoongArchABI(const Driver &D, const llvm::opt::ArgList &Args,
const llvm::Triple &Triple);
std::string postProcessTargetCPUString(const std::string &CPU,
const llvm::Triple &Triple);
std::string getLoongArchTargetCPU(const llvm::opt::ArgList &Args,
const llvm::Triple &Triple);
} // end namespace loongarch
} // end namespace tools
} // end namespace driver

View file

@ -267,4 +267,10 @@ void x86::getX86TargetFeatures(const Driver &D, const llvm::Triple &Triple,
<< A->getSpelling() << Scope;
}
}
// -mno-gather, -mno-scatter support
if (Args.hasArg(options::OPT_mno_gather))
Features.push_back("+prefer-no-gather");
if (Args.hasArg(options::OPT_mno_scatter))
Features.push_back("+prefer-no-scatter");
}

View file

@ -56,6 +56,7 @@
#include "llvm/Support/YAMLParser.h"
#include "llvm/TargetParser/ARMTargetParserCommon.h"
#include "llvm/TargetParser/Host.h"
#include "llvm/TargetParser/LoongArchTargetParser.h"
#include "llvm/TargetParser/RISCVTargetParser.h"
#include <cctype>
@ -1853,10 +1854,20 @@ void Clang::AddAArch64TargetArgs(const ArgList &Args,
void Clang::AddLoongArchTargetArgs(const ArgList &Args,
ArgStringList &CmdArgs) const {
const llvm::Triple &Triple = getToolChain().getTriple();
CmdArgs.push_back("-target-abi");
CmdArgs.push_back(loongarch::getLoongArchABI(getToolChain().getDriver(), Args,
getToolChain().getTriple())
.data());
CmdArgs.push_back(
loongarch::getLoongArchABI(getToolChain().getDriver(), Args, Triple)
.data());
// Handle -mtune.
if (const Arg *A = Args.getLastArg(options::OPT_mtune_EQ)) {
std::string TuneCPU = A->getValue();
TuneCPU = loongarch::postProcessTargetCPUString(TuneCPU, Triple);
CmdArgs.push_back("-tune-cpu");
CmdArgs.push_back(Args.MakeArgString(TuneCPU));
}
}
void Clang::AddMIPSTargetArgs(const ArgList &Args,
@ -2052,6 +2063,12 @@ void Clang::AddPPCTargetArgs(const ArgList &Args,
} else if (V == "vec-extabi") {
VecExtabi = true;
A->claim();
} else if (V == "elfv1") {
ABIName = "elfv1";
A->claim();
} else if (V == "elfv2") {
ABIName = "elfv2";
A->claim();
} else if (V != "altivec")
// The ppc64 linux abis are all "altivec" abis by default. Accept and ignore
// the option if given as we don't have backend support for any targets
@ -7359,22 +7376,6 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (SplitLTOUnit)
CmdArgs.push_back("-fsplit-lto-unit");
if (Arg *A = Args.getLastArg(options::OPT_ffat_lto_objects,
options::OPT_fno_fat_lto_objects)) {
if (IsUsingLTO && A->getOption().matches(options::OPT_ffat_lto_objects)) {
assert(LTOMode == LTOK_Full || LTOMode == LTOK_Thin);
if (!Triple.isOSBinFormatELF()) {
D.Diag(diag::err_drv_unsupported_opt_for_target)
<< A->getAsString(Args) << TC.getTripleString();
}
CmdArgs.push_back(Args.MakeArgString(
Twine("-flto=") + (LTOMode == LTOK_Thin ? "thin" : "full")));
CmdArgs.push_back("-flto-unit");
CmdArgs.push_back("-ffat-lto-objects");
A->render(Args, CmdArgs);
}
}
if (Arg *A = Args.getLastArg(options::OPT_fglobal_isel,
options::OPT_fno_global_isel)) {
CmdArgs.push_back("-mllvm");

View file

@ -474,6 +474,10 @@ std::string tools::getCPUName(const Driver &D, const ArgList &Args,
case llvm::Triple::wasm32:
case llvm::Triple::wasm64:
return std::string(getWebAssemblyTargetCPU(Args));
case llvm::Triple::loongarch32:
case llvm::Triple::loongarch64:
return loongarch::getLoongArchTargetCPU(Args, T);
}
}
@ -617,11 +621,6 @@ void tools::addLTOOptions(const ToolChain &ToolChain, const ArgList &Args,
PluginName + Suffix,
Plugin);
CmdArgs.push_back(Args.MakeArgString(Twine(PluginPrefix) + Plugin));
} else {
// Tell LLD to find and use .llvm.lto section in regular relocatable object
// files
if (Args.hasArg(options::OPT_ffat_lto_objects))
CmdArgs.push_back("--fat-lto-objects");
}
const char *PluginOptPrefix = IsOSAIX ? "-bplugin_opt:" : "-plugin-opt=";

View file

@ -1874,6 +1874,12 @@ static bool findBiarchMultilibs(const Driver &D,
.flag("-m64", /*Disallow=*/true)
.flag("-mx32")
.makeMultilib();
Multilib Alt32sparc = MultilibBuilder()
.gccSuffix("/sparcv8plus")
.includeSuffix("/sparcv8plus")
.flag("-m32")
.flag("-m64", /*Disallow=*/true)
.makeMultilib();
// GCC toolchain for IAMCU doesn't have crtbegin.o, so look for libgcc.a.
FilterNonExistent NonExistent(
@ -1885,10 +1891,14 @@ static bool findBiarchMultilibs(const Driver &D,
const bool IsX32 = TargetTriple.isX32();
if (TargetTriple.isArch32Bit() && !NonExistent(Alt32))
Want = WANT64;
if (TargetTriple.isArch32Bit() && !NonExistent(Alt32sparc))
Want = WANT64;
else if (TargetTriple.isArch64Bit() && IsX32 && !NonExistent(Altx32))
Want = WANT64;
else if (TargetTriple.isArch64Bit() && !IsX32 && !NonExistent(Alt64))
Want = WANT32;
else if (TargetTriple.isArch64Bit() && !NonExistent(Alt32sparc))
Want = WANT64;
else {
if (TargetTriple.isArch32Bit())
Want = NeedsBiarchSuffix ? WANT64 : WANT32;
@ -1919,6 +1929,7 @@ static bool findBiarchMultilibs(const Driver &D,
Result.Multilibs.push_back(Alt64);
Result.Multilibs.push_back(Alt32);
Result.Multilibs.push_back(Altx32);
Result.Multilibs.push_back(Alt32sparc);
Result.Multilibs.FilterOut(NonExistent);
@ -1932,7 +1943,8 @@ static bool findBiarchMultilibs(const Driver &D,
if (Result.SelectedMultilibs.back() == Alt64 ||
Result.SelectedMultilibs.back() == Alt32 ||
Result.SelectedMultilibs.back() == Altx32)
Result.SelectedMultilibs.back() == Altx32 ||
Result.SelectedMultilibs.back() == Alt32sparc)
Result.BiarchSibling = Default;
return true;
@ -2215,6 +2227,7 @@ void Generic_GCC::GCCInstallationDetector::AddDefaultGCCPrefixes(
// so we need to find those /usr/gcc/*/lib/gcc libdirs and go with
// /usr/gcc/<version> as a prefix.
SmallVector<std::pair<GCCVersion, std::string>, 8> SolarisPrefixes;
std::string PrefixDir = concat(SysRoot, "/usr/gcc");
std::error_code EC;
for (llvm::vfs::directory_iterator LI = D.getVFS().dir_begin(PrefixDir, EC),
@ -2232,8 +2245,13 @@ void Generic_GCC::GCCInstallationDetector::AddDefaultGCCPrefixes(
if (!D.getVFS().exists(CandidateLibPath))
continue;
Prefixes.push_back(CandidatePrefix);
SolarisPrefixes.emplace_back(
std::make_pair(CandidateVersion, CandidatePrefix));
}
// Sort in reverse order so GCCInstallationDetector::init picks the latest.
std::sort(SolarisPrefixes.rbegin(), SolarisPrefixes.rend());
for (auto p : SolarisPrefixes)
Prefixes.emplace_back(p.second);
return;
}

View file

@ -383,6 +383,10 @@ constructHexagonLinkArgs(Compilation &C, const JobAction &JA,
if (HTC.ShouldLinkCXXStdlib(Args))
HTC.AddCXXStdlibLibArgs(Args, CmdArgs);
}
const ToolChain::path_list &LibPaths = HTC.getFilePaths();
for (const auto &LibPath : LibPaths)
CmdArgs.push_back(Args.MakeArgString(StringRef("-L") + LibPath));
Args.ClaimAllArgs(options::OPT_L);
return;
}
@ -441,6 +445,7 @@ constructHexagonLinkArgs(Compilation &C, const JobAction &JA,
const ToolChain::path_list &LibPaths = HTC.getFilePaths();
for (const auto &LibPath : LibPaths)
CmdArgs.push_back(Args.MakeArgString(StringRef("-L") + LibPath));
Args.ClaimAllArgs(options::OPT_L);
//----------------------------------------------------------------------------
//

View file

@ -47,11 +47,24 @@ void solaris::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
Exec, CmdArgs, Inputs, Output));
}
static bool getPIE(const ArgList &Args, const ToolChain &TC) {
if (Args.hasArg(options::OPT_shared) || Args.hasArg(options::OPT_static) ||
Args.hasArg(options::OPT_r))
return false;
Arg *A = Args.getLastArg(options::OPT_pie, options::OPT_no_pie,
options::OPT_nopie);
if (!A)
return TC.isPIEDefault(Args);
return A->getOption().matches(options::OPT_pie);
}
void solaris::Linker::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfo &Output,
const InputInfoList &Inputs,
const ArgList &Args,
const char *LinkingOutput) const {
const bool IsPIE = getPIE(Args, getToolChain());
ArgStringList CmdArgs;
// Demangle C++ names in errors
@ -62,6 +75,11 @@ void solaris::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("_start");
}
if (IsPIE) {
CmdArgs.push_back("-z");
CmdArgs.push_back("type=pie");
}
if (Args.hasArg(options::OPT_static)) {
CmdArgs.push_back("-Bstatic");
CmdArgs.push_back("-dn");
@ -113,8 +131,13 @@ void solaris::Linker::ConstructJob(Compilation &C, const JobAction &JA,
values_xpg = "values-xpg4.o";
CmdArgs.push_back(
Args.MakeArgString(getToolChain().GetFilePath(values_xpg)));
CmdArgs.push_back(
Args.MakeArgString(getToolChain().GetFilePath("crtbegin.o")));
const char *crtbegin = nullptr;
if (Args.hasArg(options::OPT_shared) || IsPIE)
crtbegin = "crtbeginS.o";
else
crtbegin = "crtbegin.o";
CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath(crtbegin)));
// Add crtfastmath.o if available and fast math is enabled.
getToolChain().addFastMathRuntimeIfAvailable(Args, CmdArgs);
}
@ -151,24 +174,32 @@ void solaris::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-lgcc");
CmdArgs.push_back("-lm");
}
const SanitizerArgs &SA = getToolChain().getSanitizerArgs(Args);
if (NeedsSanitizerDeps) {
linkSanitizerRuntimeDeps(getToolChain(), CmdArgs);
// Work around Solaris/amd64 ld bug when calling __tls_get_addr directly.
// However, ld -z relax=transtls is available since Solaris 11.2, but not
// in Illumos.
const SanitizerArgs &SA = getToolChain().getSanitizerArgs(Args);
if (getToolChain().getTriple().getArch() == llvm::Triple::x86_64 &&
(SA.needsAsanRt() || SA.needsStatsRt() ||
(SA.needsUbsanRt() && !SA.requiresMinimalRuntime())))
CmdArgs.push_back("-zrelax=transtls");
}
// Avoid AsanInitInternal cycle, Issue #64126.
if (getToolChain().getTriple().isX86() && SA.needsSharedRt() &&
SA.needsAsanRt())
CmdArgs.push_back("-znow");
}
if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles,
options::OPT_r)) {
CmdArgs.push_back(
Args.MakeArgString(getToolChain().GetFilePath("crtend.o")));
if (Args.hasArg(options::OPT_shared) || IsPIE)
CmdArgs.push_back(
Args.MakeArgString(getToolChain().GetFilePath("crtendS.o")));
else
CmdArgs.push_back(
Args.MakeArgString(getToolChain().GetFilePath("crtend.o")));
CmdArgs.push_back(
Args.MakeArgString(getToolChain().GetFilePath("crtn.o")));
}

View file

@ -581,7 +581,8 @@ void UnwrappedLineParser::calculateBraceTypes(bool ExpectClassBody) {
ProbablyBracedList =
ProbablyBracedList ||
(NextTok->is(tok::l_brace) && LBraceStack.back().PrevTok &&
LBraceStack.back().PrevTok->is(tok::identifier));
LBraceStack.back().PrevTok->isOneOf(tok::identifier,
tok::greater));
ProbablyBracedList =
ProbablyBracedList ||
@ -2464,7 +2465,7 @@ bool UnwrappedLineParser::parseParens(TokenType AmpAmpTokenType) {
const auto *PrevPrev = Prev ? Prev->getPreviousNonComment() : nullptr;
const bool Blacklisted =
PrevPrev &&
(PrevPrev->is(tok::kw___attribute) ||
(PrevPrev->isOneOf(tok::kw___attribute, tok::kw_decltype) ||
(SeenEqual &&
(PrevPrev->isOneOf(tok::kw_if, tok::kw_while) ||
PrevPrev->endsSequence(tok::kw_constexpr, tok::kw_if))));

View file

@ -15,6 +15,7 @@
#include "clang/Basic/FileEntry.h"
#include "clang/Basic/LangStandard.h"
#include "clang/Basic/Sarif.h"
#include "clang/Basic/Stack.h"
#include "clang/Frontend/ASTUnit.h"
#include "clang/Frontend/CompilerInstance.h"
#include "clang/Frontend/FrontendDiagnostic.h"
@ -1150,6 +1151,10 @@ void ASTFrontendAction::ExecuteAction() {
CompilerInstance &CI = getCompilerInstance();
if (!CI.hasPreprocessor())
return;
// This is a fallback: If the client forgets to invoke this, we mark the
// current stack as the bottom. Though not optimal, this could help prevent
// stack overflow during deep recursion.
clang::noteBottomOfStack();
// FIXME: Move the truncation aspect of this into Sema, we delayed this till
// here so the source manager would be initialized.

View file

@ -36,7 +36,7 @@
// because the OpenMP overlay requires constexpr functions here but prior to
// c++14 void return functions could not be constexpr.
#pragma push_macro("__DEVICE_VOID__")
#ifdef __OPENMP_NVPTX__ && defined(__cplusplus) && __cplusplus < 201402L
#if defined(__OPENMP_NVPTX__) && defined(__cplusplus) && __cplusplus < 201402L
#define __DEVICE_VOID__ static __attribute__((always_inline, nothrow))
#else
#define __DEVICE_VOID__ __DEVICE__

View file

@ -317,7 +317,7 @@ __device__ __attribute__((pure)) __2f16 __ocml_log_2f16(__2f16);
__device__ __attribute__((pure)) __2f16 __ocml_log10_2f16(__2f16);
__device__ __attribute__((pure)) __2f16 __ocml_log2_2f16(__2f16);
#if HIP_VERSION_MAJOR * 100 + HIP_VERSION_MINOR >= 560 || 1
#if HIP_VERSION_MAJOR * 100 + HIP_VERSION_MINOR >= 560
#define __DEPRECATED_SINCE_HIP_560(X) __attribute__((deprecated(X)))
#else
#define __DEPRECATED_SINCE_HIP_560(X)

View file

@ -328,14 +328,4 @@ static __inline int __get_cpuid_count (unsigned int __leaf,
return 1;
}
// If MS extensions are enabled, __cpuidex is defined as a builtin which will
// conflict with the __cpuidex definition below.
#ifndef _MSC_EXTENSIONS
static __inline void __cpuidex (int __cpu_info[4], int __leaf, int __subleaf)
{
__cpuid_count(__leaf, __subleaf, __cpu_info[0], __cpu_info[1], __cpu_info[2],
__cpu_info[3]);
}
#endif
#endif /* __CPUID_H */

View file

@ -92,12 +92,19 @@ llvm::Error IncrementalExecutor::runCtors() const {
llvm::Expected<llvm::orc::ExecutorAddr>
IncrementalExecutor::getSymbolAddress(llvm::StringRef Name,
SymbolNameKind NameKind) const {
auto Sym = (NameKind == LinkerName) ? Jit->lookupLinkerMangled(Name)
: Jit->lookup(Name);
using namespace llvm::orc;
auto SO = makeJITDylibSearchOrder({&Jit->getMainJITDylib(),
Jit->getPlatformJITDylib().get(),
Jit->getProcessSymbolsJITDylib().get()});
if (!Sym)
return Sym.takeError();
return Sym;
ExecutionSession &ES = Jit->getExecutionSession();
auto SymOrErr =
ES.lookup(SO, (NameKind == LinkerName) ? ES.intern(Name)
: Jit->mangleAndIntern(Name));
if (auto Err = SymOrErr.takeError())
return std::move(Err);
return SymOrErr->getAddress();
}
} // end namespace clang

View file

@ -57,6 +57,26 @@ static unsigned getCharWidth(tok::TokenKind kind, const TargetInfo &Target) {
}
}
static unsigned getEncodingPrefixLen(tok::TokenKind kind) {
switch (kind) {
default:
llvm_unreachable("Unknown token type!");
case tok::char_constant:
case tok::string_literal:
return 0;
case tok::utf8_char_constant:
case tok::utf8_string_literal:
return 2;
case tok::wide_char_constant:
case tok::wide_string_literal:
case tok::utf16_char_constant:
case tok::utf16_string_literal:
case tok::utf32_char_constant:
case tok::utf32_string_literal:
return 1;
}
}
static CharSourceRange MakeCharSourceRange(const LangOptions &Features,
FullSourceLoc TokLoc,
const char *TokBegin,
@ -343,7 +363,9 @@ static unsigned ProcessCharEscape(const char *ThisTokBegin,
Diag(Diags, Features, Loc, ThisTokBegin, EscapeBegin, ThisTokBuf,
diag::err_unevaluated_string_invalid_escape_sequence)
<< StringRef(EscapeBegin, ThisTokBuf - EscapeBegin);
HadError = true;
}
return ResultChar;
}
@ -1917,9 +1939,22 @@ void StringLiteralParser::init(ArrayRef<Token> StringToks){
// Remember if we see any wide or utf-8/16/32 strings.
// Also check for illegal concatenations.
if (isUnevaluated() && Tok.getKind() != tok::string_literal) {
if (Diags)
Diags->Report(Tok.getLocation(), diag::err_unevaluated_string_prefix);
hadError = true;
if (Diags) {
SourceLocation PrefixEndLoc = Lexer::AdvanceToTokenCharacter(
Tok.getLocation(), getEncodingPrefixLen(Tok.getKind()), SM,
Features);
CharSourceRange Range =
CharSourceRange::getCharRange({Tok.getLocation(), PrefixEndLoc});
StringRef Prefix(SM.getCharacterData(Tok.getLocation()),
getEncodingPrefixLen(Tok.getKind()));
Diags->Report(Tok.getLocation(),
Features.CPlusPlus26
? diag::err_unevaluated_string_prefix
: diag::warn_unevaluated_string_prefix)
<< Prefix << Features.CPlusPlus << FixItHint::CreateRemoval(Range);
}
if (Features.CPlusPlus26)
hadError = true;
} else if (Tok.isNot(Kind) && Tok.isNot(tok::string_literal)) {
if (isOrdinary()) {
Kind = Tok.getKind();

View file

@ -1016,10 +1016,23 @@ Decl *Parser::ParseStaticAssertDeclaration(SourceLocation &DeclEnd) {
return nullptr;
}
if (isTokenStringLiteral())
AssertMessage = ParseUnevaluatedStringLiteralExpression();
else if (getLangOpts().CPlusPlus26)
bool ParseAsExpression = false;
if (getLangOpts().CPlusPlus26) {
for (unsigned I = 0;; ++I) {
const Token &T = GetLookAheadToken(I);
if (T.is(tok::r_paren))
break;
if (!tok::isStringLiteral(Tok.getKind())) {
ParseAsExpression = true;
break;
}
}
}
if (ParseAsExpression)
AssertMessage = ParseConstantExpressionInExprEvalContext();
else if (tok::isStringLiteral(Tok.getKind()))
AssertMessage = ParseUnevaluatedStringLiteralExpression();
else {
Diag(Tok, diag::err_expected_string_literal)
<< /*Source='static_assert'*/ 1;

View file

@ -62,6 +62,7 @@ bool Parser::isCXXDeclarationStatement(
case tok::kw_static_assert:
case tok::kw__Static_assert:
return true;
case tok::coloncolon:
case tok::identifier: {
if (DisambiguatingWithExpression) {
RevertingTentativeParsingAction TPA(*this);

View file

@ -123,6 +123,18 @@ ShouldDiagnoseAvailabilityInContext(Sema &S, AvailabilityResult K,
const NamedDecl *OffendingDecl) {
assert(K != AR_Available && "Expected an unavailable declaration here!");
// If this was defined using CF_OPTIONS, etc. then ignore the diagnostic.
auto DeclLoc = Ctx->getBeginLoc();
// This is only a problem in Foundation's C++ implementation for CF_OPTIONS.
if (DeclLoc.isMacroID() && S.getLangOpts().CPlusPlus &&
isa<TypedefDecl>(OffendingDecl)) {
StringRef MacroName = S.getPreprocessor().getImmediateMacroName(DeclLoc);
if (MacroName == "CF_OPTIONS" || MacroName == "OBJC_OPTIONS" ||
MacroName == "SWIFT_OPTIONS" || MacroName == "NS_OPTIONS") {
return false;
}
}
// Checks if we should emit the availability diagnostic in the context of C.
auto CheckContext = [&](const Decl *C) {
if (K == AR_NotYetIntroduced) {

View file

@ -935,6 +935,14 @@ void CastOperation::CheckDynamicCast() {
<< isClangCL;
}
// For a dynamic_cast to a final type, IR generation might emit a reference
// to the vtable.
if (DestRecord) {
auto *DestDecl = DestRecord->getAsCXXRecordDecl();
if (DestDecl->isEffectivelyFinal())
Self.MarkVTableUsed(OpRange.getBegin(), DestDecl);
}
// Done. Everything else is run-time checks.
Kind = CK_Dynamic;
}

View file

@ -9154,7 +9154,8 @@ static FunctionDecl *CreateNewFunctionDecl(Sema &SemaRef, Declarator &D,
bool HasPrototype =
(D.isFunctionDeclarator() && D.getFunctionTypeInfo().hasPrototype) ||
(D.getDeclSpec().isTypeRep() &&
D.getDeclSpec().getRepAsType().get()->isFunctionProtoType()) ||
SemaRef.GetTypeFromParser(D.getDeclSpec().getRepAsType(), nullptr)
->isFunctionProtoType()) ||
(!R->getAsAdjusted<FunctionType>() && R->isFunctionProtoType());
assert(
(HasPrototype || !SemaRef.getLangOpts().requiresStrictPrototypes()) &&

View file

@ -13880,56 +13880,6 @@ inline QualType Sema::CheckBitwiseOperands(ExprResult &LHS, ExprResult &RHS,
return InvalidOperands(Loc, LHS, RHS);
}
// Diagnose cases where the user write a logical and/or but probably meant a
// bitwise one. We do this when one of the operands is a non-bool integer and
// the other is a constant.
void Sema::diagnoseLogicalInsteadOfBitwise(Expr *Op1, Expr *Op2,
SourceLocation Loc,
BinaryOperatorKind Opc) {
if (Op1->getType()->isIntegerType() && !Op1->getType()->isBooleanType() &&
Op2->getType()->isIntegerType() && !Op2->isValueDependent() &&
// Don't warn in macros or template instantiations.
!Loc.isMacroID() && !inTemplateInstantiation() &&
!Op2->getExprLoc().isMacroID() &&
!Op1->getExprLoc().isMacroID()) {
bool IsOp1InMacro = Op1->getExprLoc().isMacroID();
bool IsOp2InMacro = Op2->getExprLoc().isMacroID();
// Exclude the specific expression from triggering the warning.
if (!(IsOp1InMacro && IsOp2InMacro && Op1->getSourceRange() == Op2->getSourceRange())) {
// If the RHS can be constant folded, and if it constant folds to something
// that isn't 0 or 1 (which indicate a potential logical operation that
// happened to fold to true/false) then warn.
// Parens on the RHS are ignored.
// If the RHS can be constant folded, and if it constant folds to something
// that isn't 0 or 1 (which indicate a potential logical operation that
// happened to fold to true/false) then warn.
// Parens on the RHS are ignored.
Expr::EvalResult EVResult;
if (Op2->EvaluateAsInt(EVResult, Context)) {
llvm::APSInt Result = EVResult.Val.getInt();
if ((getLangOpts().Bool && !Op2->getType()->isBooleanType() &&
!Op2->getExprLoc().isMacroID()) ||
(Result != 0 && Result != 1)) {
Diag(Loc, diag::warn_logical_instead_of_bitwise)
<< Op2->getSourceRange() << (Opc == BO_LAnd ? "&&" : "||");
// Suggest replacing the logical operator with the bitwise version
Diag(Loc, diag::note_logical_instead_of_bitwise_change_operator)
<< (Opc == BO_LAnd ? "&" : "|")
<< FixItHint::CreateReplacement(
SourceRange(Loc, getLocForEndOfToken(Loc)),
Opc == BO_LAnd ? "&" : "|");
if (Opc == BO_LAnd)
// Suggest replacing "Foo() && kNonZero" with "Foo()"
Diag(Loc, diag::note_logical_instead_of_bitwise_remove_constant)
<< FixItHint::CreateRemoval(SourceRange(
getLocForEndOfToken(Op1->getEndLoc()), Op2->getEndLoc()));
}
}
}
}
}
// C99 6.5.[13,14]
inline QualType Sema::CheckLogicalOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc,
@ -13948,6 +13898,9 @@ inline QualType Sema::CheckLogicalOperands(ExprResult &LHS, ExprResult &RHS,
}
}
if (EnumConstantInBoolContext)
Diag(Loc, diag::warn_enum_constant_in_bool_context);
// WebAssembly tables can't be used with logical operators.
QualType LHSTy = LHS.get()->getType();
QualType RHSTy = RHS.get()->getType();
@ -13958,14 +13911,40 @@ inline QualType Sema::CheckLogicalOperands(ExprResult &LHS, ExprResult &RHS,
return InvalidOperands(Loc, LHS, RHS);
}
if (EnumConstantInBoolContext) {
// Warn when converting the enum constant to a boolean
Diag(Loc, diag::warn_enum_constant_in_bool_context);
} else {
// Diagnose cases where the user write a logical and/or but probably meant a
// bitwise one.
diagnoseLogicalInsteadOfBitwise(LHS.get(), RHS.get(), Loc, Opc);
diagnoseLogicalInsteadOfBitwise(RHS.get(), LHS.get(), Loc, Opc);
// Diagnose cases where the user write a logical and/or but probably meant a
// bitwise one. We do this when the LHS is a non-bool integer and the RHS
// is a constant.
if (!EnumConstantInBoolContext && LHS.get()->getType()->isIntegerType() &&
!LHS.get()->getType()->isBooleanType() &&
RHS.get()->getType()->isIntegerType() && !RHS.get()->isValueDependent() &&
// Don't warn in macros or template instantiations.
!Loc.isMacroID() && !inTemplateInstantiation()) {
// If the RHS can be constant folded, and if it constant folds to something
// that isn't 0 or 1 (which indicate a potential logical operation that
// happened to fold to true/false) then warn.
// Parens on the RHS are ignored.
Expr::EvalResult EVResult;
if (RHS.get()->EvaluateAsInt(EVResult, Context)) {
llvm::APSInt Result = EVResult.Val.getInt();
if ((getLangOpts().Bool && !RHS.get()->getType()->isBooleanType() &&
!RHS.get()->getExprLoc().isMacroID()) ||
(Result != 0 && Result != 1)) {
Diag(Loc, diag::warn_logical_instead_of_bitwise)
<< RHS.get()->getSourceRange() << (Opc == BO_LAnd ? "&&" : "||");
// Suggest replacing the logical operator with the bitwise version
Diag(Loc, diag::note_logical_instead_of_bitwise_change_operator)
<< (Opc == BO_LAnd ? "&" : "|")
<< FixItHint::CreateReplacement(
SourceRange(Loc, getLocForEndOfToken(Loc)),
Opc == BO_LAnd ? "&" : "|");
if (Opc == BO_LAnd)
// Suggest replacing "Foo() && kNonZero" with "Foo()"
Diag(Loc, diag::note_logical_instead_of_bitwise_remove_constant)
<< FixItHint::CreateRemoval(
SourceRange(getLocForEndOfToken(LHS.get()->getEndLoc()),
RHS.get()->getEndLoc()));
}
}
}
if (!Context.getLangOpts().CPlusPlus) {

View file

@ -19,6 +19,7 @@
#include "clang/AST/CharUnits.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprConcepts.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/AST/Type.h"
@ -9072,16 +9073,24 @@ Sema::BuildExprRequirement(
MultiLevelTemplateArgumentList MLTAL(Param, TAL.asArray(),
/*Final=*/false);
MLTAL.addOuterRetainedLevels(TPL->getDepth());
Expr *IDC = Param->getTypeConstraint()->getImmediatelyDeclaredConstraint();
const TypeConstraint *TC = Param->getTypeConstraint();
assert(TC && "Type Constraint cannot be null here");
auto *IDC = TC->getImmediatelyDeclaredConstraint();
assert(IDC && "ImmediatelyDeclaredConstraint can't be null here.");
ExprResult Constraint = SubstExpr(IDC, MLTAL);
if (Constraint.isInvalid()) {
Status = concepts::ExprRequirement::SS_ExprSubstitutionFailure;
} else {
SubstitutedConstraintExpr =
cast<ConceptSpecializationExpr>(Constraint.get());
if (!SubstitutedConstraintExpr->isSatisfied())
Status = concepts::ExprRequirement::SS_ConstraintsNotSatisfied;
return new (Context) concepts::ExprRequirement(
concepts::createSubstDiagAt(*this, IDC->getExprLoc(),
[&](llvm::raw_ostream &OS) {
IDC->printPretty(OS, /*Helper=*/nullptr,
getPrintingPolicy());
}),
IsSimple, NoexceptLoc, ReturnTypeRequirement);
}
SubstitutedConstraintExpr =
cast<ConceptSpecializationExpr>(Constraint.get());
if (!SubstitutedConstraintExpr->isSatisfied())
Status = concepts::ExprRequirement::SS_ConstraintsNotSatisfied;
}
return new (Context) concepts::ExprRequirement(E, IsSimple, NoexceptLoc,
ReturnTypeRequirement, Status,

View file

@ -513,42 +513,21 @@ void LookupResult::resolveKind() {
const NamedDecl *HasNonFunction = nullptr;
llvm::SmallVector<const NamedDecl *, 4> EquivalentNonFunctions;
llvm::BitVector RemovedDecls(N);
for (unsigned I = 0; I < N; I++) {
unsigned UniqueTagIndex = 0;
unsigned I = 0;
while (I < N) {
const NamedDecl *D = Decls[I]->getUnderlyingDecl();
D = cast<NamedDecl>(D->getCanonicalDecl());
// Ignore an invalid declaration unless it's the only one left.
// Also ignore HLSLBufferDecl which not have name conflict with other Decls.
if ((D->isInvalidDecl() || isa<HLSLBufferDecl>(D)) &&
N - RemovedDecls.count() > 1) {
RemovedDecls.set(I);
if ((D->isInvalidDecl() || isa<HLSLBufferDecl>(D)) && !(I == 0 && N == 1)) {
Decls[I] = Decls[--N];
continue;
}
// C++ [basic.scope.hiding]p2:
// A class name or enumeration name can be hidden by the name of
// an object, function, or enumerator declared in the same
// scope. If a class or enumeration name and an object, function,
// or enumerator are declared in the same scope (in any order)
// with the same name, the class or enumeration name is hidden
// wherever the object, function, or enumerator name is visible.
if (HideTags && isa<TagDecl>(D)) {
bool Hidden = false;
for (auto *OtherDecl : Decls) {
if (canHideTag(OtherDecl) &&
getContextForScopeMatching(OtherDecl)->Equals(
getContextForScopeMatching(Decls[I]))) {
RemovedDecls.set(I);
Hidden = true;
break;
}
}
if (Hidden)
continue;
}
std::optional<unsigned> ExistingI;
// Redeclarations of types via typedef can occur both within a scope
@ -581,7 +560,7 @@ void LookupResult::resolveKind() {
if (isPreferredLookupResult(getSema(), getLookupKind(), Decls[I],
Decls[*ExistingI]))
Decls[*ExistingI] = Decls[I];
RemovedDecls.set(I);
Decls[I] = Decls[--N];
continue;
}
@ -592,6 +571,7 @@ void LookupResult::resolveKind() {
} else if (isa<TagDecl>(D)) {
if (HasTag)
Ambiguous = true;
UniqueTagIndex = I;
HasTag = true;
} else if (isa<FunctionTemplateDecl>(D)) {
HasFunction = true;
@ -607,7 +587,7 @@ void LookupResult::resolveKind() {
if (getSema().isEquivalentInternalLinkageDeclaration(HasNonFunction,
D)) {
EquivalentNonFunctions.push_back(D);
RemovedDecls.set(I);
Decls[I] = Decls[--N];
continue;
}
@ -615,6 +595,28 @@ void LookupResult::resolveKind() {
}
HasNonFunction = D;
}
I++;
}
// C++ [basic.scope.hiding]p2:
// A class name or enumeration name can be hidden by the name of
// an object, function, or enumerator declared in the same
// scope. If a class or enumeration name and an object, function,
// or enumerator are declared in the same scope (in any order)
// with the same name, the class or enumeration name is hidden
// wherever the object, function, or enumerator name is visible.
// But it's still an error if there are distinct tag types found,
// even if they're not visible. (ref?)
if (N > 1 && HideTags && HasTag && !Ambiguous &&
(HasFunction || HasNonFunction || HasUnresolved)) {
const NamedDecl *OtherDecl = Decls[UniqueTagIndex ? 0 : N - 1];
if (isa<TagDecl>(Decls[UniqueTagIndex]->getUnderlyingDecl()) &&
getContextForScopeMatching(Decls[UniqueTagIndex])->Equals(
getContextForScopeMatching(OtherDecl)) &&
canHideTag(OtherDecl))
Decls[UniqueTagIndex] = Decls[--N];
else
Ambiguous = true;
}
// FIXME: This diagnostic should really be delayed until we're done with
@ -623,15 +625,9 @@ void LookupResult::resolveKind() {
getSema().diagnoseEquivalentInternalLinkageDeclarations(
getNameLoc(), HasNonFunction, EquivalentNonFunctions);
// Remove decls by replacing them with decls from the end (which
// means that we need to iterate from the end) and then truncating
// to the new size.
for (int I = RemovedDecls.find_last(); I >= 0; I = RemovedDecls.find_prev(I))
Decls[I] = Decls[--N];
Decls.truncate(N);
if ((HasNonFunction && (HasFunction || HasUnresolved)) ||
(HideTags && HasTag && (HasFunction || HasNonFunction || HasUnresolved)))
if (HasNonFunction && (HasFunction || HasUnresolved))
Ambiguous = true;
if (Ambiguous)

View file

@ -2276,9 +2276,9 @@ QualType TemplateInstantiator::TransformSubstTemplateTypeParmPackType(
getPackIndex(Pack), Arg, TL.getNameLoc());
}
template<typename EntityPrinter>
static concepts::Requirement::SubstitutionDiagnostic *
createSubstDiag(Sema &S, TemplateDeductionInfo &Info, EntityPrinter Printer) {
createSubstDiag(Sema &S, TemplateDeductionInfo &Info,
concepts::EntityPrinter Printer) {
SmallString<128> Message;
SourceLocation ErrorLoc;
if (Info.hasSFINAEDiagnostic()) {
@ -2302,6 +2302,19 @@ createSubstDiag(Sema &S, TemplateDeductionInfo &Info, EntityPrinter Printer) {
StringRef(MessageBuf, Message.size())};
}
concepts::Requirement::SubstitutionDiagnostic *
concepts::createSubstDiagAt(Sema &S, SourceLocation Location,
EntityPrinter Printer) {
SmallString<128> Entity;
llvm::raw_svector_ostream OS(Entity);
Printer(OS);
char *EntityBuf = new (S.Context) char[Entity.size()];
llvm::copy(Entity, EntityBuf);
return new (S.Context) concepts::Requirement::SubstitutionDiagnostic{
/*SubstitutedEntity=*/StringRef(EntityBuf, Entity.size()),
/*DiagLoc=*/Location, /*DiagMessage=*/StringRef()};
}
ExprResult TemplateInstantiator::TransformRequiresTypeParams(
SourceLocation KWLoc, SourceLocation RBraceLoc, const RequiresExpr *RE,
RequiresExprBodyDecl *Body, ArrayRef<ParmVarDecl *> Params,

View file

@ -7478,6 +7478,10 @@ StmtResult
TreeTransform<Derived>::TransformCompoundStmt(CompoundStmt *S,
bool IsStmtExpr) {
Sema::CompoundScopeRAII CompoundScope(getSema());
Sema::FPFeaturesStateRAII FPSave(getSema());
if (S->hasStoredFPFeatures())
getSema().resetFPOptions(
S->getStoredFPFeatures().applyOverrides(getSema().getLangOpts()));
const Stmt *ExprResult = S->getStmtExprResult();
bool SubStmtInvalid = false;

View file

@ -181,6 +181,13 @@ namespace clang {
static void setAnonymousDeclForMerging(ASTReader &Reader, DeclContext *DC,
unsigned Index, NamedDecl *D);
/// Commit to a primary definition of the class RD, which is known to be
/// a definition of the class. We might not have read the definition data
/// for it yet. If we haven't then allocate placeholder definition data
/// now too.
static CXXRecordDecl *getOrFakePrimaryClassDefinition(ASTReader &Reader,
CXXRecordDecl *RD);
/// Results from loading a RedeclarableDecl.
class RedeclarableResult {
Decl *MergeWith;
@ -598,7 +605,13 @@ void ASTDeclReader::VisitDecl(Decl *D) {
auto *LexicalDC = readDeclAs<DeclContext>();
if (!LexicalDC)
LexicalDC = SemaDC;
DeclContext *MergedSemaDC = Reader.MergedDeclContexts.lookup(SemaDC);
// If the context is a class, we might not have actually merged it yet, in
// the case where the definition comes from an update record.
DeclContext *MergedSemaDC;
if (auto *RD = dyn_cast<CXXRecordDecl>(SemaDC))
MergedSemaDC = getOrFakePrimaryClassDefinition(Reader, RD);
else
MergedSemaDC = Reader.MergedDeclContexts.lookup(SemaDC);
// Avoid calling setLexicalDeclContext() directly because it uses
// Decl::getASTContext() internally which is unsafe during derialization.
D->setDeclContextsImpl(MergedSemaDC ? MergedSemaDC : SemaDC, LexicalDC,
@ -3198,6 +3211,32 @@ uint64_t ASTReader::getGlobalBitOffset(ModuleFile &M, uint64_t LocalOffset) {
return LocalOffset + M.GlobalBitOffset;
}
CXXRecordDecl *
ASTDeclReader::getOrFakePrimaryClassDefinition(ASTReader &Reader,
CXXRecordDecl *RD) {
// Try to dig out the definition.
auto *DD = RD->DefinitionData;
if (!DD)
DD = RD->getCanonicalDecl()->DefinitionData;
// If there's no definition yet, then DC's definition is added by an update
// record, but we've not yet loaded that update record. In this case, we
// commit to DC being the canonical definition now, and will fix this when
// we load the update record.
if (!DD) {
DD = new (Reader.getContext()) struct CXXRecordDecl::DefinitionData(RD);
RD->setCompleteDefinition(true);
RD->DefinitionData = DD;
RD->getCanonicalDecl()->DefinitionData = DD;
// Track that we did this horrible thing so that we can fix it later.
Reader.PendingFakeDefinitionData.insert(
std::make_pair(DD, ASTReader::PendingFakeDefinitionKind::Fake));
}
return DD->Definition;
}
/// Find the context in which we should search for previous declarations when
/// looking for declarations to merge.
DeclContext *ASTDeclReader::getPrimaryContextForMerging(ASTReader &Reader,
@ -3205,29 +3244,8 @@ DeclContext *ASTDeclReader::getPrimaryContextForMerging(ASTReader &Reader,
if (auto *ND = dyn_cast<NamespaceDecl>(DC))
return ND->getOriginalNamespace();
if (auto *RD = dyn_cast<CXXRecordDecl>(DC)) {
// Try to dig out the definition.
auto *DD = RD->DefinitionData;
if (!DD)
DD = RD->getCanonicalDecl()->DefinitionData;
// If there's no definition yet, then DC's definition is added by an update
// record, but we've not yet loaded that update record. In this case, we
// commit to DC being the canonical definition now, and will fix this when
// we load the update record.
if (!DD) {
DD = new (Reader.getContext()) struct CXXRecordDecl::DefinitionData(RD);
RD->setCompleteDefinition(true);
RD->DefinitionData = DD;
RD->getCanonicalDecl()->DefinitionData = DD;
// Track that we did this horrible thing so that we can fix it later.
Reader.PendingFakeDefinitionData.insert(
std::make_pair(DD, ASTReader::PendingFakeDefinitionKind::Fake));
}
return DD->Definition;
}
if (auto *RD = dyn_cast<CXXRecordDecl>(DC))
return getOrFakePrimaryClassDefinition(Reader, RD);
if (auto *RD = dyn_cast<RecordDecl>(DC))
return RD->getDefinition();

View file

@ -580,7 +580,7 @@ void ASTDeclWriter::VisitDeclaratorDecl(DeclaratorDecl *D) {
}
void ASTDeclWriter::VisitFunctionDecl(FunctionDecl *D) {
static_assert(DeclContext::NumFunctionDeclBits == 30,
static_assert(DeclContext::NumFunctionDeclBits == 31,
"You need to update the serializer after you change the "
"FunctionDeclBits");
@ -1495,7 +1495,7 @@ void ASTDeclWriter::VisitCXXMethodDecl(CXXMethodDecl *D) {
}
void ASTDeclWriter::VisitCXXConstructorDecl(CXXConstructorDecl *D) {
static_assert(DeclContext::NumCXXConstructorDeclBits == 21,
static_assert(DeclContext::NumCXXConstructorDeclBits == 20,
"You need to update the serializer after you change the "
"CXXConstructorDeclBits");

View file

@ -3773,6 +3773,33 @@ SYMBOL(viewable_range, std::ranges::, <ranges>)
SYMBOL(wistream_view, std::ranges::, <ranges>)
SYMBOL(zip_transform_view, std::ranges::, <ranges>)
SYMBOL(zip_view, std::ranges::, <ranges>)
SYMBOL(all, std::ranges::views::, <ranges>)
SYMBOL(all_t, std::ranges::views::, <ranges>)
SYMBOL(as_const, std::ranges::views::, <ranges>)
SYMBOL(as_rvalue, std::ranges::views::, <ranges>)
SYMBOL(common, std::ranges::views::, <ranges>)
SYMBOL(counted, std::ranges::views::, <ranges>)
SYMBOL(drop, std::ranges::views::, <ranges>)
SYMBOL(drop_while, std::ranges::views::, <ranges>)
SYMBOL(elements, std::ranges::views::, <ranges>)
SYMBOL(empty, std::ranges::views::, <ranges>)
SYMBOL(filter, std::ranges::views::, <ranges>)
SYMBOL(iota, std::ranges::views::, <ranges>)
SYMBOL(istream, std::ranges::views::, <ranges>)
SYMBOL(istream, std::ranges::views::, <iosfwd>)
SYMBOL(join, std::ranges::views::, <ranges>)
SYMBOL(join_with, std::ranges::views::, <ranges>)
SYMBOL(keys, std::ranges::views::, <ranges>)
SYMBOL(lazy_split, std::ranges::views::, <ranges>)
SYMBOL(reverse, std::ranges::views::, <ranges>)
SYMBOL(single, std::ranges::views::, <ranges>)
SYMBOL(split, std::ranges::views::, <ranges>)
SYMBOL(take, std::ranges::views::, <ranges>)
SYMBOL(take_while, std::ranges::views::, <ranges>)
SYMBOL(transform, std::ranges::views::, <ranges>)
SYMBOL(values, std::ranges::views::, <ranges>)
SYMBOL(zip, std::ranges::views::, <ranges>)
SYMBOL(zip_transform, std::ranges::views::, <ranges>)
SYMBOL(ECMAScript, std::regex_constants::, <regex>)
SYMBOL(awk, std::regex_constants::, <regex>)
SYMBOL(basic, std::regex_constants::, <regex>)
@ -3817,3 +3844,30 @@ SYMBOL(get_id, std::this_thread::, <thread>)
SYMBOL(sleep_for, std::this_thread::, <thread>)
SYMBOL(sleep_until, std::this_thread::, <thread>)
SYMBOL(yield, std::this_thread::, <thread>)
SYMBOL(all, std::views::, <ranges>)
SYMBOL(all_t, std::views::, <ranges>)
SYMBOL(as_const, std::views::, <ranges>)
SYMBOL(as_rvalue, std::views::, <ranges>)
SYMBOL(common, std::views::, <ranges>)
SYMBOL(counted, std::views::, <ranges>)
SYMBOL(drop, std::views::, <ranges>)
SYMBOL(drop_while, std::views::, <ranges>)
SYMBOL(elements, std::views::, <ranges>)
SYMBOL(empty, std::views::, <ranges>)
SYMBOL(filter, std::views::, <ranges>)
SYMBOL(iota, std::views::, <ranges>)
SYMBOL(istream, std::views::, <ranges>)
SYMBOL(istream, std::views::, <iosfwd>)
SYMBOL(join, std::views::, <ranges>)
SYMBOL(join_with, std::views::, <ranges>)
SYMBOL(keys, std::views::, <ranges>)
SYMBOL(lazy_split, std::views::, <ranges>)
SYMBOL(reverse, std::views::, <ranges>)
SYMBOL(single, std::views::, <ranges>)
SYMBOL(split, std::views::, <ranges>)
SYMBOL(take, std::views::, <ranges>)
SYMBOL(take_while, std::views::, <ranges>)
SYMBOL(transform, std::views::, <ranges>)
SYMBOL(values, std::views::, <ranges>)
SYMBOL(zip, std::views::, <ranges>)
SYMBOL(zip_transform, std::views::, <ranges>)

View file

@ -588,19 +588,34 @@ INTERCEPTOR(char*, strncpy, char *to, const char *from, uptr size) {
return REAL(strncpy)(to, from, size);
}
INTERCEPTOR(long, strtol, const char *nptr, char **endptr, int base) {
void *ctx;
ASAN_INTERCEPTOR_ENTER(ctx, strtol);
ENSURE_ASAN_INITED();
if (!flags()->replace_str) {
return REAL(strtol)(nptr, endptr, base);
}
template <typename Fn>
static ALWAYS_INLINE auto StrtolImpl(void *ctx, Fn real, const char *nptr,
char **endptr, int base)
-> decltype(real(nullptr, nullptr, 0)) {
if (!flags()->replace_str)
return real(nptr, endptr, base);
char *real_endptr;
long result = REAL(strtol)(nptr, &real_endptr, base);
auto res = real(nptr, &real_endptr, base);
StrtolFixAndCheck(ctx, nptr, endptr, real_endptr, base);
return result;
return res;
}
# define INTERCEPTOR_STRTO_BASE(ret_type, func) \
INTERCEPTOR(ret_type, func, const char *nptr, char **endptr, int base) { \
void *ctx; \
ASAN_INTERCEPTOR_ENTER(ctx, func); \
ENSURE_ASAN_INITED(); \
return StrtolImpl(ctx, REAL(func), nptr, endptr, base); \
}
INTERCEPTOR_STRTO_BASE(long, strtol)
INTERCEPTOR_STRTO_BASE(long long, strtoll)
# if SANITIZER_GLIBC
INTERCEPTOR_STRTO_BASE(long, __isoc23_strtol)
INTERCEPTOR_STRTO_BASE(long long, __isoc23_strtoll)
# endif
INTERCEPTOR(int, atoi, const char *nptr) {
void *ctx;
ASAN_INTERCEPTOR_ENTER(ctx, atoi);
@ -639,20 +654,6 @@ INTERCEPTOR(long, atol, const char *nptr) {
return result;
}
#if ASAN_INTERCEPT_ATOLL_AND_STRTOLL
INTERCEPTOR(long long, strtoll, const char *nptr, char **endptr, int base) {
void *ctx;
ASAN_INTERCEPTOR_ENTER(ctx, strtoll);
ENSURE_ASAN_INITED();
if (!flags()->replace_str) {
return REAL(strtoll)(nptr, endptr, base);
}
char *real_endptr;
long long result = REAL(strtoll)(nptr, &real_endptr, base);
StrtolFixAndCheck(ctx, nptr, endptr, real_endptr, base);
return result;
}
INTERCEPTOR(long long, atoll, const char *nptr) {
void *ctx;
ASAN_INTERCEPTOR_ENTER(ctx, atoll);
@ -666,7 +667,6 @@ INTERCEPTOR(long long, atoll, const char *nptr) {
ASAN_READ_STRING(ctx, nptr, (real_endptr - nptr) + 1);
return result;
}
#endif // ASAN_INTERCEPT_ATOLL_AND_STRTOLL
#if ASAN_INTERCEPT___CXA_ATEXIT || ASAN_INTERCEPT_ATEXIT
static void AtCxaAtexit(void *unused) {
@ -751,11 +751,13 @@ void InitializeAsanInterceptors() {
ASAN_INTERCEPT_FUNC(atoi);
ASAN_INTERCEPT_FUNC(atol);
ASAN_INTERCEPT_FUNC(strtol);
#if ASAN_INTERCEPT_ATOLL_AND_STRTOLL
ASAN_INTERCEPT_FUNC(atoll);
ASAN_INTERCEPT_FUNC(strtol);
ASAN_INTERCEPT_FUNC(strtoll);
#endif
# if SANITIZER_GLIBC
ASAN_INTERCEPT_FUNC(__isoc23_strtol);
ASAN_INTERCEPT_FUNC(__isoc23_strtoll);
# endif
// Intecept jump-related functions.
ASAN_INTERCEPT_FUNC(longjmp);

View file

@ -42,12 +42,10 @@ void InitializePlatformInterceptors();
// Use macro to describe if specific function should be
// intercepted on a given platform.
#if !SANITIZER_WINDOWS
# define ASAN_INTERCEPT_ATOLL_AND_STRTOLL 1
# define ASAN_INTERCEPT__LONGJMP 1
# define ASAN_INTERCEPT_INDEX 1
# define ASAN_INTERCEPT_PTHREAD_CREATE 1
#else
# define ASAN_INTERCEPT_ATOLL_AND_STRTOLL 0
# define ASAN_INTERCEPT__LONGJMP 0
# define ASAN_INTERCEPT_INDEX 0
# define ASAN_INTERCEPT_PTHREAD_CREATE 0

View file

@ -65,6 +65,7 @@ INTERCEPT_WRAP_W_W(_expand_dbg)
INTERCEPT_LIBRARY_FUNCTION(atoi);
INTERCEPT_LIBRARY_FUNCTION(atol);
INTERCEPT_LIBRARY_FUNCTION(atoll);
INTERCEPT_LIBRARY_FUNCTION(frexp);
INTERCEPT_LIBRARY_FUNCTION(longjmp);
#if SANITIZER_INTERCEPT_MEMCHR
@ -91,6 +92,7 @@ INTERCEPT_LIBRARY_FUNCTION(strspn);
INTERCEPT_LIBRARY_FUNCTION(strstr);
INTERCEPT_LIBRARY_FUNCTION(strtok);
INTERCEPT_LIBRARY_FUNCTION(strtol);
INTERCEPT_LIBRARY_FUNCTION(strtoll);
INTERCEPT_LIBRARY_FUNCTION(wcslen);
INTERCEPT_LIBRARY_FUNCTION(wcsnlen);

View file

@ -7,7 +7,7 @@
// Out-of-line LSE atomics helpers. Ported from libgcc library.
// N = {1, 2, 4, 8}
// M = {1, 2, 4, 8, 16}
// ORDER = {'relax', 'acq', 'rel', 'acq_rel'}
// ORDER = {'relax', 'acq', 'rel', 'acq_rel', 'sync'}
// Routines implemented:
//
// iM __aarch64_casM_ORDER(iM expected, iM desired, iM *ptr)
@ -35,8 +35,8 @@ HIDDEN(___aarch64_have_lse_atomics)
#endif
// Generate mnemonics for
// L_cas: SIZE: 1,2,4,8,16 MODEL: 1,2,3,4
// L_swp L_ldadd L_ldclr L_ldeor L_ldset: SIZE: 1,2,4,8 MODEL: 1,2,3,4
// L_cas: SIZE: 1,2,4,8,16 MODEL: 1,2,3,4,5
// L_swp L_ldadd L_ldclr L_ldeor L_ldset: SIZE: 1,2,4,8 MODEL: 1,2,3,4,5
#if SIZE == 1
#define S b
@ -64,24 +64,44 @@ HIDDEN(___aarch64_have_lse_atomics)
#define L
#define M 0x000000
#define N 0x000000
#define BARRIER
#elif MODEL == 2
#define SUFF _acq
#define A a
#define L
#define M 0x400000
#define N 0x800000
#define BARRIER
#elif MODEL == 3
#define SUFF _rel
#define A
#define L l
#define M 0x008000
#define N 0x400000
#define BARRIER
#elif MODEL == 4
#define SUFF _acq_rel
#define A a
#define L l
#define M 0x408000
#define N 0xc00000
#define BARRIER
#elif MODEL == 5
#define SUFF _sync
#ifdef L_swp
// swp has _acq semantics.
#define A a
#define L
#define M 0x400000
#define N 0x800000
#else
// All other _sync functions have _seq semantics.
#define A a
#define L l
#define M 0x408000
#define N 0xc00000
#endif
#define BARRIER dmb ish
#else
#error
#endif // MODEL
@ -96,7 +116,12 @@ HIDDEN(___aarch64_have_lse_atomics)
#endif
#define NAME(BASE) GLUE4(__aarch64_, BASE, SIZE, SUFF)
#if MODEL == 5
// Drop A for _sync functions.
#define LDXR GLUE3(ld, xr, S)
#else
#define LDXR GLUE4(ld, A, xr, S)
#endif
#define STXR GLUE4(st, L, xr, S)
// Define temporary registers.
@ -136,9 +161,15 @@ DEFINE_COMPILERRT_OUTLINE_FUNCTION_UNMANGLED(NAME(cas))
STXR w(tmp1), s(1), [x2]
cbnz w(tmp1), 0b
1:
BARRIER
ret
#else
#if MODEL == 5
// Drop A for _sync functions.
#define LDXP GLUE2(ld, xp)
#else
#define LDXP GLUE3(ld, A, xp)
#endif
#define STXP GLUE3(st, L, xp)
#ifdef HAS_ASM_LSE
#define CASP GLUE3(casp, A, L) x0, x1, x2, x3, [x4]
@ -159,6 +190,7 @@ DEFINE_COMPILERRT_OUTLINE_FUNCTION_UNMANGLED(NAME(cas))
STXP w(tmp2), x2, x3, [x4]
cbnz w(tmp2), 0b
1:
BARRIER
ret
#endif
END_COMPILERRT_OUTLINE_FUNCTION(NAME(cas))
@ -180,6 +212,7 @@ DEFINE_COMPILERRT_OUTLINE_FUNCTION_UNMANGLED(NAME(swp))
LDXR s(0), [x1]
STXR w(tmp1), s(tmp0), [x1]
cbnz w(tmp1), 0b
BARRIER
ret
END_COMPILERRT_OUTLINE_FUNCTION(NAME(swp))
#endif // L_swp
@ -224,6 +257,7 @@ DEFINE_COMPILERRT_OUTLINE_FUNCTION_UNMANGLED(NAME(LDNM))
OP s(tmp1), s(0), s(tmp0)
STXR w(tmp2), s(tmp1), [x1]
cbnz w(tmp2), 0b
BARRIER
ret
END_COMPILERRT_OUTLINE_FUNCTION(NAME(LDNM))
#endif // L_ldadd L_ldclr L_ldeor L_ldset

View file

@ -113,7 +113,7 @@ void __clear_cache(void *start, void *end) {
#elif defined(__linux__) || defined(__OpenBSD__)
// Pre-R6 may not be globalized. And some implementations may give strange
// synci_step. So, let's use libc call for it.
cacheflush(start, end_int - start_int, BCACHE);
_flush_cache(start, end_int - start_int, BCACHE);
#else
(void)start_int;
(void)end_int;

View file

@ -751,8 +751,11 @@ static void getAvailableFeatures(unsigned ECX, unsigned EDX, unsigned MaxLeaf,
if (HasLeaf7 && ((EDX >> 8) & 1) && HasAVX512Save)
setFeature(FEATURE_AVX512VP2INTERSECT);
// EAX from subleaf 0 is the maximum subleaf supported. Some CPUs don't
// return all 0s for invalid subleaves so check the limit.
bool HasLeaf7Subleaf1 =
MaxLeaf >= 0x7 && !getX86CpuIDAndInfoEx(0x7, 0x1, &EAX, &EBX, &ECX, &EDX);
HasLeaf7 && EAX >= 1 &&
!getX86CpuIDAndInfoEx(0x7, 0x1, &EAX, &EBX, &ECX, &EDX);
if (HasLeaf7Subleaf1 && ((EAX >> 5) & 1) && HasAVX512Save)
setFeature(FEATURE_AVX512BF16);

View file

@ -181,7 +181,7 @@ const interpose_substitution substitution_##func_name[] \
// FreeBSD's dynamic linker (incompliantly) gives non-weak symbols higher
// priority than weak ones so weak aliases won't work for indirect calls
// in position-independent (-fPIC / -fPIE) mode.
# define __ASM_WEAK_WRAPPER(func)
# define __ASM_WEAK_WRAPPER(func) ".globl " #func "\n"
# else
# define __ASM_WEAK_WRAPPER(func) ".weak " #func "\n"
# endif // SANITIZER_FREEBSD || SANITIZER_NETBSD

View file

@ -464,6 +464,25 @@ INTERCEPTORS_STRTO_BASE(long long, wcstoll, wchar_t)
INTERCEPTORS_STRTO_BASE(unsigned long, wcstoul, wchar_t)
INTERCEPTORS_STRTO_BASE(unsigned long long, wcstoull, wchar_t)
#if SANITIZER_GLIBC
INTERCEPTORS_STRTO(double, __isoc23_strtod, char)
INTERCEPTORS_STRTO(float, __isoc23_strtof, char)
INTERCEPTORS_STRTO(long double, __isoc23_strtold, char)
INTERCEPTORS_STRTO_BASE(long, __isoc23_strtol, char)
INTERCEPTORS_STRTO_BASE(long long, __isoc23_strtoll, char)
INTERCEPTORS_STRTO_BASE(unsigned long, __isoc23_strtoul, char)
INTERCEPTORS_STRTO_BASE(unsigned long long, __isoc23_strtoull, char)
INTERCEPTORS_STRTO_BASE(u64, __isoc23_strtouq, char)
INTERCEPTORS_STRTO(double, __isoc23_wcstod, wchar_t)
INTERCEPTORS_STRTO(float, __isoc23_wcstof, wchar_t)
INTERCEPTORS_STRTO(long double, __isoc23_wcstold, wchar_t)
INTERCEPTORS_STRTO_BASE(long, __isoc23_wcstol, wchar_t)
INTERCEPTORS_STRTO_BASE(long long, __isoc23_wcstoll, wchar_t)
INTERCEPTORS_STRTO_BASE(unsigned long, __isoc23_wcstoul, wchar_t)
INTERCEPTORS_STRTO_BASE(unsigned long long, __isoc23_wcstoull, wchar_t)
#endif
#if SANITIZER_NETBSD
#define INTERCEPT_STRTO(func) \
INTERCEPT_FUNCTION(func); \
@ -1748,6 +1767,24 @@ void InitializeInterceptors() {
INTERCEPT_STRTO(wcstoul);
INTERCEPT_STRTO(wcstoll);
INTERCEPT_STRTO(wcstoull);
#if SANITIZER_GLIBC
INTERCEPT_STRTO(__isoc23_strtod);
INTERCEPT_STRTO(__isoc23_strtof);
INTERCEPT_STRTO(__isoc23_strtold);
INTERCEPT_STRTO(__isoc23_strtol);
INTERCEPT_STRTO(__isoc23_strtoul);
INTERCEPT_STRTO(__isoc23_strtoll);
INTERCEPT_STRTO(__isoc23_strtoull);
INTERCEPT_STRTO(__isoc23_strtouq);
INTERCEPT_STRTO(__isoc23_wcstod);
INTERCEPT_STRTO(__isoc23_wcstof);
INTERCEPT_STRTO(__isoc23_wcstold);
INTERCEPT_STRTO(__isoc23_wcstol);
INTERCEPT_STRTO(__isoc23_wcstoul);
INTERCEPT_STRTO(__isoc23_wcstoll);
INTERCEPT_STRTO(__isoc23_wcstoull);
#endif
#ifdef SANITIZER_NLDBL_VERSION
INTERCEPT_FUNCTION_VER(vswprintf, SANITIZER_NLDBL_VERSION);
INTERCEPT_FUNCTION_VER(swprintf, SANITIZER_NLDBL_VERSION);

View file

@ -424,10 +424,13 @@ static void createProfileDir(const char *Filename) {
* its instrumented shared libraries dump profile data into their own data file.
*/
static FILE *openFileForMerging(const char *ProfileFileName, int *MergeDone) {
FILE *ProfileFile = getProfileFile();
FILE *ProfileFile = NULL;
int rc;
if (!ProfileFile) {
ProfileFile = getProfileFile();
if (ProfileFile) {
lprofLockFileHandle(ProfileFile);
} else {
createProfileDir(ProfileFileName);
ProfileFile = lprofOpenFileEx(ProfileFileName);
}
@ -478,6 +481,9 @@ static int writeFile(const char *OutputName) {
if (OutputFile == getProfileFile()) {
fflush(OutputFile);
if (doMerging()) {
lprofUnlockFileHandle(OutputFile);
}
} else {
fclose(OutputFile);
}

View file

@ -1491,6 +1491,16 @@ VSCANF_INTERCEPTOR_IMPL(__isoc99_vsscanf, false, str, format, ap)
INTERCEPTOR(int, __isoc99_vfscanf, void *stream, const char *format, va_list ap)
VSCANF_INTERCEPTOR_IMPL(__isoc99_vfscanf, false, stream, format, ap)
INTERCEPTOR(int, __isoc23_vscanf, const char *format, va_list ap)
VSCANF_INTERCEPTOR_IMPL(__isoc23_vscanf, false, format, ap)
INTERCEPTOR(int, __isoc23_vsscanf, const char *str, const char *format,
va_list ap)
VSCANF_INTERCEPTOR_IMPL(__isoc23_vsscanf, false, str, format, ap)
INTERCEPTOR(int, __isoc23_vfscanf, void *stream, const char *format, va_list ap)
VSCANF_INTERCEPTOR_IMPL(__isoc23_vfscanf, false, stream, format, ap)
#endif // SANITIZER_INTERCEPT_ISOC99_SCANF
INTERCEPTOR(int, scanf, const char *format, ...)
@ -1511,6 +1521,15 @@ FORMAT_INTERCEPTOR_IMPL(__isoc99_fscanf, __isoc99_vfscanf, stream, format)
INTERCEPTOR(int, __isoc99_sscanf, const char *str, const char *format, ...)
FORMAT_INTERCEPTOR_IMPL(__isoc99_sscanf, __isoc99_vsscanf, str, format)
INTERCEPTOR(int, __isoc23_scanf, const char *format, ...)
FORMAT_INTERCEPTOR_IMPL(__isoc23_scanf, __isoc23_vscanf, format)
INTERCEPTOR(int, __isoc23_fscanf, void *stream, const char *format, ...)
FORMAT_INTERCEPTOR_IMPL(__isoc23_fscanf, __isoc23_vfscanf, stream, format)
INTERCEPTOR(int, __isoc23_sscanf, const char *str, const char *format, ...)
FORMAT_INTERCEPTOR_IMPL(__isoc23_sscanf, __isoc23_vsscanf, str, format)
#endif
#endif
@ -1534,7 +1553,13 @@ FORMAT_INTERCEPTOR_IMPL(__isoc99_sscanf, __isoc99_vsscanf, str, format)
COMMON_INTERCEPT_FUNCTION(__isoc99_fscanf); \
COMMON_INTERCEPT_FUNCTION(__isoc99_vscanf); \
COMMON_INTERCEPT_FUNCTION(__isoc99_vsscanf); \
COMMON_INTERCEPT_FUNCTION(__isoc99_vfscanf);
COMMON_INTERCEPT_FUNCTION(__isoc99_vfscanf); \
COMMON_INTERCEPT_FUNCTION(__isoc23_scanf); \
COMMON_INTERCEPT_FUNCTION(__isoc23_sscanf); \
COMMON_INTERCEPT_FUNCTION(__isoc23_fscanf); \
COMMON_INTERCEPT_FUNCTION(__isoc23_vscanf); \
COMMON_INTERCEPT_FUNCTION(__isoc23_vsscanf); \
COMMON_INTERCEPT_FUNCTION(__isoc23_vfscanf);
#else
#define INIT_ISOC99_SCANF
#endif
@ -3539,30 +3564,26 @@ UNUSED static inline void StrtolFixAndCheck(void *ctx, const char *nptr,
(real_endptr - nptr) + 1 : 0);
}
#if SANITIZER_INTERCEPT_STRTOIMAX
INTERCEPTOR(INTMAX_T, strtoimax, const char *nptr, char **endptr, int base) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, strtoimax, nptr, endptr, base);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
// https://github.com/google/sanitizers/issues/321.
template <typename Fn>
static ALWAYS_INLINE auto StrtoimaxImpl(void *ctx, Fn real, const char *nptr,
char **endptr, int base)
-> decltype(real(nullptr, nullptr, 0)) {
char *real_endptr;
INTMAX_T res = REAL(strtoimax)(nptr, &real_endptr, base);
auto res = real(nptr, &real_endptr, base);
StrtolFixAndCheck(ctx, nptr, endptr, real_endptr, base);
return res;
}
INTERCEPTOR(INTMAX_T, strtoimax, const char *nptr, char **endptr, int base) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, strtoimax, nptr, endptr, base);
return StrtoimaxImpl(ctx, REAL(strtoimax), nptr, endptr, base);
}
INTERCEPTOR(UINTMAX_T, strtoumax, const char *nptr, char **endptr, int base) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, strtoumax, nptr, endptr, base);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
// https://github.com/google/sanitizers/issues/321.
char *real_endptr;
UINTMAX_T res = REAL(strtoumax)(nptr, &real_endptr, base);
StrtolFixAndCheck(ctx, nptr, endptr, real_endptr, base);
return res;
return StrtoimaxImpl(ctx, REAL(strtoumax), nptr, endptr, base);
}
#define INIT_STRTOIMAX \
@ -3572,6 +3593,25 @@ INTERCEPTOR(UINTMAX_T, strtoumax, const char *nptr, char **endptr, int base) {
#define INIT_STRTOIMAX
#endif
#if SANITIZER_INTERCEPT_STRTOIMAX && SANITIZER_GLIBC
INTERCEPTOR(INTMAX_T, __isoc23_strtoimax, const char *nptr, char **endptr, int base) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, __isoc23_strtoimax, nptr, endptr, base);
return StrtoimaxImpl(ctx, REAL(__isoc23_strtoimax), nptr, endptr, base);
}
INTERCEPTOR(UINTMAX_T, __isoc23_strtoumax, const char *nptr, char **endptr, int base) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, __isoc23_strtoumax, nptr, endptr, base);
return StrtoimaxImpl(ctx, REAL(__isoc23_strtoumax), nptr, endptr, base);
}
# define INIT_STRTOIMAX_C23 \
COMMON_INTERCEPT_FUNCTION(__isoc23_strtoimax); \
COMMON_INTERCEPT_FUNCTION(__isoc23_strtoumax);
#else
# define INIT_STRTOIMAX_C23
#endif
#if SANITIZER_INTERCEPT_MBSTOWCS
INTERCEPTOR(SIZE_T, mbstowcs, wchar_t *dest, const char *src, SIZE_T len) {
void *ctx;
@ -10304,6 +10344,7 @@ static void InitializeCommonInterceptors() {
INIT_GETCWD;
INIT_GET_CURRENT_DIR_NAME;
INIT_STRTOIMAX;
INIT_STRTOIMAX_C23;
INIT_MBSTOWCS;
INIT_MBSNRTOWCS;
INIT_WCSTOMBS;

View file

@ -340,11 +340,19 @@ static void scanf_common(void *ctx, int n_inputs, bool allowGnuMalloc,
size = 0;
}
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, argp, size);
// For %ms/%mc, write the allocated output buffer as well.
// For %mc/%mC/%ms/%m[/%mS, write the allocated output buffer as well.
if (dir.allocate) {
char *buf = *(char **)argp;
if (buf)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, internal_strlen(buf) + 1);
if (char *buf = *(char **)argp) {
if (dir.convSpecifier == 'c')
size = 1;
else if (dir.convSpecifier == 'C')
size = sizeof(wchar_t);
else if (dir.convSpecifier == 'S')
size = (internal_wcslen((wchar_t *)buf) + 1) * sizeof(wchar_t);
else // 's' or '['
size = internal_strlen(buf) + 1;
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, size);
}
}
}
}

View file

@ -30,13 +30,7 @@ void BufferedStackTrace::UnwindFast(uptr pc, uptr bp, uptr stack_top,
// TODO(yln): add arg sanity check for stack_top/stack_bottom
CHECK_GE(max_depth, 2);
const uptr kPageSize = GetPageSizeCached();
#if defined(__GNUC__)
// __builtin_return_address returns the address of the call instruction
// on the SPARC and not the return address, so we need to compensate.
trace_buffer[0] = GetNextInstructionPc(pc);
#else
trace_buffer[0] = pc;
#endif
size = 1;
if (stack_top < 4096) return; // Sanity check for stack top.
// Flush register windows to memory

View file

@ -139,13 +139,7 @@ void BufferedStackTrace::UnwindSlow(uptr pc, u32 max_depth) {
if (to_pop == 0 && size > 1)
to_pop = 1;
PopStackFrames(to_pop);
#if defined(__GNUC__) && defined(__sparc__)
// __builtin_return_address returns the address of the call instruction
// on the SPARC and not the return address, so we need to compensate.
trace_buffer[0] = GetNextInstructionPc(pc);
#else
trace_buffer[0] = pc;
#endif
}
void BufferedStackTrace::UnwindSlow(uptr pc, void *context, u32 max_depth) {

View file

@ -34,6 +34,13 @@ __interceptor_pthread_setspecific w
__interceptor_read w
__interceptor_realpath w
__isinf U
__isoc23_sscanf U
__isoc23_strtol U
__isoc23_strtoll U
__isoc23_strtoll_l U
__isoc23_strtoull U
__isoc23_strtoull_l U
__isoc23_vsscanf U
__isoc99_sscanf U
__isoc99_vsscanf U
__moddi3 U

View file

@ -17,6 +17,7 @@
#include <__type_traits/is_execution_policy.h>
#include <__type_traits/remove_cvref.h>
#include <__utility/forward.h>
#include <__utility/move.h>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
# pragma GCC system_header

View file

@ -208,19 +208,16 @@
// HARDENING {
// TODO(hardening): remove this in LLVM 18.
// This is for backward compatibility -- make enabling `_LIBCPP_ENABLE_ASSERTIONS` (which predates hardening modes)
// equivalent to setting the hardened mode.
# ifdef _LIBCPP_ENABLE_ASSERTIONS
# warning "_LIBCPP_ENABLE_ASSERTIONS is deprecated, please use _LIBCPP_ENABLE_HARDENED_MODE instead."
# if _LIBCPP_ENABLE_ASSERTIONS != 0 && _LIBCPP_ENABLE_ASSERTIONS != 1
# error "_LIBCPP_ENABLE_ASSERTIONS must be set to 0 or 1"
# endif
# if _LIBCPP_ENABLE_ASSERTIONS
# define _LIBCPP_ENABLE_HARDENED_MODE 1
# endif
# ifndef _LIBCPP_ENABLE_ASSERTIONS
# define _LIBCPP_ENABLE_ASSERTIONS _LIBCPP_ENABLE_ASSERTIONS_DEFAULT
# endif
# if _LIBCPP_ENABLE_ASSERTIONS != 0 && _LIBCPP_ENABLE_ASSERTIONS != 1
# error "_LIBCPP_ENABLE_ASSERTIONS must be set to 0 or 1"
# endif
// NOTE: These modes are experimental and are not stable yet in LLVM 17. Please refrain from using them and use the
// documented libc++ "safe" mode instead.
//
// Enables the hardened mode which consists of all checks intended to be used in production. Hardened mode prioritizes
// security-critical checks that can be done with relatively little overhead in constant time. Mutually exclusive with
// `_LIBCPP_ENABLE_DEBUG_MODE`.
@ -275,6 +272,11 @@
# error "Only one of _LIBCPP_ENABLE_HARDENED_MODE and _LIBCPP_ENABLE_DEBUG_MODE can be enabled."
# endif
# if _LIBCPP_ENABLE_ASSERTIONS && (_LIBCPP_ENABLE_HARDENED_MODE || _LIBCPP_ENABLE_DEBUG_MODE)
# error \
"_LIBCPP_ENABLE_ASSERTIONS is mutually exclusive with _LIBCPP_ENABLE_HARDENED_MODE and _LIBCPP_ENABLE_DEBUG_MODE."
# endif
// Hardened mode checks.
// clang-format off
@ -303,6 +305,18 @@
# define _LIBCPP_ASSERT_INTERNAL(expression, message) _LIBCPP_ASSERT(expression, message)
# define _LIBCPP_ASSERT_UNCATEGORIZED(expression, message) _LIBCPP_ASSERT(expression, message)
// Safe mode checks.
# elif _LIBCPP_ENABLE_ASSERTIONS
// All checks enabled.
# define _LIBCPP_ASSERT_VALID_INPUT_RANGE(expression, message) _LIBCPP_ASSERT(expression, message)
# define _LIBCPP_ASSERT_VALID_ELEMENT_ACCESS(expression, message) _LIBCPP_ASSERT(expression, message)
# define _LIBCPP_ASSERT_NON_OVERLAPPING_RANGES(expression, message) _LIBCPP_ASSERT(expression, message)
# define _LIBCPP_ASSERT_COMPATIBLE_ALLOCATOR(expression, message) _LIBCPP_ASSERT(expression, message)
# define _LIBCPP_ASSERT_INTERNAL(expression, message) _LIBCPP_ASSERT(expression, message)
# define _LIBCPP_ASSERT_UNCATEGORIZED(expression, message) _LIBCPP_ASSERT(expression, message)
// Disable all checks if hardening is not enabled.
# else

View file

@ -245,6 +245,9 @@ __handle_replacement_field(_Iterator __begin, _Iterator __end,
using _CharT = iter_value_t<_Iterator>;
__format::__parse_number_result __r = __format::__parse_arg_id(__begin, __end, __parse_ctx);
if (__r.__last == __end)
std::__throw_format_error("The argument index should end with a ':' or a '}'");
bool __parse = *__r.__last == _CharT(':');
switch (*__r.__last) {
case _CharT(':'):

View file

@ -10,6 +10,7 @@
#define _LIBCPP___LOCALE_LOCALE_BASE_API_LOCALE_GUARD_H
#include <__config>
#include <__locale> // for locale_t
#include <clocale>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)

View file

@ -171,11 +171,14 @@ struct __maybe_static_array {
_TStatic __static_val = _StaticValues::__get(__i);
if (__static_val == _DynTag) {
__dyn_vals_[_DynamicIdxMap::__get(__i)] = __values[__i];
}
// Precondition check
else
_LIBCPP_ASSERT_UNCATEGORIZED(__values[__i] == static_cast<_TDynamic>(__static_val),
"extents construction: mismatch of provided arguments with static extents.");
} else
// Not catching this could lead to out of bounds errors later
// e.g. using my_mdspan_t = mdspan<int, extents<int, 10>>; my_mdspan_t = m(new int[5], 5);
// Right-hand-side construction looks ok with allocation and size matching,
// but since (potentially elsewhere defined) my_mdspan_t has static size m now thinks its range is 10 not 5
_LIBCPP_ASSERT_VALID_ELEMENT_ACCESS(
__values[__i] == static_cast<_TDynamic>(__static_val),
"extents construction: mismatch of provided arguments with static extents.");
}
}
@ -187,11 +190,14 @@ struct __maybe_static_array {
_TStatic __static_val = _StaticValues::__get(__i);
if (__static_val == _DynTag) {
__dyn_vals_[_DynamicIdxMap::__get(__i)] = static_cast<_TDynamic>(__vals[__i]);
}
// Precondition check
else
_LIBCPP_ASSERT_UNCATEGORIZED(static_cast<_TDynamic>(__vals[__i]) == static_cast<_TDynamic>(__static_val),
"extents construction: mismatch of provided arguments with static extents.");
} else
// Not catching this could lead to out of bounds errors later
// e.g. using my_mdspan_t = mdspan<int, extents<int, 10>>; my_mdspan_t = m(new int[N], span<int,1>(&N));
// Right-hand-side construction looks ok with allocation and size matching,
// but since (potentially elsewhere defined) my_mdspan_t has static size m now thinks its range is 10 not N
_LIBCPP_ASSERT_VALID_ELEMENT_ACCESS(
static_cast<_TDynamic>(__vals[__i]) == static_cast<_TDynamic>(__static_val),
"extents construction: mismatch of provided arguments with static extents.");
}
}
@ -310,28 +316,37 @@ class extents {
(sizeof...(_OtherIndexTypes) == __rank_ || sizeof...(_OtherIndexTypes) == __rank_dynamic_))
_LIBCPP_HIDE_FROM_ABI constexpr explicit extents(_OtherIndexTypes... __dynvals) noexcept
: __vals_(static_cast<index_type>(__dynvals)...) {
_LIBCPP_ASSERT_UNCATEGORIZED(__mdspan_detail::__are_representable_as<index_type>(__dynvals...),
"extents ctor: arguments must be representable as index_type and nonnegative");
// Not catching this could lead to out of bounds errors later
// e.g. mdspan m(ptr, dextents<char, 1>(200u)); leads to an extent of -56 on m
_LIBCPP_ASSERT_VALID_ELEMENT_ACCESS(__mdspan_detail::__are_representable_as<index_type>(__dynvals...),
"extents ctor: arguments must be representable as index_type and nonnegative");
}
template <class _OtherIndexType, size_t _Size>
requires(is_convertible_v<_OtherIndexType, index_type> && is_nothrow_constructible_v<index_type, _OtherIndexType> &&
requires(is_convertible_v<const _OtherIndexType&, index_type> &&
is_nothrow_constructible_v<index_type, const _OtherIndexType&> &&
(_Size == __rank_ || _Size == __rank_dynamic_))
explicit(_Size != __rank_dynamic_)
_LIBCPP_HIDE_FROM_ABI constexpr extents(const array<_OtherIndexType, _Size>& __exts) noexcept
: __vals_(span(__exts)) {
_LIBCPP_ASSERT_UNCATEGORIZED(__mdspan_detail::__are_representable_as<index_type>(span(__exts)),
"extents ctor: arguments must be representable as index_type and nonnegative");
// Not catching this could lead to out of bounds errors later
// e.g. mdspan m(ptr, dextents<char, 1>(array<unsigned,1>(200))); leads to an extent of -56 on m
_LIBCPP_ASSERT_VALID_ELEMENT_ACCESS(__mdspan_detail::__are_representable_as<index_type>(span(__exts)),
"extents ctor: arguments must be representable as index_type and nonnegative");
}
template <class _OtherIndexType, size_t _Size>
requires(is_convertible_v<_OtherIndexType, index_type> && is_nothrow_constructible_v<index_type, _OtherIndexType> &&
requires(is_convertible_v<const _OtherIndexType&, index_type> &&
is_nothrow_constructible_v<index_type, const _OtherIndexType&> &&
(_Size == __rank_ || _Size == __rank_dynamic_))
explicit(_Size != __rank_dynamic_)
_LIBCPP_HIDE_FROM_ABI constexpr extents(const span<_OtherIndexType, _Size>& __exts) noexcept
: __vals_(__exts) {
_LIBCPP_ASSERT_UNCATEGORIZED(__mdspan_detail::__are_representable_as<index_type>(__exts),
"extents ctor: arguments must be representable as index_type and nonnegative");
// Not catching this could lead to out of bounds errors later
// e.g. array a{200u}; mdspan<int, dextents<char,1>> m(ptr, extents(span<unsigned,1>(a))); leads to an extent of -56
// on m
_LIBCPP_ASSERT_VALID_ELEMENT_ACCESS(__mdspan_detail::__are_representable_as<index_type>(__exts),
"extents ctor: arguments must be representable as index_type and nonnegative");
}
private:
@ -380,10 +395,16 @@ class extents {
for (size_t __r = 0; __r < rank(); __r++) {
if constexpr (static_cast<make_unsigned_t<index_type>>(numeric_limits<index_type>::max()) <
static_cast<make_unsigned_t<_OtherIndexType>>(numeric_limits<_OtherIndexType>::max())) {
_LIBCPP_ASSERT_UNCATEGORIZED(__mdspan_detail::__is_representable_as<index_type>(__other.extent(__r)),
"extents ctor: arguments must be representable as index_type and nonnegative");
// Not catching this could lead to out of bounds errors later
// e.g. dextents<char,1>> e(dextents<unsigned,1>(200)) leads to an extent of -56 on e
_LIBCPP_ASSERT_VALID_ELEMENT_ACCESS(
__mdspan_detail::__is_representable_as<index_type>(__other.extent(__r)),
"extents ctor: arguments must be representable as index_type and nonnegative");
}
_LIBCPP_ASSERT_UNCATEGORIZED(
// Not catching this could lead to out of bounds errors later
// e.g. mdspan<int, extents<int, 10>> m = mdspan<int, dextents<int, 1>>(new int[5], 5);
// Right-hand-side construction was ok, but m now thinks its range is 10 not 5
_LIBCPP_ASSERT_VALID_ELEMENT_ACCESS(
(_Values::__static_value(__r) == dynamic_extent) ||
(static_cast<index_type>(__other.extent(__r)) == static_cast<index_type>(_Values::__static_value(__r))),
"extents construction: mismatch of provided arguments with static extents.");

View file

@ -75,8 +75,11 @@ class layout_left::mapping {
_LIBCPP_HIDE_FROM_ABI constexpr mapping() noexcept = default;
_LIBCPP_HIDE_FROM_ABI constexpr mapping(const mapping&) noexcept = default;
_LIBCPP_HIDE_FROM_ABI constexpr mapping(const extents_type& __ext) noexcept : __extents_(__ext) {
_LIBCPP_ASSERT(__required_span_size_is_representable(__ext),
"layout_left::mapping extents ctor: product of extents must be representable as index_type.");
// not catching this could lead to out-of-bounds access later when used inside mdspan
// mapping<dextents<char, 2>> map(dextents<char, 2>(40,40)); map(10, 3) == -126
_LIBCPP_ASSERT_VALID_ELEMENT_ACCESS(
__required_span_size_is_representable(__ext),
"layout_left::mapping extents ctor: product of extents must be representable as index_type.");
}
template <class _OtherExtents>
@ -84,7 +87,9 @@ class layout_left::mapping {
_LIBCPP_HIDE_FROM_ABI constexpr explicit(!is_convertible_v<_OtherExtents, extents_type>)
mapping(const mapping<_OtherExtents>& __other) noexcept
: __extents_(__other.extents()) {
_LIBCPP_ASSERT(
// not catching this could lead to out-of-bounds access later when used inside mdspan
// mapping<dextents<char, 2>> map(mapping<dextents<int, 2>>(dextents<int, 2>(40,40))); map(10, 3) == -126
_LIBCPP_ASSERT_VALID_ELEMENT_ACCESS(
__mdspan_detail::__is_representable_as<index_type>(__other.required_span_size()),
"layout_left::mapping converting ctor: other.required_span_size() must be representable as index_type.");
}
@ -94,7 +99,13 @@ class layout_left::mapping {
_LIBCPP_HIDE_FROM_ABI constexpr explicit(!is_convertible_v<_OtherExtents, extents_type>)
mapping(const layout_right::mapping<_OtherExtents>& __other) noexcept
: __extents_(__other.extents()) {
_LIBCPP_ASSERT(
// not catching this could lead to out-of-bounds access later when used inside mdspan
// Note: since this is constraint to rank 1, extents itself would catch the invalid conversion first
// and thus this assertion should never be triggered, but keeping it here for consistency
// layout_left::mapping<dextents<char, 1>> map(
// layout_right::mapping<dextents<unsigned, 1>>(dextents<unsigned, 1>(200))); map.extents().extent(0) ==
// -56
_LIBCPP_ASSERT_VALID_ELEMENT_ACCESS(
__mdspan_detail::__is_representable_as<index_type>(__other.required_span_size()),
"layout_left::mapping converting ctor: other.required_span_size() must be representable as index_type.");
}
@ -122,6 +133,10 @@ class layout_left::mapping {
requires((sizeof...(_Indices) == extents_type::rank()) && (is_convertible_v<_Indices, index_type> && ...) &&
(is_nothrow_constructible_v<index_type, _Indices> && ...))
_LIBCPP_HIDE_FROM_ABI constexpr index_type operator()(_Indices... __idx) const noexcept {
// Mappings are generally meant to be used for accessing allocations and are meant to guarantee to never
// return a value exceeding required_span_size(), which is used to know how large an allocation one needs
// Thus, this is a canonical point in multi-dimensional data structures to make invalid element access checks
// However, mdspan does check this on its own, so for now we avoid double checking in hardened mode
_LIBCPP_ASSERT(__mdspan_detail::__is_multidimensional_index_in(__extents_, __idx...),
"layout_left::mapping: out of bounds indexing");
array<index_type, extents_type::rank()> __idx_a{static_cast<index_type>(__idx)...};
@ -144,9 +159,12 @@ class layout_left::mapping {
_LIBCPP_HIDE_FROM_ABI constexpr index_type stride(rank_type __r) const noexcept
requires(extents_type::rank() > 0)
{
_LIBCPP_ASSERT(__r < extents_type::rank(), "layout_left::mapping::stride(): invalid rank index");
// While it would be caught by extents itself too, using a too large __r
// is functionally an out of bounds access on the stored information needed to compute strides
_LIBCPP_ASSERT_VALID_ELEMENT_ACCESS(
__r < extents_type::rank(), "layout_left::mapping::stride(): invalid rank index");
index_type __s = 1;
for (rank_type __i = extents_type::rank() - 1; __i > __r; __i--)
for (rank_type __i = 0; __i < __r; __i++)
__s *= __extents_.extent(__i);
return __s;
}
@ -159,7 +177,7 @@ class layout_left::mapping {
}
private:
extents_type __extents_{}; // exposition only
_LIBCPP_NO_UNIQUE_ADDRESS extents_type __extents_{};
};
#endif // _LIBCPP_STD_VER >= 23

View file

@ -74,8 +74,11 @@ class layout_right::mapping {
_LIBCPP_HIDE_FROM_ABI constexpr mapping() noexcept = default;
_LIBCPP_HIDE_FROM_ABI constexpr mapping(const mapping&) noexcept = default;
_LIBCPP_HIDE_FROM_ABI constexpr mapping(const extents_type& __ext) noexcept : __extents_(__ext) {
_LIBCPP_ASSERT(__required_span_size_is_representable(__ext),
"layout_right::mapping extents ctor: product of extents must be representable as index_type.");
// not catching this could lead to out-of-bounds access later when used inside mdspan
// mapping<dextents<char, 2>> map(dextents<char, 2>(40,40)); map(3, 10) == -126
_LIBCPP_ASSERT_VALID_ELEMENT_ACCESS(
__required_span_size_is_representable(__ext),
"layout_right::mapping extents ctor: product of extents must be representable as index_type.");
}
template <class _OtherExtents>
@ -83,7 +86,9 @@ class layout_right::mapping {
_LIBCPP_HIDE_FROM_ABI constexpr explicit(!is_convertible_v<_OtherExtents, extents_type>)
mapping(const mapping<_OtherExtents>& __other) noexcept
: __extents_(__other.extents()) {
_LIBCPP_ASSERT(
// not catching this could lead to out-of-bounds access later when used inside mdspan
// mapping<dextents<char, 2>> map(mapping<dextents<int, 2>>(dextents<int, 2>(40,40))); map(3, 10) == -126
_LIBCPP_ASSERT_VALID_ELEMENT_ACCESS(
__mdspan_detail::__is_representable_as<index_type>(__other.required_span_size()),
"layout_right::mapping converting ctor: other.required_span_size() must be representable as index_type.");
}
@ -93,7 +98,13 @@ class layout_right::mapping {
_LIBCPP_HIDE_FROM_ABI constexpr explicit(!is_convertible_v<_OtherExtents, extents_type>)
mapping(const layout_left::mapping<_OtherExtents>& __other) noexcept
: __extents_(__other.extents()) {
_LIBCPP_ASSERT(
// not catching this could lead to out-of-bounds access later when used inside mdspan
// Note: since this is constraint to rank 1, extents itself would catch the invalid conversion first
// and thus this assertion should never be triggered, but keeping it here for consistency
// layout_right::mapping<dextents<char, 1>> map(
// layout_left::mapping<dextents<unsigned, 1>>(dextents<unsigned, 1>(200))); map.extents().extent(0) ==
// -56
_LIBCPP_ASSERT_VALID_ELEMENT_ACCESS(
__mdspan_detail::__is_representable_as<index_type>(__other.required_span_size()),
"layout_right::mapping converting ctor: other.required_span_size() must be representable as index_type.");
}
@ -121,6 +132,10 @@ class layout_right::mapping {
requires((sizeof...(_Indices) == extents_type::rank()) && (is_convertible_v<_Indices, index_type> && ...) &&
(is_nothrow_constructible_v<index_type, _Indices> && ...))
_LIBCPP_HIDE_FROM_ABI constexpr index_type operator()(_Indices... __idx) const noexcept {
// Mappings are generally meant to be used for accessing allocations and are meant to guarantee to never
// return a value exceeding required_span_size(), which is used to know how large an allocation one needs
// Thus, this is a canonical point in multi-dimensional data structures to make invalid element access checks
// However, mdspan does check this on its own, so for now we avoid double checking in hardened mode
_LIBCPP_ASSERT(__mdspan_detail::__is_multidimensional_index_in(__extents_, __idx...),
"layout_right::mapping: out of bounds indexing");
return [&]<size_t... _Pos>(index_sequence<_Pos...>) {
@ -141,7 +156,10 @@ class layout_right::mapping {
_LIBCPP_HIDE_FROM_ABI constexpr index_type stride(rank_type __r) const noexcept
requires(extents_type::rank() > 0)
{
_LIBCPP_ASSERT(__r < extents_type::rank(), "layout_right::mapping::stride(): invalid rank index");
// While it would be caught by extents itself too, using a too large __r
// is functionally an out of bounds access on the stored information needed to compute strides
_LIBCPP_ASSERT_VALID_ELEMENT_ACCESS(
__r < extents_type::rank(), "layout_right::mapping::stride(): invalid rank index");
index_type __s = 1;
for (rank_type __i = extents_type::rank() - 1; __i > __r; __i--)
__s *= __extents_.extent(__i);
@ -156,7 +174,7 @@ class layout_right::mapping {
}
private:
extents_type __extents_{}; // exposition only
_LIBCPP_NO_UNIQUE_ADDRESS extents_type __extents_{};
};
#endif // _LIBCPP_STD_VER >= 23

View file

@ -0,0 +1,308 @@
// -*- C++ -*-
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
// Kokkos v. 4.0
// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
//===---------------------------------------------------------------------===//
#ifndef _LIBCPP___MDSPAN_MDSPAN_H
#define _LIBCPP___MDSPAN_MDSPAN_H
#include <__assert>
#include <__config>
#include <__fwd/mdspan.h>
#include <__mdspan/default_accessor.h>
#include <__mdspan/extents.h>
#include <__type_traits/extent.h>
#include <__type_traits/is_abstract.h>
#include <__type_traits/is_array.h>
#include <__type_traits/is_constructible.h>
#include <__type_traits/is_convertible.h>
#include <__type_traits/is_default_constructible.h>
#include <__type_traits/is_nothrow_constructible.h>
#include <__type_traits/is_pointer.h>
#include <__type_traits/is_same.h>
#include <__type_traits/rank.h>
#include <__type_traits/remove_all_extents.h>
#include <__type_traits/remove_cv.h>
#include <__type_traits/remove_pointer.h>
#include <__type_traits/remove_reference.h>
#include <__utility/integer_sequence.h>
#include <array>
#include <cinttypes>
#include <cstddef>
#include <limits>
#include <span>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
# pragma GCC system_header
#endif
_LIBCPP_PUSH_MACROS
#include <__undef_macros>
_LIBCPP_BEGIN_NAMESPACE_STD
#if _LIBCPP_STD_VER >= 23
// Helper for lightweight test checking that one did pass a layout policy as LayoutPolicy template argument
namespace __mdspan_detail {
template <class _Layout, class _Extents>
concept __has_invalid_mapping = !requires { typename _Layout::template mapping<_Extents>; };
} // namespace __mdspan_detail
template <class _ElementType,
class _Extents,
class _LayoutPolicy = layout_right,
class _AccessorPolicy = default_accessor<_ElementType> >
class mdspan {
private:
static_assert(__mdspan_detail::__is_extents_v<_Extents>,
"mdspan: Extents template parameter must be a specialization of extents.");
static_assert(!is_array_v<_ElementType>, "mdspan: ElementType template parameter may not be an array type");
static_assert(!is_abstract_v<_ElementType>, "mdspan: ElementType template parameter may not be an abstract class");
static_assert(is_same_v<_ElementType, typename _AccessorPolicy::element_type>,
"mdspan: ElementType template parameter must match AccessorPolicy::element_type");
static_assert(!__mdspan_detail::__has_invalid_mapping<_LayoutPolicy, _Extents>,
"mdspan: LayoutPolicy template parameter is invalid. A common mistake is to pass a layout mapping "
"instead of a layout policy");
public:
using extents_type = _Extents;
using layout_type = _LayoutPolicy;
using accessor_type = _AccessorPolicy;
using mapping_type = typename layout_type::template mapping<extents_type>;
using element_type = _ElementType;
using value_type = remove_cv_t<element_type>;
using index_type = typename extents_type::index_type;
using size_type = typename extents_type::size_type;
using rank_type = typename extents_type::rank_type;
using data_handle_type = typename accessor_type::data_handle_type;
using reference = typename accessor_type::reference;
_LIBCPP_HIDE_FROM_ABI static constexpr rank_type rank() noexcept { return extents_type::rank(); }
_LIBCPP_HIDE_FROM_ABI static constexpr rank_type rank_dynamic() noexcept { return extents_type::rank_dynamic(); }
_LIBCPP_HIDE_FROM_ABI static constexpr size_t static_extent(rank_type __r) noexcept {
return extents_type::static_extent(__r);
}
_LIBCPP_HIDE_FROM_ABI constexpr index_type extent(rank_type __r) const noexcept {
return __map_.extents().extent(__r);
};
public:
//--------------------------------------------------------------------------------
// [mdspan.mdspan.cons], mdspan constructors, assignment, and destructor
_LIBCPP_HIDE_FROM_ABI constexpr mdspan()
requires((extents_type::rank_dynamic() > 0) && is_default_constructible_v<data_handle_type> &&
is_default_constructible_v<mapping_type> && is_default_constructible_v<accessor_type>)
= default;
_LIBCPP_HIDE_FROM_ABI constexpr mdspan(const mdspan&) = default;
_LIBCPP_HIDE_FROM_ABI constexpr mdspan(mdspan&&) = default;
// Construct from a data handle plus one index per extent: either one index
// per rank, or one per *dynamic* extent. Always explicit — a pointer and a
// pack of integers should not implicitly convert to an mdspan.
template <class... _OtherIndexTypes>
requires((is_convertible_v<_OtherIndexTypes, index_type> && ...) &&
(is_nothrow_constructible_v<index_type, _OtherIndexTypes> && ...) &&
((sizeof...(_OtherIndexTypes) == rank()) || (sizeof...(_OtherIndexTypes) == rank_dynamic())) &&
is_constructible_v<mapping_type, extents_type> && is_default_constructible_v<accessor_type>)
_LIBCPP_HIDE_FROM_ABI explicit constexpr mdspan(data_handle_type __p, _OtherIndexTypes... __exts)
: __ptr_(std::move(__p)), __map_(extents_type(static_cast<index_type>(std::move(__exts))...)), __acc_{} {}
// Construct from a data handle plus a std::array of extents. Implicit only
// when the array supplies exactly the dynamic extents; explicit when it
// supplies all extents (redundant static information).
template <class _OtherIndexType, size_t _Size>
requires(is_convertible_v<const _OtherIndexType&, index_type> &&
is_nothrow_constructible_v<index_type, const _OtherIndexType&> &&
((_Size == rank()) || (_Size == rank_dynamic())) && is_constructible_v<mapping_type, extents_type> &&
is_default_constructible_v<accessor_type>)
explicit(_Size != rank_dynamic())
_LIBCPP_HIDE_FROM_ABI constexpr mdspan(data_handle_type __p, const array<_OtherIndexType, _Size>& __exts)
: __ptr_(std::move(__p)), __map_(extents_type(__exts)), __acc_{} {}
// Same as the array overload above, but taking the extents as a std::span.
template <class _OtherIndexType, size_t _Size>
requires(is_convertible_v<const _OtherIndexType&, index_type> &&
is_nothrow_constructible_v<index_type, const _OtherIndexType&> &&
((_Size == rank()) || (_Size == rank_dynamic())) && is_constructible_v<mapping_type, extents_type> &&
is_default_constructible_v<accessor_type>)
explicit(_Size != rank_dynamic())
_LIBCPP_HIDE_FROM_ABI constexpr mdspan(data_handle_type __p, span<_OtherIndexType, _Size> __exts)
: __ptr_(std::move(__p)), __map_(extents_type(__exts)), __acc_{} {}
// Construct from a data handle and an extents object; the mapping is built
// from the extents and the accessor is default-constructed.
_LIBCPP_HIDE_FROM_ABI constexpr mdspan(data_handle_type __p, const extents_type& __exts)
requires(is_default_constructible_v<accessor_type> && is_constructible_v<mapping_type, const extents_type&>)
: __ptr_(std::move(__p)), __map_(__exts), __acc_{} {}
// Construct from a data handle and an already-built layout mapping.
_LIBCPP_HIDE_FROM_ABI constexpr mdspan(data_handle_type __p, const mapping_type& __m)
requires(is_default_constructible_v<accessor_type>)
: __ptr_(std::move(__p)), __map_(__m), __acc_{} {}
// Most general constructor: handle, mapping, and accessor all supplied.
_LIBCPP_HIDE_FROM_ABI constexpr mdspan(data_handle_type __p, const mapping_type& __m, const accessor_type& __a)
: __ptr_(std::move(__p)), __map_(__m), __acc_(__a) {}
// Converting constructor from an mdspan with different (but compatible)
// element type, extents, layout, or accessor. Explicit unless both the
// mapping and the accessor convert implicitly.
template <class _OtherElementType, class _OtherExtents, class _OtherLayoutPolicy, class _OtherAccessor>
requires(is_constructible_v<mapping_type, const typename _OtherLayoutPolicy::template mapping<_OtherExtents>&> &&
is_constructible_v<accessor_type, const _OtherAccessor&>)
explicit(!is_convertible_v<const typename _OtherLayoutPolicy::template mapping<_OtherExtents>&, mapping_type> ||
!is_convertible_v<const _OtherAccessor&, accessor_type>)
_LIBCPP_HIDE_FROM_ABI constexpr mdspan(
const mdspan<_OtherElementType, _OtherExtents, _OtherLayoutPolicy, _OtherAccessor>& __other)
: __ptr_(__other.__ptr_), __map_(__other.__map_), __acc_(__other.__acc_) {
// These requirements are mandates (hard errors), not constraints, per the standard.
static_assert(is_constructible_v<data_handle_type, const typename _OtherAccessor::data_handle_type&>,
"mdspan: incompatible data_handle_type for mdspan construction");
static_assert(
is_constructible_v<extents_type, _OtherExtents>, "mdspan: incompatible extents for mdspan construction");
// The following precondition is part of the standard, but is unlikely to be triggered.
// The extents constructor checks this and the mapping must be storing the extents, since
// its extents() function returns a const reference to extents_type.
// The only way this can be triggered is if the mapping conversion constructor would for example
// always construct its extents() only from the dynamic extents, instead of from the other extents.
if constexpr (rank() > 0) {
for (size_t __r = 0; __r < rank(); __r++) {
// Not catching this could lead to out of bounds errors later
// e.g. mdspan<int, dextents<char,1>, non_checking_layout> m =
// mdspan<int, dextents<unsigned, 1>, non_checking_layout>(ptr, 200); leads to an extent of -56 on m
_LIBCPP_ASSERT_VALID_ELEMENT_ACCESS(
(static_extent(__r) == dynamic_extent) ||
(static_cast<index_type>(__other.extent(__r)) == static_cast<index_type>(static_extent(__r))),
"mdspan: conversion mismatch of source dynamic extents with static extents");
}
}
}
// Memberwise assignment; mdspan is a non-owning view, so this never frees.
_LIBCPP_HIDE_FROM_ABI constexpr mdspan& operator=(const mdspan&) = default;
_LIBCPP_HIDE_FROM_ABI constexpr mdspan& operator=(mdspan&&) = default;
//--------------------------------------------------------------------------------
// [mdspan.mdspan.members], members
// Multidimensional element access, one index per rank (C++23 multi-arg
// operator[]). The mapping converts the index pack to a flat offset, which
// the accessor turns into a reference.
template <class... _OtherIndexTypes>
requires((is_convertible_v<_OtherIndexTypes, index_type> && ...) &&
(is_nothrow_constructible_v<index_type, _OtherIndexTypes> && ...) &&
(sizeof...(_OtherIndexTypes) == rank()))
_LIBCPP_HIDE_FROM_ABI constexpr reference operator[](_OtherIndexTypes... __indices) const {
// Note the standard layouts would also check this, but user provided ones may not, so we
// check the precondition here
_LIBCPP_ASSERT_VALID_ELEMENT_ACCESS(__mdspan_detail::__is_multidimensional_index_in(extents(), __indices...),
"mdspan: operator[] out of bounds access");
return __acc_.access(__ptr_, __map_(static_cast<index_type>(std::move(__indices))...));
}
// Element access with the indices packaged in a std::array; the immediately
// invoked lambda expands the array elements into a mapping call.
template <class _OtherIndexType>
requires(is_convertible_v<const _OtherIndexType&, index_type> &&
is_nothrow_constructible_v<index_type, const _OtherIndexType&>)
_LIBCPP_HIDE_FROM_ABI constexpr reference operator[](const array< _OtherIndexType, rank()>& __indices) const {
return __acc_.access(__ptr_, [&]<size_t... _Idxs>(index_sequence<_Idxs...>) {
return __map_(__indices[_Idxs]...);
}(make_index_sequence<rank()>()));
}
// Same as above, but taking the indices as a std::span.
template <class _OtherIndexType>
requires(is_convertible_v<const _OtherIndexType&, index_type> &&
is_nothrow_constructible_v<index_type, const _OtherIndexType&>)
_LIBCPP_HIDE_FROM_ABI constexpr reference operator[](span<_OtherIndexType, rank()> __indices) const {
return __acc_.access(__ptr_, [&]<size_t... _Idxs>(index_sequence<_Idxs...>) {
return __map_(__indices[_Idxs]...);
}(make_index_sequence<rank()>()));
}
// Total number of elements: the product of all extents. For rank 0 the
// empty fold yields size_type(1).
_LIBCPP_HIDE_FROM_ABI constexpr size_type size() const noexcept {
// Could leave this as only checked in debug mode: semantically size() is never
// guaranteed to be related to any accessible range
// The lambda multiplies the extents with __builtin_mul_overflow and reports
// whether any step overflowed size_type.
_LIBCPP_ASSERT_UNCATEGORIZED(
false == ([&]<size_t... _Idxs>(index_sequence<_Idxs...>) {
size_type __prod = 1;
return (__builtin_mul_overflow(__prod, extent(_Idxs), &__prod) || ... || false);
}(make_index_sequence<rank()>())),
"mdspan: size() is not representable as size_type");
return [&]<size_t... _Idxs>(index_sequence<_Idxs...>) {
return ((static_cast<size_type>(__map_.extents().extent(_Idxs))) * ... * size_type(1));
}(make_index_sequence<rank()>());
}
// True iff the span has at least one dimension and any extent is zero.
// (A rank-0 mdspan always refers to exactly one element, so it is not empty.)
[[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr bool empty() const noexcept {
return [&]<size_t... _Idxs>(index_sequence<_Idxs...>) {
return (rank() > 0) && ((__map_.extents().extent(_Idxs) == index_type(0)) || ... || false);
}(make_index_sequence<rank()>());
}
// Hidden-friend swap; the unqualified calls deliberately allow ADL to pick
// up user-provided swaps for the handle, mapping, and accessor types.
_LIBCPP_HIDE_FROM_ABI friend constexpr void swap(mdspan& __x, mdspan& __y) noexcept {
swap(__x.__ptr_, __y.__ptr_);
swap(__x.__map_, __y.__map_);
swap(__x.__acc_, __y.__acc_);
}
// Accessors for the stored parts. (Stray ';' after each function body
// removed: they were empty member declarations and trigger -Wextra-semi.)
_LIBCPP_HIDE_FROM_ABI constexpr const extents_type& extents() const noexcept { return __map_.extents(); }
_LIBCPP_HIDE_FROM_ABI constexpr const data_handle_type& data_handle() const noexcept { return __ptr_; }
_LIBCPP_HIDE_FROM_ABI constexpr const mapping_type& mapping() const noexcept { return __map_; }
_LIBCPP_HIDE_FROM_ABI constexpr const accessor_type& accessor() const noexcept { return __acc_; }
// Layout properties, forwarded to the mapping type (static) or the stored
// mapping object (per-instance).
_LIBCPP_HIDE_FROM_ABI static constexpr bool is_always_unique() { return mapping_type::is_always_unique(); }
_LIBCPP_HIDE_FROM_ABI static constexpr bool is_always_exhaustive() { return mapping_type::is_always_exhaustive(); }
_LIBCPP_HIDE_FROM_ABI static constexpr bool is_always_strided() { return mapping_type::is_always_strided(); }
_LIBCPP_HIDE_FROM_ABI constexpr bool is_unique() const { return __map_.is_unique(); }
_LIBCPP_HIDE_FROM_ABI constexpr bool is_exhaustive() const { return __map_.is_exhaustive(); }
_LIBCPP_HIDE_FROM_ABI constexpr bool is_strided() const { return __map_.is_strided(); }
// Stride of dimension __r in the flattened index space.
_LIBCPP_HIDE_FROM_ABI constexpr index_type stride(rank_type __r) const { return __map_.stride(__r); }
private:
// The three stored parts; _LIBCPP_NO_UNIQUE_ADDRESS lets empty mappings or
// accessors (the common case for the standard layouts) occupy no storage.
_LIBCPP_NO_UNIQUE_ADDRESS data_handle_type __ptr_{};
_LIBCPP_NO_UNIQUE_ADDRESS mapping_type __map_{};
_LIBCPP_NO_UNIQUE_ADDRESS accessor_type __acc_{};
// Needed so the converting constructor can read another specialization's
// private members directly.
template <class, class, class, class>
friend class mdspan;
};
// Deduction guides for mdspan ([mdspan.mdspan.overview]).
// From a pointer plus integer extents: deduce all-dynamic extents.
template <class _ElementType, class... _OtherIndexTypes>
requires((is_convertible_v<_OtherIndexTypes, size_t> && ...) && (sizeof...(_OtherIndexTypes) > 0))
explicit mdspan(_ElementType*, _OtherIndexTypes...)
-> mdspan<_ElementType, dextents<size_t, sizeof...(_OtherIndexTypes)>>;
// From a bare pointer: a rank-0 mdspan over a single element.
template <class _Pointer>
requires(is_pointer_v<remove_reference_t<_Pointer>>)
mdspan(_Pointer&&) -> mdspan<remove_pointer_t<remove_reference_t<_Pointer>>, extents<size_t>>;
// From a 1-D C array: the extent becomes a static extent.
template <class _CArray>
requires(is_array_v<_CArray> && (rank_v<_CArray> == 1))
mdspan(_CArray&) -> mdspan<remove_all_extents_t<_CArray>, extents<size_t, extent_v<_CArray, 0>>>;
// From a pointer plus an array or span of extents: all-dynamic extents of
// the corresponding rank.
template <class _ElementType, class _OtherIndexType, size_t _Size>
mdspan(_ElementType*, const array<_OtherIndexType, _Size>&) -> mdspan<_ElementType, dextents<size_t, _Size>>;
template <class _ElementType, class _OtherIndexType, size_t _Size>
mdspan(_ElementType*, span<_OtherIndexType, _Size>) -> mdspan<_ElementType, dextents<size_t, _Size>>;
// This one is necessary because all the constructors take `data_handle_type`s, not
// `_ElementType*`s, and `data_handle_type` is taken from `accessor_type::data_handle_type`, which
// seems to throw off automatic deduction guides.
template <class _ElementType, class _OtherIndexType, size_t... _ExtentsPack>
mdspan(_ElementType*, const extents<_OtherIndexType, _ExtentsPack...>&)
-> mdspan<_ElementType, extents<_OtherIndexType, _ExtentsPack...>>;
// From a pointer plus a layout mapping: deduce extents and layout from the mapping.
template <class _ElementType, class _MappingType>
mdspan(_ElementType*, const _MappingType&)
-> mdspan<_ElementType, typename _MappingType::extents_type, typename _MappingType::layout_type>;
// Fully explicit handle + mapping + accessor: deduce everything from the
// mapping and accessor types.
template <class _MappingType, class _AccessorType>
mdspan(const typename _AccessorType::data_handle_type, const _MappingType&, const _AccessorType&)
-> mdspan<typename _AccessorType::element_type,
typename _MappingType::extents_type,
typename _MappingType::layout_type,
_AccessorType>;
#endif // _LIBCPP_STD_VER >= 23
_LIBCPP_END_NAMESPACE_STD
_LIBCPP_POP_MACROS
#endif // _LIBCPP___MDSPAN_MDSPAN_H

View file

@ -0,0 +1,226 @@
// -*- C++ -*-
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// WARNING, this entire header is generated by
// utils/generate_std_clang_module_header.py
// DO NOT MODIFY!
// This header should not be directly included, it's exclusively to import all
// of the libc++ public clang modules for the `std` clang module to export. In
// other words, it's to facilitate `@import std;` in Objective-C++ and `import std`
// in Swift to expose all of the libc++ interfaces. This is generally not
// recommended, however there are some clients that need to import all of libc++
// without knowing what "all" is.
#if !__building_module(std)
# error "Do not include this header directly, include individual headers instead"
#endif
#include <__config>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
# pragma GCC system_header
#endif
#include <algorithm>
#include <any>
#include <array>
#if !defined(_LIBCPP_HAS_NO_ATOMIC_HEADER)
# include <atomic>
#endif
#if !defined(_LIBCPP_HAS_NO_THREADS)
# include <barrier>
#endif
#include <bit>
#include <bitset>
#include <cassert>
#include <ccomplex>
#include <cctype>
#include <cerrno>
#include <cfenv>
#include <cfloat>
#include <charconv>
#include <chrono>
#include <cinttypes>
#include <ciso646>
#include <climits>
#if !defined(_LIBCPP_HAS_NO_LOCALIZATION)
# include <clocale>
#endif
#include <cmath>
#if !defined(_LIBCPP_HAS_NO_LOCALIZATION)
# include <codecvt>
#endif
#include <compare>
#include <complex.h>
#include <complex>
#include <concepts>
#include <condition_variable>
#include <coroutine>
#include <csetjmp>
#include <csignal>
#include <cstdarg>
#include <cstdbool>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <ctgmath>
#include <ctime>
#include <ctype.h>
#include <cuchar>
#if !defined(_LIBCPP_HAS_NO_WIDE_CHARACTERS)
# include <cwchar>
#endif
#if !defined(_LIBCPP_HAS_NO_WIDE_CHARACTERS)
# include <cwctype>
#endif
#include <deque>
#include <errno.h>
#include <exception>
#include <execution>
#include <expected>
#include <experimental/deque>
#include <experimental/forward_list>
#include <experimental/iterator>
#include <experimental/list>
#include <experimental/map>
#include <experimental/memory_resource>
#include <experimental/propagate_const>
#if !defined(_LIBCPP_HAS_NO_LOCALIZATION)
# include <experimental/regex>
#endif
#include <experimental/set>
#include <experimental/simd>
#include <experimental/string>
#include <experimental/type_traits>
#include <experimental/unordered_map>
#include <experimental/unordered_set>
#include <experimental/utility>
#include <experimental/vector>
#include <fenv.h>
#include <filesystem>
#include <float.h>
#include <format>
#include <forward_list>
#if !defined(_LIBCPP_HAS_NO_LOCALIZATION)
# include <fstream>
#endif
#include <functional>
#if !defined(_LIBCPP_HAS_NO_THREADS)
# include <future>
#endif
#include <initializer_list>
#include <inttypes.h>
#if !defined(_LIBCPP_HAS_NO_LOCALIZATION)
# include <iomanip>
#endif
#if !defined(_LIBCPP_HAS_NO_LOCALIZATION)
# include <ios>
#endif
#include <iosfwd>
#if !defined(_LIBCPP_HAS_NO_LOCALIZATION)
# include <iostream>
#endif
#if !defined(_LIBCPP_HAS_NO_LOCALIZATION)
# include <istream>
#endif
#include <iterator>
#if !defined(_LIBCPP_HAS_NO_THREADS)
# include <latch>
#endif
#include <limits.h>
#include <limits>
#include <list>
#if !defined(_LIBCPP_HAS_NO_LOCALIZATION)
# include <locale.h>
#endif
#if !defined(_LIBCPP_HAS_NO_LOCALIZATION)
# include <locale>
#endif
#include <map>
#include <math.h>
#include <mdspan>
#include <memory>
#include <memory_resource>
#include <mutex>
#include <new>
#include <numbers>
#include <numeric>
#include <optional>
#if !defined(_LIBCPP_HAS_NO_LOCALIZATION)
# include <ostream>
#endif
#include <print>
#include <queue>
#include <random>
#include <ranges>
#include <ratio>
#if !defined(_LIBCPP_HAS_NO_LOCALIZATION)
# include <regex>
#endif
#include <scoped_allocator>
#if !defined(_LIBCPP_HAS_NO_THREADS)
# include <semaphore>
#endif
#include <set>
#include <setjmp.h>
#if !defined(_LIBCPP_HAS_NO_THREADS)
# include <shared_mutex>
#endif
#include <source_location>
#include <span>
#if !defined(_LIBCPP_HAS_NO_LOCALIZATION)
# include <sstream>
#endif
#include <stack>
#if !defined(_LIBCPP_HAS_NO_ATOMIC_HEADER)
# include <stdatomic.h>
#endif
#include <stdbool.h>
#include <stddef.h>
#include <stdexcept>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#if !defined(_LIBCPP_HAS_NO_THREADS)
# include <stop_token>
#endif
#if !defined(_LIBCPP_HAS_NO_LOCALIZATION)
# include <streambuf>
#endif
#include <string.h>
#include <string>
#include <string_view>
#if !defined(_LIBCPP_HAS_NO_LOCALIZATION)
# include <strstream>
#endif
#include <system_error>
#include <tgmath.h>
#if !defined(_LIBCPP_HAS_NO_THREADS)
# include <thread>
#endif
#include <tuple>
#include <type_traits>
#include <typeindex>
#include <typeinfo>
#include <uchar.h>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <valarray>
#include <variant>
#include <vector>
#include <version>
#if !defined(_LIBCPP_HAS_NO_WIDE_CHARACTERS)
# include <wchar.h>
#endif
#if !defined(_LIBCPP_HAS_NO_WIDE_CHARACTERS)
# include <wctype.h>
#endif

View file

@ -22,7 +22,8 @@
_LIBCPP_BEGIN_NAMESPACE_STD
#if __has_builtin(__is_nothrow_constructible)
// GCC is disabled due to https://gcc.gnu.org/bugzilla/show_bug.cgi?id=106611
#if __has_builtin(__is_nothrow_constructible) && !defined(_LIBCPP_COMPILER_GCC)
template < class _Tp, class... _Args>
struct _LIBCPP_TEMPLATE_VIS is_nothrow_constructible

Some files were not shown because too many files have changed in this diff Show more