From 465ac1626a5bcf1e1a9c48de9775077529177285 Mon Sep 17 00:00:00 2001 From: Chen Steenvoorden Date: Mon, 18 Nov 2024 16:37:58 +0100 Subject: [PATCH] feat: rework cheerp_allocate & co cheerp_allocate and related functions had a few issues, in particular they were not used consistently and so a lot of edge cases existed. On top of that, for Wasm we would lose the information about the original allocation/deallocation function called, and we defaulted to malloc/free. This meant that, for example, overriding the global new/delete would not have any effect. The new rules are: we ALWAYS use cheerp_allocate & co. For Wasm, we also include as first parameter the original function. We also do it for deallocation in genericjs when the argument might have come from linear memory. Then in GDA in llc we replace with the original function, if present. All the other changes are just minor consequences of this new system. --- clang/include/clang/Basic/BuiltinsCheerp.def | 1 + clang/lib/CodeGen/CGBuiltin.cpp | 167 ++++++++-------- clang/lib/CodeGen/CGDecl.cpp | 11 +- clang/lib/CodeGen/CGException.cpp | 30 +-- clang/lib/CodeGen/CGExprCXX.cpp | 46 +++-- clang/lib/CodeGen/ItaniumCXXABI.cpp | 53 ++--- clang/lib/Sema/SemaCoroutine.cpp | 13 +- .../test/asan/TestCases/calloc-overflow.cpp | 1 + .../test/asan/TestCases/use-after-delete.cpp | 11 +- llvm/include/llvm/Cheerp/PointerPasses.h | 18 +- llvm/include/llvm/Cheerp/Utility.h | 32 +-- llvm/include/llvm/Cheerp/Writer.h | 1 - llvm/include/llvm/IR/IntrinsicsCheerp.td | 4 +- llvm/lib/CheerpUtils/GlobalDepsAnalyzer.cpp | 148 ++++---------- llvm/lib/CheerpUtils/LinearMemoryHelper.cpp | 17 +- llvm/lib/CheerpUtils/NativeRewriter.cpp | 6 +- llvm/lib/CheerpUtils/PointerAnalyzer.cpp | 18 -- llvm/lib/CheerpUtils/PointerPasses.cpp | 185 ++++++------------ llvm/lib/CheerpUtils/TypeOptimizer.cpp | 18 +- llvm/lib/CheerpUtils/Utility.cpp | 167 +++++++--------- llvm/lib/CheerpWriter/CheerpWasmWriter.cpp | 23 --- llvm/lib/CheerpWriter/CheerpWriter.cpp | 
114 +---------- llvm/lib/CheerpWriter/PreExecute.cpp | 10 +- llvm/lib/IR/Verifier.cpp | 8 +- .../Target/WebAssembly/CheerpWritePass.cpp | 2 +- llvm/lib/Transforms/Coroutines/CoroFrame.cpp | 12 +- 26 files changed, 426 insertions(+), 690 deletions(-) diff --git a/clang/include/clang/Basic/BuiltinsCheerp.def b/clang/include/clang/Basic/BuiltinsCheerp.def index 2ed55b5ee5e3..6d50ba73fe9b 100644 --- a/clang/include/clang/Basic/BuiltinsCheerp.def +++ b/clang/include/clang/Basic/BuiltinsCheerp.def @@ -34,6 +34,7 @@ BUILTIN(__builtin_cheerp_throw, "", "rB") BUILTIN(__builtin_cheerp_downcast, "", "B") BUILTIN(__builtin_cheerp_downcast_current, "", "B") BUILTIN(__builtin_cheerp_coro_alloc, "v*z", "n") +BUILTIN(__builtin_cheerp_deallocate, "vv*", "n") BUILTIN(__builtin_cheerp_environ, "vC*", "B") BUILTIN(__builtin_cheerp_argv, "vC*", "B") BUILTIN(__builtin_cheerp_get_threading_object, "", "B") diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp index c26bff2654c2..dd07ff838b07 100644 --- a/clang/lib/CodeGen/CGBuiltin.cpp +++ b/clang/lib/CodeGen/CGBuiltin.cpp @@ -3421,22 +3421,15 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, llvm::Type *Tys[] = { VoidPtrTy, VoidPtrTy }; const CastExpr* retCE=dyn_cast_or_null(parent); llvm::Type *elementType = nullptr; - if (!retCE || retCE->getType()->isVoidPointerType()) + if (!retCE || retCE->getType()->isVoidPointerType()) { CGM.getDiags().Report(E->getBeginLoc(), diag::err_cheerp_alloc_requires_cast); - else - { - QualType returnType=retCE->getType(); - Tys[0] = Tys[1] = ConvertType(returnType); - elementType = ConvertType(returnType->getPointeeType()); + } else { + QualType returnType=retCE->getType(); + Tys[0] = ConvertType(returnType); + elementType = ConvertType(returnType->getPointeeType()); + CallBase* CB = cheerp::createCheerpAllocate(Builder, nullptr, elementType, Size); + return RValue::get(CB); } - Function *F = CGM.getIntrinsic(Intrinsic::cheerp_allocate, Tys); - 
CallBase* CB = Builder.CreateCall(F, {llvm::Constant::getNullValue(Tys[0]),Size}); - - assert(elementType); - assert(Tys[0]->isOpaquePointerTy() || Tys[0]->getNonOpaquePointerElementType() == elementType); - - CB->addParamAttr(0, llvm::Attribute::get(CB->getContext(), llvm::Attribute::ElementType, elementType)); - return RValue::get(CB); } const TargetInfo &TI = getContext().getTargetInfo(); // The alignment of the alloca should correspond to __BIGGEST_ALIGNMENT__. @@ -12553,6 +12546,13 @@ Value *CodeGenFunction::EmitCheerpBuiltinExpr(unsigned BuiltinID, Function *F = CGM.getIntrinsic(Intrinsic::cheerp_coro_alloc); return EmitCallOrInvoke(F, Ops); } + else if (BuiltinID == Cheerp::BI__builtin_cheerp_deallocate) { + // This is only used in SemaCoroutine, so we just care for the genericjs + // case, and for now only void* argument + llvm::Type *Tys[] = { VoidPtrTy, Ops[0]->getType() }; + Function *F = CGM.getIntrinsic(Intrinsic::cheerp_deallocate, Tys); + return EmitCallOrInvoke(F, {ConstantPointerNull::get(VoidPtrTy), Ops[0]}); + } else if (BuiltinID == Cheerp::BI__builtin_cheerp_throw) { llvm::Type *Tys[] = { Ops[0]->getType() }; Function *F = CGM.getIntrinsic(Intrinsic::cheerp_throw, Tys); @@ -12686,24 +12686,22 @@ Value *CodeGenFunction::EmitCheerpBuiltinExpr(unsigned BuiltinID, // We need an explicit cast after the call, void* can't be used llvm::Type *Tys[] = { VoidPtrTy, VoidPtrTy }; const CastExpr* retCE=dyn_cast_or_null(parent); - if (!retCE || retCE->getType()->isVoidPointerType()) - { - if (!asmjs) - CGM.getDiags().Report(E->getBeginLoc(), diag::err_cheerp_alloc_requires_cast); + llvm::Type* elementType = nullptr; + if (!retCE || retCE->getType()->isVoidPointerType()) { + if (!asmjs) { + CGM.getDiags().Report(E->getBeginLoc(), diag::err_cheerp_alloc_requires_cast); return 0; + } + } else { + elementType = ConvertTypeForMem(retCE->getType()->getPointeeType()); + Tys[0] = ConvertType(retCE->getType()); } - else - { - QualType returnType=retCE->getType(); - 
Tys[0] = Tys[1] = ConvertType(returnType); + llvm::Function* Malloc = nullptr; + // in Wasm, we pass the original allocation function as argument 0 + if (asmjs || (elementType->isStructTy() && cast(elementType)->hasAsmJS())) { + Malloc = dyn_cast(CGM.getModule().getOrInsertFunction("malloc", Int8PtrTy, Int32Ty).getCallee()); } - Function *F = CGM.getIntrinsic(Intrinsic::cheerp_allocate, Tys); - CallBase* CB = Builder.CreateCall(F, {llvm::Constant::getNullValue(Tys[0]), Ops[0]}); - - llvm::Type* elementType = ConvertTypeForMem(retCE->getType()->getPointeeType()); - assert(Tys[0]->isOpaquePointerTy() || Tys[0]->getNonOpaquePointerElementType() == elementType); - - CB->addParamAttr(0, llvm::Attribute::get(CB->getContext(), llvm::Attribute::ElementType, elementType)); + llvm::CallBase* CB = cheerp::createCheerpAllocate(Builder, Malloc, elementType, Ops[0]); return CB; } else if (BuiltinID == Builtin::BIcalloc) { @@ -12714,48 +12712,45 @@ Value *CodeGenFunction::EmitCheerpBuiltinExpr(unsigned BuiltinID, ParentMap PM(FD ? 
FD->getBody() : const_cast(VD->getInit())); const Stmt* parent=PM.getParent(E); // We need an explicit cast after the call, void* can't be used - llvm::Type *Tys[] = { VoidPtrTy , VoidPtrTy}; const CastExpr* retCE=dyn_cast_or_null(parent); - if (!retCE || retCE->getType()->isVoidPointerType()) - { - if (!asmjs) - CGM.getDiags().Report(E->getBeginLoc(), diag::err_cheerp_alloc_requires_cast); + llvm::Type* elementType = nullptr; + if (!retCE || retCE->getType()->isVoidPointerType()) { + if (!asmjs) { + CGM.getDiags().Report(E->getBeginLoc(), diag::err_cheerp_alloc_requires_cast); return 0; + } + } else { + elementType = ConvertTypeForMem(retCE->getType()->getPointeeType()); } - else - { - QualType returnType=retCE->getType(); - Tys[0] = Tys[1] = ConvertType(returnType); + llvm::Function* Malloc = nullptr; + // in Wasm, we pass the original allocation function as argument 0 + // in this case malloc and not calloc since we explicitly memset after + if (asmjs || (elementType->isStructTy() && cast(elementType)->hasAsmJS())) { + Malloc = dyn_cast(CGM.getModule().getOrInsertFunction("malloc", Int8PtrTy, Int32Ty).getCallee()); } - Function *F = CGM.getIntrinsic(Intrinsic::cheerp_allocate, Tys); // Compute the size in bytes llvm::Value* sizeInBytes = Builder.CreateMul(Ops[0], Ops[1]); - llvm::Value* NewOp[2] = { llvm::Constant::getNullValue(Tys[0]), sizeInBytes }; - llvm::CallBase* Ret = Builder.CreateCall(F, NewOp); - - llvm::Type* elementType = ConvertTypeForMem(retCE->getType()->getPointeeType()); - assert(Tys[0]->isOpaquePointerTy() || Tys[0]->getNonOpaquePointerElementType() == elementType); - - Ret->addParamAttr(0, llvm::Attribute::get(Ret->getContext(), llvm::Attribute::ElementType, elementType)); - Builder.CreateMemSet(Ret, ConstantInt::get(Int8Ty, 0), sizeInBytes, MaybeAlign(1), false, NULL, NULL, NULL, - CGBuilderTy::CheerpTypeInfo::get(getTarget().isByteAddressable(), ConvertType(retCE->getType()->getPointeeType()))); - return Ret; + llvm::CallBase* CB = 
cheerp::createCheerpAllocate(Builder, Malloc, elementType, sizeInBytes); + Builder.CreateMemSet(CB, ConstantInt::get(Int8Ty, 0), sizeInBytes, MaybeAlign(1), false, NULL, NULL, NULL, + CGBuilderTy::CheerpTypeInfo::get(getTarget().isByteAddressable(), elementType)); + return CB; } else if (BuiltinID == Builtin::BIrealloc) { // There must be an incoming cast, void* are not directly accepted - const CastExpr* argCE=dyn_cast(E->getArg(0)); + const Expr* existingMem = E->getArg(0); + const CastExpr* argCE=dyn_cast(existingMem); - if (!argCE || argCE->getSubExpr()->getType()->isVoidPointerType()) { - if (!asmjs) + if ((!argCE || argCE->getSubExpr()->getType()->isVoidPointerType())) { + if (!asmjs) { CGM.getDiags().Report(E->getArg(0)->getBeginLoc(), diag::err_cheerp_memintrinsic_type_unknown); - return 0; + return 0; + } + } else { + existingMem = argCE->getSubExpr(); } - //TODO: realloc can be invoked with NULL, support that - const Expr* existingMem=argCE->getSubExpr(); // The type for the realloc is decided from the base type QualType reallocType=existingMem->getType(); - llvm::Type *Tys[] = { VoidPtrTy, ConvertType(reallocType) }; Ops[0]=EmitScalarExpr(existingMem); // Some additional checks that can't be done in Sema const FunctionDecl* FD=dyn_cast_if_present(CurFuncDecl); @@ -12766,29 +12761,32 @@ Value *CodeGenFunction::EmitCheerpBuiltinExpr(unsigned BuiltinID, const Stmt* parent=PM.getParent(E); // We need an explicit cast after the call, void* can't be used const CastExpr* retCE=dyn_cast_or_null(parent); - if (!retCE || retCE->getType()->isVoidPointerType()) + llvm::Type* elementType = nullptr; + if ((!retCE || retCE->getType()->isVoidPointerType())) { - if (!asmjs) + if (!asmjs) { CGM.getDiags().Report(E->getBeginLoc(), diag::err_cheerp_alloc_requires_cast); - return 0; + return 0; + } } else if(retCE->getType().getCanonicalType()!=reallocType.getCanonicalType()) { - if (asmjs) return 0; - CGM.getDiags().Report(E->getBeginLoc(), 
diag::err_cheerp_realloc_different_types); + if (!asmjs) { + CGM.getDiags().Report(E->getBeginLoc(), diag::err_cheerp_realloc_different_types); + return 0; + } } else { // The call is fully valid, so set the return type to the existing type - Tys[0]=Tys[1]; + elementType = ConvertTypeForMem(reallocType->getPointeeType()); } - llvm::Type* elementType = ConvertTypeForMem(reallocType->getPointeeType()); - assert(Tys[0]->isOpaquePointerTy() || Tys[0]->getNonOpaquePointerElementType() == elementType); - - Function *reallocFunc = CGM.getIntrinsic(Intrinsic::cheerp_reallocate, Tys); if(asmjs) { - CallBase* CB = Builder.CreateCall(reallocFunc, Ops); - CB->addParamAttr(0, llvm::Attribute::get(CB->getContext(), llvm::Attribute::ElementType, elementType)); + llvm::Type *RetTy = Builder.getInt8PtrTy(); + llvm::Type *Arg0Ty = Builder.getInt8PtrTy(); + llvm::Type *Arg1Ty = Builder.getInt32Ty(); + llvm::Function* origReallocFunc = dyn_cast(CGM.getModule().getOrInsertFunction("realloc", RetTy, Arg0Ty, Arg1Ty).getCallee()); + CallBase* CB = cheerp::createCheerpReallocate(Builder, origReallocFunc, elementType, Ops[0], Ops[1]); return CB; } else { // realloc needs to behave like malloc if the operand is null @@ -12799,32 +12797,37 @@ Value *CodeGenFunction::EmitCheerpBuiltinExpr(unsigned BuiltinID, Builder.CreateCondBr(opIsNull, mallocBlock, reallocBlock); Builder.SetInsertPoint(mallocBlock); - Function *mallocFunc = CGM.getIntrinsic(Intrinsic::cheerp_allocate, Tys); - llvm::CallBase* mallocRet = Builder.CreateCall(mallocFunc, {llvm::Constant::getNullValue(Tys[0]), Ops[1]}); - mallocRet->addParamAttr(0, llvm::Attribute::get(mallocRet->getContext(), llvm::Attribute::ElementType, elementType)); + CallBase* mallocRet = cheerp::createCheerpAllocate(Builder, nullptr, elementType, Ops[1]); Builder.CreateBr(endBlock); Builder.SetInsertPoint(reallocBlock); - llvm::CallBase* reallocRet = cast(Builder.CreateCall(reallocFunc, Ops)); - reallocRet->addParamAttr(0, 
llvm::Attribute::get(reallocRet->getContext(), llvm::Attribute::ElementType, elementType)); + CallBase* reallocRet = cheerp::createCheerpReallocate(Builder, nullptr, elementType, Ops[0], Ops[1]); Builder.CreateBr(endBlock); Builder.SetInsertPoint(endBlock); - llvm::PHINode* Result = Builder.CreatePHI(Tys[0], 2); + llvm::PHINode* Result = Builder.CreatePHI(mallocRet->getType(), 2); Result->addIncoming(mallocRet, mallocBlock); Result->addIncoming(reallocRet, reallocBlock); return Result; } } else if (BuiltinID == Builtin::BIfree) { - llvm::Value* origArg = Ops[0]; - llvm::Type* origType = origArg->getType(); - if (CallInst* CI = dyn_cast(Ops[0])) { - if (auto* c = dyn_cast(E->getArg(0))) { - origArg = CI->getOperand(0); - origType = origArg->getType(); + const CastExpr* argCE=dyn_cast(E->getArg(0)); + llvm::Type* elementType = nullptr; + if (argCE) { + QualType ptrTy = argCE->getSubExpr()->getType(); + if (ptrTy->isPointerType() && !ptrTy->isVoidPointerType()) { + elementType = ConvertType(ptrTy->getPointeeType()); + Ops[0]=EmitScalarExpr(argCE->getSubExpr()); } } - Function *F = CGM.getIntrinsic(Intrinsic::cheerp_deallocate, {origType}); - return Builder.CreateCall(F, origArg); + llvm::Function* Free = nullptr; + // in Wasm, we pass the original deallocation function as argument 0 + // For free, we always pass this argument unless the element type is a genericjs struct, + // because the pointer may have come from Wasm originally + if (asmjs || (elementType && elementType->isStructTy() && !cast(elementType)->hasAsmJS())) { + Free = dyn_cast(CGM.getModule().getOrInsertFunction("free", VoidTy, Int8PtrTy).getCallee()); + } + llvm::CallBase* CB = cheerp::createCheerpDeallocate(Builder, Free, elementType, Ops[0]); + return CB; } return 0; } diff --git a/clang/lib/CodeGen/CGDecl.cpp b/clang/lib/CodeGen/CGDecl.cpp index 6d25f5f0b8a4..6344360f1dfd 100644 --- a/clang/lib/CodeGen/CGDecl.cpp +++ b/clang/lib/CodeGen/CGDecl.cpp @@ -33,6 +33,7 @@ #include 
"clang/CodeGen/CGFunctionInfo.h" #include "clang/Sema/Sema.h" #include "llvm/Analysis/ValueTracking.h" +#include "llvm/Cheerp/Utility.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/GlobalVariable.h" #include "llvm/IR/Intrinsics.h" @@ -1602,15 +1603,9 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) { llvm::Type *llvmTy = ConvertTypeForMem(elementType); uint64_t size = CGM.getDataLayout().getTypeAllocSize(llvmTy); llvm::Constant* typeSize = llvm::ConstantInt::get(elementCount->getType(), size); - - llvm::Type* Tys[] = { llvmTy->getPointerTo(), llvmTy->getPointerTo() }; - llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::cheerp_allocate, Tys); - // Compute the size in bytes llvm::Value* sizeInBytes = Builder.CreateMul(elementCount, typeSize); - llvm::Value* Args[2] = { llvm::Constant::getNullValue(llvmTy->getPointerTo()), sizeInBytes }; - // Allocate memory for the array. - llvm::Value* Ret = Builder.CreateCall(F, Args); - cast(Ret)->addParamAttr(0, llvm::Attribute::get(Ret->getContext(), llvm::Attribute::ElementType, llvmTy)); + + llvm::Value* Ret = cheerp::createCheerpAllocate(Builder, nullptr, llvmTy, sizeInBytes); address = Address(Ret, llvmTy, CharUnits::One()); } else { EnsureInsertPoint(); diff --git a/clang/lib/CodeGen/CGException.cpp b/clang/lib/CodeGen/CGException.cpp index 80c9a48f66cc..cd50b78fe3fd 100644 --- a/clang/lib/CodeGen/CGException.cpp +++ b/clang/lib/CodeGen/CGException.cpp @@ -22,6 +22,7 @@ #include "clang/AST/StmtVisitor.h" #include "clang/Basic/DiagnosticSema.h" #include "clang/Basic/TargetBuiltins.h" +#include "llvm/Cheerp/Utility.h" #include "llvm/IR/IntrinsicInst.h" #include "llvm/IR/Intrinsics.h" #include "llvm/IR/IntrinsicsWebAssembly.h" @@ -30,21 +31,13 @@ using namespace clang; using namespace CodeGen; -static llvm::FunctionCallee getFreeExceptionFn(CodeGenModule &CGM, llvm::Type* t) { +static llvm::FunctionCallee getFreeExceptionFn(CodeGenModule &CGM) { // void __cxa_free_exception(void *thrown_exception); - 
if(CGM.getTarget().isByteAddressable()) { - llvm::FunctionType *FTy = - llvm::FunctionType::get(CGM.VoidTy, CGM.Int8PtrTy, /*isVarArg=*/false); + llvm::FunctionType *FTy = + llvm::FunctionType::get(CGM.VoidTy, CGM.Int8PtrTy, /*isVarArg=*/false); - return CGM.CreateRuntimeFunction(FTy, "__cxa_free_exception"); - } - else { - llvm::Type* types[] = { t }; - llvm::Function* F = llvm::Intrinsic::getDeclaration(&CGM.getModule(), - llvm::Intrinsic::cheerp_deallocate, types); - return llvm::FunctionCallee(F->getFunctionType(), F); - } + return CGM.CreateRuntimeFunction(FTy, "__cxa_free_exception"); } static llvm::FunctionCallee getSehTryBeginFn(CodeGenModule &CGM) { @@ -399,7 +392,18 @@ namespace { llvm::Value *exn; FreeException(llvm::Value *exn) : exn(exn) {} void Emit(CodeGenFunction &CGF, Flags flags) override { - CGF.EmitNounwindRuntimeCall(getFreeExceptionFn(CGF.CGM, exn->getType()), exn); + if (CGF.getLangOpts().Cheerp) { + llvm::Function* origFunc = nullptr; + if (CGF.getTarget().getTriple().isCheerpWasm()) { + llvm::FunctionType *FreeTy = + llvm::FunctionType::get(CGF.VoidTy, CGF.VoidPtrTy, /*isVarArg=*/false); + + origFunc = cast(CGF.CGM.CreateRuntimeFunction(FreeTy, "free").getCallee()); + } + cheerp::createCheerpDeallocate(CGF.Builder, origFunc, nullptr, exn); + } else { + CGF.EmitNounwindRuntimeCall(getFreeExceptionFn(CGF.CGM), exn); + } } }; } // end anonymous namespace diff --git a/clang/lib/CodeGen/CGExprCXX.cpp b/clang/lib/CodeGen/CGExprCXX.cpp index b5681650de29..cc643c7c8659 100644 --- a/clang/lib/CodeGen/CGExprCXX.cpp +++ b/clang/lib/CodeGen/CGExprCXX.cpp @@ -1347,6 +1347,10 @@ static RValue EmitNewDeleteCall(CodeGenFunction &CGF, bool cheerp = !CGF.getTarget().isByteAddressable(); bool asmjs = CGF.CurFn->getSection() == StringRef("asmjs"); bool user_defined_new = false; + // CHEERP: in Wasm, we use cheerp_allocate/deallocate only for: + // new(size_t), new[](size_t), new(size_t, const std::nothrow_t&), new[](size_t, const std::nothrow_t&), + // 
delete(void*), delete(void*, const std::nothrow_t&), delete[](void*), delete[](void*, const std::nothrow_t&). + bool fancy_new = false; bool use_array = false; if (IsArray) { if (const CXXRecordDecl* RD = allocType->getAsCXXRecordDecl()) @@ -1360,35 +1364,37 @@ static RValue EmitNewDeleteCall(CodeGenFunction &CGF, break; } } + if (IsDelete && (Args.size() > 2 || (Args.size() == 2 && !Args[1].getType()->isReferenceType()))) { + fancy_new = true; + } else if (!IsDelete && (Args.size() > 2 || (Args.size() == 2 && !Args[1].getType()->isReferenceType()))) { + fancy_new = true; + } //CHEERP TODO: warning/error when `cheerp && !asmjs && user_defined_new` - if(!IsDelete && cheerp && !(asmjs && user_defined_new)) + if(!IsDelete && cheerp && !(asmjs && (user_defined_new || fancy_new))) { // Forge a call to a special type safe allocator intrinsic QualType retType = CGF.getContext().getPointerType(allocType); - llvm::Type* types[] = { CGF.ConvertType(retType), CGF.ConvertType(retType) }; - - llvm::Function* CalleeAddr = llvm::Intrinsic::getDeclaration(&CGF.CGM.getModule(), - use_array? 
llvm::Intrinsic::cheerp_allocate_array : - llvm::Intrinsic::cheerp_allocate, - types); - llvm::Value* Arg[] = { llvm::Constant::getNullValue(types[0]), Args[0].getKnownRValue().getScalarVal() }; - CallOrInvoke = CGF.Builder.CreateCall(cast(CalleeAddr->getValueType()), CalleeAddr, Arg); + llvm::Function* origFunc = nullptr; + if (asmjs || (allocType->getAsTagDecl() && allocType->getAsTagDecl()->hasAttr())) { + origFunc = cast(CalleePtr); + } llvm::Type* elementType = CGF.ConvertTypeForMem(retType->getPointeeType()); - assert(types[0]->isOpaquePointerTy() || types[0]->getNonOpaquePointerElementType() == elementType); - CallOrInvoke->addParamAttr(0, llvm::Attribute::get(CallOrInvoke->getContext(), llvm::Attribute::ElementType, elementType)); + CallOrInvoke = cheerp::createCheerpAllocate(CGF.Builder, origFunc, elementType, Args[0].getKnownRValue().getScalarVal(), use_array); RV = RValue::get(CallOrInvoke); } - else if(IsDelete && cheerp && !(asmjs && user_defined_new)) + else if(IsDelete && cheerp && !(asmjs && (user_defined_new || fancy_new))) { - QualType retType = CGF.getContext().getPointerType(allocType); - llvm::Type* types[] = { CGF.ConvertType(retType) }; - llvm::Function* CalleeAddr = llvm::Intrinsic::getDeclaration(&CGF.CGM.getModule(), - llvm::Intrinsic::cheerp_deallocate, types); - llvm::Value* Arg[] = { Args[0].getKnownRValue().getScalarVal() }; - if (Arg[0]->getType() != types[0]) { - Arg[0] = CGF.Builder.CreateBitCast(Arg[0], types[0]); + llvm::Function* origFunc = nullptr; + if (asmjs || !(allocType->getAsTagDecl() && allocType->getAsTagDecl()->hasAttr())) { + origFunc = cast(CalleePtr); + } + QualType argType = CGF.getContext().getPointerType(allocType); + llvm::Type* elementType = CGF.ConvertTypeForMem(argType->getPointeeType()); + llvm::Value* ptrArg = Args[0].getKnownRValue().getScalarVal(); + if (ptrArg->getType() != CGF.ConvertType(argType)) { + ptrArg = CGF.Builder.CreateBitCast(ptrArg, CGF.ConvertType(argType)); } - CallOrInvoke = 
CGF.Builder.CreateCall(cast(CalleeAddr->getValueType()), CalleeAddr, Arg); + CallOrInvoke = cheerp::createCheerpDeallocate(CGF.Builder, origFunc, elementType, ptrArg); RV = RValue::get(CallOrInvoke); } else diff --git a/clang/lib/CodeGen/ItaniumCXXABI.cpp b/clang/lib/CodeGen/ItaniumCXXABI.cpp index 29491c7e049c..b69bbc0d6ef6 100644 --- a/clang/lib/CodeGen/ItaniumCXXABI.cpp +++ b/clang/lib/CodeGen/ItaniumCXXABI.cpp @@ -1351,15 +1351,8 @@ void ItaniumCXXABI::emitRethrow(CodeGenFunction &CGF, bool isNoReturn) { CGF.EmitRuntimeCallOrInvoke(Fn); } -static llvm::FunctionCallee getAllocateExceptionFn(CodeGenModule &CGM, llvm::Type* pointedTy) { +static llvm::FunctionCallee getAllocateExceptionFn(CodeGenModule &CGM) { // void *__cxa_allocate_exception(size_t thrown_size); - - if (!CGM.getTarget().isByteAddressable()) { - llvm::Type* Tys[2] = { pointedTy->getPointerTo(), pointedTy->getPointerTo() }; - llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::cheerp_allocate, Tys); - return llvm::FunctionCallee(F); - } - llvm::FunctionType *FTy = llvm::FunctionType::get(CGM.Int8PtrTy, CGM.SizeTy, /*isVarArg=*/false); @@ -1416,26 +1409,36 @@ void ItaniumCXXABI::emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) { elemTy = CGM.VoidPtrTy; } - llvm::FunctionCallee AllocExceptionFn = getAllocateExceptionFn(CGM, elemTy); - - llvm::FunctionType* FT = AllocExceptionFn.getFunctionType(); - SmallVector Ops(FT->getNumParams()); - if (FT->getNumParams() == 2) { - // Cheerp specific logic - assert(!CGM.getTarget().isByteAddressable()); - Ops[0] = llvm::Constant::getNullValue(FT->getParamType(0)); - } - Ops.back() = llvm::ConstantInt::get(SizeTy, TypeSize); + llvm::CallInst* ExceptionPtr = nullptr; + llvm::Value* Size = llvm::ConstantInt::get(SizeTy, TypeSize); + if (CGM.getLangOpts().Cheerp) { + llvm::Function* origFunc = nullptr; + // We want to know if this exception is supposed to be allocated as a wasm + // or js object + // TODO: do it better when we have address spaces + bool asmjs 
= false; + bool genericjs = false; + if (const RecordType *RecordTy = ThrowType->getAs()) { + auto* D = RecordTy->getDecl(); + asmjs = D->hasAttr(); + genericjs = D->hasAttr(); + } + if (!asmjs && !genericjs) { + asmjs = CGM.getTarget().getTriple().isCheerpWasm(); + } + if (asmjs) { + llvm::FunctionType *MallocTy = + llvm::FunctionType::get(CGF.Int8PtrTy, CGF.Int32Ty, /*isVarArg=*/false); - llvm::CallInst *ExceptionPtr = CGF.EmitNounwindRuntimeCall( - AllocExceptionFn, Ops, "exception"); + origFunc = cast(CGF.CGM.CreateRuntimeFunction(MallocTy, "malloc").getCallee()); + } + ExceptionPtr = cheerp::createCheerpAllocate(CGF.Builder, origFunc, elemTy, Size); + } else { + llvm::FunctionCallee AllocExceptionFn = getAllocateExceptionFn(CGM); + ExceptionPtr = CGF.EmitNounwindRuntimeCall( + AllocExceptionFn, Size, "exception"); - if (FT->getNumParams() == 2) { - llvm::Type* paramType = AllocExceptionFn.getFunctionType()->getParamType(0); - assert(paramType->isOpaquePointerTy() || paramType->getNonOpaquePointerElementType() == elemTy); - ExceptionPtr->addParamAttr(0, llvm::Attribute::get(ExceptionPtr->getContext(), llvm::Attribute::ElementType, elemTy)); } - CharUnits ExnAlign = CGF.getContext().getExnObjectAlignment(); if(!CGM.getTarget().isByteAddressable() && ThrowType->isPointerType()) CGF.EmitTypedPtrExprToExn(E->getSubExpr(), Address(ExceptionPtr, CGM.Int8PtrTy, ExnAlign)); diff --git a/clang/lib/Sema/SemaCoroutine.cpp b/clang/lib/Sema/SemaCoroutine.cpp index 51ae0c67a2f4..bf7a62471eba 100644 --- a/clang/lib/Sema/SemaCoroutine.cpp +++ b/clang/lib/Sema/SemaCoroutine.cpp @@ -1588,10 +1588,15 @@ bool CoroutineStmtBuilder::makeNewAndDeleteExpr() { FrameAlignment->getType())) DeleteArgs.push_back(FrameAlignment); - ExprResult DeleteExpr = - S.BuildCallExpr(S.getCurScope(), DeleteRef.get(), Loc, DeleteArgs, Loc); - DeleteExpr = - S.ActOnFinishFullExpr(DeleteExpr.get(), /*DiscardedValue*/ false); + ExprResult DeleteExpr; + if (!asmjs && 
S.Context.getTargetInfo().getTriple().getArch() == llvm::Triple::cheerp) { + DeleteExpr = S.BuildBuiltinCallExpr(Loc, (Builtin::ID)Cheerp::BI__builtin_cheerp_deallocate, {CoroFree}); + } else { + DeleteExpr = + S.BuildCallExpr(S.getCurScope(), DeleteRef.get(), Loc, DeleteArgs, Loc); + DeleteExpr = + S.ActOnFinishFullExpr(DeleteExpr.get(), /*DiscardedValue*/ false); + } if (DeleteExpr.isInvalid()) return false; diff --git a/compiler-rt/test/asan/TestCases/calloc-overflow.cpp b/compiler-rt/test/asan/TestCases/calloc-overflow.cpp index 5fdd50122c94..8aaf24c1871b 100644 --- a/compiler-rt/test/asan/TestCases/calloc-overflow.cpp +++ b/compiler-rt/test/asan/TestCases/calloc-overflow.cpp @@ -1,6 +1,7 @@ // RUN: %clangxx_asan -O0 %s -o %t && not %run %t 2>&1 | FileCheck %s // REQUIRES: stable-runtime +// XFAIL: cheerp #include #include diff --git a/compiler-rt/test/asan/TestCases/use-after-delete.cpp b/compiler-rt/test/asan/TestCases/use-after-delete.cpp index fef8f5707e2e..47bc7194252f 100644 --- a/compiler-rt/test/asan/TestCases/use-after-delete.cpp +++ b/compiler-rt/test/asan/TestCases/use-after-delete.cpp @@ -21,8 +21,9 @@ int main() { // CHECK-Windows:{{ #0 0x.* in operator delete\[\]}} // CHECK-FreeBSD:{{ #0 0x.* in operator delete\[\]}} // CHECK-Darwin: {{ #0 0x.* in .*_Zda}} - // CHECK: {{ #0 0x.* in .*free}} - // CHECK-NEXT: {{ #1 0x.* in .*main .*}} + // CHECK: {{ #0 0x.* in .*free .*}} + // CHECK: {{ #1 0x.* in operator delete\[\]}} + // CHECK-NEXT: {{ #2 0x.* in .*main .*}} // CHECK: {{previously allocated by thread T0 here:}} // CHECK-Linux: {{ #0 0x.* in operator new\[\]}} @@ -30,8 +31,10 @@ int main() { // CHECK-Windows:{{ #0 0x.* in operator new\[\]}} // CHECK-FreeBSD:{{ #0 0x.* in operator new\[\]}} // CHECK-Darwin: {{ #0 0x.* in .*_Zna}} - // CHECK: {{ #0 0x.* in .*malloc}} - // CHECK-NEXT: {{ #1 0x.* in .*main .*}} + // CHECK: {{ #0 0x.* in .*malloc .*}} + // CHECK: {{ #1 0x.* in operator new.*}} + // CHECK: {{ #2 0x.* in operator new\[\]}} + // 
CHECK-NEXT: {{ #3 0x.* in .*main .*}} // CHECK: Shadow byte legend (one shadow byte represents {{[0-9]+}} application bytes): // CHECK: Global redzone: diff --git a/llvm/include/llvm/Cheerp/PointerPasses.h b/llvm/include/llvm/Cheerp/PointerPasses.h index eaaacb059ff9..efabdb5f5389 100644 --- a/llvm/include/llvm/Cheerp/PointerPasses.h +++ b/llvm/include/llvm/Cheerp/PointerPasses.h @@ -81,7 +81,9 @@ class PointerToImmutablePHIRemovalPass : public llvm::PassInfoMixin { +public: + llvm::PreservedAnalyses run(llvm::Function& F, llvm::FunctionAnalysisManager& FAM); + static bool isRequired() { return true;} +}; + /** diff --git a/llvm/include/llvm/Cheerp/Utility.h b/llvm/include/llvm/Cheerp/Utility.h index 81576f4787a6..22654f79803e 100644 --- a/llvm/include/llvm/Cheerp/Utility.h +++ b/llvm/include/llvm/Cheerp/Utility.h @@ -23,6 +23,7 @@ #include "llvm/IR/AbstractCallSite.h" #include "llvm/IR/Constants.h" #include "llvm/IR/Dominators.h" +#include "llvm/IR/IRBuilder.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/IntrinsicInst.h" #include "llvm/IR/Operator.h" @@ -278,6 +279,23 @@ inline bool isGEP(const llvm::Value* v) return false; } +llvm::CallInst* createCheerpAllocate(llvm::IRBuilderBase& Builder, + llvm::Function* origFunc, + llvm::Type* elementType, + llvm::Value* sizeArg, + bool use_array = false); + +llvm::CallInst* createCheerpReallocate(llvm::IRBuilderBase& Builder, + llvm::Function* origFunc, + llvm::Type* elementType, + llvm::Value* ptrArg, + llvm::Value* sizeArg); + +llvm::CallInst* createCheerpDeallocate(llvm::IRBuilderBase& Builder, + llvm::Function* origFunc, + llvm::Type* elementType, + llvm::Value* ptrArg); + //Utility function that calculate the offset for Structs or Array at a given index of a GEP int32_t partialOffset(llvm::Type* & curType, llvm::Type* alternative, const llvm::DataLayout& DL, const int32_t index); @@ -304,11 +322,6 @@ bool hasNonLoadStoreUses(const llvm::Value* v); llvm::Type* getGEPContainerType(const llvm::User* gep); -inline 
bool isFreeFunctionName(llvm::StringRef name) -{ - return name=="free" || name=="_ZdlPv" || name=="_ZdaPv"; -} - struct LoopWithDepth { LoopWithDepth(const llvm::Loop* loop) @@ -601,12 +614,8 @@ class DynamicAllocInfo enum AllocType { not_an_alloc, - malloc, - calloc, cheerp_allocate, cheerp_reallocate, - opnew, // operator new(unsigned int) - opnew_array // operator new[](unsigned int) }; /** @@ -645,11 +654,6 @@ class DynamicAllocInfo * This argument will never be null */ const llvm::Value * getByteSizeArg() const; - - /** - * This can be null if getAllocType() == calloc - */ - const llvm::Value * getNumberOfElementsArg() const; /** * This can be null if getAllocType() != cheerp_reallocate diff --git a/llvm/include/llvm/Cheerp/Writer.h b/llvm/include/llvm/Cheerp/Writer.h index ab84416f44d0..a8b0f4d8286f 100644 --- a/llvm/include/llvm/Cheerp/Writer.h +++ b/llvm/include/llvm/Cheerp/Writer.h @@ -334,7 +334,6 @@ class CheerpWriter final : public CheerpBaseWriter uint32_t compileArraySize(const DynamicAllocInfo& info, bool shouldPrint, bool isBytes = false); void compileAllocation(const DynamicAllocInfo& info); - COMPILE_INSTRUCTION_FEEDBACK compileFree(const llvm::Value* obj); /** @} */ diff --git a/llvm/include/llvm/IR/IntrinsicsCheerp.td b/llvm/include/llvm/IR/IntrinsicsCheerp.td index 5c5fb43043e5..127997440fae 100644 --- a/llvm/include/llvm/IR/IntrinsicsCheerp.td +++ b/llvm/include/llvm/IR/IntrinsicsCheerp.td @@ -45,9 +45,9 @@ def int_cheerp_allocate_array : Intrinsic<[llvm_anyptr_ty], def int_cheerp_get_array_len : Intrinsic<[llvm_i32_ty], [llvm_anyptr_ty]>; def int_cheerp_reallocate : Intrinsic<[llvm_anyptr_ty], - [llvm_anyptr_ty,llvm_i32_ty]>; + [llvm_anyptr_ty,llvm_anyptr_ty, llvm_i32_ty]>; def int_cheerp_deallocate : Intrinsic<[], - [llvm_anyptr_ty]>; + [llvm_anyptr_ty, llvm_anyptr_ty]>; def int_cheerp_coro_alloc : Intrinsic<[llvm_ptr_ty], [llvm_i32_ty]>; diff --git a/llvm/lib/CheerpUtils/GlobalDepsAnalyzer.cpp b/llvm/lib/CheerpUtils/GlobalDepsAnalyzer.cpp 
index 91139e34477e..c0dbf22ea2a2 100644 --- a/llvm/lib/CheerpUtils/GlobalDepsAnalyzer.cpp +++ b/llvm/lib/CheerpUtils/GlobalDepsAnalyzer.cpp @@ -259,63 +259,40 @@ bool GlobalDepsAnalyzer::runOnModule( llvm::Module & module ) continue; } - if(asmjs) + if((II == Intrinsic::cheerp_allocate || + II == Intrinsic::cheerp_allocate_array || + II == Intrinsic::cheerp_reallocate || + II == Intrinsic::cheerp_deallocate ) && + !isa(ci->getArgOperand(0))) { - if(II == Intrinsic::cheerp_allocate) + Function* OrigFunc = dyn_cast(ci->getOperand(0)); + assert(OrigFunc); + if (llcPass) { - //cheerp_allocate(nullptr, N) -> malloc(N), so we need to move argument(1) to argument(0) + // cheerp_*allocate functions have the form + // cheerp_func(orig_func, args...). + // If orig_func is not null, convert to orig_func(args...) + // and add appropriate bitcasts IRBuilder<> Builder(ci); - FunctionAnalysisManager& FAM = MAM->getResult(module).getManager(); - const llvm::TargetLibraryInfo* TLI = &FAM.getResult(F); - assert(TLI); - Value* newCall = emitMalloc(ci->getOperand(1), Builder, *DL, TLI); - Type* oldType = ci->getType(); - if(oldType != newCall->getType()) - { - Instruction* newCast = new BitCastInst(UndefValue::get(newCall->getType()), oldType, "", ci->getNextNode()); - ci->replaceAllUsesWith(newCast); - ci->mutateType(newCall->getType()); - newCast->setOperand(0, ci); + SmallVector newOps; + for (uint32_t i = 0; i < OrigFunc->getFunctionType()->getNumParams(); i++) { + Type* expectedTy = OrigFunc->getFunctionType()->getParamType(i); + // HACK: the nothrow version of new has std::nothrow_t as last parameter + // we pass a constant null to it + Value* op = i+1 >= ci->arg_size()? 
ConstantPointerNull::get(cast(expectedTy)) : ci->getArgOperand(i+1); + newOps.push_back(Builder.CreateBitCast(op, expectedTy)); } - ci->replaceAllUsesWith(newCall); - - //Set up loop variable, so the next loop will check and possibly expand newCall - --instructionIterator; - advance = false; - assert(&*instructionIterator == newCall); - + CallBase* NewCall = Builder.CreateCall(OrigFunc, newOps); + Value* replacement = Builder.CreateBitCast(NewCall, ci->getType()); + ci->replaceAllUsesWith(replacement); ci->eraseFromParent(); + instructionIterator = NewCall->getIterator(); + advance = false; continue; } - else if(II == Intrinsic::cheerp_reallocate) - { - Function* F = module.getFunction("realloc"); - assert(F); - Type* oldType = ci->getType(); - if(oldType != F->getReturnType()) - { - Instruction* newParamCast = new BitCastInst(ci->getOperand(0), F->getReturnType(), "", ci); - ci->setOperand(0, newParamCast); - Instruction* newCast = new BitCastInst(UndefValue::get(F->getReturnType()), oldType, "", ci->getNextNode()); - ci->replaceAllUsesWith(newCast); - ci->mutateType(F->getReturnType()); - newCast->setOperand(0, ci); - } - ci->removeParamAttr(0, llvm::Attribute::ElementType); - ci->setCalledFunction(F); - } - else if(II == Intrinsic::cheerp_deallocate) + else { - Function* F = module.getFunction("free"); - assert(F); - ci->setCalledFunction(F); - Type* oldType = ci->getOperand(0)->getType(); - Type* newType = F->arg_begin()->getType(); - if(oldType != newType) - { - Instruction* newCast = new BitCastInst(ci->getOperand(0), newType, "", ci); - ci->setOperand(0, newCast); - } + extendLifetime(OrigFunc); } } @@ -1256,6 +1233,16 @@ void GlobalDepsAnalyzer::visitFunction(const Function* F, VisitedSet& visited) visitConstant(F->getPersonalityFn(), visited, Newsubexpr); } const Module* module = F->getParent(); + if (F->getName() == "malloc") + { + // If we are in opt, there is a chance that a following + // pass will convert malloc into a calloc, so keep that if we keep 
malloc + Function* fcalloc = module->getFunction("calloc"); + if (fcalloc && !llcPass) + { + extendLifetime(fcalloc); + } + } bool isAsmJS = F->getSection() == StringRef("asmjs"); for ( const BasicBlock & bb : *F ) for (const Instruction & I : bb) @@ -1279,9 +1266,8 @@ void GlobalDepsAnalyzer::visitFunction(const Function* F, VisitedSet& visited) else if ( const CallBase* CB = dyn_cast(&I) ) { DynamicAllocInfo ai (CB, DL, forceTypedArrays); - if ( !isAsmJS && ai.isValidAlloc() ) + if ( !isAsmJS && ai.isValidAlloc() && !TypeSupport::isAsmJSPointed(ai.getCastedPointedType())) { - assert(!TypeSupport::isAsmJSPointed(ai.getCastedPointedType())); if ( ai.useCreatePointerArrayFunc() ) hasPointerArrays = true; else if ( ai.useCreateArrayFunc() ) @@ -1327,66 +1313,6 @@ void GlobalDepsAnalyzer::visitFunction(const Function* F, VisitedSet& visited) // normal function called from asm.js else if (!calleeIsAsmJS && isAsmJS) asmJSImportedFuncions.insert(calledFunc); - - if (!llcPass && calledFunc->getName() == "malloc") - { - // If we are in opt, there is a chance that a following - // pass will convert malloc into a calloc, so keep that if we keep malloc - Function* fcalloc = module->getFunction("calloc"); - if (fcalloc) - { - extendLifetime(fcalloc); - } - } - } - // if this is an allocation intrinsic and we are in asmjs, - // visit the corresponding libc function. The same applies if the allocated type is asmjs. 
- else if (calledFunc->getIntrinsicID() == Intrinsic::cheerp_allocate || - calledFunc->getIntrinsicID() == Intrinsic::cheerp_allocate_array) - { - if (isAsmJS || TypeSupport::isAsmJSPointed(ci.getParamElementType(0))) - { - Function* fmalloc = module->getFunction("malloc"); - if (fmalloc) - { - if(!isAsmJS) - asmJSExportedFuncions.insert(fmalloc); - extendLifetime(fmalloc); - } - // If we are in opt, there is a chance that a following - // pass will convert malloc into a calloc, so keep that if we keep malloc - Function* fcalloc = module->getFunction("calloc"); - if (fcalloc && !llcPass) - { - extendLifetime(fcalloc); - } - } - } - else if (calledFunc->getIntrinsicID() == Intrinsic::cheerp_reallocate) - { - if (isAsmJS || TypeSupport::isAsmJSPointed(ci.getParamElementType(0))) - { - Function* frealloc = module->getFunction("realloc"); - if (frealloc) - { - if(!isAsmJS) - asmJSExportedFuncions.insert(frealloc); - extendLifetime(frealloc); - } - } - } - else if (calledFunc->getIntrinsicID() == Intrinsic::cheerp_deallocate) - { - Type* ty = ci.getOperand(0)->getType(); - bool basicType = !ty->isAggregateType(); - bool asmjsPtr = TypeSupport::isAsmJSPointer(ty); - Function* ffree = module->getFunction("free"); - if (ffree) - { - if(!isAsmJS && (basicType || asmjsPtr)) - asmJSExportedFuncions.insert(ffree); - extendLifetime(ffree); - } } else if (calledFunc->getIntrinsicID() == Intrinsic::memset) extendLifetime(module->getFunction("memset")); diff --git a/llvm/lib/CheerpUtils/LinearMemoryHelper.cpp b/llvm/lib/CheerpUtils/LinearMemoryHelper.cpp index 4c1080d8c740..f10ff4b48c52 100644 --- a/llvm/lib/CheerpUtils/LinearMemoryHelper.cpp +++ b/llvm/lib/CheerpUtils/LinearMemoryHelper.cpp @@ -593,14 +593,11 @@ if (!functionTypeIndices.count(fTy)) { \ #undef ADD_BUILTIN #undef ADD_FUNCTION_TYPE - // Check if the __genericjs__free function is present. 
If so, consider - // "free()" as if its address is taken - bool freeTaken = module->getFunction("__genericjs__free") != nullptr; // Build the function tables first for (const Function* F : asmjsFunctions_) { const FunctionType* fTy = F->getFunctionType(); - if (F->hasAddressTaken() || F->getName() == StringRef(wasmNullptrName) || (freeTaken && F->getName() == StringRef("free"))) { + if (F->hasAddressTaken() || F->getName() == StringRef(wasmNullptrName)) { auto it = functionTables.find(fTy); if (it == functionTables.end()) { @@ -815,23 +812,11 @@ int32_t LinearMemoryHelper::getThreadLocalOffset(const GlobalVariable* G) const uint32_t LinearMemoryHelper::getFunctionAddress(const llvm::Function* F) const { - if (F->getName() == StringRef("__genericjs__free")) - { - const Function* ffree = module->getFunction("free"); - assert(ffree); - F = ffree; - } assert(functionAddresses.count(F)); return functionAddresses.find(F)->second; } bool LinearMemoryHelper::functionHasAddress(const llvm::Function* F) const { - if (F->getName() == StringRef("__genericjs__free")) - { - const Function* ffree = module->getFunction("free"); - assert(ffree); - F = ffree; - } return functionAddresses.count(F); } uint32_t LinearMemoryHelper::getFunctionAddressMask(const llvm::FunctionType* Fty) const diff --git a/llvm/lib/CheerpUtils/NativeRewriter.cpp b/llvm/lib/CheerpUtils/NativeRewriter.cpp index b7884ba7ab44..98addd3ee9b6 100644 --- a/llvm/lib/CheerpUtils/NativeRewriter.cpp +++ b/llvm/lib/CheerpUtils/NativeRewriter.cpp @@ -441,11 +441,9 @@ bool CheerpNativeRewriterPass::rewriteNativeObjectsConstructors(Module& M, Funct if(called->getIntrinsicID() != Intrinsic::cheerp_allocate && called->getIntrinsicID() != Intrinsic::cheerp_allocate_array) continue; - //This should be a typed new - Type* t=i.getParamElementType(0); - assert(t); + Type* t=i.getRetElementType(); std::string builtinTypeName; - if(!t->isStructTy() || !isBuiltinType(t->getStructName().data(), builtinTypeName)) + if(!t || 
!t->isStructTy() || !isBuiltinType(t->getStructName().data(), builtinTypeName)) continue; rewriteNativeAllocationUsers(M,toRemove,&i,t,builtinTypeName); Changed = true; diff --git a/llvm/lib/CheerpUtils/PointerAnalyzer.cpp b/llvm/lib/CheerpUtils/PointerAnalyzer.cpp index 1684b793a99e..329c3d380960 100644 --- a/llvm/lib/CheerpUtils/PointerAnalyzer.cpp +++ b/llvm/lib/CheerpUtils/PointerAnalyzer.cpp @@ -954,15 +954,6 @@ PointerKindWrapper& PointerUsageVisitor::visitUse(PointerKindWrapper& ret, const return ret |= pointerKindData.getConstraintPtr(IndirectPointerKindConstraint(INDIRECT_ARG_CONSTRAINT, typeAndIndex)); } - if (isFreeFunctionName(calledFunction->getName())) - { - if (TypeSupport::isTypedArrayType(U->get()->getType()->getPointerElementType(), true)) - { - return ret |= PointerKindWrapper(SPLIT_REGULAR, p); - } - return ret |= COMPLETE_OBJECT; - } - unsigned argNo = cs->getArgOperandNo(U); if ( argNo >= calledFunction->arg_size() ) @@ -1393,15 +1384,6 @@ PointerConstantOffsetWrapper& PointerConstantOffsetVisitor::visitValue(PointerCo { return CacheAndReturn(ret |= Zero); } - if(F->getName() == "calloc" || - F->getName() == "malloc" || - F->getName() == "_Znwj" || - F->getName() == "_Znaj" || - F->getName() == "realloc") - { - return CacheAndReturn(ret |= Zero); - } - } // Handle global pointers diff --git a/llvm/lib/CheerpUtils/PointerPasses.cpp b/llvm/lib/CheerpUtils/PointerPasses.cpp index 95753df7a2d7..9af39cb57250 100644 --- a/llvm/lib/CheerpUtils/PointerPasses.cpp +++ b/llvm/lib/CheerpUtils/PointerPasses.cpp @@ -9,6 +9,7 @@ // //===----------------------------------------------------------------------===// +#include "llvm/IR/InstIterator.h" #include "llvm/InitializePasses.h" #include "llvm/Analysis/InstructionSimplify.h" #include "llvm/Analysis/LoopInfo.h" @@ -58,7 +59,6 @@ class AllocaArrays class FreeAndDeleteRemoval { private: - void deleteInstructionAndUnusedOperands(llvm::Instruction* I); bool isWasmTarget; public: explicit FreeAndDeleteRemoval(): 
isWasmTarget(false) { } @@ -69,7 +69,7 @@ bool AllocaArrays::replaceAlloca(AllocaInst* ai, cheerp::GlobalDepsAnalyzer& gda { const ConstantInt * ci = dyn_cast(ai->getArraySize()); - // Runtime alloca size, convert it to cheerp_allocate + // Runtime alloca size, convert it to cheerp_allocate if (!ci) { Module* M = ai->getParent()->getParent()->getParent(); @@ -77,13 +77,13 @@ bool AllocaArrays::replaceAlloca(AllocaInst* ai, cheerp::GlobalDepsAnalyzer& gda Type* int32Ty = IntegerType::getInt32Ty(M->getContext()); Type* allocTy = ai->getAllocatedType(); gda.visitDynSizedAlloca(allocTy); - Function* cheerp_allocate = Intrinsic::getDeclaration(M, Intrinsic::cheerp_allocate, ai->getType()); + Function* cheerp_allocate = Intrinsic::getDeclaration(M, Intrinsic::cheerp_allocate, {ai->getType(), ai->getType()}); IRBuilder<> Builder(ai); uint32_t elemSize = targetData.getTypeAllocSize(allocTy); Value* size = Builder.CreateMul(ai->getArraySize(), ConstantInt::get(int32Ty, elemSize, false)); - Instruction* alloc = CallInst::Create(cheerp_allocate, size); + Instruction* alloc = CallInst::Create(cheerp_allocate, {ConstantPointerNull::get(ai->getType()), size}); BasicBlock::iterator ii(ai); ReplaceInstWithInst(ai->getParent()->getInstList(), ii, alloc); return true; @@ -546,7 +546,7 @@ llvm::PreservedAnalyses PointerToImmutablePHIRemovalPass::run(Function& F, Funct return PA; } -void FreeAndDeleteRemoval::deleteInstructionAndUnusedOperands(Instruction* I) +static void deleteInstructionAndUnusedOperands(Instruction* I) { SmallVector operandsToErase; for(Value* op: I->operands()) @@ -562,44 +562,41 @@ void FreeAndDeleteRemoval::deleteInstructionAndUnusedOperands(Instruction* I) deleteInstructionAndUnusedOperands(I); } -static Function* getOrCreateGenericJSFree(Module& M, bool isAllGenericJS) +static Function* getOrCreateGenericJSFree(Module& M, Function* Orig) { - Function* Orig = M.getFunction("free"); - assert(Orig); FunctionType* Ty = Orig->getFunctionType(); - Function* New = 
cast(M.getOrInsertFunction("__genericjs__free", Ty).getCallee()); + std::string name = Twine("__genericjs__", Orig->getName()).str(); + Function* New = cast(M.getOrInsertFunction(name, Ty).getCallee()); if (!New->empty()) return New; New->addFnAttr(Attribute::NoInline); BasicBlock* Entry = BasicBlock::Create(M.getContext(),"entry", New); IRBuilder<> Builder(Entry); - if (!isAllGenericJS) - { - Type* VoidPtr = IntegerType::get(M.getContext(), 8)->getPointerTo(); - Type* Tys[] = { VoidPtr }; - Function *GetBase = Intrinsic::getDeclaration(&M, Intrinsic::cheerp_is_linear_heap, Tys); - Function *ElemSize = Intrinsic::getDeclaration(&M, Intrinsic::cheerp_pointer_elem_size, Tys); - - BasicBlock* ExitBlock = BasicBlock::Create(M.getContext(), "exitblk", New); - BasicBlock* ForwardBlock = BasicBlock::Create(M.getContext(), "fwdblk", New); - - Value* Params[] = { &*New->arg_begin() }; - CallInst* IntrCall = Builder.CreateCall(GetBase, Params); - Builder.CreateCondBr(IntrCall, ForwardBlock, ExitBlock); - - Builder.SetInsertPoint(ExitBlock); - Builder.CreateRetVoid(); - - Builder.SetInsertPoint(ForwardBlock); - Function *PtrOffset = Intrinsic::getDeclaration(&M, Intrinsic::cheerp_pointer_offset, Tys); - CallInst* Offset = Builder.CreateCall(PtrOffset, Params); - CallInst* Size = Builder.CreateCall(ElemSize, Params); - Value* OffsetShifted = Builder.CreateMul(Offset, Size); - Value* OffsetP = Builder.CreateIntToPtr(OffsetShifted, VoidPtr); - Value* Params2[] = { OffsetP }; - Builder.CreateCall(Orig, Params2); - } + Type* VoidPtr = IntegerType::get(M.getContext(), 8)->getPointerTo(); + Type* Tys[] = { VoidPtr }; + Function *GetBase = Intrinsic::getDeclaration(&M, Intrinsic::cheerp_is_linear_heap, Tys); + Function *ElemSize = Intrinsic::getDeclaration(&M, Intrinsic::cheerp_pointer_elem_size, Tys); + + BasicBlock* ExitBlock = BasicBlock::Create(M.getContext(), "exitblk", New); + BasicBlock* ForwardBlock = BasicBlock::Create(M.getContext(), "fwdblk", New); + + Value* Params[] = { 
&*New->arg_begin() }; + CallInst* IntrCall = Builder.CreateCall(GetBase, Params); + Builder.CreateCondBr(IntrCall, ForwardBlock, ExitBlock); + + Builder.SetInsertPoint(ExitBlock); + Builder.CreateRetVoid(); + + Builder.SetInsertPoint(ForwardBlock); + Function *PtrOffset = Intrinsic::getDeclaration(&M, Intrinsic::cheerp_pointer_offset, Tys); + CallInst* Offset = Builder.CreateCall(PtrOffset, Params); + CallInst* Size = Builder.CreateCall(ElemSize, Params); + Value* OffsetShifted = Builder.CreateMul(Offset, Size); + Value* OffsetP = Builder.CreateIntToPtr(OffsetShifted, VoidPtr); + Value* Params2[] = { OffsetP }; + Builder.CreateCall(Orig, Params2); + Builder.CreateRetVoid(); return New; @@ -609,63 +606,9 @@ bool FreeAndDeleteRemoval::runOnModule(Module& M) bool Changed = false; isWasmTarget = Triple(M.getTargetTriple()).isCheerpWasm(); - std::vector usesToBeReplaced; for (Function& f: M) { - if (cheerp::isFreeFunctionName(f.getName())) - { - auto UI = f.use_begin(), E = f.use_end(); - for (; UI != E;) - { - Use &U = *UI; - ++UI; - User* Usr = U.getUser(); - if (CallBase* call = dyn_cast(Usr)) - { - if (!isWasmTarget) - { - deleteInstructionAndUnusedOperands(call); - Changed = true; - continue; - } - } - - if (Instruction* inst = dyn_cast(Usr)) - { - Function* Parent = inst->getParent()->getParent(); - if (Parent->getSection() == StringRef("asmjs") || Parent->getName() == StringRef("__genericjs__free")) - { - continue; - } - U.set(getOrCreateGenericJSFree(M, !isWasmTarget)); - Changed = true; - } - else if (GlobalValue* gv = dyn_cast(Usr)) - { - if (gv->getSection() == StringRef("asmjs")) - { - continue; - } - U.set(getOrCreateGenericJSFree(M, !isWasmTarget)); - Changed = true; - } - else if (Constant* c = dyn_cast(Usr)) - { - if (isa(U.get()) && cheerp::isFreeFunctionName(cast(U.get())->getName())) - { - usesToBeReplaced.push_back(&U); - Changed = true; - } - } - else - { - U.set(getOrCreateGenericJSFree(M, !isWasmTarget)); - Changed = true; - } - - } - } - else if 
(f.getIntrinsicID() == Intrinsic::cheerp_deallocate) + if (f.getIntrinsicID() == Intrinsic::cheerp_deallocate) { auto UI = f.use_begin(), E = f.use_end(); for (; UI != E;) @@ -677,52 +620,22 @@ bool FreeAndDeleteRemoval::runOnModule(Module& M) bool asmjs = call->getParent()->getParent()->getSection()==StringRef("asmjs"); if (asmjs) continue; - Type* ty = call->getOperand(0)->getType(); - assert(isa(ty)); - Type* elemTy = cast(ty)->getPointerElementType(); - if (!isWasmTarget || (!cheerp::TypeSupport::isAsmJSPointed(elemTy) && elemTy->isAggregateType())) + Type* elemTy = call->getParamElementType(1); + if (!isWasmTarget || (elemTy && !cheerp::TypeSupport::isAsmJSPointed(elemTy) && elemTy->isAggregateType())) { deleteInstructionAndUnusedOperands(call); Changed = true; } - else if (cheerp::TypeSupport::isAsmJSPointed(elemTy)) - { - U.set(M.getFunction("free")); - } - else - { - U.set(getOrCreateGenericJSFree(M, false)); - } - } - } - } - - { - // TODO: Move to a proper pass - for(BasicBlock& BB: f) - { - for ( BasicBlock::iterator it = BB.begin(); it != BB.end(); ) - { - Instruction * Inst = &*it++; - if(isa(Inst)) - { - Inst->replaceAllUsesWith(Inst->getOperand(0)); - Inst->eraseFromParent(); - } - else if(isa(Inst)) + else if (!elemTy || !cheerp::TypeSupport::isAsmJSPointed(elemTy)) { - deleteInstructionAndUnusedOperands(Inst); + Function* origF = cast(call->getArgOperand(0)); + call->setArgOperand(0, getOrCreateGenericJSFree(M, origF)); } } } } } - if (!usesToBeReplaced.empty()) - { - cheerp::replaceSomeUsesWith(usesToBeReplaced, getOrCreateGenericJSFree(M, !isWasmTarget)); - } - return Changed; } @@ -734,6 +647,28 @@ PreservedAnalyses FreeAndDeleteRemovalPass::run(Module& M, ModuleAnalysisManager return PreservedAnalyses::none(); } +PreservedAnalyses FreezeAndAssumeRemovalPass::run(Function& F, FunctionAnalysisManager& FAM) +{ + bool Changed = false; + for (auto& I: make_early_inc_range(instructions(F))) + { + if(isa(I)) + { + 
I.replaceAllUsesWith(I.getOperand(0)); + I.eraseFromParent(); + Changed = true; + } + else if(isa(I)) + { + deleteInstructionAndUnusedOperands(&I); + Changed = true; + } + } + if (Changed) + return PreservedAnalyses::none(); + return PreservedAnalyses::all(); +} + uint32_t DelayInsts::countInputRegisters(const Instruction* I, cheerp::InlineableCache& cache) const { uint32_t count = 0; diff --git a/llvm/lib/CheerpUtils/TypeOptimizer.cpp b/llvm/lib/CheerpUtils/TypeOptimizer.cpp index d224ac12ec29..c4ba2f08315e 100644 --- a/llvm/lib/CheerpUtils/TypeOptimizer.cpp +++ b/llvm/lib/CheerpUtils/TypeOptimizer.cpp @@ -1117,15 +1117,26 @@ Function* TypeOptimizer::rewriteIntrinsic(Function* F, FunctionType* FT) SmallVector newTys; switch(F->getIntrinsicID()) { + case Intrinsic::cheerp_reallocate: + { + Type* localTys[] = { FT->getReturnType(), FT->getParamType(0), FT->getParamType(1)}; + newTys.insert(newTys.end(),localTys,localTys+3); + break; + } + case Intrinsic::cheerp_deallocate: + { + Type* localTys[] = { FT->getParamType(0), FT->getParamType(1)}; + newTys.insert(newTys.end(),localTys,localTys+2); + break; + } case Intrinsic::cheerp_upcast_collapsed: case Intrinsic::cheerp_cast_user: case Intrinsic::cheerp_downcast: case Intrinsic::cheerp_virtualcast: - case Intrinsic::cheerp_allocate: - case Intrinsic::cheerp_allocate_array: - case Intrinsic::cheerp_reallocate: case Intrinsic::cheerp_make_complete_object: case Intrinsic::cheerp_make_regular: + case Intrinsic::cheerp_allocate: + case Intrinsic::cheerp_allocate_array: { Type* localTys[] = { FT->getReturnType(), FT->getParamType(0)}; newTys.insert(newTys.end(),localTys,localTys+2); @@ -1133,7 +1144,6 @@ Function* TypeOptimizer::rewriteIntrinsic(Function* F, FunctionType* FT) } case Intrinsic::cheerp_downcast_current: case Intrinsic::cheerp_get_array_len: - case Intrinsic::cheerp_deallocate: case Intrinsic::cheerp_pointer_kind: case Intrinsic::cheerp_throw: case Intrinsic::cheerp_pointer_offset: diff --git 
a/llvm/lib/CheerpUtils/Utility.cpp b/llvm/lib/CheerpUtils/Utility.cpp index 320589f80ac2..33b9e0386b1d 100644 --- a/llvm/lib/CheerpUtils/Utility.cpp +++ b/llvm/lib/CheerpUtils/Utility.cpp @@ -12,6 +12,7 @@ #include #include #include "llvm/Cheerp/JsExport.h" +#include "llvm/IR/IRBuilder.h" #include "llvm/InitializePasses.h" #include "llvm/Cheerp/Demangler.h" #include "llvm/Cheerp/EdgeContext.h" @@ -608,6 +609,62 @@ uint32_t getIntFromValue(const Value* v) return i->getZExtValue(); } + +CallInst* createCheerpAllocate(IRBuilderBase& Builder, + Function* origFunc, + Type* elementType, + Value* sizeArg, + bool use_array) +{ + unsigned AS = 0; + auto Intr = use_array? Intrinsic::cheerp_allocate_array : Intrinsic::cheerp_allocate; + PointerType* origFuncTy = origFunc? origFunc->getFunctionType()->getPointerTo(origFunc->getAddressSpace()) : Builder.getInt8PtrTy(); + Type* retTy = elementType? elementType->getPointerTo(AS) : Builder.getInt8PtrTy(AS); + Type* Tys[] = { retTy, origFuncTy }; + Constant* origFuncArg = origFunc? (Constant*)origFunc : (Constant*)ConstantPointerNull::get(origFuncTy); + CallInst* Call = Builder.CreateIntrinsic(Intr, Tys, {origFuncArg, sizeArg}); + if (elementType) + { + Call->addRetAttr(llvm::Attribute::get(Call->getContext(), llvm::Attribute::ElementType, elementType)); + } + return Call; +} +llvm::CallInst* createCheerpReallocate(llvm::IRBuilderBase& Builder, + llvm::Function* origFunc, + llvm::Type* elementType, + llvm::Value* ptrArg, + llvm::Value* sizeArg) +{ + unsigned AS = 0; + PointerType* origFuncTy = origFunc? origFunc->getFunctionType()->getPointerTo(origFunc->getAddressSpace()) : Builder.getInt8PtrTy(); + Type* retTy = elementType? elementType->getPointerTo(AS) : Builder.getInt8PtrTy(AS); + Type* Tys[] = { retTy, origFuncTy, ptrArg->getType() }; + Constant* origFuncArg = origFunc? 
(Constant*)origFunc : (Constant*)ConstantPointerNull::get(origFuncTy); + CallInst* Call = Builder.CreateIntrinsic(Intrinsic::cheerp_reallocate, Tys, {origFuncArg, ptrArg, sizeArg}); + if (elementType) + { + Call->addParamAttr(1, llvm::Attribute::get(Call->getContext(), llvm::Attribute::ElementType, elementType)); + Call->addRetAttr(llvm::Attribute::get(Call->getContext(), llvm::Attribute::ElementType, elementType)); + } + return Call; +} + +llvm::CallInst* createCheerpDeallocate(llvm::IRBuilderBase& Builder, + llvm::Function* origFunc, + llvm::Type* elementType, + llvm::Value* ptrArg) +{ + PointerType* origFuncTy = origFunc? origFunc->getFunctionType()->getPointerTo(origFunc->getAddressSpace()) : Builder.getInt8PtrTy(); + Type* Tys[] = { origFuncTy, ptrArg->getType() }; + Constant* origFuncArg = origFunc? (Constant*)origFunc : (Constant*)ConstantPointerNull::get(origFuncTy); + CallInst* Call = Builder.CreateIntrinsic(Intrinsic::cheerp_deallocate, Tys, {origFuncArg, ptrArg}); + if (elementType) + { + Call->addParamAttr(1, llvm::Attribute::get(Call->getContext(), llvm::Attribute::ElementType, elementType)); + } + return Call; +} + std::string valueObjectName(const Value* v) { std::ostringstream os; @@ -853,114 +910,40 @@ DynamicAllocInfo::AllocType DynamicAllocInfo::getAllocType( const CallBase* call { if (const Function * f = callV->getCalledFunction() ) { - if (f->getName() == "malloc") - ret = malloc; - else if (f->getName() == "calloc") - ret = calloc; - else if (f->getIntrinsicID() == Intrinsic::cheerp_allocate || + if (f->getIntrinsicID() == Intrinsic::cheerp_allocate || f->getIntrinsicID() == Intrinsic::cheerp_allocate_array) ret = cheerp_allocate; else if (f->getIntrinsicID() == Intrinsic::cheerp_reallocate) ret = cheerp_reallocate; - else if (f->getName() == "_Znwj") - ret = opnew; - else if (f->getName() == "_Znaj") - ret = opnew_array; } } - // As above, allocations of asmjs types are considered not_an_alloc - if (ret != not_an_alloc && 
TypeSupport::isAsmJSPointer(callV->getType())) - return not_an_alloc; return ret; } Type * DynamicAllocInfo::computeCastedElementType() const { - assert(isValidAlloc() ); - - if ( type == cheerp_allocate || type == cheerp_reallocate ) - { - assert( call->getType()->isPointerTy() ); - assert( call->getParamElementType(0) ); - return call->getParamElementType(0); - } - - auto getTypeForUse = [](const User * U) -> Type * - { - if ( isa(U) ) - return U->getType()->getNonOpaquePointerElementType(); - else if ( const IntrinsicInst * ci = dyn_cast(U) ) - if ( ci->getIntrinsicID() == Intrinsic::cheerp_cast_user ) - return ci->getParamElementType(0); - return nullptr; - }; - - auto firstNonNull = std::find_if( - call->user_begin(), - call->user_end(), - getTypeForUse); - - // If there are no casts, use i8* - if ( call->user_end() == firstNonNull ) - { - return Type::getInt8Ty(call->getContext()); - } - - Type * pt = getTypeForUse(*firstNonNull); - assert(pt); - - // Check that all uses are the same - if (! 
std::all_of( - std::next(firstNonNull), - call->user_end(), - [&]( const User * U ) { return getTypeForUse(U) == pt; }) ) - { -#ifndef NDEBUG - call->getParent()->getParent()->dump(); - llvm::errs() << "Can not deduce valid type for allocation instruction: " << call->getName() << '\n'; - llvm::errs() << "In function: " << call->getParent()->getParent()->getName() << "\n"; - llvm::errs() << "Allocation instruction: "; call->dump(); - llvm::errs() << "Pointer: "; pt->dump(); - llvm::errs() << "Usage:\n"; - for (auto u = call->user_begin(); u != call->user_end(); u++) - { - u->dump(); - } -#endif - llvm::report_fatal_error("Unsupported code found, please report a bug", false); + switch(type) + { + case cheerp_allocate: + case cheerp_reallocate: + assert( call->getRetElementType() ); + return call->getRetElementType(); + case not_an_alloc: + llvm_unreachable("not an alloc"); } - - return pt; } const Value * DynamicAllocInfo::getByteSizeArg() const { - assert( isValidAlloc() ); - if ( calloc == type ) - { - assert( call->arg_size() == 2 ); - return call->getOperand(1); - } - else if ( cheerp_allocate == type || cheerp_reallocate == type ) + switch(type) { - assert( call->arg_size() == 2 ); + case cheerp_allocate: return call->getOperand(1); + case cheerp_reallocate: + return call->getOperand(2); + case not_an_alloc: + llvm_unreachable("not an alloc"); } - - assert( call->arg_size() == 1 ); - return call->getOperand(0); -} - -const Value * DynamicAllocInfo::getNumberOfElementsArg() const -{ - assert( isValidAlloc() ); - - if ( type == calloc ) - { - assert( call->arg_size() == 2 ); - return call->getOperand(0); - } - return nullptr; } const Value * DynamicAllocInfo::getMemoryArg() const @@ -969,8 +952,8 @@ const Value * DynamicAllocInfo::getMemoryArg() const if ( type == cheerp_reallocate ) { - assert( call->arg_size() == 2 ); - return call->getOperand(0); + assert( call->arg_size() == 3 ); + return call->getOperand(1); } return nullptr; } @@ -978,8 +961,6 @@ const Value 
* DynamicAllocInfo::getMemoryArg() const bool DynamicAllocInfo::sizeIsRuntime() const { assert( isValidAlloc() ); - if ( getAllocType() == calloc && !isa (getNumberOfElementsArg() ) ) - return true; if ( isa(getByteSizeArg()) ) return false; return true; diff --git a/llvm/lib/CheerpWriter/CheerpWasmWriter.cpp b/llvm/lib/CheerpWriter/CheerpWasmWriter.cpp index 4d27ba690d7d..458ce6c42fda 100644 --- a/llvm/lib/CheerpWriter/CheerpWasmWriter.cpp +++ b/llvm/lib/CheerpWriter/CheerpWasmWriter.cpp @@ -2662,29 +2662,6 @@ bool CheerpWasmWriter::compileInlineInstruction(WasmBuffer& code, const Instruct encodeInst(WasmOpcode::RETURN, code); return true; } - case Intrinsic::cheerp_allocate: - case Intrinsic::cheerp_allocate_array: - { - skipFirstParam = true; - calledFunc = module.getFunction("malloc"); - if (!calledFunc) - llvm::report_fatal_error("missing malloc definition"); - break; - } - case Intrinsic::cheerp_reallocate: - { - calledFunc = module.getFunction("realloc"); - if (!calledFunc) - llvm::report_fatal_error("missing realloc definition"); - break; - } - case Intrinsic::cheerp_deallocate: - { - calledFunc = module.getFunction("free"); - if (!calledFunc) - llvm::report_fatal_error("missing free definition"); - break; - } case Intrinsic::eh_typeid_for: { auto& local = landingPadTable.getLocalTypeIdMap(currentFun); diff --git a/llvm/lib/CheerpWriter/CheerpWriter.cpp b/llvm/lib/CheerpWriter/CheerpWriter.cpp index c98c0206cf34..608a7ab85afb 100644 --- a/llvm/lib/CheerpWriter/CheerpWriter.cpp +++ b/llvm/lib/CheerpWriter/CheerpWriter.cpp @@ -503,52 +503,19 @@ uint32_t CheerpWriter::compileArraySize(const DynamicAllocInfo & info, bool shou if(inBytes) typeSize = 1; - bool closeMathImul = false; - uint32_t numElem = 1; - if(const Value* numberOfElements = info.getNumberOfElementsArg()) - { - if(isa(numberOfElements)) - numElem = getIntFromValue(numberOfElements); - else - { - assert(shouldPrint); - if(useMathImul) - { - stream << 
namegen.getBuiltinName(NameGenerator::Builtin::IMUL) << '('; - closeMathImul = true; - } - compileOperand(numberOfElements, LOWEST); - if(useMathImul) - stream << ','; - else - stream << '*'; - } - } if( !info.sizeIsRuntime() ) { uint32_t allocatedSize = getIntFromValue( info.getByteSizeArg() ); - numElem *= (allocatedSize+typeSize-1); - if(closeMathImul) - { - assert(shouldPrint); - // We need to multiply before we divide - stream << numElem; - stream << ")/" << typeSize << "|0"; - } + uint32_t nElems = allocatedSize / typeSize; + if(shouldPrint) + stream << nElems; else - { - if(shouldPrint) - stream << (numElem / typeSize); - else - return numElem / typeSize; - } + return nElems; } else { assert(shouldPrint); - compileOperand( info.getByteSizeArg(), closeMathImul?LOWEST:MUL_DIV ); - if(closeMathImul) - stream << ')'; + compileOperand( info.getByteSizeArg(), MUL_DIV); stream << '/' << typeSize << "|0"; } assert(shouldPrint); @@ -711,33 +678,6 @@ void CheerpWriter::compileAllocation(const DynamicAllocInfo & info) } } -CheerpWriter::COMPILE_INSTRUCTION_FEEDBACK CheerpWriter::compileFree(const Value* obj) -{ - // Only arrays of primitives can be backed by the linear heap - bool needsLinearCheck = TypeSupport::isTypedArrayType(obj->getType()->getPointerElementType(), /*forceTypedArray*/ true) && isWasmTarget; - if(const ConstantInt* CI = PA.getConstantOffsetForPointer(obj)) - { - // 0 is clearly not a good address in the linear address space - if(CI->getZExtValue() == 0) - needsLinearCheck = false; - } - else if(isa(obj)) - needsLinearCheck = false; - - if(!needsLinearCheck) - return COMPILE_EMPTY; - - Function* Free = module.getFunction("free"); - if (Free) - stream << getName(Free, 0) << '('; - else - stream << namegen.getBuiltinName(NameGenerator::Builtin::DUMMY); - compilePointerAs(obj, RAW, PARENT_PRIORITY::LOWEST); - stream << ')'; - - return COMPILE_OK; -} - void CheerpWriter::compileEscapedString(raw_ostream& stream, StringRef str, bool forJSON) { for(uint8_t 
c: str) @@ -1136,27 +1076,6 @@ CheerpWriter::COMPILE_INSTRUCTION_FEEDBACK CheerpWriter::handleBuiltinCall(const compileOperand(*it); return COMPILE_OK; } - else if(cheerp::isFreeFunctionName(ident) || intrinsicId==Intrinsic::cheerp_deallocate) - { - if (asmjs || TypeSupport::isAsmJSPointer((*it)->getType())) - { - Function* ffree = module.getFunction("free"); - if (!ffree) - llvm::report_fatal_error("missing free definition"); - if (ffree->empty() && asmjs) - stream << namegen.getBuiltinName(NameGenerator::Builtin::DUMMY); - else - stream << getName(ffree, 0); - stream <<'('; - compileOperand(*it, PARENT_PRIORITY::BIT_OR); - stream << "|0)"; - return COMPILE_OK; - } - else - { - return compileFree(*it); - } - } else if(ident=="fmod") { // Handle this internally, C++ does not have float mod operation @@ -1483,29 +1402,6 @@ CheerpWriter::COMPILE_INSTRUCTION_FEEDBACK CheerpWriter::handleBuiltinCall(const compileAllocation(da); return COMPILE_OK; } - if ((func->getIntrinsicID()==Intrinsic::cheerp_allocate || func->getIntrinsicID()==Intrinsic::cheerp_allocate_array) && - (asmjs || TypeSupport::isAsmJSPointed(callV.getParamElementType(0)))) - { - Function* fmalloc = module.getFunction("malloc"); - if (!fmalloc) - llvm::report_fatal_error("missing malloc definition"); - stream << getName(fmalloc, 0) << "("; - compileOperand(*(it+1), PARENT_PRIORITY::LOWEST); - stream << ")|0"; - return COMPILE_OK; - } - else if (asmjs && func->getIntrinsicID()==Intrinsic::cheerp_reallocate && (asmjs || TypeSupport::isAsmJSPointed(callV.getParamElementType(0)))) - { - Function* frealloc = module.getFunction("realloc"); - if (!frealloc) - llvm::report_fatal_error("missing realloc definition"); - stream << getName(frealloc, 0) <<'('; - compileOperand(*it); - stream << ','; - compileOperand(*(it+1)); - stream << ")|0"; - return COMPILE_OK; - } else if(ident=="cheerpCreate_ZN6client6StringC2EPKc") { // NativeRewriter has encoded the value as metadata if this optimization is possible diff --git 
a/llvm/lib/CheerpWriter/PreExecute.cpp b/llvm/lib/CheerpWriter/PreExecute.cpp index e0a678a351c4..22b4a5d99d3c 100644 --- a/llvm/lib/CheerpWriter/PreExecute.cpp +++ b/llvm/lib/CheerpWriter/PreExecute.cpp @@ -93,7 +93,7 @@ static GenericValue pre_execute_allocate_array(FunctionType *FT, ExecutionEngine *currentEE = PreExecute::currentPreExecutePass->currentEE; size_t size=(size_t)(Args[1].IntVal.getLimitedValue()); - llvm::Type *type = currentEE->getCurrentCallSite()->getParamElementType(0); + llvm::Type *type = currentEE->getCurrentCallSite()->getRetElementType(); bool asmjs = currentEE->getCurrentCaller()->getSection() == StringRef("asmjs") || TypeSupport::isAsmJSPointed(type); const DataLayout *DL = &PreExecute::currentPreExecutePass->currentModule->getDataLayout(); @@ -134,7 +134,7 @@ static GenericValue pre_execute_allocate(FunctionType *FT, #endif // Register this allocations in the pass - llvm::Type *type = currentEE->getCurrentCallSite()->getParamElementType(0); + llvm::Type *type = currentEE->getCurrentCallSite()->getRetElementType(); bool asmjs = currentEE->getCurrentCaller()->getSection() == StringRef("asmjs") || TypeSupport::isAsmJSPointed(type); PreExecute::currentPreExecutePass->recordTypedAllocation(type, size, (char*)ret, /*hasCookie*/ false, asmjs); @@ -145,8 +145,8 @@ static GenericValue pre_execute_allocate(FunctionType *FT, static GenericValue pre_execute_reallocate(FunctionType *FT, ArrayRef Args, AttributeList Attrs) { ExecutionEngine *currentEE = PreExecute::currentPreExecutePass->currentEE; - void *p = (void *)(currentEE->GVTORP(Args[0])); - size_t size=(size_t)(Args[1].IntVal.getLimitedValue()); + void *p = (void *)(currentEE->GVTORP(Args[1])); + size_t size=(size_t)(Args[2].IntVal.getLimitedValue()); void* ret = PreExecute::currentPreExecutePass->allocator->allocate(size); memset(ret, 0, size); if(p != nullptr) @@ -172,7 +172,7 @@ static GenericValue pre_execute_reallocate(FunctionType *FT, #endif // Register this allocations in the pass - 
llvm::Type *type = currentEE->getCurrentCallSite()->getParamElementType(0); + llvm::Type *type = currentEE->getCurrentCallSite()->getRetElementType(); bool asmjs = currentEE->getCurrentCaller()->getSection() == StringRef("asmjs") || TypeSupport::isAsmJSPointer(type); PreExecute::currentPreExecutePass->recordTypedAllocation(type, size, (char*)ret, /*hasCookie*/ false, asmjs); diff --git a/llvm/lib/IR/Verifier.cpp b/llvm/lib/IR/Verifier.cpp index cc6e91e9d3e5..0951d98ba444 100644 --- a/llvm/lib/IR/Verifier.cpp +++ b/llvm/lib/IR/Verifier.cpp @@ -3273,10 +3273,16 @@ void Verifier::visitCallBase(CallBase &Call) { case Intrinsic::cheerp_upcast_collapsed: case Intrinsic::cheerp_virtualcast: case Intrinsic::cheerp_downcast: + Check(Attrs.hasParamAttr(0, Attribute::ElementType), "Cheerp Intrinsic should specify element type", Call); + break; case Intrinsic::cheerp_allocate: case Intrinsic::cheerp_allocate_array: + Check(!isa(II->getOperand(0)) || Attrs.hasRetAttr(Attribute::ElementType), "Cheerp Intrinsic should specify element type", Call); + break; case Intrinsic::cheerp_reallocate: - Check(Attrs.hasParamAttr(0, Attribute::ElementType), "Cheerp Intrinsic should specify element type", Call); + Check(!isa(II->getOperand(0)) || Attrs.hasParamAttr(1, Attribute::ElementType), "Cheerp Intrinsic should specify element type", Call); + Check(!isa(II->getOperand(0)) || Attrs.hasRetAttr(Attribute::ElementType), "Cheerp Intrinsic should specify element type", Call); + break; break; default: break; diff --git a/llvm/lib/Target/WebAssembly/CheerpWritePass.cpp b/llvm/lib/Target/WebAssembly/CheerpWritePass.cpp index cdf6f79b5080..f9d9e4e47dcb 100644 --- a/llvm/lib/Target/WebAssembly/CheerpWritePass.cpp +++ b/llvm/lib/Target/WebAssembly/CheerpWritePass.cpp @@ -232,11 +232,11 @@ bool CheerpWritePass::runOnModule(Module& M) FPM.addPass(cheerp::CheerpLowerSwitchPass(/*onlyLowerI64*/false)); FPM.addPass(cheerp::LowerAndOrBranchesPass()); FPM.addPass(cheerp::StructMemFuncLoweringPass()); + 
FPM.addPass(cheerp::FreezeAndAssumeRemovalPass()); MPM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM))); } - MPM.addPass(cheerp::FreeAndDeleteRemovalPass()); MPM.addPass(cheerp::GlobalDepsAnalyzerPass(mathMode, /*resolveAliases*/true)); MPM.addPass(cheerp::InvokeWrappingPass()); if (isWasmTarget) diff --git a/llvm/lib/Transforms/Coroutines/CoroFrame.cpp b/llvm/lib/Transforms/Coroutines/CoroFrame.cpp index 1fe916edb0fd..a7cdf483595e 100644 --- a/llvm/lib/Transforms/Coroutines/CoroFrame.cpp +++ b/llvm/lib/Transforms/Coroutines/CoroFrame.cpp @@ -20,6 +20,7 @@ #include "llvm/ADT/SmallString.h" #include "llvm/Analysis/PtrUseVisitor.h" #include "llvm/Analysis/StackLifetime.h" +#include "llvm/Cheerp/Utility.h" #include "llvm/Config/llvm-config.h" #include "llvm/IR/CFG.h" #include "llvm/IR/DIBuilder.h" @@ -1564,13 +1565,12 @@ static void createFramePtr(coro::Shape &Shape) { if (Shape.CheerpCoroAlloc) { // CHEERP: Replace cheerp_coro_alloc with cheerp_allocate, now that we know the // final frame type - Type* allocate_types[] = { FramePtrTy, FramePtrTy }; - Function* allocate = Intrinsic::getDeclaration(M, - Intrinsic::cheerp_allocate, allocate_types); - + Function* Malloc = nullptr; + if (FrameTy->hasAsmJS()) { + Malloc = cast(M->getOrInsertFunction("malloc", Builder.getInt8PtrTy(), Builder.getInt32Ty()).getCallee()); + } Builder.SetInsertPoint(Shape.CheerpCoroAlloc); - CallBase* Alloc = Builder.CreateCall(allocate, { ConstantPointerNull::get(FramePtrTy), Shape.CheerpCoroAlloc->getOperand(0)}); - Alloc->addParamAttr(0, llvm::Attribute::get(CB->getContext(), llvm::Attribute::ElementType, FrameTy)); + CallBase* Alloc = cheerp::createCheerpAllocate(Builder, Malloc, FrameTy, Shape.CheerpCoroAlloc->getOperand(0)); Value* BC = Builder.CreateBitCast(Alloc, Builder.getInt8PtrTy()); Shape.CheerpCoroAlloc->replaceAllUsesWith(BC); Shape.CheerpCoroAlloc->eraseFromParent();