diff --git a/src/coreclr/inc/daccess.h b/src/coreclr/inc/daccess.h index df31ac579e52c..3ca0ca40ee42e 100644 --- a/src/coreclr/inc/daccess.h +++ b/src/coreclr/inc/daccess.h @@ -97,7 +97,7 @@ // pRS=pRS->pright; // else // { -// return pRS->pjit; +// return pRS->_pjit; // } // } // @@ -108,7 +108,7 @@ // In the assignment statement the compiler will automatically use // the implicit conversion from PTR_RangeSection to RangeSection*, // causing a host instance to be created. Finally, if an appropriate -// section is found the use of pRS->pjit will cause an implicit +// section is found the use of pRS->_pjit will cause an implicit // conversion from PTR_IJitManager to IJitManager. The VPTR code // will look at target memory to determine the actual derived class // for the JitManager and instantiate the right class in the host so diff --git a/src/coreclr/inc/dacvars.h b/src/coreclr/inc/dacvars.h index b1d915efb6f7e..df4adf84fc14a 100644 --- a/src/coreclr/inc/dacvars.h +++ b/src/coreclr/inc/dacvars.h @@ -75,7 +75,7 @@ #define UNKNOWN_POINTER_TYPE SIZE_T -DEFINE_DACVAR_VOLATILE(PTR_RangeSection, ExecutionManager__m_CodeRangeList, ExecutionManager::m_CodeRangeList) +DEFINE_DACVAR(PTR_RangeSectionMap, ExecutionManager__g_pCodeRangeMap, ExecutionManager::g_pCodeRangeMap) DEFINE_DACVAR(PTR_EECodeManager, ExecutionManager__m_pDefaultCodeMan, ExecutionManager::m_pDefaultCodeMan) DEFINE_DACVAR_VOLATILE(LONG, ExecutionManager__m_dwReaderCount, ExecutionManager::m_dwReaderCount) DEFINE_DACVAR_VOLATILE(LONG, ExecutionManager__m_dwWriterLock, ExecutionManager::m_dwWriterLock) diff --git a/src/coreclr/inc/vptr_list.h b/src/coreclr/inc/vptr_list.h index d8e6cd42bd7c3..4683dd86e6512 100644 --- a/src/coreclr/inc/vptr_list.h +++ b/src/coreclr/inc/vptr_list.h @@ -13,6 +13,7 @@ VPTR_CLASS(EECodeManager) VPTR_CLASS(RangeList) VPTR_CLASS(LockedRangeList) +VPTR_CLASS(CodeRangeMapRangeList) #ifdef EnC_SUPPORTED VPTR_CLASS(EditAndContinueModule) diff --git a/src/coreclr/nativeaot/Runtime/inc/daccess.h b/src/coreclr/nativeaot/Runtime/inc/daccess.h index a7853555aca61..3fa4d8b0008bf 100644 --- a/src/coreclr/nativeaot/Runtime/inc/daccess.h +++ b/src/coreclr/nativeaot/Runtime/inc/daccess.h @@ -95,7 +95,7 @@ // pRS=pRS->pright; // else // { -// return pRS->pjit; +// return pRS->_pjit; // } // } // @@ -106,7 +106,7 @@ // In the assignment statement the compiler will automatically use // the implicit conversion from PTR_RangeSection to RangeSection*, // causing a host instance to be created. Finally, if an appropriate -// section is found the use of pRS->pjit will cause an implicit +// section is found the use of pRS->_pjit will cause an implicit // conversion from PTR_IJitManager to IJitManager. 
The VPTR code // will look at target memory to determine the actual derived class // for the JitManager and instantiate the right class in the host so diff --git a/src/coreclr/pal/inc/pal.h b/src/coreclr/pal/inc/pal.h index b6879228acb58..3f379ee802e11 100644 --- a/src/coreclr/pal/inc/pal.h +++ b/src/coreclr/pal/inc/pal.h @@ -3235,6 +3235,18 @@ RaiseException( IN DWORD nNumberOfArguments, IN CONST ULONG_PTR *lpArguments); +struct PAL_SEHException; + +PALIMPORT +VOID +PALAPI +RaiseExceptionProducePALExceptionOnly( + IN DWORD dwExceptionCode, + IN DWORD dwExceptionFlags, + IN DWORD nNumberOfArguments, + IN CONST ULONG_PTR *lpArguments, + PAL_SEHException *pPalException); + PALIMPORT VOID PALAPI diff --git a/src/coreclr/pal/src/exception/seh-unwind.cpp b/src/coreclr/pal/src/exception/seh-unwind.cpp index e94922cea0451..210c7c8838d74 100644 --- a/src/coreclr/pal/src/exception/seh-unwind.cpp +++ b/src/coreclr/pal/src/exception/seh-unwind.cpp @@ -951,4 +951,75 @@ RaiseException(IN DWORD dwExceptionCode, LOGEXIT("RaiseException returns\n"); } +/*++ +Function: + RaiseExceptionProducePALExceptionOnly + +Like RaiseException (see MSDN doc), except that the exception is not raised: the exception and +context records are only captured and handed back to the caller via pPalException. +--*/ +// No PAL_NORETURN: unlike RaiseException, this function returns to its caller instead of raising. +__attribute__((noinline)) +VOID +PALAPI +RaiseExceptionProducePALExceptionOnly(IN DWORD dwExceptionCode, + IN DWORD dwExceptionFlags, + IN DWORD nNumberOfArguments, + IN CONST ULONG_PTR *lpArguments, + PAL_SEHException *pPalException) +{ + // Unlike RaiseException, this function always returns, so the regular + // PERF_ENTRY/PERF_EXIT pair is used here. + PERF_ENTRY(RaiseExceptionProducePALExceptionOnly); + ENTRY("RaiseExceptionProducePALExceptionOnly(dwCode=%#x, dwFlags=%#x, nArgs=%u, lpArguments=%p)\n", + dwExceptionCode, dwExceptionFlags, nNumberOfArguments, lpArguments); + + /* Validate parameters */ + if (dwExceptionCode & RESERVED_SEH_BIT) + { + WARN("Exception code %08x has bit 28 set; clearing it.\n", dwExceptionCode); + dwExceptionCode ^= RESERVED_SEH_BIT; + } + + if (nNumberOfArguments > EXCEPTION_MAXIMUM_PARAMETERS) + { + WARN("Number of arguments (%d) exceeds the limit " + "EXCEPTION_MAXIMUM_PARAMETERS (%d); ignoring extra parameters.\n", + nNumberOfArguments, EXCEPTION_MAXIMUM_PARAMETERS); + nNumberOfArguments = EXCEPTION_MAXIMUM_PARAMETERS; + } + + CONTEXT *contextRecord; + EXCEPTION_RECORD *exceptionRecord; + AllocateExceptionRecords(&exceptionRecord, &contextRecord); + + ZeroMemory(exceptionRecord, sizeof(EXCEPTION_RECORD)); + + exceptionRecord->ExceptionCode = dwExceptionCode; + exceptionRecord->ExceptionFlags = dwExceptionFlags; + exceptionRecord->ExceptionRecord = NULL; + exceptionRecord->ExceptionAddress = NULL; // set below from the unwound context + exceptionRecord->NumberParameters = nNumberOfArguments; + if (nNumberOfArguments) + { + CopyMemory(exceptionRecord->ExceptionInformation, lpArguments, + nNumberOfArguments * sizeof(ULONG_PTR)); + } + + // Capture the context of RaiseExceptionProducePALExceptionOnly. + ZeroMemory(contextRecord, sizeof(CONTEXT)); + contextRecord->ContextFlags = CONTEXT_FULL; + CONTEXT_CaptureContext(contextRecord); + + // We have to unwind one level to get the actual context user code could be resumed at.
+ PAL_VirtualUnwind(contextRecord, NULL); + + exceptionRecord->ExceptionAddress = (void *)CONTEXTGetPC(contextRecord); + + *pPalException = PAL_SEHException(exceptionRecord, contextRecord); + + LOGEXIT("RaiseExceptionProducePALExceptionOnly returns\n"); + PERF_EXIT(RaiseExceptionProducePALExceptionOnly); +} + #endif // !HOST_WINDOWS diff --git a/src/coreclr/vm/assembly.cpp b/src/coreclr/vm/assembly.cpp index 1c3408ec3a8c1..586d11854c763 100644 --- a/src/coreclr/vm/assembly.cpp +++ b/src/coreclr/vm/assembly.cpp @@ -465,7 +465,6 @@ Assembly *Assembly::CreateDynamic(AssemblyBinder* pBinder, NativeAssemblyNamePar if ((access & ASSEMBLY_ACCESS_COLLECT) != 0) { AssemblyLoaderAllocator *pCollectibleLoaderAllocator = new AssemblyLoaderAllocator(); - pCollectibleLoaderAllocator->SetCollectible(); pLoaderAllocator = pCollectibleLoaderAllocator; // Some of the initialization functions are not virtual. Call through the derived class diff --git a/src/coreclr/vm/assemblynative.cpp b/src/coreclr/vm/assemblynative.cpp index 5636de48aa23b..526dbe4f61703 100644 --- a/src/coreclr/vm/assemblynative.cpp +++ b/src/coreclr/vm/assemblynative.cpp @@ -1135,7 +1135,6 @@ extern "C" INT_PTR QCALLTYPE AssemblyNative_InitializeAssemblyLoadContext(INT_PT { // Create a new AssemblyLoaderAllocator for an AssemblyLoadContext loaderAllocator = new AssemblyLoaderAllocator(); - loaderAllocator->SetCollectible(); GCX_COOP(); LOADERALLOCATORREF pManagedLoaderAllocator = NULL; diff --git a/src/coreclr/vm/ceeload.cpp b/src/coreclr/vm/ceeload.cpp index 9399e7bd7b627..8cd2c26ba70c4 100644 --- a/src/coreclr/vm/ceeload.cpp +++ b/src/coreclr/vm/ceeload.cpp @@ -4397,7 +4397,7 @@ void Module::RunEagerFixupsUnlocked() ExecutionManager::AddCodeRange( base, base + (TADDR)pNativeImage->GetVirtualSize(), ExecutionManager::GetReadyToRunJitManager(), - RangeSection::RANGE_SECTION_READYTORUN, + RangeSection::RANGE_SECTION_NONE, this /* pHeapListOrZapModule */); } #endif // !DACCESS_COMPILE diff --git a/src/coreclr/vm/codeman.cpp b/src/coreclr/vm/codeman.cpp index a81a2bd9f663f..3f9a0b981ca91 100644 --- a/src/coreclr/vm/codeman.cpp +++ b/src/coreclr/vm/codeman.cpp @@ -49,14 +49,13 @@ SPTR_IMPL(EEJitManager, ExecutionManager, m_pEEJitManager); SPTR_IMPL(ReadyToRunJitManager, ExecutionManager, m_pReadyToRunJitManager); #endif -VOLATILE_SPTR_IMPL_INIT(RangeSection, ExecutionManager, m_CodeRangeList, NULL); +SPTR_IMPL(RangeSectionMap, ExecutionManager, g_pCodeRangeMap); VOLATILE_SVAL_IMPL_INIT(LONG, ExecutionManager, m_dwReaderCount, 0); VOLATILE_SVAL_IMPL_INIT(LONG, ExecutionManager, m_dwWriterLock, 0); #ifndef DACCESS_COMPILE CrstStatic ExecutionManager::m_JumpStubCrst; -CrstStatic ExecutionManager::m_RangeCrst; unsigned ExecutionManager::m_normal_JumpStubLookup; unsigned ExecutionManager::m_normal_JumpStubUnique; @@ -391,7 +390,7 @@ void UnwindInfoTable::AddToUnwindInfoTable(UnwindInfoTable** unwindInfoPtr, PT_R if (pRS != NULL) { for(int i = 0; i < unwindInfoCount; i++) - AddToUnwindInfoTable(&pRS->pUnwindInfoTable, &unwindInfo[i], pRS->LowAddress, pRS->HighAddress); + AddToUnwindInfoTable(&pRS->_pUnwindInfoTable, &unwindInfo[i], pRS->_range.RangeStart(), pRS->_range.RangeEndOpen()); } } @@ -409,14 +408,14 @@ void UnwindInfoTable::AddToUnwindInfoTable(UnwindInfoTable** unwindInfoPtr, PT_R _ASSERTE(pRS != NULL); if (pRS != NULL) { - _ASSERTE(pRS->pjit->GetCodeType() == (miManaged | miIL)); - if (pRS->pjit->GetCodeType() == (miManaged | miIL)) + _ASSERTE(pRS->_pjit->GetCodeType() == (miManaged | miIL)); + if (pRS->_pjit->GetCodeType() == (miManaged 
| miIL)) { // This cast is justified because only EEJitManager's have the code type above. - EEJitManager* pJitMgr = (EEJitManager*)(pRS->pjit); + EEJitManager* pJitMgr = (EEJitManager*)(pRS->_pjit); CodeHeader * pHeader = pJitMgr->GetCodeHeaderFromStartAddress(entryPoint); for(ULONG i = 0; i < pHeader->GetNumberOfUnwindInfos(); i++) - RemoveFromUnwindInfoTable(&pRS->pUnwindInfoTable, pRS->LowAddress, pRS->LowAddress + pHeader->GetUnwindInfo(i)->BeginAddress); + RemoveFromUnwindInfoTable(&pRS->_pUnwindInfoTable, pRS->_range.RangeStart(), pRS->_range.RangeStart() + pHeader->GetUnwindInfo(i)->BeginAddress); } } } @@ -453,15 +452,15 @@ extern CrstStatic g_StubUnwindInfoHeapSegmentsCrst; PCODE methodEntry =(PCODE) heapIterator.GetMethodCode(); RangeSection * pRS = ExecutionManager::FindCodeRange(methodEntry, ExecutionManager::GetScanFlags()); _ASSERTE(pRS != NULL); - _ASSERTE(pRS->pjit->GetCodeType() == (miManaged | miIL)); - if (pRS != NULL && pRS->pjit->GetCodeType() == (miManaged | miIL)) + _ASSERTE(pRS->_pjit->GetCodeType() == (miManaged | miIL)); + if (pRS != NULL && pRS->_pjit->GetCodeType() == (miManaged | miIL)) { // This cast is justified because only EEJitManager's have the code type above. - EEJitManager* pJitMgr = (EEJitManager*)(pRS->pjit); + EEJitManager* pJitMgr = (EEJitManager*)(pRS->_pjit); CodeHeader * pHeader = pJitMgr->GetCodeHeaderFromStartAddress(methodEntry); int unwindInfoCount = pHeader->GetNumberOfUnwindInfos(); for(int i = 0; i < unwindInfoCount; i++) - AddToUnwindInfoTable(&pRS->pUnwindInfoTable, pHeader->GetUnwindInfo(i), pRS->LowAddress, pRS->HighAddress); + AddToUnwindInfoTable(&pRS->_pUnwindInfoTable, pHeader->GetUnwindInfo(i), pRS->_range.RangeStart(), pRS->_range.RangeEndOpen()); } } } @@ -793,14 +792,17 @@ values: m_CodeRangeList and hold it while walking the lists Uses ReaderLockHolder (allows multiple reeaders with no writers) ----------------------------------------- ExecutionManager::FindCodeRange -ExecutionManager::FindZapModule +ExecutionManager::FindReadyToRunModule ExecutionManager::EnumMemoryRegions +AND +ExecutionManager::IsManagedCode +ExecutionManager::IsManagedCodeWithLock +The IsManagedCode checks are notable as they protect not just access to the RangeSection walking, +but the actual RangeSection while determining if a given ip IsManagedCode. 
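+(When the lookup runs without actually holding the reader lock, it relies on the RangeSectionMap
+guarantees documented in codeman.h: interior map levels only ever change from NULL to a valid
+pointer, and fragments for collectible code are tagged so that an unlocked reader reports NeedsLock
+and retries under the ReaderLockHolder rather than dereferencing memory that cleanup may free.)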
Uses WriterLockHolder (allows single writer and no readers) ----------------------------------------- -ExecutionManager::AddRangeHelper -ExecutionManager::DeleteRangeHelper - +ExecutionManager::DeleteRange */ //----------------------------------------------------------------------------- @@ -4178,7 +4180,10 @@ BOOL EEJitManager::JitCodeToMethodInfo( _ASSERTE(pRangeSection != NULL); - TADDR start = dac_cast(pRangeSection->pjit)->FindMethodCode(pRangeSection, currentPC); + if (pRangeSection->_flags & RangeSection::RANGE_SECTION_RANGELIST) + return FALSE; + + TADDR start = dac_cast(pRangeSection->_pjit)->FindMethodCode(pRangeSection, currentPC); if (start == NULL) return FALSE; @@ -4218,7 +4223,12 @@ StubCodeBlockKind EEJitManager::GetStubCodeBlockKind(RangeSection * pRangeSectio SUPPORTS_DAC; } CONTRACTL_END; - TADDR start = dac_cast(pRangeSection->pjit)->FindMethodCode(pRangeSection, currentPC); + if (pRangeSection->_flags & RangeSection::RANGE_SECTION_RANGELIST) + { + return pRangeSection->_pRangeList->GetCodeBlockKind(); + } + + TADDR start = dac_cast(pRangeSection->_pjit)->FindMethodCode(pRangeSection, currentPC); if (start == NULL) return STUB_CODE_BLOCK_NOCODE; CodeHeader * pCHdr = PTR_CodeHeader(start - sizeof(CodeHeader)); @@ -4234,9 +4244,9 @@ TADDR EEJitManager::FindMethodCode(PCODE currentPC) } CONTRACTL_END; RangeSection * pRS = ExecutionManager::FindCodeRange(currentPC, ExecutionManager::GetScanFlags()); - if (pRS == NULL || (pRS->flags & RangeSection::RANGE_SECTION_CODEHEAP) == 0) + if (pRS == NULL || (pRS->_flags & RangeSection::RANGE_SECTION_CODEHEAP) == 0) return STUB_CODE_BLOCK_NOCODE; - return dac_cast(pRS->pjit)->FindMethodCode(pRS, currentPC); + return dac_cast(pRS->_pjit)->FindMethodCode(pRS, currentPC); } // Finds the header corresponding to the code at offset "delta". 
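For reference, the dispatch performed by the lookups above reduces to how a RangeSection is now backed; a condensed sketch (the helper name is illustrative, the fields and flags are the ones introduced in codeman.h):

static const char* DescribeRangeSectionBacking(const RangeSection* pRS)
{
    if (pRS->_flags & RangeSection::RANGE_SECTION_CODEHEAP)
        return "JIT code heap";       // backed by _pHeapList; FindMethodCode applies
    if (pRS->_flags & RangeSection::RANGE_SECTION_RANGELIST)
        return "stub range list";     // backed by _pRangeList; GetCodeBlockKind() names the stub kind
#ifdef FEATURE_READYTORUN
    if (pRS->_pR2RModule != NULL)
        return "ReadyToRun image";    // identified by the module pointer rather than a flag
#endif
    return "unrecognized";
}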
@@ -4247,8 +4257,9 @@ TADDR EEJitManager::FindMethodCode(RangeSection * pRangeSection, PCODE currentPC LIMITED_METHOD_DAC_CONTRACT; _ASSERTE(pRangeSection != NULL); + _ASSERTE(pRangeSection->_flags & RangeSection::RANGE_SECTION_CODEHEAP); - HeapList *pHp = dac_cast(pRangeSection->pHeapListOrZapModule); + HeapList *pHp = pRangeSection->_pHeapList; if ((currentPC < pHp->startAddress) || (currentPC > pHp->endAddress)) @@ -4659,12 +4670,13 @@ void ExecutionManager::Init() m_JumpStubCrst.Init(CrstJumpStubCache, CrstFlags(CRST_UNSAFE_ANYMODE|CRST_DEBUGGER_THREAD)); - m_RangeCrst.Init(CrstExecuteManRangeLock, CRST_UNSAFE_ANYMODE); + g_pCodeRangeMap = new RangeSectionMap(); m_pDefaultCodeMan = new EECodeManager(); m_pEEJitManager = new EEJitManager(); + #ifdef FEATURE_READYTORUN m_pReadyToRunJitManager = new ReadyToRunJitManager(); #endif @@ -4688,7 +4700,9 @@ ExecutionManager::FindCodeRange(PCODE currentPC, ScanFlag scanFlag) if (scanFlag == ScanReaderLock) return FindCodeRangeWithLock(currentPC); - return GetRangeSection(currentPC); + // Since ScanReaderLock is not set, then we should behave AS IF the ReaderLock is held + RangeSectionLockState lockState = RangeSectionLockState::ReaderLocked; + return GetRangeSection(currentPC, &lockState); } //************************************************************************** @@ -4702,8 +4716,15 @@ ExecutionManager::FindCodeRangeWithLock(PCODE currentPC) SUPPORTS_DAC; } CONTRACTL_END; - ReaderLockHolder rlh; - return GetRangeSection(currentPC); + RangeSectionLockState lockState = RangeSectionLockState::None; + RangeSection *result = GetRangeSection(currentPC, &lockState); + if (lockState == RangeSectionLockState::NeedsLock) + { + ReaderLockHolder rlh; + lockState = RangeSectionLockState::ReaderLocked; + result = GetRangeSection(currentPC, &lockState); + } + return result; } @@ -4765,7 +4786,9 @@ BOOL ExecutionManager::IsManagedCode(PCODE currentPC) if (GetScanFlags() == ScanReaderLock) return IsManagedCodeWithLock(currentPC); - return IsManagedCodeWorker(currentPC); + // Since ScanReaderLock is not set, then we must assume that the ReaderLock is effectively taken. + RangeSectionLockState lockState = RangeSectionLockState::ReaderLocked; + return IsManagedCodeWorker(currentPC, &lockState); } //************************************************************************** @@ -4777,8 +4800,17 @@ BOOL ExecutionManager::IsManagedCodeWithLock(PCODE currentPC) GC_NOTRIGGER; } CONTRACTL_END; - ReaderLockHolder rlh; - return IsManagedCodeWorker(currentPC); + RangeSectionLockState lockState = RangeSectionLockState::None; + BOOL result = IsManagedCodeWorker(currentPC, &lockState); + + if (lockState == RangeSectionLockState::NeedsLock) + { + ReaderLockHolder rlh; + lockState = RangeSectionLockState::ReaderLocked; + result = IsManagedCodeWorker(currentPC, &lockState); + } + + return result; } //************************************************************************** @@ -4805,14 +4837,15 @@ BOOL ExecutionManager::IsManagedCode(PCODE currentPC, HostCallPreference hostCal return FALSE; } - return IsManagedCodeWorker(currentPC); + RangeSectionLockState lockState = RangeSectionLockState::ReaderLocked; + return IsManagedCodeWorker(currentPC, &lockState); #endif } //************************************************************************** // Assumes that the ExecutionManager reader/writer lock is taken or that // it is safe not to take it. 
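+// The RangeSectionLockState threaded through these workers records the caller's claim on that lock:
+// lookups that are allowed to take it start with None and retry under a ReaderLockHolder when the
+// map reports NeedsLock, while callers that cannot take it (or effectively already hold it) pass
+// ReaderLocked so collectible sections are returned without a retry.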
-BOOL ExecutionManager::IsManagedCodeWorker(PCODE currentPC) +BOOL ExecutionManager::IsManagedCodeWorker(PCODE currentPC, RangeSectionLockState *pLockState) { CONTRACTL { NOTHROW; @@ -4823,16 +4856,16 @@ BOOL ExecutionManager::IsManagedCodeWorker(PCODE currentPC) // taken over the call to JitCodeToMethodInfo too so that nobody pulls out // the range section from underneath us. - RangeSection * pRS = GetRangeSection(currentPC); + RangeSection * pRS = GetRangeSection(currentPC, pLockState); if (pRS == NULL) return FALSE; - if (pRS->flags & RangeSection::RANGE_SECTION_CODEHEAP) + if (pRS->_flags & RangeSection::RANGE_SECTION_CODEHEAP) { // Typically if we find a Jit Manager we are inside a managed method // but on we could also be in a stub, so we check for that // as well and we don't consider stub to be real managed code. - TADDR start = dac_cast(pRS->pjit)->FindMethodCode(pRS, currentPC); + TADDR start = dac_cast(pRS->_pjit)->FindMethodCode(pRS, currentPC); if (start == NULL) return FALSE; CodeHeader * pCHdr = PTR_CodeHeader(start - sizeof(CodeHeader)); @@ -4841,9 +4874,9 @@ BOOL ExecutionManager::IsManagedCodeWorker(PCODE currentPC) } #ifdef FEATURE_READYTORUN else - if (pRS->flags & RangeSection::RANGE_SECTION_READYTORUN) + if (pRS->_pR2RModule != NULL) { - if (dac_cast(pRS->pjit)->JitCodeToMethodInfo(pRS, currentPC, NULL, NULL)) + if (dac_cast(pRS->_pjit)->JitCodeToMethodInfo(pRS, currentPC, NULL, NULL)) return TRUE; } #endif @@ -4865,10 +4898,11 @@ BOOL ExecutionManager::IsReadyToRunCode(PCODE currentPC) // the range section from underneath us. #ifdef FEATURE_READYTORUN - RangeSection * pRS = GetRangeSection(currentPC); - if (pRS != NULL && (pRS->flags & RangeSection::RANGE_SECTION_READYTORUN)) + RangeSectionLockState lockState = RangeSectionLockState::ReaderLocked; // TODO! Looking at users of this API I don't know that the lock structure here is safe. Needs checking. + RangeSection * pRS = GetRangeSection(currentPC, &lockState); + if (pRS != NULL && (pRS->_pR2RModule != NULL)) { - if (dac_cast(pRS->pjit)->JitCodeToMethodInfo(pRS, currentPC, NULL, NULL)) + if (dac_cast(pRS->_pjit)->JitCodeToMethodInfo(pRS, currentPC, NULL, NULL)) return TRUE; } #endif @@ -4898,7 +4932,7 @@ LPCWSTR ExecutionManager::GetJitName() } #endif // !FEATURE_MERGE_JIT_AND_ENGINE -RangeSection* ExecutionManager::GetRangeSection(TADDR addr) +RangeSection* ExecutionManager::GetRangeSection(TADDR addr, RangeSectionLockState *pLockState) { CONTRACTL { NOTHROW; @@ -4907,122 +4941,11 @@ RangeSection* ExecutionManager::GetRangeSection(TADDR addr) SUPPORTS_DAC; } CONTRACTL_END; - RangeSection * pHead = m_CodeRangeList; - - if (pHead == NULL) - { - return NULL; - } - - RangeSection *pCurr = pHead; - RangeSection *pLast = NULL; - -#ifndef DACCESS_COMPILE - RangeSection *pLastUsedRS = (pCurr != NULL) ? pCurr->pLastUsed : NULL; - - if (pLastUsedRS != NULL) - { - // positive case - if ((addr >= pLastUsedRS->LowAddress) && - (addr < pLastUsedRS->HighAddress) ) - { - return pLastUsedRS; - } - - RangeSection * pNextAfterLastUsedRS = pLastUsedRS->pnext; - - // negative case - if ((addr < pLastUsedRS->LowAddress) && - (pNextAfterLastUsedRS == NULL || addr >= pNextAfterLastUsedRS->HighAddress)) - { - return NULL; - } - } -#endif - - while (pCurr != NULL) - { - // See if addr is in [pCurr->LowAddress .. pCurr->HighAddress) - if (pCurr->LowAddress <= addr) - { - // Since we are sorted, once pCurr->HighAddress is less than addr - // then all subsequence ones will also be lower, so we are done. 
- if (addr >= pCurr->HighAddress) - { - // we'll return NULL and put pLast into pLastUsed - pCurr = NULL; - } - else - { - // addr must be in [pCurr->LowAddress .. pCurr->HighAddress) - _ASSERTE((pCurr->LowAddress <= addr) && (addr < pCurr->HighAddress)); - - // Found the matching RangeSection - // we'll return pCurr and put it into pLastUsed - pLast = pCurr; - } - - break; - } - pLast = pCurr; - pCurr = pCurr->pnext; - } - -#ifndef DACCESS_COMPILE - // Cache pCurr as pLastUsed in the head node - // Unless we are on an MP system with many cpus - // where this sort of caching actually diminishes scaling during server GC - // due to many processors writing to a common location - if (g_SystemInfo.dwNumberOfProcessors < 4 || !GCHeapUtilities::IsServerHeap() || !GCHeapUtilities::IsGCInProgress()) - pHead->pLastUsed = pLast; -#endif - - return pCurr; -} - -RangeSection* ExecutionManager::GetRangeSectionAndPrev(RangeSection *pHead, TADDR addr, RangeSection** ppPrev) -{ - WRAPPER_NO_CONTRACT; - - RangeSection *pCurr; - RangeSection *pPrev; - RangeSection *result = NULL; - - for (pPrev = NULL, pCurr = pHead; - pCurr != NULL; - pPrev = pCurr, pCurr = pCurr->pnext) - { - // See if addr is in [pCurr->LowAddress .. pCurr->HighAddress) - if (pCurr->LowAddress > addr) - continue; - - if (addr >= pCurr->HighAddress) - break; - - // addr must be in [pCurr->LowAddress .. pCurr->HighAddress) - _ASSERTE((pCurr->LowAddress <= addr) && (addr < pCurr->HighAddress)); - - // Found the matching RangeSection - result = pCurr; - - // Write back pPrev to ppPrev if it is non-null - if (ppPrev != NULL) - *ppPrev = pPrev; - - break; - } - - // If we failed to find a match write NULL to ppPrev if it is non-null - if ((ppPrev != NULL) && (result == NULL)) - { - *ppPrev = NULL; - } - - return result; + return g_pCodeRangeMap->LookupRangeSection(addr, pLockState); } /* static */ -PTR_Module ExecutionManager::FindZapModule(TADDR currentData) +PTR_Module ExecutionManager::FindReadyToRunModule(TADDR currentData) { CONTRACTL { @@ -5034,50 +4957,25 @@ PTR_Module ExecutionManager::FindZapModule(TADDR currentData) } CONTRACTL_END; - ReaderLockHolder rlh; - - RangeSection * pRS = GetRangeSection(currentData); - if (pRS == NULL) - return NULL; - - if (pRS->flags & RangeSection::RANGE_SECTION_CODEHEAP) - return NULL; - #ifdef FEATURE_READYTORUN - if (pRS->flags & RangeSection::RANGE_SECTION_READYTORUN) - return NULL; -#endif - - return dac_cast(pRS->pHeapListOrZapModule); -} + RangeSectionLockState lockState = RangeSectionLockState::None; + RangeSection * pRS = GetRangeSection(currentData, &lockState); + if (lockState != RangeSectionLockState::NeedsLock) + { + if (pRS == NULL) + return NULL; -/* static */ -PTR_Module ExecutionManager::FindReadyToRunModule(TADDR currentData) -{ - CONTRACTL + return pRS->_pR2RModule; + } + else { - NOTHROW; - GC_NOTRIGGER; - MODE_ANY; - STATIC_CONTRACT_HOST_CALLS; - SUPPORTS_DAC; + ReaderLockHolder rlh; + lockState = RangeSectionLockState::ReaderLocked; + pRS = GetRangeSection(currentData, &lockState); + if (pRS == NULL) + return NULL; + return pRS->_pR2RModule; } - CONTRACTL_END; - -#ifdef FEATURE_READYTORUN - ReaderLockHolder rlh; - - RangeSection * pRS = GetRangeSection(currentData); - if (pRS == NULL) - return NULL; - - if (pRS->flags & RangeSection::RANGE_SECTION_CODEHEAP) - return NULL; - - if (pRS->flags & RangeSection::RANGE_SECTION_READYTORUN) - return dac_cast(pRS->pHeapListOrZapModule);; - - return NULL; #else return NULL; #endif @@ -5095,118 +4993,91 @@ PTR_Module 
ExecutionManager::FindModuleForGCRefMap(TADDR currentData) } CONTRACTL_END; +#ifndef FEATURE_READYTORUN + return NULL; +#else RangeSection * pRS = FindCodeRange(currentData, ExecutionManager::GetScanFlags()); if (pRS == NULL) return NULL; - if (pRS->flags & RangeSection::RANGE_SECTION_CODEHEAP) - return NULL; - -#ifdef FEATURE_READYTORUN - // RANGE_SECTION_READYTORUN is intentionally not filtered out here -#endif - - return dac_cast(pRS->pHeapListOrZapModule); + return pRS->_pR2RModule; +#endif // FEATURE_READYTORUN } #ifndef DACCESS_COMPILE -/* NGenMem depends on this entrypoint */ NOINLINE void ExecutionManager::AddCodeRange(TADDR pStartRange, TADDR pEndRange, IJitManager * pJit, RangeSection::RangeSectionFlags flags, - void * pHp) + PTR_Module pModule) { CONTRACTL { THROWS; GC_NOTRIGGER; + HOST_CALLS; + PRECONDITION(pStartRange < pEndRange); PRECONDITION(CheckPointer(pJit)); - PRECONDITION(CheckPointer(pHp)); + PRECONDITION(CheckPointer(pModule)); } CONTRACTL_END; - AddRangeHelper(pStartRange, - pEndRange, - pJit, - flags, - dac_cast(pHp)); + ReaderLockHolder rlh; + RangeSectionLockState lockState = RangeSectionLockState::ReaderLocked; // + + PTR_RangeSection pRange = g_pCodeRangeMap->AllocateRange(Range(pStartRange, pEndRange), pJit, flags, pModule, &lockState); + if (pRange == NULL) + ThrowOutOfMemory(); } -void ExecutionManager::AddRangeHelper(TADDR pStartRange, - TADDR pEndRange, - IJitManager * pJit, - RangeSection::RangeSectionFlags flags, - TADDR pHeapListOrZapModule) +NOINLINE +void ExecutionManager::AddCodeRange(TADDR pStartRange, + TADDR pEndRange, + IJitManager * pJit, + RangeSection::RangeSectionFlags flags, + PTR_HeapList pHp) { CONTRACTL { THROWS; GC_NOTRIGGER; HOST_CALLS; PRECONDITION(pStartRange < pEndRange); - PRECONDITION(pHeapListOrZapModule != NULL); + PRECONDITION(CheckPointer(pJit)); + PRECONDITION(CheckPointer(pHp)); } CONTRACTL_END; - RangeSection *pnewrange = new RangeSection; + ReaderLockHolder rlh; + RangeSectionLockState lockState = RangeSectionLockState::ReaderLocked; // - _ASSERTE(pEndRange > pStartRange); + PTR_RangeSection pRange = g_pCodeRangeMap->AllocateRange(Range(pStartRange, pEndRange), pJit, flags, pHp, &lockState); - pnewrange->LowAddress = pStartRange; - pnewrange->HighAddress = pEndRange; - pnewrange->pjit = pJit; - pnewrange->pnext = NULL; - pnewrange->flags = flags; - pnewrange->pLastUsed = NULL; - pnewrange->pHeapListOrZapModule = pHeapListOrZapModule; -#if defined(TARGET_AMD64) - pnewrange->pUnwindInfoTable = NULL; -#endif // defined(TARGET_AMD64) - { - CrstHolder ch(&m_RangeCrst); // Acquire the Crst before linking in a new RangeList - - RangeSection * current = m_CodeRangeList; - RangeSection * previous = NULL; + if (pRange == NULL) + ThrowOutOfMemory(); +} - if (current != NULL) - { - while (true) - { - // Sort addresses top down so that more recently created ranges - // will populate the top of the list - if (pnewrange->LowAddress > current->LowAddress) - { - // Asserts if ranges are overlapping - _ASSERTE(pnewrange->LowAddress >= current->HighAddress); - pnewrange->pnext = current; +NOINLINE +void ExecutionManager::AddCodeRange(TADDR pStartRange, + TADDR pEndRange, + IJitManager * pJit, + RangeSection::RangeSectionFlags flags, + PTR_CodeRangeMapRangeList pRangeList) +{ + CONTRACTL { + THROWS; + GC_NOTRIGGER; + HOST_CALLS; + PRECONDITION(pStartRange < pEndRange); + PRECONDITION(CheckPointer(pJit)); + PRECONDITION(CheckPointer(pRangeList)); + } CONTRACTL_END; - if (previous == NULL) // insert new head - { - m_CodeRangeList = pnewrange; - 
} - else - { // insert in the middle - previous->pnext = pnewrange; - } - break; - } + ReaderLockHolder rlh; + RangeSectionLockState lockState = RangeSectionLockState::ReaderLocked; // - RangeSection * next = current->pnext; - if (next == NULL) // insert at end of list - { - current->pnext = pnewrange; - break; - } + PTR_RangeSection pRange = g_pCodeRangeMap->AllocateRange(Range(pStartRange, pEndRange), pJit, flags, pRangeList, &lockState); - // Continue walking the RangeSection list - previous = current; - current = next; - } - } - else - { - m_CodeRangeList = pnewrange; - } - } + if (pRange == NULL) + ThrowOutOfMemory(); } // Deletes a single range starting at pStartRange @@ -5217,114 +5088,64 @@ void ExecutionManager::DeleteRange(TADDR pStartRange) GC_NOTRIGGER; } CONTRACTL_END; - RangeSection *pCurr = NULL; - { - // Acquire the Crst before unlinking a RangeList. - // NOTE: The Crst must be acquired BEFORE we grab the writer lock, as the - // writer lock forces us into a forbid suspend thread region, and it's illegal - // to enter a Crst after the forbid suspend thread region is entered - CrstHolder ch(&m_RangeCrst); + RangeSection *pCurr = FindCodeRangeWithLock(pStartRange); + g_pCodeRangeMap->RemoveRangeSection(pCurr); + + +#if defined(TARGET_AMD64) + PTR_UnwindInfoTable unwindTable = pCurr->_pUnwindInfoTable; +#endif + { // Acquire the WriterLock and prevent any readers from walking the RangeList. // This also forces us to enter a forbid suspend thread region, to prevent // hijacking profilers from grabbing this thread and walking it (the walk may // require the reader lock, which would cause a deadlock). WriterLockHolder wlh; - RangeSection *pPrev = NULL; - - pCurr = GetRangeSectionAndPrev(m_CodeRangeList, pStartRange, &pPrev); - - // pCurr points at the Range that needs to be unlinked from the RangeList - if (pCurr != NULL) - { - - // If pPrev is NULL the head of this list is to be deleted - if (pPrev == NULL) - { - m_CodeRangeList = pCurr->pnext; - } - else - { - _ASSERT(pPrev->pnext == pCurr); - - pPrev->pnext = pCurr->pnext; - } - - // Clear the cache pLastUsed in the head node (if any) - RangeSection * head = m_CodeRangeList; - if (head != NULL) - { - head->pLastUsed = NULL; - } - - // - // Cannot delete pCurr here because we own the WriterLock and if this is - // a hosted scenario then the hosting api callback cannot occur in a forbid - // suspend region, which the writer lock is. - // - } + RangeSectionLockState lockState = RangeSectionLockState::WriteLocked; + + g_pCodeRangeMap->CleanupRangeSections(&lockState); + // Unlike the previous implementation, we no longer attempt to avoid freeing + // the memory behind the RangeSection here, as we do not support the hosting + // api taking over memory allocation. } // - // Now delete the node + // Now delete the unwind info table // - if (pCurr != NULL) - { #if defined(TARGET_AMD64) - if (pCurr->pUnwindInfoTable != 0) - delete pCurr->pUnwindInfoTable; + if (unwindTable != 0) + delete unwindTable; #endif // defined(TARGET_AMD64) - delete pCurr; - } } #endif // #ifndef DACCESS_COMPILE #ifdef DACCESS_COMPILE -void ExecutionManager::EnumRangeList(RangeSection* list, - CLRDataEnumMemoryFlags flags) +void RangeSection::EnumMemoryRegions(CLRDataEnumMemoryFlags flags) { - while (list != NULL) - { - // If we can't read the target memory, stop immediately so we don't work - // with broken data. 
- if (!DacEnumMemoryRegion(dac_cast(list), sizeof(*list))) - break; - - if (list->pjit.IsValid()) - { - list->pjit->EnumMemoryRegions(flags); - } - - if (!(list->flags & RangeSection::RANGE_SECTION_CODEHEAP)) - { - PTR_Module pModule = dac_cast(list->pHeapListOrZapModule); + if (!DacEnumMemoryRegion(dac_cast(this), sizeof(*this))) + return; - if (pModule.IsValid()) - { - pModule->EnumMemoryRegions(flags, true); - } - } + if (_pjit.IsValid()) + { + _pjit->EnumMemoryRegions(flags); + } - list = list->pnext; -#if defined (_DEBUG) - // Test hook: when testing on debug builds, we want an easy way to test that the while - // correctly terminates in the face of ridiculous stuff from the target. - if (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_DumpGeneration_IntentionallyCorruptDataFromTarget) == 1) +#ifdef FEATURE_READYTORUN + if (_pR2RModule != NULL) + { + if (_pR2RModule.IsValid()) { - // Force us to struggle on with something bad. - if (list == NULL) - { - list = (RangeSection *)&flags; - } + _pR2RModule->EnumMemoryRegions(flags, true); } -#endif // (_DEBUG) - } +#endif // FEATURE_READYTORUN } + void ExecutionManager::EnumMemoryRegions(CLRDataEnumMemoryFlags flags) { STATIC_CONTRACT_HOST_CALLS; @@ -5335,16 +5156,16 @@ void ExecutionManager::EnumMemoryRegions(CLRDataEnumMemoryFlags flags) // Report the global data portions. // - m_CodeRangeList.EnumMem(); + g_pCodeRangeMap.EnumMem(); m_pDefaultCodeMan.EnumMem(); // // Walk structures and report. // - if (m_CodeRangeList.IsValid()) + if (g_pCodeRangeMap.IsValid()) { - EnumRangeList(m_CodeRangeList, flags); + g_pCodeRangeMap->EnumMemoryRegions(flags); } } #endif // #ifdef DACCESS_COMPILE @@ -6011,7 +5832,7 @@ ReadyToRunInfo * ReadyToRunJitManager::JitTokenToReadyToRunInfo(const METHODTOKE SUPPORTS_DAC; } CONTRACTL_END; - return dac_cast(MethodToken.m_pRangeSection->pHeapListOrZapModule)->GetReadyToRunInfo(); + return MethodToken.m_pRangeSection->_pR2RModule->GetReadyToRunInfo(); } UINT32 ReadyToRunJitManager::JitTokenToGCInfoVersion(const METHODTOKEN& MethodToken) @@ -6148,9 +5969,9 @@ StubCodeBlockKind ReadyToRunJitManager::GetStubCodeBlockKind(RangeSection * pRan } CONTRACTL_END; - DWORD rva = (DWORD)(currentPC - pRangeSection->LowAddress); + DWORD rva = (DWORD)(currentPC - pRangeSection->_range.RangeStart()); - PTR_ReadyToRunInfo pReadyToRunInfo = dac_cast(pRangeSection->pHeapListOrZapModule)->GetReadyToRunInfo(); + PTR_ReadyToRunInfo pReadyToRunInfo = pRangeSection->_pR2RModule->GetReadyToRunInfo(); PTR_IMAGE_DATA_DIRECTORY pDelayLoadMethodCallThunksDir = pReadyToRunInfo->GetDelayMethodCallThunksSection(); if (pDelayLoadMethodCallThunksDir != NULL) @@ -6299,11 +6120,11 @@ BOOL ReadyToRunJitManager::JitCodeToMethodInfo(RangeSection * pRangeSection, TADDR currentInstr = PCODEToPINSTR(currentPC); - TADDR ImageBase = pRangeSection->LowAddress; + TADDR ImageBase = pRangeSection->_range.RangeStart(); DWORD RelativePc = (DWORD)(currentInstr - ImageBase); - Module * pModule = dac_cast(pRangeSection->pHeapListOrZapModule); + Module * pModule = pRangeSection->_pR2RModule; ReadyToRunInfo * pInfo = pModule->GetReadyToRunInfo(); COUNT_T nRuntimeFunctions = pInfo->m_nRuntimeFunctions; diff --git a/src/coreclr/vm/codeman.h b/src/coreclr/vm/codeman.h index 8e432e6e5209b..4bda5d970d231 100644 --- a/src/coreclr/vm/codeman.h +++ b/src/coreclr/vm/codeman.h @@ -100,6 +100,8 @@ enum StubCodeBlockKind : int STUB_CODE_BLOCK_JUMPSTUB, STUB_CODE_BLOCK_PRECODE, STUB_CODE_BLOCK_DYNAMICHELPER, + STUB_CODE_BLOCK_STUBPRECODE, + STUB_CODE_BLOCK_FIXUPPRECODE, // Last 
valid value. Note that the definition is duplicated in debug\daccess\fntableaccess.cpp STUB_CODE_BLOCK_LAST = 0xF, // Placeholders returned by code:GetStubCodeBlockKind @@ -608,44 +610,610 @@ class UnwindInfoTable { // address range to track the code heaps. typedef DPTR(struct RangeSection) PTR_RangeSection; +typedef VPTR(class CodeRangeMapRangeList) PTR_CodeRangeMapRangeList; -struct RangeSection +class RangeSectionMap; + +class Range { - TADDR LowAddress; - TADDR HighAddress; + // [begin,end) (This is an inclusive range) + TADDR begin; + TADDR end; - PTR_IJitManager pjit; // The owner of this address range +public: + Range(TADDR begin, TADDR end) : begin(begin), end(end) + { + assert(end >= begin); + } -#ifndef DACCESS_COMPILE - // Volatile because of the list can be walked lock-free - Volatile pnext; // link rangesections in a sorted list -#else - PTR_RangeSection pnext; -#endif + bool IsInRange(TADDR address) const + { + return address >= begin && address < end; + } + + TADDR RangeSize() const + { + return end - begin; + } + + TADDR RangeStart() const + { + return begin; + } - PTR_RangeSection pLastUsed; // for the head node only: a link to rangesections that was used most recently + TADDR RangeEnd() const + { + assert(RangeSize() > 0); + return end - 1; + } + + TADDR RangeEndOpen() const + { + return end; + } +}; +struct RangeSection +{ + friend class RangeSectionMap; enum RangeSectionFlags { RANGE_SECTION_NONE = 0x0, RANGE_SECTION_COLLECTIBLE = 0x1, RANGE_SECTION_CODEHEAP = 0x2, + RANGE_SECTION_RANGELIST = 0x4, + }; + #ifdef FEATURE_READYTORUN - RANGE_SECTION_READYTORUN = 0x4, + RangeSection(Range range, IJitManager* pJit, RangeSectionFlags flags, PTR_Module pR2RModule) : + _range(range), + _flags(flags), + _pjit(pJit), + _pR2RModule(pR2RModule), + _pHeapList(dac_cast((TADDR)0)), + _pRangeList(dac_cast((TADDR)0)) +#if defined(TARGET_AMD64) + , _pUnwindInfoTable(dac_cast((TADDR)0)) +#endif + { + assert(!(flags & RANGE_SECTION_COLLECTIBLE)); + assert(pR2RModule != NULL); + } +#endif + + RangeSection(Range range, IJitManager* pJit, RangeSectionFlags flags, PTR_HeapList pHeapList) : + _range(range), + _flags(flags), + _pjit(pJit), + _pR2RModule(dac_cast((TADDR)0)), + _pHeapList(pHeapList), + _pRangeList(dac_cast((TADDR)0)) +#if defined(TARGET_AMD64) + , _pUnwindInfoTable(dac_cast((TADDR)0)) +#endif + {} + + RangeSection(Range range, IJitManager* pJit, RangeSectionFlags flags, PTR_CodeRangeMapRangeList pRangeList) : + _range(range), + _flags(flags), + _pjit(pJit), + _pR2RModule(dac_cast((TADDR)0)), + _pHeapList(dac_cast((TADDR)0)), + _pRangeList(pRangeList) +#if defined(TARGET_AMD64) + , _pUnwindInfoTable(dac_cast((TADDR)0)) #endif + {} + +#ifdef DACCESS_COMPILE + void EnumMemoryRegions(CLRDataEnumMemoryFlags flags); +#endif + + const Range _range; + const RangeSectionFlags _flags; + const PTR_IJitManager _pjit; + const PTR_Module _pR2RModule; + const PTR_HeapList _pHeapList; + const PTR_CodeRangeMapRangeList _pRangeList; + +#if defined(TARGET_AMD64) + PTR_UnwindInfoTable _pUnwindInfoTable; // Points to unwind information for this memory range. +#endif // defined(TARGET_AMD64) + + + RangeSection* _pRangeSectionNextForDelete = nullptr; // Used for adding to the cleanup list +}; + +enum class RangeSectionLockState +{ + None, + NeedsLock, + ReaderLocked, + WriteLocked, +}; + +// For 64bit, we work with 8KB chunks of memory holding pointers to the next level. This provides 10 bits of address resolution per level. 
+// For *reasons* the X64 hardware is limited to 57bits of addressable address space, and the minimum granularity that makes sense for range lists is 64KB (or every 2^16 bits) +// Similarly the Arm64 specification requires addresses to use at most 52 bits. Thus we use the maximum addressable range of X64 to provide the real max range +// So the first level is bits [56:47] -> L4 +// Then [46:37] -> L3 +// [36:27] -> L2 +// [26:17] -> L1 +// This leaves 17 bits of the address to be handled by the RangeSection linked list +// +// For 32bit VA processes, use 1KB chunks holding pointers to the next level. This provides 8 bites of address resolution per level. [31:24] and [23:16]. + +// The memory safety model for segment maps is that the pointers held within the individual segments can never change other than to go from NULL to a meaningful pointer, +// except for the final level, which is only permitted to change when CleanupRangeSections is in use. + +class RangeSectionMap +{ + class RangeSectionFragment; + + // Helper structure which forces all access to the various pointers to be handled via volatile/interlocked operations + // The copy/move constructors are all deleted to forbid accidental reads into temporaries, etc. + class RangeSectionFragmentPointer + { + private: + TADDR _ptr; + + TADDR FragmentToPtr(RangeSectionFragment* fragment) + { + TADDR ptr = (TADDR)fragment; + if (ptr == 0) + return ptr; + + if (fragment->isCollectibleRangeSectionFragment) + { + ptr += 1; + } + + return ptr; + } + + RangeSectionFragmentPointer() { _ptr = 0; } + public: + + RangeSectionFragmentPointer(RangeSectionFragmentPointer &) = delete; + RangeSectionFragmentPointer(RangeSectionFragmentPointer &&) = delete; + RangeSectionFragmentPointer& operator=(const RangeSectionFragmentPointer&) = delete; + + bool PointerIsCollectible() + { + return ((_ptr & 1) == 1); + } + + bool IsNull() + { + return _ptr == 0; + } + + RangeSectionFragment* VolatileLoadWithoutBarrier(RangeSectionLockState *pLockState) + { + uintptr_t ptr = ::VolatileLoadWithoutBarrier(&_ptr); + if ((ptr & 1) == 1) + { + if ((*pLockState == RangeSectionLockState::None) || (*pLockState == RangeSectionLockState::NeedsLock)) + { + *pLockState = RangeSectionLockState::NeedsLock; + return NULL; + } + return (RangeSectionFragment*)(ptr - 1); + } + else + { + return (RangeSectionFragment*)(ptr); + } + } + + void VolatileStore(RangeSectionFragment* fragment) + { + ::VolatileStore(&_ptr, FragmentToPtr(fragment)); + } + + bool AtomicReplace(RangeSectionFragment* newFragment, RangeSectionFragment* oldFragment) + { + TADDR oldPtr = FragmentToPtr(oldFragment); + TADDR newPtr = FragmentToPtr(newFragment); + + return oldPtr == InterlockedCompareExchangeT(&_ptr, newPtr, oldPtr); + } }; - DWORD flags; + // Unlike a RangeSection, a RangeSectionFragment cannot span multiple elements of the last level of the RangeSectionMap + // Always allocated via memset/free + class RangeSectionFragment + { + public: + RangeSectionFragmentPointer pRangeSectionFragmentNext; + Range _range; + PTR_RangeSection pRangeSection; + bool InRange(TADDR address) { return _range.IsInRange(address) && pRangeSection->_pRangeSectionNextForDelete == NULL; } + bool isPrimaryRangeSectionFragment; // RangeSectionFragment are allocated in arrays, but we only need to free the first allocated one. It will be marked with this flag. 
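+        // Set when the owning RangeSection is RANGE_SECTION_COLLECTIBLE. Pointers to such fragments
+        // are stored with their low bit set (see RangeSectionFragmentPointer) so that a lock-free
+        // reader backs off and retries under the reader lock instead of touching memory that
+        // CleanupRangeSections may free.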
+ bool isCollectibleRangeSectionFragment; // RangeSectionFragments + }; + +#ifdef TARGET_64BIT + static const uintptr_t entriesPerMapLevel = 1024; +#else + static const uintptr_t entriesPerMapLevel = 256; +#endif + + typedef RangeSectionFragmentPointer RangeSectionList; + typedef RangeSectionList RangeSectionL1[entriesPerMapLevel]; + typedef RangeSectionL1* RangeSectionL2[entriesPerMapLevel]; + typedef RangeSectionL2* RangeSectionL3[entriesPerMapLevel]; + typedef RangeSectionL3* RangeSectionL4[entriesPerMapLevel]; + +#ifdef TARGET_64BIT + typedef RangeSectionL4 RangeSectionTopLevel; + static const uintptr_t mapLevels = 4; + static const uintptr_t maxSetBit = 56; // This is 0 indexed + static const uintptr_t bitsPerLevel = 10; +#else + typedef RangeSectionL2 RangeSectionTopLevel; + static const uintptr_t mapLevels = 2; + static const uintptr_t maxSetBit = 31; // This is 0 indexed + static const uintptr_t bitsPerLevel = 8; +#endif + + RangeSectionTopLevel *_topLevel = nullptr; + + RangeSection* _pCleanupList; + + const uintptr_t bitsAtLastLevel = maxSetBit - (bitsPerLevel * mapLevels) + 1; + const uintptr_t bytesAtLastLevel = (((uintptr_t)1) << bitsAtLastLevel); + + RangeSection* EndOfCleanupListMarker() { return (RangeSection*)1; } + + void* AllocateLevel() + { + size_t size = entriesPerMapLevel * sizeof(void*); + void *buf = malloc(size); + memset(buf, 0, size); + return buf; + } + + uintptr_t EffectiveBitsForLevel(TADDR address, uintptr_t level) + { + TADDR addressAsInt = address; + TADDR addressBitsUsedInMap = addressAsInt >> (maxSetBit + 1 - (mapLevels * bitsPerLevel)); + TADDR addressBitsShifted = addressBitsUsedInMap >> ((level - 1) * bitsPerLevel); + TADDR addressBitsUsedInLevel = (entriesPerMapLevel - 1) & addressBitsShifted; + return addressBitsUsedInLevel; + } + + template + auto EnsureLevel(TADDR address, T* outerLevel, uintptr_t level) -> decltype(&((**outerLevel)[0])) + { + uintptr_t index = EffectiveBitsForLevel(address, level); + auto levelToGetPointerIn = VolatileLoadWithoutBarrier(outerLevel); + + if (levelToGetPointerIn == NULL) + { + auto levelNew = static_cast(AllocateLevel()); + if (levelNew == NULL) + return NULL; + auto levelPreviouslyStored = InterlockedCompareExchangeT(outerLevel, levelNew, NULL); + if (levelPreviouslyStored != nullptr) + { + // Handle race where another thread grew the table + levelToGetPointerIn = levelPreviouslyStored; + free(levelNew); + } + else + { + levelToGetPointerIn = levelNew; + } + assert(levelToGetPointerIn != nullptr); + } + + return &((*levelToGetPointerIn)[index]); + } + + // Returns pointer to address in last level map that actually points at RangeSection space. 
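+    // For a 64-bit address this walks one 10-bit slice of the address per level -- bits [56:47],
+    // [46:37], [36:27] and [26:17], as computed by EffectiveBitsForLevel -- allocating any missing
+    // level on the way, and returns the slot covering the address's 2^17-byte granule, or NULL if a
+    // level allocation failed.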
+ RangeSectionFragmentPointer* EnsureMapsForAddress(TADDR address) + { + uintptr_t level = mapLevels + 1; +#ifdef TARGET_64BIT + auto _RangeSectionL3 = EnsureLevel(address, &_topLevel, --level); + if (_RangeSectionL3 == NULL) + return NULL; // Failure case + auto _RangeSectionL2 = EnsureLevel(address, _RangeSectionL3, --level); + if (_RangeSectionL2 == NULL) + return NULL; // Failure case +#else + auto _RangeSectionL2 = &_topLevel; +#endif + auto _RangeSectionL1 = EnsureLevel(address, _RangeSectionL2, --level); + if (_RangeSectionL1 == NULL) + return NULL; // Failure case + + auto result = EnsureLevel(address, _RangeSectionL1, --level); + if (result == NULL) + return NULL; // Failure case + + return result; + } + + RangeSectionFragment* GetRangeSectionForAddress(TADDR address, RangeSectionLockState *pLockState) + { +#ifdef TARGET_64BIT + auto _RangeSectionL4 = VolatileLoad(&_topLevel); + if (_RangeSectionL4 == NULL) + return NULL; + auto _RangeSectionL3 = (*_RangeSectionL4)[EffectiveBitsForLevel(address, 4)]; + if (_RangeSectionL3 == NULL) + return NULL; + auto _RangeSectionL2 = (*_RangeSectionL3)[EffectiveBitsForLevel(address, 3)]; +#else + auto _RangeSectionL2 = VolatileLoad(&_topLevel); +#endif + if (_RangeSectionL2 == NULL) + return NULL; + auto _RangeSectionL1 = (*_RangeSectionL2)[EffectiveBitsForLevel(address, 2)]; + if (_RangeSectionL1 == NULL) + return NULL; + + return ((*_RangeSectionL1)[EffectiveBitsForLevel(address, 1)]).VolatileLoadWithoutBarrier(pLockState); + } + + uintptr_t RangeSectionFragmentCount(PTR_RangeSection pRangeSection) + { + uintptr_t rangeSize = pRangeSection->_range.RangeSize(); + if (rangeSize == 0) + return 0; + + // Account for the range not starting at the beginning of a last level fragment + rangeSize += pRangeSection->_range.RangeStart() & (this->bytesAtLastLevel - 1); + + uintptr_t fragmentCount = ((rangeSize - 1) / bytesAtLastLevel) + 1; + return fragmentCount; + } + + TADDR IncrementAddressByMaxSizeOfFragment(TADDR input) + { + return input + bytesAtLastLevel; + } + + bool AttachRangeSectionToMap(PTR_RangeSection pRangeSection, RangeSectionLockState *pLockState) + { + assert(*pLockState == RangeSectionLockState::ReaderLocked); // Must be locked so that the cannot fail case, can't fail. NOTE: This only needs the reader lock, as the attach process can happen in parallel to reads. 
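+        // Overall flow: allocate one RangeSectionFragment per granule the range spans, make sure every
+        // map level down to the final RangeSectionFragmentPointer slot exists for each granule, and only
+        // then publish the fragments, so any failure happens before readers can observe anything.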
+ + // Currently all use of the RangeSection should be with aligned addresses, so validate that the start and end are at aligned boundaries + assert((pRangeSection->_range.RangeStart() & 0xF) == 0); + assert((pRangeSection->_range.RangeEnd() & 0xF) == 0xF); + assert((pRangeSection->_range.RangeEndOpen() & 0xF) == 0); + + uintptr_t rangeSectionFragmentCount = RangeSectionFragmentCount(pRangeSection); + size_t fragmentsSize = rangeSectionFragmentCount * sizeof(RangeSectionFragment); + void* fragmentsMemory = (RangeSectionFragment*)malloc(fragmentsSize); + memset(fragmentsMemory, 0, fragmentsSize); + + RangeSectionFragment* fragments = (RangeSectionFragment*)fragmentsMemory; + + if (fragments == NULL) + { + return false; + } + + size_t entryUpdateSize = rangeSectionFragmentCount * sizeof(RangeSectionFragmentPointer*); + RangeSectionFragmentPointer** entriesInMapToUpdate = (RangeSectionFragmentPointer**)malloc(entryUpdateSize); + memset(entriesInMapToUpdate, 0, entryUpdateSize); + if (entriesInMapToUpdate == NULL) + { + free(fragments); + return false; + } + + fragments[0].isPrimaryRangeSectionFragment = true; + + TADDR addressToPrepForUpdate = pRangeSection->_range.RangeStart(); + + // Assert that range is not already mapped in any way + assert(LookupRangeSection(addressToPrepForUpdate, pLockState) == NULL); + assert(LookupRangeSection(pRangeSection->_range.RangeEnd(), pLockState) == NULL); + for (TADDR fragmentAddress = addressToPrepForUpdate; pRangeSection->_range.IsInRange(fragmentAddress); fragmentAddress = IncrementAddressByMaxSizeOfFragment(fragmentAddress)) + { + assert(LookupRangeSection(fragmentAddress, pLockState) == NULL); + } + + for (uintptr_t iFragment = 0; iFragment < rangeSectionFragmentCount; iFragment++) + { + fragments[iFragment].pRangeSection = pRangeSection; + fragments[iFragment]._range = pRangeSection->_range; + fragments[iFragment].isCollectibleRangeSectionFragment = !!(pRangeSection->_flags & RangeSection::RANGE_SECTION_COLLECTIBLE); + RangeSectionFragmentPointer* entryInMapToUpdate = EnsureMapsForAddress(addressToPrepForUpdate); + if (entryInMapToUpdate == NULL) + { + free(fragments); + free(entriesInMapToUpdate); + return false; + } + + entriesInMapToUpdate[iFragment] = entryInMapToUpdate; + addressToPrepForUpdate = IncrementAddressByMaxSizeOfFragment(addressToPrepForUpdate); + } + + // At this point all the needed memory is allocated, and it is no longer possible to fail. 
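+        // Publish: each fragment is pushed onto the front of its slot's singly linked list with a
+        // compare-and-swap; the inner loop only repeats if another thread published a fragment into the
+        // same slot concurrently.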
+ for (uintptr_t iFragment = 0; iFragment < rangeSectionFragmentCount; iFragment++) + { + do + { + RangeSectionFragment* initialFragmentInMap = entriesInMapToUpdate[iFragment]->VolatileLoadWithoutBarrier(pLockState); + fragments[iFragment].pRangeSectionFragmentNext.VolatileStore(initialFragmentInMap); + if (entriesInMapToUpdate[iFragment]->AtomicReplace(&(fragments[iFragment]), initialFragmentInMap)) + break; + } while (true); + } + + // Assert that range is now found via lookup + assert(LookupRangeSection(pRangeSection->_range.RangeStart(), pLockState) == pRangeSection); + assert(LookupRangeSection(pRangeSection->_range.RangeEnd(), pLockState) == pRangeSection); + for (TADDR fragmentAddress = pRangeSection->_range.RangeStart(); pRangeSection->_range.IsInRange(fragmentAddress); fragmentAddress = IncrementAddressByMaxSizeOfFragment(fragmentAddress)) + { + assert(LookupRangeSection(fragmentAddress, pLockState) == pRangeSection); + } + + // entriesInMapToUpdate was just a temporary allocation + free(entriesInMapToUpdate); + + return true; + } + +public: + RangeSectionMap() : _topLevel{0}, _pCleanupList(EndOfCleanupListMarker()) + { + } + + bool Init() + { + return true; + } + +#ifdef FEATURE_READYTORUN + RangeSection *AllocateRange(Range range, IJitManager* pJit, RangeSection::RangeSectionFlags flags, PTR_Module pR2RModule, RangeSectionLockState* pLockState) + { + PTR_RangeSection pSection(new(nothrow)RangeSection(range, pJit, flags, pR2RModule)); + if (pSection == NULL) + return NULL; + + if (!AttachRangeSectionToMap(pSection, pLockState)) + { + delete pSection; + return NULL; + } + return pSection; + } +#endif + + RangeSection *AllocateRange(Range range, IJitManager* pJit, RangeSection::RangeSectionFlags flags, PTR_HeapList pHeapList, RangeSectionLockState* pLockState) + { + PTR_RangeSection pSection(new(nothrow)RangeSection(range, pJit, flags, pHeapList)); + if (pSection == NULL) + return NULL; + + if (!AttachRangeSectionToMap(pSection, pLockState)) + { + delete pSection; + return NULL; + } + return pSection; + } + + RangeSection *AllocateRange(Range range, IJitManager* pJit, RangeSection::RangeSectionFlags flags, PTR_CodeRangeMapRangeList pRangeList, RangeSectionLockState* pLockState) + { + PTR_RangeSection pSection(new(nothrow)RangeSection(range, pJit, flags, pRangeList)); + if (pSection == NULL) + return NULL; + + if (!AttachRangeSectionToMap(pSection, pLockState)) + { + delete pSection; + return NULL; + } + return pSection; + } + + PTR_RangeSection LookupRangeSection(TADDR address, RangeSectionLockState *pLockState) + { + RangeSectionFragment* fragment = GetRangeSectionForAddress(address, pLockState); + if (fragment == NULL) + return NULL; + + while ((fragment != NULL) && !fragment->InRange(address)) + { + fragment = fragment->pRangeSectionFragmentNext.VolatileLoadWithoutBarrier(pLockState); + } + + if (fragment != NULL) + { + if (fragment->pRangeSection->_pRangeSectionNextForDelete != NULL) + return NULL; + return fragment->pRangeSection; + } + + return NULL; + } + + void RemoveRangeSection(RangeSection* pRangeSection) + { + assert(pRangeSection->_pRangeSectionNextForDelete == nullptr); + assert(pRangeSection->_flags & RangeSection::RANGE_SECTION_COLLECTIBLE); +#ifdef FEATURE_READYTORUN + assert(pRangeSection->_pR2RModule == NULL); +#endif + + // Removal is implemented by placing onto the cleanup linked list. 
This is then processed later during cleanup + RangeSection* pLatestRemovedRangeSection; + do + { + pLatestRemovedRangeSection = VolatileLoad(&_pCleanupList); + VolatileStore(&pRangeSection->_pRangeSectionNextForDelete, pLatestRemovedRangeSection); + } while (InterlockedCompareExchangeT(&_pCleanupList, pRangeSection, pLatestRemovedRangeSection) != pLatestRemovedRangeSection); + } + + void CleanupRangeSections(RangeSectionLockState *pLockState) + { + assert(*pLockState == RangeSectionLockState::WriteLocked); + + while (this->_pCleanupList != EndOfCleanupListMarker()) + { + PTR_RangeSection pRangeSectionToCleanup(this->_pCleanupList); + RangeSectionFragment* pRangeSectionFragmentToFree = nullptr; + this->_pCleanupList = pRangeSectionToCleanup->_pRangeSectionNextForDelete; + + uintptr_t rangeSectionFragmentCount = RangeSectionFragmentCount(pRangeSectionToCleanup); + + TADDR addressToPrepForCleanup = pRangeSectionToCleanup->_range.RangeStart(); + + assert(LookupRangeSection(addressToPrepForCleanup, pLockState) == NULL); + assert(LookupRangeSection(pRangeSectionToCleanup->_range.RangeEnd(), pLockState) == NULL); + for (TADDR fragmentAddress = addressToPrepForCleanup; pRangeSectionToCleanup->_range.IsInRange(fragmentAddress); fragmentAddress = IncrementAddressByMaxSizeOfFragment(fragmentAddress)) + { + assert(LookupRangeSection(fragmentAddress, pLockState) == NULL); + } + + // Remove fragments from each of the fragment linked lists + for (uintptr_t iFragment = 0; iFragment < rangeSectionFragmentCount; iFragment++) + { + RangeSectionFragmentPointer* entryInMapToUpdate = EnsureMapsForAddress(addressToPrepForCleanup); + assert(entryInMapToUpdate != NULL); + + while ((entryInMapToUpdate->VolatileLoadWithoutBarrier(pLockState))->pRangeSection != pRangeSectionToCleanup) + { + entryInMapToUpdate = &(entryInMapToUpdate->VolatileLoadWithoutBarrier(pLockState))->pRangeSectionFragmentNext; + } + + RangeSectionFragment* fragment = entryInMapToUpdate->VolatileLoadWithoutBarrier(pLockState); + + // The fragment associated with the start of the range has the address that was allocated earlier + if (iFragment == 0) + { + pRangeSectionFragmentToFree = fragment; + assert(pRangeSectionFragmentToFree->isPrimaryRangeSectionFragment); + } + + entryInMapToUpdate->VolatileStore(fragment->pRangeSectionFragmentNext.VolatileLoadWithoutBarrier(pLockState)); + addressToPrepForCleanup = IncrementAddressByMaxSizeOfFragment(addressToPrepForCleanup); + } + + // Free the array of fragments + delete pRangeSectionToCleanup; + free(pRangeSectionFragmentToFree); + } + } + + #ifdef DACCESS_COMPILE + void EnumMemoryRegions(CLRDataEnumMemoryFlags flags) + { + if (!DacEnumMemoryRegion(dac_cast(this), sizeof(*this))) + return; + + _ASSERTE(FALSE); // Add an implementation here. + } + #endif// DACCESS_COMPILE - // union - // { - // PTR_CodeHeap pCodeHeap; // valid if RANGE_SECTION_HEAP is set - // PTR_Module pZapModule; // valid if RANGE_SECTION_HEAP is not set - // }; - TADDR pHeapListOrZapModule; -#if defined(HOST_64BIT) - PTR_UnwindInfoTable pUnwindInfoTable; // Points to unwind information for this memory range. -#endif // defined(HOST_64BIT) }; /*****************************************************************************/ @@ -1239,7 +1807,7 @@ class ExecutionManager } CONTRACTL_END; RangeSection * pRange = FindCodeRange(currentPC, GetScanFlags()); - return (pRange != NULL) ? pRange->pjit : NULL; + return (pRange != NULL) ? 
pRange->_pjit : NULL; } static RangeSection * FindCodeRange(PCODE currentPC, ScanFlag scanFlag); @@ -1284,11 +1852,17 @@ class ExecutionManager static void AddCodeRange(TADDR StartRange, TADDR EndRange, IJitManager* pJit, RangeSection::RangeSectionFlags flags, - void * pHp); + PTR_CodeRangeMapRangeList pRangeList); - static void AddNativeImageRange(TADDR StartRange, - SIZE_T Size, - Module * pModule); + static void AddCodeRange(TADDR StartRange, TADDR EndRange, + IJitManager* pJit, + RangeSection::RangeSectionFlags flags, + PTR_HeapList pHp); + + static void AddCodeRange(TADDR StartRange, TADDR EndRange, + IJitManager* pJit, + RangeSection::RangeSectionFlags flags, + PTR_Module pModule); static void DeleteRange(TADDR StartRange); @@ -1300,17 +1874,12 @@ class ExecutionManager return (ICodeManager *)m_pDefaultCodeMan; } - static PTR_Module FindZapModule(TADDR currentData); static PTR_Module FindReadyToRunModule(TADDR currentData); - // FindZapModule flavor to be used during GC to find GCRefMap + // FindReadyToRunModule flavor to be used during GC to find GCRefMap static PTR_Module FindModuleForGCRefMap(TADDR currentData); - static RangeSection* GetRangeSectionAndPrev(RangeSection *pRS, TADDR addr, RangeSection **ppPrev); - #ifdef DACCESS_COMPILE - static void EnumRangeList(RangeSection* list, - CLRDataEnumMemoryFlags flags); static void EnumMemoryRegions(CLRDataEnumMemoryFlags flags); #endif @@ -1327,9 +1896,9 @@ class ExecutionManager static RangeSection * FindCodeRangeWithLock(PCODE currentPC); static BOOL IsManagedCodeWithLock(PCODE currentPC); - static BOOL IsManagedCodeWorker(PCODE currentPC); + static BOOL IsManagedCodeWorker(PCODE currentPC, RangeSectionLockState *pLockState); - static RangeSection* GetRangeSection(TADDR addr); + static RangeSection* GetRangeSection(TADDR addr, RangeSectionLockState *pLockState); SPTR_DECL(EECodeManager, m_pDefaultCodeMan); @@ -1343,7 +1912,7 @@ class ExecutionManager // infrastructure to manage readers so we can lock them out and delete domain data // make ReaderCount volatile because we have order dependency in READER_INCREMENT - VOLATILE_SPTR_DECL(RangeSection, m_CodeRangeList); + SPTR_DECL(RangeSectionMap, g_pCodeRangeMap); VOLATILE_SVAL_DECL(LONG, m_dwReaderCount); VOLATILE_SVAL_DECL(LONG, m_dwWriterLock); @@ -1372,14 +1941,6 @@ class ExecutionManager } #endif // defined(_DEBUG) - static void AddRangeHelper(TADDR StartRange, - TADDR EndRange, - IJitManager* pJit, - RangeSection::RangeSectionFlags flags, - TADDR pHeapListOrZapModule); - static void DeleteRangeHelper(RangeSection** ppRangeList, - TADDR StartRange); - #ifndef DACCESS_COMPILE static PCODE getNextJumpStub(MethodDesc* pMD, PCODE target, diff --git a/src/coreclr/vm/codeman.inl b/src/coreclr/vm/codeman.inl index da36c9fa14263..8af0fc0e48bfb 100644 --- a/src/coreclr/vm/codeman.inl +++ b/src/coreclr/vm/codeman.inl @@ -6,10 +6,10 @@ inline BOOL ExecutionManager::IsCollectibleMethod(const METHODTOKEN& MethodToken) { WRAPPER_NO_CONTRACT; - return MethodToken.m_pRangeSection->flags & RangeSection::RANGE_SECTION_COLLECTIBLE; + return MethodToken.m_pRangeSection->_flags & RangeSection::RANGE_SECTION_COLLECTIBLE; } inline TADDR IJitManager::JitTokenToModuleBase(const METHODTOKEN& MethodToken) { - return MethodToken.m_pRangeSection->LowAddress; + return MethodToken.m_pRangeSection->_range.RangeStart(); } diff --git a/src/coreclr/vm/comdelegate.cpp b/src/coreclr/vm/comdelegate.cpp index aec64968e6370..d2a347e0c9d10 100644 --- a/src/coreclr/vm/comdelegate.cpp +++ b/src/coreclr/vm/comdelegate.cpp @@ 
-1568,6 +1568,27 @@ FCIMPLEND extern "C" void * _ReturnAddress(void); #endif // _MSC_VER && !TARGET_UNIX +uint32_t MethodDescToNumFixedArgs(MethodDesc *pMD) +{ + WRAPPER_NO_CONTRACT; + + PCCOR_SIGNATURE pSig; + DWORD cbSigSize; + pMD->GetSig(&pSig, &cbSigSize); + + // Since the signature is known to be valid if we've loaded the Method, we can use the + // non-error checking parser here. + uint32_t data = CorSigUncompressCallingConv(pSig); + if (data & IMAGE_CEE_CS_CALLCONV_GENERIC) + { + // Skip over generic argument count + CorSigUncompressData(pSig); + } + + // Return argument count + return CorSigUncompressData(pSig); +} + // This is the single constructor for all Delegates. The compiler // doesn't provide an implementation of the Delegate constructor. We // provide that implementation through an ECall call to this method. @@ -1635,10 +1656,8 @@ FCIMPL3(void, COMDelegate::DelegateConstruct, Object* refThisUNSAFE, Object* tar DelegateEEClass *pDelCls = (DelegateEEClass*)pDelMT->GetClass(); MethodDesc *pDelegateInvoke = COMDelegate::FindDelegateInvokeMethod(pDelMT); - MetaSig invokeSig(pDelegateInvoke); - MetaSig methodSig(pMeth); - UINT invokeArgCount = invokeSig.NumFixedArgs(); - UINT methodArgCount = methodSig.NumFixedArgs(); + UINT invokeArgCount = MethodDescToNumFixedArgs(pDelegateInvoke); + UINT methodArgCount = MethodDescToNumFixedArgs(pMeth); BOOL isStatic = pMeth->IsStatic(); if (!isStatic) { diff --git a/src/coreclr/vm/common.h b/src/coreclr/vm/common.h index 902f7cad6e385..0aabfce90c6bf 100644 --- a/src/coreclr/vm/common.h +++ b/src/coreclr/vm/common.h @@ -117,6 +117,7 @@ typedef DPTR(class EEClass) PTR_EEClass; typedef DPTR(class DelegateEEClass) PTR_DelegateEEClass; typedef DPTR(struct DomainLocalModule) PTR_DomainLocalModule; typedef VPTR(class EECodeManager) PTR_EECodeManager; +typedef DPTR(class RangeSectionMap) PTR_RangeSectionMap; typedef DPTR(class EEConfig) PTR_EEConfig; typedef VPTR(class EEDbgInterfaceImpl) PTR_EEDbgInterfaceImpl; typedef VPTR(class DebugInfoManager) PTR_DebugInfoManager; diff --git a/src/coreclr/vm/excep.cpp b/src/coreclr/vm/excep.cpp index efa7f786844b3..aee0e6b4fe293 100644 --- a/src/coreclr/vm/excep.cpp +++ b/src/coreclr/vm/excep.cpp @@ -2658,7 +2658,103 @@ HRESULT GetHRFromThrowable(OBJECTREF throwable) return hr; } -VOID DECLSPEC_NORETURN RaiseTheExceptionInternalOnly(OBJECTREF throwable, BOOL rethrow, BOOL fForStackOverflow) +struct Param : RaiseExceptionFilterParam +{ + OBJECTREF throwable; + BOOL fForStackOverflow; + ULONG_PTR exceptionArgs[INSTANCE_TAGGED_SEH_PARAM_ARRAY_SIZE]; + Thread *pThread; + ThreadExceptionState* pExState; +#ifndef TARGET_WINDOWS + PAL_SEHException *pPalException; +#endif + +}; + +FORCEINLINE void RaiseTheExceptionInternalOnlyTryBody(Param *pParam) +{ + //_ASSERTE(! pParam->isRethrown || pParam->pExState->m_pExceptionRecord); + ULONG_PTR *args = NULL; + ULONG argCount = 0; + ULONG flags = 0; + ULONG code = 0; + + // Always save the current object in the handle so on rethrow we can reuse it. This is important as it + // contains stack trace info. + // + // Note: we use SafeSetLastThrownObject, which will try to set the throwable and if there are any problems, + // it will set the throwable to something appropriate (like OOM exception) and return the new + // exception. Thus, the user's exception object can be replaced here. 
+ pParam->throwable = pParam->pThread->SafeSetLastThrownObject(pParam->throwable); + + if (!pParam->isRethrown || +#ifdef FEATURE_INTERPRETER + !pParam->pExState->IsExceptionInProgress() || +#endif // FEATURE_INTERPRETER + pParam->pExState->IsComPlusException() || + (pParam->pExState->GetExceptionCode() == STATUS_STACK_OVERFLOW)) + { + ULONG_PTR hr = GetHRFromThrowable(pParam->throwable); + + args = pParam->exceptionArgs; + argCount = MarkAsThrownByUs(args, hr); + flags = EXCEPTION_NONCONTINUABLE; + code = EXCEPTION_COMPLUS; + } + else + { + // Exception code should be consistent. + _ASSERTE((DWORD)(pParam->pExState->GetExceptionRecord()->ExceptionCode) == pParam->pExState->GetExceptionCode()); + + args = pParam->pExState->GetExceptionRecord()->ExceptionInformation; + argCount = pParam->pExState->GetExceptionRecord()->NumberParameters; + flags = pParam->pExState->GetExceptionRecord()->ExceptionFlags; + code = pParam->pExState->GetExceptionRecord()->ExceptionCode; + } + + if (pParam->pThread->IsAbortInitiated () && IsExceptionOfType(kThreadAbortException,&pParam->throwable)) + { + pParam->pThread->ResetPreparingAbort(); + + if (pParam->pThread->GetFrame() == FRAME_TOP) + { + // There is no more managed code on the stack. + pParam->pThread->ResetAbort(); + } + } + + // Can't access the exception object while we are in preemptive mode, so find out beforehand + // whether it's an SO. + BOOL fIsStackOverflow = IsExceptionOfType(kStackOverflowException, &pParam->throwable); + + if (fIsStackOverflow || pParam->fForStackOverflow) + { + // Don't probe if we're already handling an SO. Just throw the exception. + RaiseException(code, flags, argCount, args); + } + + // This needs to be both here and inside the handler below: + // enable preemptive mode before calling into the OS. + GCX_PREEMP_NO_DTOR(); + +#ifndef TARGET_WINDOWS + if (pParam->pPalException != NULL) + { + RaiseExceptionProducePALExceptionOnly(code, flags, argCount, args, pParam->pPalException); + } + else +#endif + { + // In non-debug, we can just raise the exception once we've probed. + RaiseException(code, flags, argCount, args); + } +} + +VOID RaiseTheExceptionInternalOnlyCore(OBJECTREF throwable, BOOL rethrow, BOOL fForStackOverflow +#ifndef TARGET_WINDOWS +, PAL_SEHException *pPalException +#endif +) { STATIC_CONTRACT_THROWS; STATIC_CONTRACT_GC_TRIGGERS; @@ -2688,18 +2784,14 @@ VOID DECLSPEC_NORETURN RaiseTheExceptionInternalOnly(OBJECTREF throwable, BOOL r } #endif - struct Param : RaiseExceptionFilterParam - { - OBJECTREF throwable; - BOOL fForStackOverflow; - ULONG_PTR exceptionArgs[INSTANCE_TAGGED_SEH_PARAM_ARRAY_SIZE]; - Thread *pThread; - ThreadExceptionState* pExState; - } param; + Param param; param.isRethrown = rethrow ? 1 : 0; // normalize because we use it as a count in RaiseExceptionFilter param.throwable = throwable; param.fForStackOverflow = fForStackOverflow; param.pThread = GetThread(); +#ifndef TARGET_WINDOWS + param.pPalException = pPalException; +#endif _ASSERTE(param.pThread); param.pExState = param.pThread->GetExceptionState(); @@ -2727,80 +2819,37 @@ VOID DECLSPEC_NORETURN RaiseTheExceptionInternalOnly(OBJECTREF throwable, BOOL r #endif #endif - // raise - PAL_TRY(Param *, pParam, &param) + if (rethrow +#ifndef TARGET_WINDOWS + && (pPalException == NULL) +#endif + ) { - //_ASSERTE(! pParam->isRethrown || pParam->pExState->m_pExceptionRecord); - ULONG_PTR *args = NULL; - ULONG argCount = 0; - ULONG flags = 0; - ULONG code = 0; - - // Always save the current object in the handle so on rethrow we can reuse it.
This is important as it - // contains stack trace info. - // - // Note: we use SafeSetLastThrownObject, which will try to set the throwable and if there are any problems, - // it will set the throwable to something appropriate (like OOM exception) and return the new - // exception. Thus, the user's exception object can be replaced here. - pParam->throwable = pParam->pThread->SafeSetLastThrownObject(pParam->throwable); - - if (!pParam->isRethrown || -#ifdef FEATURE_INTERPRETER - !pParam->pExState->IsExceptionInProgress() || -#endif // FEATURE_INTERPRETER - pParam->pExState->IsComPlusException() || - (pParam->pExState->GetExceptionCode() == STATUS_STACK_OVERFLOW)) + // raise + PAL_TRY(Param *, pParam, &param) { - ULONG_PTR hr = GetHRFromThrowable(pParam->throwable); - - args = pParam->exceptionArgs; - argCount = MarkAsThrownByUs(args, hr); - flags = EXCEPTION_NONCONTINUABLE; - code = EXCEPTION_COMPLUS; + RaiseTheExceptionInternalOnlyTryBody(pParam); } - else + PAL_EXCEPT_FILTER (RaiseExceptionFilter) { - // Exception code should be consistent. - _ASSERTE((DWORD)(pParam->pExState->GetExceptionRecord()->ExceptionCode) == pParam->pExState->GetExceptionCode()); - - args = pParam->pExState->GetExceptionRecord()->ExceptionInformation; - argCount = pParam->pExState->GetExceptionRecord()->NumberParameters; - flags = pParam->pExState->GetExceptionRecord()->ExceptionFlags; - code = pParam->pExState->GetExceptionRecord()->ExceptionCode; } - - if (pParam->pThread->IsAbortInitiated () && IsExceptionOfType(kThreadAbortException,&pParam->throwable)) - { - pParam->pThread->ResetPreparingAbort(); - - if (pParam->pThread->GetFrame() == FRAME_TOP) - { - // There is no more managed code on stack. - pParam->pThread->ResetAbort(); - } - } - - // Can't access the exception object when are in pre-emptive, so find out before - // if its an SO. - BOOL fIsStackOverflow = IsExceptionOfType(kStackOverflowException, &pParam->throwable); - - if (fIsStackOverflow || pParam->fForStackOverflow) - { - // Don't probe if we're already handling an SO. Just throw the exception. - RaiseException(code, flags, argCount, args); - } - - // This needs to be both here and inside the handler below - // enable preemptive mode before call into OS - GCX_PREEMP_NO_DTOR(); - - // In non-debug, we can just raise the exception once we've probed. - RaiseException(code, flags, argCount, args); + PAL_ENDTRY } - PAL_EXCEPT_FILTER (RaiseExceptionFilter) + else { + RaiseTheExceptionInternalOnlyTryBody(&param); +#ifndef TARGET_WINDOWS + if (rethrow) + { + RaiseExceptionFilter(&pPalException->ExceptionPointers, &param); + } +#endif } - PAL_ENDTRY +} + +VOID DECLSPEC_NORETURN RaiseTheExceptionInternalOnly(OBJECTREF throwable, BOOL rethrow, BOOL fForStackOverflow) +{ + RaiseTheExceptionInternalOnlyCore(throwable, rethrow, fForStackOverflow); _ASSERTE(!"Cannot continue after COM+ exception"); // Debugger can bring you here.
// For example, // Debugger breaks in due to second chance exception (unhandled) @@ -2810,7 +2859,6 @@ VOID DECLSPEC_NORETURN RaiseTheExceptionInternalOnly(OBJECTREF throwable, BOOL r UNREACHABLE(); } - // INSTALL_COMPLUS_EXCEPTION_HANDLER has a filter, so must put the call in a separate fcn static VOID DECLSPEC_NORETURN RealCOMPlusThrowWorker(OBJECTREF throwable, BOOL rethrow) { diff --git a/src/coreclr/vm/exceptmacros.h b/src/coreclr/vm/exceptmacros.h index 1627e3d3d65cc..fb3e6ce45be20 100644 --- a/src/coreclr/vm/exceptmacros.h +++ b/src/coreclr/vm/exceptmacros.h @@ -255,7 +255,11 @@ VEH_ACTION CLRVectoredExceptionHandler(PEXCEPTION_POINTERS pExceptionInfo); extern LONG InternalUnhandledExceptionFilter_Worker(PEXCEPTION_POINTERS pExceptionInfo); VOID DECLSPEC_NORETURN RaiseTheExceptionInternalOnly(OBJECTREF throwable, BOOL rethrow, BOOL fForStackOverflow = FALSE); - +VOID RaiseTheExceptionInternalOnlyCore(OBJECTREF throwable, BOOL rethrow, BOOL fForStackOverflow +#ifndef TARGET_WINDOWS +, PAL_SEHException* pPalException = NULL +#endif +); #if defined(DACCESS_COMPILE) #define INSTALL_UNWIND_AND_CONTINUE_HANDLER diff --git a/src/coreclr/vm/fcall.h b/src/coreclr/vm/fcall.h index c2816c992ecb9..7425691c45c9a 100644 --- a/src/coreclr/vm/fcall.h +++ b/src/coreclr/vm/fcall.h @@ -584,6 +584,7 @@ LPVOID __FCThrowArgument(LPVOID me, enum RuntimeExceptionKind reKind, LPCWSTR ar /* gcpoll; */ \ INSTALL_MANAGED_EXCEPTION_DISPATCHER; \ __helperframe.Push(); \ + bool __popHelperMethodFrame=true; \ MAKE_CURRENT_THREAD_AVAILABLE_EX(__helperframe.GetThread()); \ INSTALL_UNWIND_AND_CONTINUE_HANDLER_FOR_HMF(&__helperframe); @@ -616,7 +617,7 @@ LPVOID __FCThrowArgument(LPVOID me, enum RuntimeExceptionKind reKind, LPCWSTR ar #define HELPER_METHOD_FRAME_END_EX(gcpoll,allowGC) \ UNINSTALL_UNWIND_AND_CONTINUE_HANDLER; \ - __helperframe.Pop(); \ + if (__popHelperMethodFrame) __helperframe.Pop(); \ UNINSTALL_MANAGED_EXCEPTION_DISPATCHER; \ HELPER_METHOD_FRAME_END_EX_BODY(gcpoll,allowGC); diff --git a/src/coreclr/vm/gccover.cpp b/src/coreclr/vm/gccover.cpp index 71a49b63be540..248c37350d167 100644 --- a/src/coreclr/vm/gccover.cpp +++ b/src/coreclr/vm/gccover.cpp @@ -61,18 +61,20 @@ static MethodDesc* getTargetMethodDesc(PCODE target) _ASSERTE(token.IsValid()); return VirtualCallStubManager::GetInterfaceMethodDescFromToken(token); } - if (RangeSectionStubManager::GetStubKind(target) == STUB_CODE_BLOCK_PRECODE) + + auto stubKind = RangeSectionStubManager::GetStubKind(target); + if (stubKind == STUB_CODE_BLOCK_PRECODE) { // The address looks like a value stub, try to get the method descriptor. 
return MethodDesc::GetMethodDescFromStubAddr(target, TRUE); } - if (PrecodeStubManager::g_pManager->GetStubPrecodeRangeList()->IsInRange(target)) + if (stubKind == STUB_CODE_BLOCK_STUBPRECODE) { return (MethodDesc*)((StubPrecode*)PCODEToPINSTR(target))->GetMethodDesc(); } - if (PrecodeStubManager::g_pManager->GetFixupPrecodeRangeList()->IsInRange(target)) + if (stubKind == STUB_CODE_BLOCK_FIXUPPRECODE) { if (!FixupPrecode::IsFixupPrecodeByASM(target)) { diff --git a/src/coreclr/vm/genericdict.cpp b/src/coreclr/vm/genericdict.cpp index db18a4044f567..52645f0d92cf5 100644 --- a/src/coreclr/vm/genericdict.cpp +++ b/src/coreclr/vm/genericdict.cpp @@ -690,10 +690,7 @@ Dictionary::PopulateEntry( ptr = SigPointer((PCCOR_SIGNATURE)signature); IfFailThrow(ptr.GetData(&kind)); - Module * pContainingZapModule = ExecutionManager::FindZapModule(dac_cast<TADDR>(signature)); - - zapSigContext = ZapSig::Context(CoreLibBinder::GetModule(), (void *)pContainingZapModule, ZapSig::NormalTokens); - pZapSigContext = (pContainingZapModule != NULL) ? &zapSigContext : NULL; + pZapSigContext = NULL; } ModuleBase * pLookupModule = (isReadyToRunModule) ? pZapSigContext->pInfoModule : CoreLibBinder::GetModule(); diff --git a/src/coreclr/vm/jithelpers.cpp b/src/coreclr/vm/jithelpers.cpp index 2628f560ab436..f5a2a1eb4746f 100644 --- a/src/coreclr/vm/jithelpers.cpp +++ b/src/coreclr/vm/jithelpers.cpp @@ -4026,7 +4026,14 @@ HCIMPL1(void, IL_Throw, Object* obj) } } - RaiseTheExceptionInternalOnly(oref, FALSE); +#ifndef TARGET_WINDOWS + // See definition of UNINSTALL_MANAGED_EXCEPTION_DISPATCHER for details around where exCopy and hasCaughtException come from + RaiseTheExceptionInternalOnlyCore(oref, FALSE, FALSE, &exCopy); + hasCaughtException = true; + __popHelperMethodFrame = false; // Disable the helper method frame pop +#else + RaiseTheExceptionInternalOnly(oref, FALSE); +#endif HELPER_METHOD_FRAME_END(); } @@ -4045,7 +4052,14 @@ HCIMPL0(void, IL_Rethrow) OBJECTREF throwable = GetThread()->GetThrowable(); if (throwable != NULL) { +#ifndef TARGET_WINDOWS + // See definition of UNINSTALL_MANAGED_EXCEPTION_DISPATCHER for details around where exCopy and hasCaughtException come from + RaiseTheExceptionInternalOnlyCore(throwable, TRUE, FALSE, &exCopy); + hasCaughtException = true; + __popHelperMethodFrame = false; // Disable the helper method frame pop +#else RaiseTheExceptionInternalOnly(throwable, TRUE); +#endif } else { diff --git a/src/coreclr/vm/jitinterface.cpp b/src/coreclr/vm/jitinterface.cpp index 5bc558cd3db1f..f10998ffae379 100644 --- a/src/coreclr/vm/jitinterface.cpp +++ b/src/coreclr/vm/jitinterface.cpp @@ -14724,10 +14724,10 @@ void EECodeInfo::Init(PCODE codeAddress, ExecutionManager::ScanFlag scanFlag) if (pRS == NULL) goto Invalid; - if (!pRS->pjit->JitCodeToMethodInfo(pRS, codeAddress, &m_pMD, this)) + if (!pRS->_pjit->JitCodeToMethodInfo(pRS, codeAddress, &m_pMD, this)) goto Invalid; - m_pJM = pRS->pjit; + m_pJM = pRS->_pjit; return; Invalid: diff --git a/src/coreclr/vm/loaderallocator.cpp b/src/coreclr/vm/loaderallocator.cpp index f1ba447b34c95..b49a100d105f4 100644 --- a/src/coreclr/vm/loaderallocator.cpp +++ b/src/coreclr/vm/loaderallocator.cpp @@ -17,7 +17,9 @@ UINT64 LoaderAllocator::cLoaderAllocatorsCreated = 1; -LoaderAllocator::LoaderAllocator() +LoaderAllocator::LoaderAllocator(bool collectible) : + m_stubPrecodeRangeList(STUB_CODE_BLOCK_STUBPRECODE, collectible), + m_fixupPrecodeRangeList(STUB_CODE_BLOCK_FIXUPPRECODE, collectible) { LIMITED_METHOD_CONTRACT; @@ -66,7 +68,7 @@
LoaderAllocator::LoaderAllocator() m_pLastUsedCodeHeap = NULL; m_pLastUsedDynamicCodeHeap = NULL; m_pJumpStubCache = NULL; - m_IsCollectible = false; + m_IsCollectible = collectible; m_pMarshalingData = NULL; @@ -1194,7 +1196,7 @@ void LoaderAllocator::Init(BaseDomain *pDomain, BYTE *pExecutableHeapMemory) m_pNewStubPrecodeHeap = new (&m_NewStubPrecodeHeapInstance) LoaderHeap(2 * GetOsPageSize(), 2 * GetOsPageSize(), - PrecodeStubManager::g_pManager->GetStubPrecodeRangeList(), + &m_stubPrecodeRangeList, UnlockedLoaderHeap::HeapKind::Interleaved, false /* fUnlocked */, StubPrecode::GenerateCodePage, @@ -1202,7 +1204,7 @@ void LoaderAllocator::Init(BaseDomain *pDomain, BYTE *pExecutableHeapMemory) m_pFixupPrecodeHeap = new (&m_FixupPrecodeHeapInstance) LoaderHeap(2 * GetOsPageSize(), 2 * GetOsPageSize(), - PrecodeStubManager::g_pManager->GetFixupPrecodeRangeList(), + &m_fixupPrecodeRangeList, UnlockedLoaderHeap::HeapKind::Interleaved, false /* fUnlocked */, FixupPrecode::GenerateCodePage, @@ -1687,17 +1689,6 @@ void DomainAssemblyIterator::operator++() pNextAssembly = pCurrentAssembly ? pCurrentAssembly->GetNextDomainAssemblyInSameALC() : NULL; } -void AssemblyLoaderAllocator::SetCollectible() -{ - CONTRACTL - { - NOTHROW; - } - CONTRACTL_END; - - m_IsCollectible = true; -} - #ifndef DACCESS_COMPILE void AssemblyLoaderAllocator::Init(AppDomain* pAppDomain) diff --git a/src/coreclr/vm/loaderallocator.hpp b/src/coreclr/vm/loaderallocator.hpp index b943ea37ad4dd..da7349efc4354 100644 --- a/src/coreclr/vm/loaderallocator.hpp +++ b/src/coreclr/vm/loaderallocator.hpp @@ -39,6 +39,150 @@ typedef SHash<PtrSetSHashTraits<LoaderAllocator *>> LoaderAllocatorSet; class CustomAssemblyBinder; + +// This implements the Add/Remove RangeList API on top of the CodeRangeMap in the code manager +class CodeRangeMapRangeList : public RangeList +{ +public: + VPTR_VTABLE_CLASS(CodeRangeMapRangeList, RangeList) + +#if defined(DACCESS_COMPILE) || !defined(TARGET_WINDOWS) + CodeRangeMapRangeList() : + _RangeListRWLock(COOPERATIVE_OR_PREEMPTIVE, LOCK_TYPE_DEFAULT), + _rangeListType(STUB_CODE_BLOCK_UNKNOWN), + _id(NULL), + _collectible(true) + {} +#endif + + CodeRangeMapRangeList(StubCodeBlockKind rangeListType, bool collectible) : + _RangeListRWLock(COOPERATIVE_OR_PREEMPTIVE, LOCK_TYPE_DEFAULT), + _rangeListType(rangeListType), + _id(NULL), + _collectible(collectible) + { + LIMITED_METHOD_CONTRACT; + } + + ~CodeRangeMapRangeList() + { + LIMITED_METHOD_CONTRACT; + RemoveRangesWorker(_id, NULL, NULL); + } + + StubCodeBlockKind GetCodeBlockKind() + { + LIMITED_METHOD_CONTRACT; + return _rangeListType; + } + +private: +#ifndef DACCESS_COMPILE + void AddRangeWorkerHelper(TADDR start, TADDR end, void* id) + { + SimpleWriteLockHolder lh(&_RangeListRWLock); + + _ASSERTE(id == _id || _id == NULL); + _id = id; + // Grow the array first, so that a failure to allocate cannot leave a newly added range untracked below. + + RangeSection::RangeSectionFlags flags = RangeSection::RANGE_SECTION_RANGELIST; + if (_collectible) + { + _starts.Preallocate(_starts.GetCount() + 1); + flags = (RangeSection::RangeSectionFlags)(flags | RangeSection::RANGE_SECTION_COLLECTIBLE); + } + + ExecutionManager::AddCodeRange(start, end, ExecutionManager::GetEEJitManager(), flags, this); + + if (_collectible) + { + // This cannot fail as the array was Preallocated above.
+ _starts.Append(start); + } + } +#endif + +protected: + virtual BOOL AddRangeWorker(const BYTE *start, const BYTE *end, void *id) + { + CONTRACTL + { + NOTHROW; + GC_NOTRIGGER; + } + CONTRACTL_END; + +#ifndef DACCESS_COMPILE + BOOL result = FALSE; + + EX_TRY + { + AddRangeWorkerHelper((TADDR)start, (TADDR)end, id); + result = TRUE; + } + EX_CATCH + { + } + EX_END_CATCH(SwallowAllExceptions) + + return result; +#else + return FALSE; +#endif // DACCESS_COMPILE + } + + virtual void RemoveRangesWorker(void *id, const BYTE *start, const BYTE *end) + { + CONTRACTL + { + NOTHROW; + GC_NOTRIGGER; + } + CONTRACTL_END; + +#ifndef DACCESS_COMPILE + // This implementation only works for the case where the RangeList is used in a single LoaderHeap + _ASSERTE(start == NULL); + _ASSERTE(end == NULL); + + SimpleWriteLockHolder lh(&_RangeListRWLock); + _ASSERTE(id == _id || (_id == NULL && _starts.IsEmpty())); + + // Iterate backwards to improve efficiency of removals + // as any linked lists in the RangeSectionMap code are in reverse order of insertion. + for (auto i = _starts.GetCount(); i > 0;) + { + --i; + if (_starts[i] != 0) + { + ExecutionManager::DeleteRange(_starts[i]); + _starts[i] = 0; + } + } +#endif // DACCESS_COMPILE + } + + virtual BOOL IsInRangeWorker(TADDR address, TADDR *pID = NULL) + { + WRAPPER_NO_CONTRACT; + RangeSection *pRS = ExecutionManager::FindCodeRange(address, ExecutionManager::ScanReaderLock); + if (pRS == NULL) + return FALSE; + if ((pRS->_flags & RangeSection::RANGE_SECTION_RANGELIST) == 0) + return FALSE; + + return (pRS->_pRangeList == this); + } + +private: + SimpleRWLock _RangeListRWLock; + StubCodeBlockKind _rangeListType; + SArray<TADDR> _starts; + void* _id; + bool _collectible; +}; + // Iterator over a DomainAssembly in the same ALC class DomainAssemblyIterator { @@ -197,6 +341,9 @@ class LoaderAllocator // IL stub cache with fabricated MethodTable parented by a random module in this LoaderAllocator.
ILStubCache m_ILStubCache; + CodeRangeMapRangeList m_stubPrecodeRangeList; + CodeRangeMapRangeList m_fixupPrecodeRangeList; + #ifdef FEATURE_PGO // PgoManager to hold pgo data associated with this LoaderAllocator Volatile<PgoManager*> m_pgoManager; @@ -555,7 +702,7 @@ class LoaderAllocator OBJECTREF GetHandleValue(LOADERHANDLE handle); - LoaderAllocator(); + LoaderAllocator(bool collectible); virtual ~LoaderAllocator(); BaseDomain *GetDomain() { LIMITED_METHOD_CONTRACT; return m_pDomain; } virtual BOOL CanUnload() = 0; @@ -702,7 +849,7 @@ class GlobalLoaderAllocator : public LoaderAllocator public: void Init(BaseDomain *pDomain); - GlobalLoaderAllocator() : m_Id(LAT_Global, (void*)1) { LIMITED_METHOD_CONTRACT;}; + GlobalLoaderAllocator() : LoaderAllocator(false), m_Id(LAT_Global, (void*)1) { LIMITED_METHOD_CONTRACT;}; virtual LoaderAllocatorID* Id(); virtual BOOL CanUnload(); }; @@ -722,7 +869,7 @@ class AssemblyLoaderAllocator : public LoaderAllocator ShuffleThunkCache* m_pShuffleThunkCache; public: virtual LoaderAllocatorID* Id(); - AssemblyLoaderAllocator() : m_Id(LAT_Assembly), m_pShuffleThunkCache(NULL) + AssemblyLoaderAllocator() : LoaderAllocator(true), m_Id(LAT_Assembly), m_pShuffleThunkCache(NULL) #if !defined(DACCESS_COMPILE) , m_binderToRelease(NULL) #endif @@ -730,8 +877,6 @@ class AssemblyLoaderAllocator : public LoaderAllocator void Init(AppDomain *pAppDomain); virtual BOOL CanUnload(); - void SetCollectible(); - void AddDomainAssembly(DomainAssembly *pDomainAssembly) { WRAPPER_NO_CONTRACT; diff --git a/src/coreclr/vm/method.cpp b/src/coreclr/vm/method.cpp index b4ac30905de79..3827d864895d6 100644 --- a/src/coreclr/vm/method.cpp +++ b/src/coreclr/vm/method.cpp @@ -2109,17 +2109,6 @@ MethodDesc* NonVirtualEntry2MethodDesc(PCODE entryPoint) RangeSection* pRS = ExecutionManager::FindCodeRange(entryPoint, ExecutionManager::GetScanFlags()); if (pRS == NULL) { - TADDR pInstr = PCODEToPINSTR(entryPoint); - if (PrecodeStubManager::g_pManager->GetStubPrecodeRangeList()->IsInRange(entryPoint)) - { - return (MethodDesc*)((StubPrecode*)pInstr)->GetMethodDesc(); - } - - if (PrecodeStubManager::g_pManager->GetFixupPrecodeRangeList()->IsInRange(entryPoint)) - { - return (MethodDesc*)((FixupPrecode*)pInstr)->GetMethodDesc(); - } - // Is it an FCALL?
MethodDesc* pFCallMD = ECall::MapTargetBackToMethod(entryPoint); if (pFCallMD != NULL) @@ -2130,16 +2119,38 @@ MethodDesc* NonVirtualEntry2MethodDesc(PCODE entryPoint) return NULL; } + // Inlined fast path for fixup precode and stub precode from RangeList implementation + if (pRS->_flags == RangeSection::RANGE_SECTION_RANGELIST) + { + if (pRS->_pRangeList->GetCodeBlockKind() == STUB_CODE_BLOCK_FIXUPPRECODE) + { + return (MethodDesc*)((FixupPrecode*)PCODEToPINSTR(entryPoint))->GetMethodDesc(); + } + if (pRS->_pRangeList->GetCodeBlockKind() == STUB_CODE_BLOCK_STUBPRECODE) + { + return (MethodDesc*)((StubPrecode*)PCODEToPINSTR(entryPoint))->GetMethodDesc(); + } + } + MethodDesc* pMD; - if (pRS->pjit->JitCodeToMethodInfo(pRS, entryPoint, &pMD, NULL)) + if (pRS->_pjit->JitCodeToMethodInfo(pRS, entryPoint, &pMD, NULL)) return pMD; - if (pRS->pjit->GetStubCodeBlockKind(pRS, entryPoint) == STUB_CODE_BLOCK_PRECODE) - return MethodDesc::GetMethodDescFromStubAddr(entryPoint); + auto stubCodeBlockKind = pRS->_pjit->GetStubCodeBlockKind(pRS, entryPoint); - // We should never get here - _ASSERTE(!"NonVirtualEntry2MethodDesc failed for RangeSection"); - return NULL; + switch(stubCodeBlockKind) + { + case STUB_CODE_BLOCK_PRECODE: + return MethodDesc::GetMethodDescFromStubAddr(entryPoint); + case STUB_CODE_BLOCK_FIXUPPRECODE: + return (MethodDesc*)((FixupPrecode*)PCODEToPINSTR(entryPoint))->GetMethodDesc(); + case STUB_CODE_BLOCK_STUBPRECODE: + return (MethodDesc*)((StubPrecode*)PCODEToPINSTR(entryPoint))->GetMethodDesc(); + default: + // We should never get here + _ASSERTE(!"NonVirtualEntry2MethodDesc failed for RangeSection"); + return NULL; + } } //******************************************************************************* diff --git a/src/coreclr/vm/prestub.cpp b/src/coreclr/vm/prestub.cpp index 4b4373ac40e81..c8a95d77a18cd 100644 --- a/src/coreclr/vm/prestub.cpp +++ b/src/coreclr/vm/prestub.cpp @@ -2399,12 +2399,6 @@ EXTERN_C PCODE STDCALL ExternalMethodFixupWorker(TransitionBlock * pTransitionBl } #endif - // FUTURE: Consider always passing in module and section index to avoid the lookups - if (pModule == NULL) - { - pModule = ExecutionManager::FindZapModule(pIndirection); - sectionIndex = (DWORD)-1; - } _ASSERTE(pModule != NULL); pEMFrame->SetCallSite(pModule, pIndirection); diff --git a/src/coreclr/vm/stubmgr.cpp b/src/coreclr/vm/stubmgr.cpp index fcf9838e9e235..1083add024ced 100644 --- a/src/coreclr/vm/stubmgr.cpp +++ b/src/coreclr/vm/stubmgr.cpp @@ -1003,7 +1003,8 @@ BOOL PrecodeStubManager::CheckIsStub_Internal(PCODE stubStartAddress) } CONTRACTL_END; - return GetStubPrecodeRangeList()->IsInRange(stubStartAddress) || GetFixupPrecodeRangeList()->IsInRange(stubStartAddress); + auto stubKind = RangeSectionStubManager::GetStubKind(stubStartAddress); + return (stubKind == STUB_CODE_BLOCK_FIXUPPRECODE) || (stubKind == STUB_CODE_BLOCK_STUBPRECODE); } BOOL PrecodeStubManager::DoTraceStub(PCODE stubStartAddress, @@ -1551,7 +1552,7 @@ RangeSectionStubManager::GetStubKind(PCODE stubStartAddress) if (pRS == NULL) return STUB_CODE_BLOCK_UNKNOWN; - return pRS->pjit->GetStubCodeBlockKind(pRS, stubStartAddress); + return pRS->_pjit->GetStubCodeBlockKind(pRS, stubStartAddress); } // @@ -2380,8 +2381,6 @@ PrecodeStubManager::DoEnumMemoryRegions(CLRDataEnumMemoryFlags flags) WRAPPER_NO_CONTRACT; DAC_ENUM_VTHIS(); EMEM_OUT(("MEM: %p PrecodeStubManager\n", dac_cast<TADDR>(this))); - GetStubPrecodeRangeList()->EnumMemoryRegions(flags); - GetFixupPrecodeRangeList()->EnumMemoryRegions(flags); } void diff --git
a/src/coreclr/vm/stubmgr.h b/src/coreclr/vm/stubmgr.h index 49e2e83770476..c4c80d0b17db6 100644 --- a/src/coreclr/vm/stubmgr.h +++ b/src/coreclr/vm/stubmgr.h @@ -399,28 +399,6 @@ class PrecodeStubManager : public StubManager ~PrecodeStubManager() {WRAPPER_NO_CONTRACT;} #endif - protected: - LockedRangeList m_stubPrecodeRangeList; - LockedRangeList m_fixupPrecodeRangeList; - - public: - // Get dac-ized pointer to rangelist. - PTR_RangeList GetStubPrecodeRangeList() - { - SUPPORTS_DAC; - - TADDR addr = PTR_HOST_MEMBER_TADDR(PrecodeStubManager, this, m_stubPrecodeRangeList); - return PTR_RangeList(addr); - } - - PTR_RangeList GetFixupPrecodeRangeList() - { - SUPPORTS_DAC; - - TADDR addr = PTR_HOST_MEMBER_TADDR(PrecodeStubManager, this, m_fixupPrecodeRangeList); - return PTR_RangeList(addr); - } - public: virtual BOOL CheckIsStub_Internal(PCODE stubStartAddress); diff --git a/src/coreclr/vm/threads.cpp b/src/coreclr/vm/threads.cpp index 36a5a30141871..cdc4c11ade874 100644 --- a/src/coreclr/vm/threads.cpp +++ b/src/coreclr/vm/threads.cpp @@ -3951,30 +3951,6 @@ DWORD Thread::Wait(CLREvent *pEvent, INT32 timeOut, PendingSync *syncInfo) return dwResult; } -void Thread::Wake(SyncBlock *psb) -{ - WRAPPER_NO_CONTRACT; - - CLREvent* hEvent = NULL; - WaitEventLink *walk = &m_WaitEventLink; - while (walk->m_Next) { - if (walk->m_Next->m_WaitSB == psb) { - hEvent = walk->m_Next->m_EventWait; - // We are guaranteed that only one thread can change walk->m_Next->m_WaitSB - // since the thread is helding the syncblock. - walk->m_Next->m_WaitSB = (SyncBlock*)((DWORD_PTR)walk->m_Next->m_WaitSB | 1); - break; - } -#ifdef _DEBUG - else if ((SyncBlock*)((DWORD_PTR)walk->m_Next & ~1) == psb) { - _ASSERTE (!"Can not wake a thread on the same SyncBlock more than once"); - } -#endif - } - PREFIX_ASSUME (hEvent != NULL); - hEvent->Set(); -} - #define WAIT_INTERRUPT_THREADABORT 0x1 #define WAIT_INTERRUPT_INTERRUPT 0x2 #define WAIT_INTERRUPT_OTHEREXCEPTION 0x4 diff --git a/src/coreclr/vm/threads.h b/src/coreclr/vm/threads.h index c312981b70771..1665565c5e640 100644 --- a/src/coreclr/vm/threads.h +++ b/src/coreclr/vm/threads.h @@ -3170,7 +3170,6 @@ class Thread // Support for Wait/Notify BOOL Block(INT32 timeOut, PendingSync *syncInfo); - void Wake(SyncBlock *psb); DWORD Wait(HANDLE *objs, int cntObjs, INT32 timeOut, PendingSync *syncInfo); DWORD Wait(CLREvent* pEvent, INT32 timeOut, PendingSync *syncInfo);
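
Note on the MethodDescToNumFixedArgs helper introduced in comdelegate.cpp: it can avoid constructing two MetaSig objects because the fixed-argument count sits at a fixed position near the front of an ECMA-335 MethodDefSig blob, immediately after the calling-convention value and, for generic methods, the compressed generic-parameter count. The stand-alone sketch below only illustrates that prefix layout; decodeCompressedUInt and the sample signature bytes are hypothetical illustrations, not runtime code, and the real helper relies on CorSigUncompressCallingConv and CorSigUncompressData as shown in the diff.

#include <cstdint>
#include <cstdio>

// Hypothetical helper: decode one ECMA-335 compressed unsigned integer
// (II.23.2): 1 byte if < 0x80, 2 bytes if the first byte starts with 10,
// 4 bytes if it starts with 110. Advances the cursor.
static uint32_t decodeCompressedUInt(const uint8_t*& p)
{
    uint8_t b = *p++;
    if ((b & 0x80) == 0)
        return b;
    if ((b & 0xC0) == 0x80)
        return ((b & 0x3F) << 8) | *p++;
    uint32_t v = (uint32_t)(b & 0x1F) << 24;
    v |= (uint32_t)p[0] << 16;
    v |= (uint32_t)p[1] << 8;
    v |= (uint32_t)p[2];
    p += 3;
    return v;
}

int main()
{
    // Hypothetical MethodDefSig for "static int Add(int, int)":
    // 0x00 = default calling convention, 0x02 = two fixed args,
    // then the return type (ELEMENT_TYPE_I4) and two ELEMENT_TYPE_I4 params.
    const uint8_t sig[] = { 0x00, 0x02, 0x08, 0x08, 0x08 };

    const uint8_t* p = sig;
    uint32_t callConv = decodeCompressedUInt(p);   // calling-convention value
    if (callConv & 0x10)                           // IMAGE_CEE_CS_CALLCONV_GENERIC
        decodeCompressedUInt(p);                   // skip the generic parameter count
    uint32_t fixedArgs = decodeCompressedUInt(p);  // the value the patch extracts

    printf("calling convention: 0x%02x, fixed args: %u\n", (unsigned)callConv, (unsigned)fixedArgs);
    return 0;
}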
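Note on CodeRangeMapRangeList: the class replaces the per-manager LockedRangeList bookkeeping with registrations in the ExecutionManager's code range map, so that IsInRange becomes a single FindCodeRange probe plus an identity check on the owning list, and removal replays the recorded start addresses. The sketch below is a simplified stand-alone model of that idea, assuming only a shared ordered map keyed by range start; SimpleRangeMapList, RangeEntry, and g_rangeMap are invented names for illustration, and none of the runtime's actual types, flags, or locking are modeled.

#include <cstdint>
#include <cstdio>
#include <map>
#include <vector>

// Illustrative model only: one process-wide ordered map of [start, end)
// ranges, each tagged with the list that registered it, mimicking how
// CodeRangeMapRangeList defers membership tests to the code range map.
struct RangeEntry { uintptr_t end; const void* owner; };
static std::map<uintptr_t, RangeEntry> g_rangeMap; // stand-in for the shared code range map

class SimpleRangeMapList
{
public:
    ~SimpleRangeMapList() { RemoveAll(); }

    void AddRange(uintptr_t start, uintptr_t end)
    {
        // Reserve bookkeeping space first so a later failure cannot leave a
        // registered range untracked (mirrors the Preallocate-then-Append idea).
        _starts.reserve(_starts.size() + 1);
        g_rangeMap[start] = RangeEntry{ end, this };
        _starts.push_back(start);
    }

    bool IsInRange(uintptr_t address) const
    {
        // Find the first range starting above 'address', then step back one.
        auto it = g_rangeMap.upper_bound(address);
        if (it == g_rangeMap.begin())
            return false;
        --it;
        // The range must cover the address and must have been added by this
        // list, the moral equivalent of checking pRS->_pRangeList == this.
        return address >= it->first && address < it->second.end && it->second.owner == this;
    }

    void RemoveAll()
    {
        // Remove in reverse order of insertion, as RemoveRangesWorker does.
        for (auto rit = _starts.rbegin(); rit != _starts.rend(); ++rit)
            g_rangeMap.erase(*rit);
        _starts.clear();
    }

private:
    std::vector<uintptr_t> _starts; // per-list record of what was added
};

int main()
{
    SimpleRangeMapList stubs;
    stubs.AddRange(0x1000, 0x2000);
    printf("0x1800 in range: %d\n", (int)stubs.IsInRange(0x1800)); // 1
    printf("0x2800 in range: %d\n", (int)stubs.IsInRange(0x2800)); // 0
    stubs.RemoveAll();
    printf("after removal:   %d\n", (int)stubs.IsInRange(0x1800)); // 0
    return 0;
}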