diff --git a/src/coreclr/vm/clsload.cpp b/src/coreclr/vm/clsload.cpp index 02505194ffd06..68de4d4b718fa 100644 --- a/src/coreclr/vm/clsload.cpp +++ b/src/coreclr/vm/clsload.cpp @@ -1061,27 +1061,8 @@ void ClassLoader::TryEnsureLoaded(TypeHandle typeHnd, ClassLoadLevel level) #endif // DACCESS_COMPILE } -// This is separated out to avoid the overhead of C++ exception handling in the non-locking case. /* static */ -TypeHandle ClassLoader::LookupTypeKeyUnderLock(TypeKey *pKey, - EETypeHashTable *pTable, - CrstBase *pLock) -{ - WRAPPER_NO_CONTRACT; - SUPPORTS_DAC; - - // m_AvailableTypesLock has to be taken in cooperative mode to avoid deadlocks during GC - GCX_MAYBE_COOP_NO_THREAD_BROKEN(!IsGCThread()); - - CrstHolder ch(pLock); - return pTable->GetValue(pKey); -} - -/* static */ -TypeHandle ClassLoader::LookupTypeKey(TypeKey *pKey, - EETypeHashTable *pTable, - CrstBase *pLock, - BOOL fCheckUnderLock) +TypeHandle ClassLoader::LookupTypeKey(TypeKey *pKey, EETypeHashTable *pTable) { CONTRACTL { NOTHROW; @@ -1090,26 +1071,15 @@ TypeHandle ClassLoader::LookupTypeKey(TypeKey *pKey, PRECONDITION(CheckPointer(pKey)); PRECONDITION(pKey->IsConstructed()); PRECONDITION(CheckPointer(pTable)); - PRECONDITION(!fCheckUnderLock || CheckPointer(pLock)); MODE_ANY; SUPPORTS_DAC; } CONTRACTL_END; - TypeHandle th; - - if (fCheckUnderLock) - { - th = LookupTypeKeyUnderLock(pKey, pTable, pLock); - } - else - { - th = pTable->GetValue(pKey); - } - return th; + return pTable->GetValue(pKey); } /* static */ -TypeHandle ClassLoader::LookupInLoaderModule(TypeKey *pKey, BOOL fCheckUnderLock) +TypeHandle ClassLoader::LookupInLoaderModule(TypeKey *pKey) { CONTRACTL { NOTHROW; @@ -1124,39 +1094,12 @@ TypeHandle ClassLoader::LookupInLoaderModule(TypeKey *pKey, BOOL fCheckUnderLock Module *pLoaderModule = ComputeLoaderModule(pKey); PREFIX_ASSUME(pLoaderModule!=NULL); - return LookupTypeKey(pKey, - pLoaderModule->GetAvailableParamTypes(), - &pLoaderModule->GetClassLoader()->m_AvailableTypesLock, - 
fCheckUnderLock); + return LookupTypeKey(pKey, pLoaderModule->GetAvailableParamTypes()); } /* static */ TypeHandle ClassLoader::LookupTypeHandleForTypeKey(TypeKey *pKey) -{ - WRAPPER_NO_CONTRACT; - SUPPORTS_DAC; - - // Make an initial lookup without taking any locks. - TypeHandle th = LookupTypeHandleForTypeKeyInner(pKey, FALSE); - - // A non-null TypeHandle for the above lookup indicates success - // A null TypeHandle only indicates "well, it might have been there, - // try again with a lock". This kind of negative result will - // only happen while accessing the underlying EETypeHashTable - // during a resize, i.e. very rarely. In such a case, we just - // perform the lookup again, but indicate that appropriate locks - // should be taken. - - if (th.IsNull()) - { - th = LookupTypeHandleForTypeKeyInner(pKey, TRUE); - } - - return th; -} -/* static */ -TypeHandle ClassLoader::LookupTypeHandleForTypeKeyInner(TypeKey *pKey, BOOL fCheckUnderLock) { CONTRACTL { @@ -1184,7 +1127,7 @@ TypeHandle ClassLoader::LookupTypeHandleForTypeKeyInner(TypeKey *pKey, BOOL fChe // If the thing is not NGEN'd then this may // be different to pPreferredZapModule. If they are the same then // we can reuse the results of the lookup above. - TypeHandle thLM = LookupInLoaderModule(pKey, fCheckUnderLock); + TypeHandle thLM = LookupInLoaderModule(pKey); if (!thLM.IsNull()) { return thLM; @@ -2055,11 +1998,10 @@ VOID ClassLoader::Init(AllocMemTracker *pamTracker) CrstAvailableClass, CRST_REENTRANCY); - // This lock is taken within the classloader whenever we have to insert a new param. type into the table - // This lock also needs to be taken for a read operation in a GC_NOTRIGGER scope, thus the ANYMODE flag. + // This lock is taken within the classloader whenever we have to insert a new param. type into the table. 
m_AvailableTypesLock.Init( - CrstAvailableParamTypes, - (CrstFlags)(CRST_UNSAFE_ANYMODE | CRST_DEBUGGER_THREAD)); + CrstAvailableParamTypes, + CRST_DEBUGGER_THREAD); #ifdef _DEBUG CorTypeInfo::CheckConsistency(); @@ -3104,9 +3046,6 @@ TypeHandle ClassLoader::PublishType(TypeKey *pTypeKey, TypeHandle typeHnd) Module *pLoaderModule = ComputeLoaderModule(pTypeKey); EETypeHashTable *pTable = pLoaderModule->GetAvailableParamTypes(); - // m_AvailableTypesLock has to be taken in cooperative mode to avoid deadlocks during GC - GCX_COOP(); - CrstHolder ch(&pLoaderModule->GetClassLoader()->m_AvailableTypesLock); // The type could have been loaded by a different thread as side-effect of avoiding deadlocks caused by LoadsTypeViolation @@ -3121,9 +3060,6 @@ TypeHandle ClassLoader::PublishType(TypeKey *pTypeKey, TypeHandle typeHnd) Module *pModule = pTypeKey->GetModule(); mdTypeDef typeDef = pTypeKey->GetTypeToken(); - // m_AvailableTypesLock has to be taken in cooperative mode to avoid deadlocks during GC - GCX_COOP(); - CrstHolder ch(&pModule->GetClassLoader()->m_AvailableTypesLock); // ! We cannot fail after this point. 
diff --git a/src/coreclr/vm/clsload.hpp b/src/coreclr/vm/clsload.hpp index ac5107a674425..53afd0b38f4cf 100644 --- a/src/coreclr/vm/clsload.hpp +++ b/src/coreclr/vm/clsload.hpp @@ -899,21 +899,13 @@ class ClassLoader ClassLoadLevel level = CLASS_LOADED, const InstantiationContext *pInstContext = NULL); - static TypeHandle LookupTypeKeyUnderLock(TypeKey *pKey, - EETypeHashTable *pTable, - CrstBase *pLock); + static TypeHandle LookupTypeKey(TypeKey *pKey, EETypeHashTable *pTable); - static TypeHandle LookupTypeKey(TypeKey *pKey, - EETypeHashTable *pTable, - CrstBase *pLock, - BOOL fCheckUnderLock); - - static TypeHandle LookupInLoaderModule(TypeKey* pKey, BOOL fCheckUnderLock); + static TypeHandle LookupInLoaderModule(TypeKey* pKey); // Lookup a handle in the appropriate table // (declaring module for TypeDef or loader-module for constructed types) static TypeHandle LookupTypeHandleForTypeKey(TypeKey *pTypeKey); - static TypeHandle LookupTypeHandleForTypeKeyInner(TypeKey *pTypeKey, BOOL fCheckUnderLock); static void DECLSPEC_NORETURN ThrowTypeLoadException(TypeKey *pKey, UINT resIDWhy); diff --git a/src/coreclr/vm/dacenumerablehash.h b/src/coreclr/vm/dacenumerablehash.h index 6a07096fccf16..b371756466aaf 100644 --- a/src/coreclr/vm/dacenumerablehash.h +++ b/src/coreclr/vm/dacenumerablehash.h @@ -96,6 +96,16 @@ class DacEnumerableHashTable void EnumMemoryRegions(CLRDataEnumMemoryFlags flags); #endif // DACCESS_COMPILE +private: + struct VolatileEntry; + typedef DPTR(struct VolatileEntry) PTR_VolatileEntry; + struct VolatileEntry + { + VALUE m_sValue; // The derived-class format of an entry + PTR_VolatileEntry m_pNextEntry; // Pointer to the next entry in the bucket chain (or NULL) + DacEnumerableHashValue m_iHashValue; // The hash value associated with the entry + }; + protected: // This opaque structure provides enumeration context when walking the set of entries which share a common // hash code. 
Initialized by BaseFindFirstEntryByHash and read/updated by BaseFindNextEntryByHash. @@ -106,6 +116,7 @@ class DacEnumerableHashTable TADDR m_pEntry; // The entry the caller is currently looking at (or NULL to begin // with). This is a VolatileEntry* and should always be a target address // not a DAC PTR_. + DPTR(PTR_VolatileEntry) m_curBuckets; // The bucket table we are working with. }; // This opaque structure provides enumeration context when walking all entries in the table. Initialized @@ -176,16 +187,9 @@ class DacEnumerableHashTable PTR_Module m_pModule; private: + private: // Internal implementation details. Nothing of interest to sub-classers for here on. - - struct VolatileEntry; - typedef DPTR(struct VolatileEntry) PTR_VolatileEntry; - struct VolatileEntry - { - VALUE m_sValue; // The derived-class format of an entry - PTR_VolatileEntry m_pNextEntry; // Pointer to the next entry in the bucket chain (or NULL) - DacEnumerableHashValue m_iHashValue; // The hash value associated with the entry - }; + DPTR(VALUE) BaseFindFirstEntryByHashCore(DPTR(PTR_VolatileEntry) curBuckets, DacEnumerableHashValue iHash, LookupContext* pContext); #ifndef DACCESS_COMPILE // Determine loader heap to be used for allocation of entries and bucket lists. @@ -206,11 +210,27 @@ class DacEnumerableHashTable return m_pBuckets; } + // our bucket table uses two extra slots - slot [0] contains the length of the table, + // slot [1] will contain the next version of the table if it resizes + static const int SLOT_LENGTH = 0; + static const int SLOT_NEXT = 1; + // normal slots start at slot #2 + static const int SKIP_SPECIAL_SLOTS = 2; + + static DWORD GetLength(DPTR(PTR_VolatileEntry) buckets) + { + return (DWORD)dac_cast(buckets[SLOT_LENGTH]); + } + + static DPTR(PTR_VolatileEntry) GetNext(DPTR(PTR_VolatileEntry) buckets) + { + return dac_cast(buckets[SLOT_NEXT]); + } + // Loader heap provided at construction time. May be NULL (in which case m_pModule must *not* be NULL). 
LoaderHeap *m_pHeap; DPTR(PTR_VolatileEntry) m_pBuckets; // Pointer to a simple bucket list (array of VolatileEntry pointers) - DWORD m_cBuckets; // Count of buckets in the above array (always non-zero) DWORD m_cEntries; // Count of elements }; diff --git a/src/coreclr/vm/dacenumerablehash.inl b/src/coreclr/vm/dacenumerablehash.inl index 4faddb9ff558c..4216ef3503372 100644 --- a/src/coreclr/vm/dacenumerablehash.inl +++ b/src/coreclr/vm/dacenumerablehash.inl @@ -41,11 +41,16 @@ DacEnumerableHashTable::DacEnumerableHashTable(Module *pModu m_pModule = pModule; m_pHeap = pHeap; - S_SIZE_T cbBuckets = S_SIZE_T(sizeof(VolatileEntry*)) * S_SIZE_T(cInitialBuckets); + // two extra slots - slot [0] contains the length of the table, + // slot [1] will contain the next version of the table if it resizes + S_SIZE_T cbBuckets = S_SIZE_T(sizeof(VolatileEntry*)) * (S_SIZE_T(cInitialBuckets) + S_SIZE_T(SKIP_SPECIAL_SLOTS)); m_cEntries = 0; - m_cBuckets = cInitialBuckets; - m_pBuckets = (PTR_VolatileEntry*)(void*)GetHeap()->AllocMem(cbBuckets); + PTR_VolatileEntry* pBuckets = (PTR_VolatileEntry*)(void*)GetHeap()->AllocMem(cbBuckets); + ((size_t*)pBuckets)[SLOT_LENGTH] = cInitialBuckets; + + // publish after setting the length + VolatileStore(&m_pBuckets, pBuckets); // Note: Memory allocated on loader heap is zero filled } @@ -116,10 +121,6 @@ void DacEnumerableHashTable::BaseInsertEntry(DacEnumerableHa } CONTRACTL_END; - // We are always guaranteed at least one bucket (which is important here: some hash table sub-classes - // require entry insertion to be fault free). - _ASSERTE(m_cBuckets > 0); - // Recover the volatile entry pointer from the sub-class entry pointer passed to us. In debug builds // attempt to validate that this transform is really valid and the caller didn't attempt to allocate the // entry via some other means than BaseAllocateEntry(). @@ -129,21 +130,24 @@ void DacEnumerableHashTable::BaseInsertEntry(DacEnumerableHa // Remember the entry hash code. 
pVolatileEntry->m_iHashValue = iHash; - // Compute which bucket the entry belongs in based on the hash. - DWORD dwBucket = iHash % m_cBuckets; + DPTR(PTR_VolatileEntry) curBuckets = GetBuckets(); + DWORD cBuckets = GetLength(curBuckets); + + // Compute which bucket the entry belongs in based on the hash. (+2 to skip "length" and "next" slots) + DWORD dwBucket = iHash % cBuckets + SKIP_SPECIAL_SLOTS; // Prepare to link the new entry at the head of the bucket chain. - pVolatileEntry->m_pNextEntry = (GetBuckets())[dwBucket]; + pVolatileEntry->m_pNextEntry = curBuckets[dwBucket]; // Publish the entry by pointing the bucket at it. // Make sure that all writes to the entry are visible before publishing the entry. - VolatileStore(&(GetBuckets())[dwBucket], pVolatileEntry); + VolatileStore(&curBuckets[dwBucket], pVolatileEntry); m_cEntries++; // If the insertion pushed the table load over our limit then attempt to grow the bucket list. Note that // we ignore any failure (this is a performance operation and is not required for correctness). - if (m_cEntries > (2 * m_cBuckets)) + if (m_cEntries > (2 * cBuckets)) GrowTable(); } @@ -165,40 +169,67 @@ void DacEnumerableHashTable::GrowTable() // error to our caller. FAULT_NOT_FATAL(); + DPTR(PTR_VolatileEntry) curBuckets = GetBuckets(); + DWORD cBuckets = GetLength(curBuckets); + // Make the new bucket table larger by the scale factor requested by the subclass (but also prime). 
- DWORD cNewBuckets = NextLargestPrime(m_cBuckets * SCALE_FACTOR); - S_SIZE_T cbNewBuckets = S_SIZE_T(cNewBuckets) * S_SIZE_T(sizeof(PTR_VolatileEntry)); + DWORD cNewBuckets = NextLargestPrime(cBuckets * SCALE_FACTOR); + // two extra slots - slot [0] contains the length of the table, + // slot [1] will contain the next version of the table if it resizes + S_SIZE_T cbNewBuckets = (S_SIZE_T(cNewBuckets) + S_SIZE_T(SKIP_SPECIAL_SLOTS)) * S_SIZE_T(sizeof(PTR_VolatileEntry)); + PTR_VolatileEntry *pNewBuckets = (PTR_VolatileEntry*)(void*)GetHeap()->AllocMem_NoThrow(cbNewBuckets); if (!pNewBuckets) return; + // element 0 stores the length of the table + ((size_t*)pNewBuckets)[SLOT_LENGTH] = cNewBuckets; + // element 1 stores the next version of the table (after length is written) + // NOTE: DAC does not call add/grow, so this cast is ok. + VolatileStore(&((PTR_VolatileEntry**)curBuckets)[SLOT_NEXT], pNewBuckets); + // All buckets are initially empty. // Note: Memory allocated on loader heap is zero filled // memset(pNewBuckets, 0, cNewBuckets * sizeof(PTR_VolatileEntry)); // Run through the old table and transfer all the entries. Be sure not to mess with the integrity of the - // old table while we are doing this, as there can be concurrent readers! Note that it is OK if the - // concurrent reader misses out on a match, though - they will have to acquire the lock on a miss & try - // again. - for (DWORD i = 0; i < m_cBuckets; i++) + // old table while we are doing this, as there can be concurrent readers! + // IMPORTANT: every entry must be reachable from the old bucket + // only after an entry appears in the correct bucket in the new table we can remove it from the old. + // it is ok to appear temporarily in a wrong bucket. + for (DWORD i = 0; i < cBuckets; i++) { - PTR_VolatileEntry pEntry = (GetBuckets())[i]; - - // Try to lock out readers from scanning this bucket. This is obviously a race which may fail. 
- // However, note that it's OK if somebody is already in the list - it's OK if we mess with the bucket - // groups, as long as we don't destroy anything. The lookup function will still do appropriate - // comparison even if it wanders aimlessly amongst entries while we are rearranging things. If a - // lookup finds a match under those circumstances, great. If not, they will have to acquire the lock & - // try again anyway. - (GetBuckets())[i] = NULL; + // +2 to skip "length" and "next" slots + DWORD dwCurBucket = i + SKIP_SPECIAL_SLOTS; + PTR_VolatileEntry pEntry = curBuckets[dwCurBucket]; while (pEntry != NULL) { - DWORD dwNewBucket = pEntry->m_iHashValue % cNewBuckets; + DWORD dwNewBucket = (pEntry->m_iHashValue % cNewBuckets) + SKIP_SPECIAL_SLOTS; PTR_VolatileEntry pNextEntry = pEntry->m_pNextEntry; - pEntry->m_pNextEntry = pNewBuckets[dwNewBucket]; - pNewBuckets[dwNewBucket] = pEntry; + PTR_VolatileEntry pTail = pNewBuckets[dwNewBucket]; + + // make the pEntry reachable in the new bucket, together with all the chain (that is temporary and ok) + if (pTail == NULL) + { + pNewBuckets[dwNewBucket] = pEntry; + } + else + { + while (pTail->m_pNextEntry) + { + pTail = pTail->m_pNextEntry; + } + + pTail->m_pNextEntry = pEntry; + } + + // skip pEntry in the old bucket after it appears in the new. + VolatileStore(&curBuckets[dwCurBucket], pNextEntry); + + // drop the rest of the bucket after old table starts referring to it + VolatileStore(&pEntry->m_pNextEntry, (PTR_VolatileEntry)NULL); pEntry = pNextEntry; } @@ -206,12 +237,6 @@ void DacEnumerableHashTable::GrowTable() // Make sure that all writes are visible before publishing the new array. VolatileStore(&m_pBuckets, pNewBuckets); - - // The new number of buckets has to be published last (prior to this readers may miscalculate a bucket - // index, but the result will always be in range and they'll simply walk the wrong chain and get a miss, - // prompting a retry under the lock). 
If we let the count become visible unordered wrt to the bucket array - // itself a reader could potentially read buckets from beyond the end of the old bucket list). - VolatileStore(&m_cBuckets, cNewBuckets); } // Returns the next prime larger (or equal to) than the number given. @@ -259,36 +284,63 @@ DPTR(VALUE) DacEnumerableHashTable::BaseFindFirstEntryByHash if (m_cEntries == 0) return NULL; - // Since there is at least one entry there must be at least one bucket. - _ASSERTE(m_cBuckets > 0); - - // Compute which bucket the entry belongs in based on the hash. - DWORD dwBucket = iHash % VolatileLoad(&m_cBuckets); + DPTR(PTR_VolatileEntry) curBuckets = GetBuckets(); + return BaseFindFirstEntryByHashCore(curBuckets, iHash, pContext); +} - // Point at the first entry in the bucket chain which would contain any entries with the given hash code. - PTR_VolatileEntry pEntry = (GetBuckets())[dwBucket]; +template +DPTR(VALUE) DacEnumerableHashTable::BaseFindFirstEntryByHashCore(DPTR(PTR_VolatileEntry) curBuckets, DacEnumerableHashValue iHash, LookupContext* pContext) +{ + CONTRACTL + { + NOTHROW; + GC_NOTRIGGER; + MODE_ANY; + SUPPORTS_DAC; + PRECONDITION(CheckPointer(pContext)); + } + CONTRACTL_END; - // Walk the bucket chain one entry at a time. - while (pEntry) + do { - if (pEntry->m_iHashValue == iHash) + DWORD cBuckets = GetLength(curBuckets); + + // Compute which bucket the entry belongs in based on the hash. + // +2 to skip "length" and "next" slots + DWORD dwBucket = iHash % cBuckets + SKIP_SPECIAL_SLOTS; + + // Point at the first entry in the bucket chain which would contain any entries with the given hash code. + PTR_VolatileEntry pEntry = curBuckets[dwBucket]; + + // Walk the bucket chain one entry at a time. + while (pEntry) { - // We've found our match. + if (pEntry->m_iHashValue == iHash) + { + // We've found our match. 
+ + + // Record our current search state into the provided context so that a subsequent call to + // BaseFindNextEntryByHash can pick up the search where it left off. + pContext->m_pEntry = dac_cast(pEntry); + pContext->m_curBuckets = curBuckets; - // Record our current search state into the provided context so that a subsequent call to - // BaseFindNextEntryByHash can pick up the search where it left off. - pContext->m_pEntry = dac_cast(pEntry); + // Return the address of the sub-classes' embedded entry structure. + return VALUE_FROM_VOLATILE_ENTRY(pEntry); + } - // Return the address of the sub-classes' embedded entry structure. - return VALUE_FROM_VOLATILE_ENTRY(pEntry); + // Move to the next entry in the chain. + pEntry = pEntry->m_pNextEntry; } - // Move to the next entry in the chain. - pEntry = pEntry->m_pNextEntry; - } + // in a rare case if resize is in progress, look in the new table as well. + // if existing entry is not in the old table, it must be in the new + // since we unlink it from old only after linking into the new. + // check for next table must happen after we looked through the current. + VolatileLoadBarrier(); + curBuckets = GetNext(curBuckets); + } while (curBuckets != nullptr); // If we get here then none of the entries in the target bucket matched the hash code and we have a miss - // (for this section of the table at least). return NULL; } @@ -329,6 +381,16 @@ DPTR(VALUE) DacEnumerableHashTable::BaseFindNextEntryByHash( } } + // check for next table must happen after we looked through the current. + VolatileLoadBarrier(); + + // in a case if resize is in progress, look in the new table as well. + DPTR(PTR_VolatileEntry) nextBuckets = GetNext(pContext->m_curBuckets); + if (nextBuckets != nullptr) + { + return BaseFindFirstEntryByHashCore(nextBuckets, iHash, pContext); + } + return NULL; } @@ -373,15 +435,19 @@ void DacEnumerableHashTable::EnumMemoryRegions(CLRDataEnumMe // sub-class).
DacEnumMemoryRegion(dac_cast(this), sizeof(FINAL_CLASS)); + DPTR(PTR_VolatileEntry) curBuckets = GetBuckets(); + DWORD cBuckets = GetLength(curBuckets); + // Save the bucket list. - DacEnumMemoryRegion(dac_cast(GetBuckets()), m_cBuckets * sizeof(VolatileEntry*)); + DacEnumMemoryRegion(dac_cast(curBuckets), cBuckets * sizeof(VolatileEntry*)); // Save all the entries. if (GetBuckets().IsValid()) { - for (DWORD i = 0; i < m_cBuckets; i++) + for (DWORD i = 0; i < cBuckets; i++) { - PTR_VolatileEntry pEntry = (GetBuckets())[i]; + //+2 to skip "length" and "next" slots + PTR_VolatileEntry pEntry = curBuckets[i + SKIP_SPECIAL_SLOTS]; while (pEntry.IsValid()) { pEntry.EnumMem(); @@ -394,6 +460,9 @@ void DacEnumerableHashTable::EnumMemoryRegions(CLRDataEnumMe } } + // we should not be resizing while enumerating memory regions. + _ASSERTE(GetNext(curBuckets) == NULL); + // Save the module if present. if (GetModule().IsValid()) GetModule()->EnumMemoryRegions(flags, true); @@ -409,7 +478,8 @@ void DacEnumerableHashTable::BaseInitIterator(BaseIterator * pIterator->m_pTable = dac_cast)>(this); pIterator->m_pEntry = NULL; - pIterator->m_dwBucket = 0; + //+2 to skip "length" and "next" slots + pIterator->m_dwBucket = SKIP_SPECIAL_SLOTS; } // Returns a pointer to the next entry in the hash table or NULL once all entries have been enumerated. Once @@ -426,13 +496,16 @@ DPTR(VALUE) DacEnumerableHashTable::BaseIterator::Next() } CONTRACTL_END; - while (m_dwBucket < m_pTable->m_cBuckets) + DPTR(PTR_VolatileEntry) curBuckets = m_pTable->GetBuckets(); + DWORD cBuckets = GetLength(curBuckets); + + while (m_dwBucket < cBuckets) { if (m_pEntry == NULL) { // This is our first lookup for a particular bucket, return the first // entry in that bucket. - m_pEntry = dac_cast((m_pTable->GetBuckets())[m_dwBucket]); + m_pEntry = dac_cast(curBuckets[m_dwBucket]); } else { @@ -449,5 +522,9 @@ DPTR(VALUE) DacEnumerableHashTable::BaseIterator::Next() // buckets left to scan go back around again. 
m_dwBucket++; } + + // we should not be resizing while iterating. + _ASSERTE(GetNext(curBuckets) == NULL); + return NULL; } diff --git a/src/coreclr/vm/domainfile.cpp b/src/coreclr/vm/domainfile.cpp index 19de3c58c4bc9..6fae1b0e3375f 100644 --- a/src/coreclr/vm/domainfile.cpp +++ b/src/coreclr/vm/domainfile.cpp @@ -723,7 +723,7 @@ DomainAssembly::~DomainAssembly() if (m_fHostAssemblyPublished) { // Remove association first. - UnRegisterFromHostAssembly(); + UnregisterFromHostAssembly(); } ModuleIterator i = IterateModules(kModIterIncludeLoading); @@ -873,7 +873,7 @@ void DomainAssembly::RegisterWithHostAssembly() } } -void DomainAssembly::UnRegisterFromHostAssembly() +void DomainAssembly::UnregisterFromHostAssembly() { CONTRACTL { diff --git a/src/coreclr/vm/domainfile.h b/src/coreclr/vm/domainfile.h index 7650cedbf13db..917af193ceade 100644 --- a/src/coreclr/vm/domainfile.h +++ b/src/coreclr/vm/domainfile.h @@ -588,7 +588,7 @@ class DomainAssembly : public DomainFile void DeliverSyncEvents(); void DeliverAsyncEvents(); void RegisterWithHostAssembly(); - void UnRegisterFromHostAssembly(); + void UnregisterFromHostAssembly(); #endif public: diff --git a/src/coreclr/vm/genmeth.cpp b/src/coreclr/vm/genmeth.cpp index e9f3452439b31..f847fbbd0d5d6 100644 --- a/src/coreclr/vm/genmeth.cpp +++ b/src/coreclr/vm/genmeth.cpp @@ -1418,9 +1418,6 @@ void InstantiatedMethodDesc::SetupGenericMethodDefinition(IMDInternalImport *pIM { // Protect multi-threaded access to Module.m_GenericParamToDescMap. 
Other threads may be loading the same type // to break type recursion dead-locks - - // m_AvailableTypesLock has to be taken in cooperative mode to avoid deadlocks during GC - GCX_COOP(); CrstHolder ch(&pModule->GetClassLoader()->m_AvailableTypesLock); for (unsigned int i = 0; i < numTyPars; i++) diff --git a/src/coreclr/vm/methodtablebuilder.cpp b/src/coreclr/vm/methodtablebuilder.cpp index 90f451a1dfde1..133f1b92fe021 100644 --- a/src/coreclr/vm/methodtablebuilder.cpp +++ b/src/coreclr/vm/methodtablebuilder.cpp @@ -11771,9 +11771,6 @@ MethodTableBuilder::GatherGenericsInfo( { // Protect multi-threaded access to Module.m_GenericParamToDescMap. Other threads may be loading the same type // to break type recursion dead-locks - - // m_AvailableTypesLock has to be taken in cooperative mode to avoid deadlocks during GC - GCX_COOP(); CrstHolder ch(&pModule->GetClassLoader()->m_AvailableTypesLock); for (unsigned int i = 0; i < numGenericArgs; i++)