From bc8a0e5d0f7e1d65c5f19b25d5f76e44435ce4b2 Mon Sep 17 00:00:00 2001 From: Mark Visschers Date: Mon, 16 May 2022 21:07:38 -0400 Subject: [PATCH 01/18] Created tests that show the collections contains methods work --- LiteDB.Tests/Database/Contains_Tests.cs | 96 +++++++++++++++++++++++++ 1 file changed, 96 insertions(+) create mode 100644 LiteDB.Tests/Database/Contains_Tests.cs diff --git a/LiteDB.Tests/Database/Contains_Tests.cs b/LiteDB.Tests/Database/Contains_Tests.cs new file mode 100644 index 000000000..68dce243a --- /dev/null +++ b/LiteDB.Tests/Database/Contains_Tests.cs @@ -0,0 +1,96 @@ +using LiteDB; +using FluentAssertions; +using Xunit; +using System.IO; +using System.Linq; +using System.Collections.Generic; +using System; + +namespace LiteDB.Tests.Database +{ + public class Contains_Tests + { + [Fact] + public void ArrayContains_ShouldHaveCount1() + { + var random = new Random(); + var randomValue = random.Next(); + + using(var database = new LiteDatabase(new MemoryStream())) + { + var collection = database.GetCollection<ItemWithEnumerable>(); + collection.Insert(new ItemWithEnumerable + { + Array = new int[] { randomValue } + }); + + var result = collection.Find(i => i.Array.Contains(randomValue)).ToList(); + result.Should().HaveCount(1); + } + } + + [Fact] + public void EnumerableAssignedArrayContains_ShouldHaveCount1() + { + var random = new Random(); + var randomValue = random.Next(); + + using(var database = new LiteDatabase(new MemoryStream())) + { + var collection = database.GetCollection<ItemWithEnumerable>(); + collection.Insert(new ItemWithEnumerable + { + Enumerable = new int[] { randomValue } + }); + + var result = collection.Find(i => i.Enumerable.Contains(randomValue)).ToList(); + result.Should().HaveCount(1); + } + } + + [Fact] + public void EnumerableAssignedListContains_ShouldHaveCount1() + { + var random = new Random(); + var randomValue = random.Next(); + + using(var database = new LiteDatabase(new MemoryStream())) + { + var collection = database.GetCollection<ItemWithEnumerable>(); + collection.Insert(new ItemWithEnumerable + { + Enumerable = new List<int> { randomValue } + }); + + var result = collection.Find(i => i.Enumerable.Contains(randomValue)).ToList(); + result.Should().HaveCount(1); + } + } + + [Fact] + public void ListContains_ShouldHaveCount1() + { + var random = new Random(); + var randomValue = random.Next(); + + using(var database = new LiteDatabase(new MemoryStream())) + { + var collection = database.GetCollection<ItemWithEnumerable>(); + collection.Insert(new ItemWithEnumerable + { + List = new List<int> { randomValue } + }); + + var result = collection.Find(i => i.List.Contains(randomValue)).ToList(); + result.Should().HaveCount(1); + } + } + + public class ItemWithEnumerable + { + public int[] Array { get; set; } + public IEnumerable<int> Enumerable { get; set; } + public IList<int> List { get; set; } + } + } +} \ No newline at end of file From 3fb34cc199973beb3e82dc0de5270852dfc4b486 Mon Sep 17 00:00:00 2001 From: abereznikov Date: Wed, 20 Mar 2024 14:08:12 +0500 Subject: [PATCH 02/18] Lazy string interpolation in ENSURE --- LiteDB/Engine/Disk/DiskService.cs | 4 +-- LiteDB/Engine/Disk/Streams/AesStream.cs | 8 ++--- LiteDB/Engine/FileReader/FileReaderV8.cs | 34 ++++++++++---------- LiteDB/Engine/Pages/BasePage.cs | 12 +++---- LiteDB/Engine/Pages/DataPage.cs | 4 +-- LiteDB/Engine/Services/DataService.cs | 2 +- LiteDB/Engine/Services/IndexService.cs | 24 +++++++------- LiteDB/Engine/Services/TransactionService.cs | 6 ++-- LiteDB/Utils/Constants.cs | 14 ++++++++ 9 files changed, 61 insertions(+), 47 deletions(-) diff --git
a/LiteDB/Engine/Disk/DiskService.cs b/LiteDB/Engine/Disk/DiskService.cs index d414da4e1..76262b86a 100644 --- a/LiteDB/Engine/Disk/DiskService.cs +++ b/LiteDB/Engine/Disk/DiskService.cs @@ -29,7 +29,7 @@ internal class DiskService : IDisposable private long _logLength; public DiskService( - EngineSettings settings, + EngineSettings settings, EngineState state, int[] memorySegmentSizes) { @@ -261,7 +261,7 @@ public IEnumerable<PageBuffer> ReadFull(FileOrigin origin) var bytesRead = stream.Read(buffer, 0, PAGE_SIZE); - ENSURE(bytesRead == PAGE_SIZE, $"ReadFull must read PAGE_SIZE bytes [{bytesRead}]"); + ENSURE(bytesRead == PAGE_SIZE, () => $"ReadFull must read PAGE_SIZE bytes [{bytesRead}]"); yield return new PageBuffer(buffer, 0, 0) { diff --git a/LiteDB/Engine/Disk/Streams/AesStream.cs b/LiteDB/Engine/Disk/Streams/AesStream.cs index 90f4edc80..f9ab9fd6d 100644 --- a/LiteDB/Engine/Disk/Streams/AesStream.cs +++ b/LiteDB/Engine/Disk/Streams/AesStream.cs @@ -22,7 +22,7 @@ public class AesStream : Stream private readonly byte[] _decryptedZeroes = new byte[16]; - private static readonly byte[] _emptyContent = new byte[PAGE_SIZE - 1 - 16]; // 1 for aes indicator + 16 for salt + private static readonly byte[] _emptyContent = new byte[PAGE_SIZE - 1 - 16]; // 1 for aes indicator + 16 for salt public byte[] Salt { get; } @@ -111,7 +111,7 @@ public AesStream(string password, Stream stream) // check whether bytes 32 to 64 is empty. This indicates LiteDb was unable to write encrypted 1s during last attempt. _stream.Read(checkBuffer, 0, checkBuffer.Length); isNew = checkBuffer.All(x => x == 0); - + // reset checkBuffer and stream position Array.Clear(checkBuffer, 0, checkBuffer.Length); _stream.Position = 32; @@ -160,7 +160,7 @@ public AesStream(string password, Stream stream) /// public override int Read(byte[] array, int offset, int count) { - ENSURE(this.Position % PAGE_SIZE == 0, $"AesRead: position must be in PAGE_SIZE module. Position={this.Position}, File={_name}"); + ENSURE(this.Position % PAGE_SIZE == 0, () => $"AesRead: position must be in PAGE_SIZE module. Position={this.Position}, File={_name}"); var r = _reader.Read(array, offset, count); @@ -181,7 +181,7 @@ public override int Read(byte[] array, int offset, int count) public override void Write(byte[] array, int offset, int count) { ENSURE(count == PAGE_SIZE || count == 1, "buffer size must be PAGE_SIZE"); - ENSURE(this.Position == HeaderPage.P_INVALID_DATAFILE_STATE || this.Position % PAGE_SIZE == 0, $"AesWrite: position must be in PAGE_SIZE module.
Position={this.Position}, File={_name}"); _writer.Write(array, offset, count); } diff --git a/LiteDB/Engine/FileReader/FileReaderV8.cs b/LiteDB/Engine/FileReader/FileReaderV8.cs index a53a90bcf..525b945bc 100644 --- a/LiteDB/Engine/FileReader/FileReaderV8.cs +++ b/LiteDB/Engine/FileReader/FileReaderV8.cs @@ -117,7 +117,7 @@ public IEnumerable GetDocuments(string collection) var colID = _collections[collection]; if (!_collectionsDataPages.ContainsKey(colID)) yield break; - + var dataPages = _collectionsDataPages[colID]; var uniqueIDs = new HashSet(); @@ -156,8 +156,8 @@ public IEnumerable GetDocuments(string collection) // empty slot if (position == 0) continue; - ENSURE(position > 0 && length > 0, $"Invalid footer ref position {position} with length {length}"); - ENSURE(position + length < PAGE_SIZE, $"Invalid footer ref position {position} with length {length}"); + ENSURE(position > 0 && length > 0, () => $"Invalid footer ref position {position} with length {length}"); + ENSURE(position + length < PAGE_SIZE, () => $"Invalid footer ref position {position} with length {length}"); // get segment slice var segment = buffer.Slice(position, length); @@ -183,8 +183,8 @@ public IEnumerable GetDocuments(string collection) var nextBuffer = nextPage.Value.Buffer; // make page validations - ENSURE(nextPage.Value.PageType == PageType.Data, $"Invalid PageType (excepted Data, get {nextPage.Value.PageType})"); - ENSURE(nextPage.Value.ColID == colID, $"Invalid ColID in this page (expected {colID}, get {nextPage.Value.ColID})"); + ENSURE(nextPage.Value.PageType == PageType.Data, () => $"Invalid PageType (excepted Data, get {nextPage.Value.PageType})"); + ENSURE(nextPage.Value.ColID == colID, () => $"Invalid ColID in this page (expected {colID}, get {nextPage.Value.ColID})"); ENSURE(nextPage.Value.ItemsCount > 0, "Page with no items count"); // read slot address @@ -196,7 +196,7 @@ public IEnumerable GetDocuments(string collection) length = nextBuffer.ReadUInt16(lengthAddr); // empty slot - ENSURE(length > 0, $"Last DataBlock request a next extend to {nextBlock}, but this block are empty footer"); + ENSURE(length > 0, () => $"Last DataBlock request a next extend to {nextBlock}, but this block are empty footer"); // get segment slice segment = nextBuffer.Slice(position, length); @@ -204,7 +204,7 @@ public IEnumerable GetDocuments(string collection) nextBlock = segment.ReadPageAddress(DataBlock.P_NEXT_BLOCK); data = segment.Slice(DataBlock.P_BUFFER, segment.Count - DataBlock.P_BUFFER); - ENSURE(extend == true, $"Next datablock always be an extend. Invalid data block {nextBlock}"); + ENSURE(extend == true, () => $"Next datablock always be an extend. 
Invalid data block {nextBlock}"); // write data on memorystream @@ -219,8 +219,8 @@ public IEnumerable GetDocuments(string collection) var docResult = r.ReadDocument(); var id = docResult.Value["_id"]; - ENSURE(!(id == BsonValue.Null || id == BsonValue.MinValue || id == BsonValue.MaxValue), $"Invalid _id value: {id}"); - ENSURE(uniqueIDs.Contains(id) == false, $"Duplicated _id value: {id}"); + ENSURE(!(id == BsonValue.Null || id == BsonValue.MinValue || id == BsonValue.MaxValue), () => $"Invalid _id value: {id}"); + ENSURE(uniqueIDs.Contains(id) == false, () => $"Duplicated _id value: {id}"); uniqueIDs.Add(id); @@ -279,7 +279,7 @@ private void LoadDataPages() var header = this.ReadPage(0, out var pageInfo).GetValue(); var lastPageID = header.Buffer.ReadUInt32(HeaderPage.P_LAST_PAGE_ID); //TOFO: tentar não usar esse valor como referencia (varrer tudo) - ENSURE(lastPageID <= _maxPageID, $"LastPageID {lastPageID} should be less or equals to maxPageID {_maxPageID}"); + ENSURE(lastPageID <= _maxPageID, () => $"LastPageID {lastPageID} should be less or equals to maxPageID {_maxPageID}"); for (uint i = 0; i <= lastPageID; i++) { @@ -398,8 +398,8 @@ private void LoadIndexes() position += 15; // head 5 bytes, tail 5 bytes, reserved 1 byte, freeIndexPageList 4 bytes - ENSURE(!string.IsNullOrEmpty(name), $"Index name can't be empty (collection {collection.Key} - index: {i})"); - ENSURE(!string.IsNullOrEmpty(expr), $"Index expression can't be empty (collection {collection.Key} - index: {i})"); + ENSURE(!string.IsNullOrEmpty(name), () => $"Index name can't be empty (collection {collection.Key} - index: {i})"); + ENSURE(!string.IsNullOrEmpty(expr), () => $"Index expression can't be empty (collection {collection.Key} - index: {i})"); var indexInfo = new IndexInfo { @@ -481,7 +481,7 @@ private void LoadIndexMap() pageInfo.PageID = pageID; pageInfo.ColID = buffer.ReadUInt32(BasePage.P_COL_ID); - ENSURE(read == PAGE_SIZE, $"Page position {_logStream} read only than {read} bytes (instead {PAGE_SIZE})"); + ENSURE(read == PAGE_SIZE, () => $"Page position {_logStream} read only than {read} bytes (instead {PAGE_SIZE})"); var position = new PagePosition(pageID, currentPosition); @@ -515,7 +515,7 @@ private void LoadIndexMap() { var mapIndexPages = transactions[transactionID]; - // update + // update foreach (var page in mapIndexPages) { _logIndexMap[page.PageID] = page.Position; @@ -532,7 +532,7 @@ private Result ReadPage(uint pageID, out PageInfo pageInfo) try { - ENSURE(pageID <= _maxPageID, $"PageID: {pageID} should be less then or equals to maxPageID: {_maxPageID}"); + ENSURE(pageID <= _maxPageID, () => $"PageID: {pageID} should be less then or equals to maxPageID: {_maxPageID}"); var pageBuffer = new PageBuffer(new byte[PAGE_SIZE], 0, PAGE_SIZE); Stream stream; @@ -556,13 +556,13 @@ private Result ReadPage(uint pageID, out PageInfo pageInfo) read = stream.Read(pageBuffer.Array, pageBuffer.Offset, pageBuffer.Count); - ENSURE(read == PAGE_SIZE, $"Page position {stream.Position} read only than {read} bytes (instead {PAGE_SIZE})"); + ENSURE(read == PAGE_SIZE, () => $"Page position {stream.Position} read only than {read} bytes (instead {PAGE_SIZE})"); var page = new BasePage(pageBuffer); pageInfo.ColID = page.ColID; - ENSURE(page.PageID == pageID, $"Expect read pageID: {pageID} but header contains pageID: {page.PageID}"); + ENSURE(page.PageID == pageID, () => $"Expect read pageID: {pageID} but header contains pageID: {page.PageID}"); return page; } diff --git a/LiteDB/Engine/Pages/BasePage.cs 
b/LiteDB/Engine/Pages/BasePage.cs index 92b645206..3fb37cb95 100644 --- a/LiteDB/Engine/Pages/BasePage.cs +++ b/LiteDB/Engine/Pages/BasePage.cs @@ -102,8 +102,8 @@ internal class BasePage /// Get how many bytes are used in footer page at this moment /// ((HighestIndex + 1) * 4 bytes per slot: [2 for position, 2 for length]) /// - public int FooterSize => - (this.HighestIndex == byte.MaxValue ? + public int FooterSize => + (this.HighestIndex == byte.MaxValue ? 0 : // no items in page ((this.HighestIndex + 1) * SLOT_SIZE)); // 4 bytes PER item (2 to position + 2 to length) - need consider HighestIndex used @@ -282,8 +282,8 @@ public BufferSlice Get(byte index) var position = _buffer.ReadUInt16(positionAddr); var length = _buffer.ReadUInt16(lengthAddr); - ENSURE(this.IsValidPos(position), $"invalid segment position in index footer: {ToString()}/{index}"); - ENSURE(this.IsValidLen(length), $"invalid segment length in index footer: {ToString()}/{index}"); + ENSURE(this.IsValidPos(position), () => $"invalid segment position in index footer: {ToString()}/{index}"); + ENSURE(this.IsValidLen(length), () => $"invalid segment length in index footer: {ToString()}/{index}"); // return buffer slice with content only data return _buffer.Slice(position, length); @@ -408,7 +408,7 @@ public void Delete(byte index) this.NextFreePosition = position; } else - { + { // if segment is in middle of the page, add this blocks as fragment block this.FragmentedBytes += length; } @@ -475,7 +475,7 @@ public BufferSlice Update(byte index, ushort bytesLength) if (isLastSegment) { - // if is at end of page, must get back unused blocks + // if is at end of page, must get back unused blocks this.NextFreePosition -= diff; } else diff --git a/LiteDB/Engine/Pages/DataPage.cs b/LiteDB/Engine/Pages/DataPage.cs index 842bb49d4..c59796c85 100644 --- a/LiteDB/Engine/Pages/DataPage.cs +++ b/LiteDB/Engine/Pages/DataPage.cs @@ -16,7 +16,7 @@ internal class DataPage : BasePage public DataPage(PageBuffer buffer) : base(buffer) { - ENSURE(this.PageType == PageType.Data, $"Page type must be data page: {PageType}"); + ENSURE(this.PageType == PageType.Data, () => $"Page type must be data page: {PageType}"); if (this.PageType != PageType.Data) throw LiteException.InvalidPageType(PageType.Data, this); } @@ -108,7 +108,7 @@ public IEnumerable<PageAddress> GetBlocks() /// A slot number between 0 and 4 public static byte FreeIndexSlot(int freeBytes) { - ENSURE(freeBytes >= 0, $"FreeBytes must be positive: {freeBytes}"); + ENSURE(freeBytes >= 0, () => $"FreeBytes must be positive: {freeBytes}"); for (var i = 0; i < _freePageSlots.Length; i++) { diff --git a/LiteDB/Engine/Services/DataService.cs b/LiteDB/Engine/Services/DataService.cs index 499968b4e..ce5ea1a95 100644 --- a/LiteDB/Engine/Services/DataService.cs +++ b/LiteDB/Engine/Services/DataService.cs @@ -165,7 +165,7 @@ public IEnumerable<BufferSlice> Read(PageAddress address) while (address != PageAddress.Empty) { - ENSURE(counter++ < _maxItemsCount, $"Detected loop in data Read({address})"); + ENSURE(counter++ < _maxItemsCount, () => $"Detected loop in data Read({address})"); var dataPage = _snapshot.GetPage<DataPage>(address.PageID); diff --git a/LiteDB/Engine/Services/IndexService.cs b/LiteDB/Engine/Services/IndexService.cs index 9dcf6b771..ac6f156e6 100644 --- a/LiteDB/Engine/Services/IndexService.cs +++ b/LiteDB/Engine/Services/IndexService.cs @@ -79,10 +79,10 @@ public IndexNode AddNode(CollectionIndex index, BsonValue key, PageAddress dataB /// Insert a new node index inside an collection index.
/// private IndexNode AddNode( - CollectionIndex index, - BsonValue key, - PageAddress dataBlock, - byte insertLevels, + CollectionIndex index, + BsonValue key, + PageAddress dataBlock, + byte insertLevels, IndexNode last) { // get a free index page for head note @@ -108,7 +108,7 @@ private IndexNode AddNode( // while: scan from left to right while (right.IsEmpty == false && right != index.Tail) { - ENSURE(counter++ < _maxItemsCount, $"Detected loop in AddNode({node.Position})"); + ENSURE(counter++ < _maxItemsCount, () => $"Detected loop in AddNode({node.Position})"); var rightNode = this.GetNode(right); @@ -206,7 +206,7 @@ public IEnumerable<IndexNode> GetNodeList(PageAddress nodeAddress) while (node != null) { - ENSURE(counter++ < _maxItemsCount, $"Detected loop in GetNodeList({nodeAddress})"); + ENSURE(counter++ < _maxItemsCount, () => $"Detected loop in GetNodeList({nodeAddress})"); yield return node; @@ -225,7 +225,7 @@ public void DeleteAll(PageAddress pkAddress) while (node != null) { - ENSURE(counter++ < _maxItemsCount, $"Detected loop in DeleteAll({pkAddress})"); + ENSURE(counter++ < _maxItemsCount, () => $"Detected loop in DeleteAll({pkAddress})"); this.DeleteSingleNode(node, indexes[node.Slot]); @@ -246,7 +246,7 @@ public IndexNode DeleteList(PageAddress pkAddress, HashSet<PageAddress> toDelete while (node != null) { - ENSURE(counter++ < _maxItemsCount, $"Detected loop in DeleteList({pkAddress})"); + ENSURE(counter++ < _maxItemsCount, () => $"Detected loop in DeleteList({pkAddress})"); if (toDelete.Contains(node.Position)) { @@ -333,7 +333,7 @@ public void DropIndex(CollectionIndex index) } #region Find - + /// /// Return all index nodes from an index /// @@ -344,7 +344,7 @@ public IEnumerable<IndexNode> FindAll(CollectionIndex index, int order) while (!cur.GetNextPrev(0, order).IsEmpty) { - ENSURE(counter++ < _maxItemsCount, $"Detected loop in FindAll({index.Name})"); + ENSURE(counter++ < _maxItemsCount, () => $"Detected loop in FindAll({index.Name})"); cur = this.GetNode(cur.GetNextPrev(0, order)); @@ -356,7 +356,7 @@ public IEnumerable<IndexNode> FindAll(CollectionIndex index, int order) } /// - /// Find first node that index match with value . + /// Find first node that index match with value .
/// If index are unique, return unique value - if index are not unique, return first found (can start, middle or end) /// If not found but sibling = true and key are not found, returns next value index node (if order = Asc) or prev node (if order = Desc) /// @@ -371,7 +371,7 @@ public IndexNode Find(CollectionIndex index, BsonValue value, bool sibling, int while (right.IsEmpty == false) { - ENSURE(counter++ < _maxItemsCount, $"Detected loop in Find({index.Name}, {value})"); + ENSURE(counter++ < _maxItemsCount, () => $"Detected loop in Find({index.Name}, {value})"); var rightNode = this.GetNode(right); diff --git a/LiteDB/Engine/Services/TransactionService.cs b/LiteDB/Engine/Services/TransactionService.cs index 8163e3643..f78003647 100644 --- a/LiteDB/Engine/Services/TransactionService.cs +++ b/LiteDB/Engine/Services/TransactionService.cs @@ -113,7 +113,7 @@ public Snapshot CreateSnapshot(LockMode mode, string collection, bool addIfNotEx _snapshots[collection] = snapshot = create(); } - // update transaction mode to write in first write snaphost request + // update transaction mode to write in first write snaphost request if (mode == LockMode.Write) _mode = LockMode.Write; return snapshot; @@ -250,7 +250,7 @@ IEnumerable<PageBuffer> source() /// public void Commit() { - ENSURE(_state == TransactionState.Active, $"transaction must be active to commit (current state: {_state})"); + ENSURE(_state == TransactionState.Active, () => $"transaction must be active to commit (current state: {_state})"); LOG($"commit transaction ({_transPages.TransactionSize} pages)", "TRANSACTION"); @@ -281,7 +281,7 @@ public void Commit() /// public void Rollback() { - ENSURE(_state == TransactionState.Active, $"transaction must be active to rollback (current state: {_state})"); + ENSURE(_state == TransactionState.Active, () => $"transaction must be active to rollback (current state: {_state})"); LOG($"rollback transaction ({_transPages.TransactionSize} pages with {_transPages.NewPages.Count} returns)", "TRANSACTION"); diff --git a/LiteDB/Utils/Constants.cs b/LiteDB/Utils/Constants.cs index a33aa70ec..b33272d21 100644 --- a/LiteDB/Utils/Constants.cs +++ b/LiteDB/Utils/Constants.cs @@ -144,6 +144,20 @@ public static void ENSURE(bool conditional, string message = null) } } + [DebuggerHidden] + public static void ENSURE(bool conditional, Func<string> messageProvider) + { + if (conditional == false) + { + if (Debugger.IsAttached) + { + Debugger.Break(); + } + + throw LiteException.InvalidDatafileState(messageProvider()); + } + } + /// /// If ifTest are true, ensure condition is true, otherwise throw ensure exception (check contract) /// From 6ecf9e8b7110892532d5f9baaff8653b86b431c5 Mon Sep 17 00:00:00 2001 From: abereznikov Date: Wed, 27 Mar 2024 15:10:13 +0500 Subject: [PATCH 03/18] Use more conventional way to postpone formatting --- LiteDB/Engine/Disk/DiskService.cs | 5 +--- LiteDB/Engine/Disk/Streams/AesStream.cs | 4 +-- LiteDB/Engine/FileReader/FileReaderV8.cs | 30 ++++++++++---------- LiteDB/Engine/Pages/BasePage.cs | 4 +-- LiteDB/Engine/Pages/DataPage.cs | 6 ++-- LiteDB/Engine/Services/DataService.cs | 2 +- LiteDB/Engine/Services/IndexService.cs | 12 ++++---- LiteDB/Engine/Services/TransactionService.cs | 4 +-- LiteDB/Utils/Constants.cs | 8 ++++-- 9 files changed, 37 insertions(+), 38 deletions(-) diff --git a/LiteDB/Engine/Disk/DiskService.cs b/LiteDB/Engine/Disk/DiskService.cs index 76262b86a..052ae18e6 100644 --- a/LiteDB/Engine/Disk/DiskService.cs +++ b/LiteDB/Engine/Disk/DiskService.cs @@ -1,10 +1,7 @@ using System; -using
System.Collections.Concurrent; using System.Collections.Generic; using System.IO; -using System.Text; using System.Threading; -using System.Threading.Tasks; using static LiteDB.Constants; namespace LiteDB.Engine @@ -261,7 +258,7 @@ public IEnumerable<PageBuffer> ReadFull(FileOrigin origin) var bytesRead = stream.Read(buffer, 0, PAGE_SIZE); - ENSURE(bytesRead == PAGE_SIZE, () => $"ReadFull must read PAGE_SIZE bytes [{bytesRead}]"); + ENSURE(bytesRead == PAGE_SIZE, "ReadFull must read PAGE_SIZE bytes [{0}]", bytesRead); yield return new PageBuffer(buffer, 0, 0) { diff --git a/LiteDB/Engine/Disk/Streams/AesStream.cs b/LiteDB/Engine/Disk/Streams/AesStream.cs index f9ab9fd6d..fce5cca3b 100644 --- a/LiteDB/Engine/Disk/Streams/AesStream.cs +++ b/LiteDB/Engine/Disk/Streams/AesStream.cs @@ -160,7 +160,7 @@ public AesStream(string password, Stream stream) /// public override int Read(byte[] array, int offset, int count) { - ENSURE(this.Position % PAGE_SIZE == 0, () => $"AesRead: position must be in PAGE_SIZE module. Position={this.Position}, File={_name}"); + ENSURE(this.Position % PAGE_SIZE == 0, "AesRead: position must be in PAGE_SIZE module. Position={0}, File={1}", this.Position, _name); var r = _reader.Read(array, offset, count); @@ -181,7 +181,7 @@ public override int Read(byte[] array, int offset, int count) public override void Write(byte[] array, int offset, int count) { ENSURE(count == PAGE_SIZE || count == 1, "buffer size must be PAGE_SIZE"); - ENSURE(this.Position == HeaderPage.P_INVALID_DATAFILE_STATE || this.Position % PAGE_SIZE == 0, () => $"AesWrite: position must be in PAGE_SIZE module. Position={this.Position}, File={_name}"); + ENSURE(this.Position == HeaderPage.P_INVALID_DATAFILE_STATE || this.Position % PAGE_SIZE == 0, "AesWrite: position must be in PAGE_SIZE module.
Position={0}, File={1}", this.Position, _name); _writer.Write(array, offset, count); } diff --git a/LiteDB/Engine/FileReader/FileReaderV8.cs b/LiteDB/Engine/FileReader/FileReaderV8.cs index 525b945bc..3230c9d57 100644 --- a/LiteDB/Engine/FileReader/FileReaderV8.cs +++ b/LiteDB/Engine/FileReader/FileReaderV8.cs @@ -156,8 +156,8 @@ public IEnumerable GetDocuments(string collection) // empty slot if (position == 0) continue; - ENSURE(position > 0 && length > 0, () => $"Invalid footer ref position {position} with length {length}"); - ENSURE(position + length < PAGE_SIZE, () => $"Invalid footer ref position {position} with length {length}"); + ENSURE(position > 0 && length > 0, "Invalid footer ref position {0} with length {1}", position, length); + ENSURE(position + length < PAGE_SIZE, "Invalid footer ref position {0} with length {1}", position, length); // get segment slice var segment = buffer.Slice(position, length); @@ -183,8 +183,8 @@ public IEnumerable GetDocuments(string collection) var nextBuffer = nextPage.Value.Buffer; // make page validations - ENSURE(nextPage.Value.PageType == PageType.Data, () => $"Invalid PageType (excepted Data, get {nextPage.Value.PageType})"); - ENSURE(nextPage.Value.ColID == colID, () => $"Invalid ColID in this page (expected {colID}, get {nextPage.Value.ColID})"); + ENSURE(nextPage.Value.PageType == PageType.Data, "Invalid PageType (excepted Data, get {0})", nextPage.Value.PageType); + ENSURE(nextPage.Value.ColID == colID, "Invalid ColID in this page (expected {0}, get {1})", colID, nextPage.Value.ColID); ENSURE(nextPage.Value.ItemsCount > 0, "Page with no items count"); // read slot address @@ -196,7 +196,7 @@ public IEnumerable GetDocuments(string collection) length = nextBuffer.ReadUInt16(lengthAddr); // empty slot - ENSURE(length > 0, () => $"Last DataBlock request a next extend to {nextBlock}, but this block are empty footer"); + ENSURE(length > 0, "Last DataBlock request a next extend to {0}, but this block are empty footer", nextBlock); // get segment slice segment = nextBuffer.Slice(position, length); @@ -204,7 +204,7 @@ public IEnumerable GetDocuments(string collection) nextBlock = segment.ReadPageAddress(DataBlock.P_NEXT_BLOCK); data = segment.Slice(DataBlock.P_BUFFER, segment.Count - DataBlock.P_BUFFER); - ENSURE(extend == true, () => $"Next datablock always be an extend. Invalid data block {nextBlock}"); + ENSURE(extend == true, "Next datablock always be an extend. 
Invalid data block {0}", nextBlock); // write data on memorystream @@ -219,8 +219,8 @@ public IEnumerable GetDocuments(string collection) var docResult = r.ReadDocument(); var id = docResult.Value["_id"]; - ENSURE(!(id == BsonValue.Null || id == BsonValue.MinValue || id == BsonValue.MaxValue), () => $"Invalid _id value: {id}"); - ENSURE(uniqueIDs.Contains(id) == false, () => $"Duplicated _id value: {id}"); + ENSURE(!(id == BsonValue.Null || id == BsonValue.MinValue || id == BsonValue.MaxValue), "Invalid _id value: {0}", id); + ENSURE(uniqueIDs.Contains(id) == false, "Duplicated _id value: {0}", id); uniqueIDs.Add(id); @@ -279,7 +279,7 @@ private void LoadDataPages() var header = this.ReadPage(0, out var pageInfo).GetValue(); var lastPageID = header.Buffer.ReadUInt32(HeaderPage.P_LAST_PAGE_ID); //TOFO: tentar não usar esse valor como referencia (varrer tudo) - ENSURE(lastPageID <= _maxPageID, () => $"LastPageID {lastPageID} should be less or equals to maxPageID {_maxPageID}"); + ENSURE(lastPageID <= _maxPageID, "LastPageID {0} should be less or equals to maxPageID {1}", lastPageID, _maxPageID); for (uint i = 0; i <= lastPageID; i++) { @@ -398,8 +398,8 @@ private void LoadIndexes() position += 15; // head 5 bytes, tail 5 bytes, reserved 1 byte, freeIndexPageList 4 bytes - ENSURE(!string.IsNullOrEmpty(name), () => $"Index name can't be empty (collection {collection.Key} - index: {i})"); - ENSURE(!string.IsNullOrEmpty(expr), () => $"Index expression can't be empty (collection {collection.Key} - index: {i})"); + ENSURE(!string.IsNullOrEmpty(name), "Index name can't be empty (collection {0} - index: {1})", collection.Key, i); + ENSURE(!string.IsNullOrEmpty(expr), "Index expression can't be empty (collection {0} - index: {1})", collection.Key, i); var indexInfo = new IndexInfo { @@ -481,7 +481,7 @@ private void LoadIndexMap() pageInfo.PageID = pageID; pageInfo.ColID = buffer.ReadUInt32(BasePage.P_COL_ID); - ENSURE(read == PAGE_SIZE, () => $"Page position {_logStream} read only than {read} bytes (instead {PAGE_SIZE})"); + ENSURE(read == PAGE_SIZE, "Page position {0} read only than {1} bytes (instead {2})", _logStream, read, PAGE_SIZE); var position = new PagePosition(pageID, currentPosition); @@ -532,7 +532,7 @@ private Result ReadPage(uint pageID, out PageInfo pageInfo) try { - ENSURE(pageID <= _maxPageID, () => $"PageID: {pageID} should be less then or equals to maxPageID: {_maxPageID}"); + ENSURE(pageID <= _maxPageID, "PageID: {0} should be less then or equals to maxPageID: {1}", pageID, _maxPageID); var pageBuffer = new PageBuffer(new byte[PAGE_SIZE], 0, PAGE_SIZE); Stream stream; @@ -556,13 +556,13 @@ private Result ReadPage(uint pageID, out PageInfo pageInfo) read = stream.Read(pageBuffer.Array, pageBuffer.Offset, pageBuffer.Count); - ENSURE(read == PAGE_SIZE, () => $"Page position {stream.Position} read only than {read} bytes (instead {PAGE_SIZE})"); + ENSURE(read == PAGE_SIZE, "Page position {0} read only than {1} bytes (instead {2})", stream.Position, read, PAGE_SIZE); var page = new BasePage(pageBuffer); pageInfo.ColID = page.ColID; - ENSURE(page.PageID == pageID, () => $"Expect read pageID: {pageID} but header contains pageID: {page.PageID}"); + ENSURE(page.PageID == pageID, "Expect read pageID: {0} but header contains pageID: {1}", pageID, page.PageID); return page; } diff --git a/LiteDB/Engine/Pages/BasePage.cs b/LiteDB/Engine/Pages/BasePage.cs index 3fb37cb95..09f87c851 100644 --- a/LiteDB/Engine/Pages/BasePage.cs +++ b/LiteDB/Engine/Pages/BasePage.cs @@ -282,8 +282,8 @@ public 
BufferSlice Get(byte index) var position = _buffer.ReadUInt16(positionAddr); var length = _buffer.ReadUInt16(lengthAddr); - ENSURE(this.IsValidPos(position), () => $"invalid segment position in index footer: {ToString()}/{index}"); - ENSURE(this.IsValidLen(length), () => $"invalid segment length in index footer: {ToString()}/{index}"); + ENSURE(this.IsValidPos(position), "invalid segment position in index footer: {0}/{1}", this, index); + ENSURE(this.IsValidLen(length), "invalid segment length in index footer: {0}/{1}", this, index); // return buffer slice with content only data return _buffer.Slice(position, length); diff --git a/LiteDB/Engine/Pages/DataPage.cs b/LiteDB/Engine/Pages/DataPage.cs index c59796c85..bbda32ba9 100644 --- a/LiteDB/Engine/Pages/DataPage.cs +++ b/LiteDB/Engine/Pages/DataPage.cs @@ -1,6 +1,4 @@ using System.Collections.Generic; -using System.IO; -using System.Linq; using static LiteDB.Constants; namespace LiteDB.Engine @@ -16,7 +14,7 @@ internal class DataPage : BasePage public DataPage(PageBuffer buffer) : base(buffer) { - ENSURE(this.PageType == PageType.Data, () => $"Page type must be data page: {PageType}"); + ENSURE(this.PageType == PageType.Data, "Page type must be data page: {0}", PageType); if (this.PageType != PageType.Data) throw LiteException.InvalidPageType(PageType.Data, this); } @@ -108,7 +106,7 @@ public IEnumerable<PageAddress> GetBlocks() /// A slot number between 0 and 4 public static byte FreeIndexSlot(int freeBytes) { - ENSURE(freeBytes >= 0, () => $"FreeBytes must be positive: {freeBytes}"); + ENSURE(freeBytes >= 0, "FreeBytes must be positive: {0}", freeBytes); for (var i = 0; i < _freePageSlots.Length; i++) { diff --git a/LiteDB/Engine/Services/DataService.cs b/LiteDB/Engine/Services/DataService.cs index ce5ea1a95..9ef48772c 100644 --- a/LiteDB/Engine/Services/DataService.cs +++ b/LiteDB/Engine/Services/DataService.cs @@ -165,7 +165,7 @@ public IEnumerable<BufferSlice> Read(PageAddress address) while (address != PageAddress.Empty) { - ENSURE(counter++ < _maxItemsCount, () => $"Detected loop in data Read({address})"); + ENSURE(counter++ < _maxItemsCount, "Detected loop in data Read({0})", address); var dataPage = _snapshot.GetPage<DataPage>(address.PageID); diff --git a/LiteDB/Engine/Services/IndexService.cs b/LiteDB/Engine/Services/IndexService.cs index ac6f156e6..6b8fb230d 100644 --- a/LiteDB/Engine/Services/IndexService.cs +++ b/LiteDB/Engine/Services/IndexService.cs @@ -108,7 +108,7 @@ private IndexNode AddNode( // while: scan from left to right while (right.IsEmpty == false && right != index.Tail) { - ENSURE(counter++ < _maxItemsCount, () => $"Detected loop in AddNode({node.Position})"); + ENSURE(counter++ < _maxItemsCount, "Detected loop in AddNode({0})", node.Position); var rightNode = this.GetNode(right); @@ -206,7 +206,7 @@ public IEnumerable<IndexNode> GetNodeList(PageAddress nodeAddress) while (node != null) { - ENSURE(counter++ < _maxItemsCount, () => $"Detected loop in GetNodeList({nodeAddress})"); + ENSURE(counter++ < _maxItemsCount, "Detected loop in GetNodeList({0})", nodeAddress); yield return node; @@ -225,7 +225,7 @@ public void DeleteAll(PageAddress pkAddress) while (node != null) { - ENSURE(counter++ < _maxItemsCount, () => $"Detected loop in DeleteAll({pkAddress})"); + ENSURE(counter++ < _maxItemsCount, "Detected loop in DeleteAll({0})", pkAddress); this.DeleteSingleNode(node, indexes[node.Slot]); @@ -246,7 +246,7 @@ public IndexNode DeleteList(PageAddress pkAddress, HashSet<PageAddress> toDelete while (node != null) { - ENSURE(counter++ < _maxItemsCount, () => $"Detected loop in
DeleteList({pkAddress})"); + ENSURE(counter++ < _maxItemsCount, "Detected loop in DeleteList({0})", pkAddress); if (toDelete.Contains(node.Position)) { @@ -344,7 +344,7 @@ public IEnumerable FindAll(CollectionIndex index, int order) while (!cur.GetNextPrev(0, order).IsEmpty) { - ENSURE(counter++ < _maxItemsCount, () => $"Detected loop in FindAll({index.Name})"); + ENSURE(counter++ < _maxItemsCount, "Detected loop in FindAll({0})", index.Name); cur = this.GetNode(cur.GetNextPrev(0, order)); @@ -371,7 +371,7 @@ public IndexNode Find(CollectionIndex index, BsonValue value, bool sibling, int while (right.IsEmpty == false) { - ENSURE(counter++ < _maxItemsCount, () => $"Detected loop in Find({index.Name}, {value})"); + ENSURE(counter++ < _maxItemsCount, "Detected loop in Find({0}, {1})", index.Name, value); var rightNode = this.GetNode(right); diff --git a/LiteDB/Engine/Services/TransactionService.cs b/LiteDB/Engine/Services/TransactionService.cs index f78003647..31c2a6e42 100644 --- a/LiteDB/Engine/Services/TransactionService.cs +++ b/LiteDB/Engine/Services/TransactionService.cs @@ -250,7 +250,7 @@ IEnumerable source() /// public void Commit() { - ENSURE(_state == TransactionState.Active, () => $"transaction must be active to commit (current state: {_state})"); + ENSURE(_state == TransactionState.Active, "transaction must be active to commit (current state: {0})", _state); LOG($"commit transaction ({_transPages.TransactionSize} pages)", "TRANSACTION"); @@ -281,7 +281,7 @@ public void Commit() /// public void Rollback() { - ENSURE(_state == TransactionState.Active, () => $"transaction must be active to rollback (current state: {_state})"); + ENSURE(_state == TransactionState.Active, "transaction must be active to rollback (current state: {0})", _state); LOG($"rollback transaction ({_transPages.TransactionSize} pages with {_transPages.NewPages.Count} returns)", "TRANSACTION"); diff --git a/LiteDB/Utils/Constants.cs b/LiteDB/Utils/Constants.cs index b33272d21..91c480f30 100644 --- a/LiteDB/Utils/Constants.cs +++ b/LiteDB/Utils/Constants.cs @@ -11,6 +11,8 @@ namespace LiteDB { + using System.Globalization; + /// /// Class with all constants used in LiteDB + Debbuger HELPER /// @@ -145,7 +147,7 @@ public static void ENSURE(bool conditional, string message = null) } [DebuggerHidden] - public static void ENSURE(bool conditional, Func messageProvider) + public static void ENSURE(bool conditional, string format, params object[] args) { if (conditional == false) { @@ -154,7 +156,9 @@ public static void ENSURE(bool conditional, Func messageProvider) Debugger.Break(); } - throw LiteException.InvalidDatafileState(messageProvider()); + var message = string.Format(CultureInfo.InvariantCulture, format, args); + + throw LiteException.InvalidDatafileState(format); } } From 12a6a08d26c6a2ad9ba5e566d6c761a30080c754 Mon Sep 17 00:00:00 2001 From: Krzysztof Pajak Date: Thu, 4 Apr 2024 20:58:45 +0200 Subject: [PATCH 04/18] Update StringResolver.cs --- LiteDB/Client/Mapper/Linq/TypeResolver/StringResolver.cs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/LiteDB/Client/Mapper/Linq/TypeResolver/StringResolver.cs b/LiteDB/Client/Mapper/Linq/TypeResolver/StringResolver.cs index ca0bb9437..19cd02753 100644 --- a/LiteDB/Client/Mapper/Linq/TypeResolver/StringResolver.cs +++ b/LiteDB/Client/Mapper/Linq/TypeResolver/StringResolver.cs @@ -22,7 +22,9 @@ public string ResolveMethod(MethodInfo method) case "TrimStart": return "LTRIM(#)"; case "TrimEnd": return "RTRIM(#)"; case "ToUpper": return "UPPER(#)"; + 
case "ToUpperInvariant": return "UPPER(#)"; case "ToLower": return "LOWER(#)"; + case "ToLowerInvariant": return "LOWER(#)"; case "Replace": return "REPLACE(#, @0, @1)"; case "PadLeft": return "LPAD(#, @0, @1)"; case "RightLeft": return "RPAD(#, @0, @1)"; @@ -57,4 +59,4 @@ public string ResolveMember(MemberInfo member) public string ResolveCtor(ConstructorInfo ctor) => null; } -} \ No newline at end of file +} From f0571a39293b91c991f8c6bca075ea8690423932 Mon Sep 17 00:00:00 2001 From: Oleksii Datsiuk <58850773+oleksii-datsiuk@users.noreply.github.com> Date: Thu, 16 May 2024 19:45:29 +0300 Subject: [PATCH 05/18] Prevent using disposed snapshot --- LiteDB/Engine/Services/SnapShot.cs | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/LiteDB/Engine/Services/SnapShot.cs b/LiteDB/Engine/Services/SnapShot.cs index 2f52af221..f8deaeba7 100644 --- a/LiteDB/Engine/Services/SnapShot.cs +++ b/LiteDB/Engine/Services/SnapShot.cs @@ -33,6 +33,8 @@ internal class Snapshot : IDisposable // local page cache - contains only pages about this collection (but do not contains CollectionPage - use this.CollectionPage) private readonly Dictionary _localPages = new Dictionary(); + private bool _disposed; + // expose public LockMode Mode => _mode; public string CollectionName => _collectionName; @@ -89,6 +91,8 @@ public Snapshot( /// public IEnumerable GetWritablePages(bool dirty, bool includeCollectionPage) { + ENSURE(!_disposed, "the snapshot is disposed"); + // if snapshot is read only, just exit if (_mode == LockMode.Read) yield break; @@ -110,6 +114,8 @@ public IEnumerable GetWritablePages(bool dirty, bool includeCollection /// public void Clear() { + ENSURE(!_disposed, "the snapshot is disposed"); + // release pages only if snapshot are read only if (_mode == LockMode.Read) { @@ -128,9 +134,16 @@ public void Clear() /// public void Dispose() { + if (_disposed) + { + return; + } + // release all data/index pages this.Clear(); + _disposed = true; + // release collection page (in read mode) if (_mode == LockMode.Read && _collectionPage != null) { @@ -160,6 +173,7 @@ public T GetPage(uint pageID) public T GetPage(uint pageID, out FileOrigin origin, out long position, out int walVersion) where T : BasePage { + ENSURE(!_disposed, "the snapshot is disposed"); ENSURE(pageID <= _header.LastPageID, "request page must be less or equals lastest page in data file"); // check for header page (return header single instance) @@ -259,6 +273,8 @@ private T ReadPage(uint pageID, out FileOrigin origin, out long position, out /// public DataPage GetFreeDataPage(int bytesLength) { + ENSURE(!_disposed, "the snapshot is disposed"); + var length = bytesLength + BasePage.SLOT_SIZE; // add +4 bytes for footer slot // get minimum slot to check for free page. 
Returns -1 if need NewPage @@ -292,6 +308,8 @@ public DataPage GetFreeDataPage(int bytesLength) /// public IndexPage GetFreeIndexPage(int bytesLength, ref uint freeIndexPageList) { + ENSURE(!_disposed, "the snapshot is disposed"); + IndexPage page; // if there is not page in list pages, create new page @@ -318,6 +336,7 @@ public IndexPage GetFreeIndexPage(int bytesLength, ref uint freeIndexPageList) public T NewPage<T>() where T : BasePage { + ENSURE(!_disposed, "the snapshot is disposed"); ENSURE(_collectionPage == null, typeof(T) == typeof(CollectionPage), "if no collection page defined yet, must be first request"); ENSURE(typeof(T) == typeof(CollectionPage), _collectionPage == null, "there is no new collection page if page already exists"); @@ -392,6 +411,8 @@ public T NewPage<T>() /// public void AddOrRemoveFreeDataList(DataPage page) { + ENSURE(!_disposed, "the snapshot is disposed"); + var newSlot = DataPage.FreeIndexSlot(page.FreeBytes); var initialSlot = page.PageListSlot; @@ -423,6 +444,8 @@ public void AddOrRemoveFreeDataList(DataPage page) /// public void AddOrRemoveFreeIndexList(IndexPage page, ref uint startPageID) { + ENSURE(!_disposed, "the snapshot is disposed"); + var newSlot = IndexPage.FreeIndexSlot(page.FreeBytes); var isOnList = page.PageListSlot == 0; var mustKeep = newSlot == 0; @@ -567,6 +590,8 @@ private void DeletePage<T>(T page) /// public void DropCollection(Action safePoint) { + ENSURE(!_disposed, "the snapshot is disposed"); + var indexer = new IndexService(this, _header.Pragmas.Collation, _disk.MAX_ITEMS_COUNT); // CollectionPage will be last deleted page (there is no NextPageID from CollectionPage) From ece59bbc1ab91d38ac26dd5e1614f57e6cc538fd Mon Sep 17 00:00:00 2001 From: JKamsker Date: Sat, 1 Jun 2024 19:03:34 +0200 Subject: [PATCH 06/18] Fixed #2471, #2435, #2483 : Releasing read lock when done reading --- LiteDB.Tests/Issues/Issue2471_Test.cs | 49 +++++++++++++++++++ LiteDB/Engine/Query/QueryExecutor.cs | 28 +++++------ .../Utils/Extensions/EnumerableExtensions.cs | 27 ++++++++++ 3 files changed, 87 insertions(+), 17 deletions(-) create mode 100644 LiteDB.Tests/Issues/Issue2471_Test.cs create mode 100644 LiteDB/Utils/Extensions/EnumerableExtensions.cs diff --git a/LiteDB.Tests/Issues/Issue2471_Test.cs b/LiteDB.Tests/Issues/Issue2471_Test.cs new file mode 100644 index 000000000..c6d9ff1a2 --- /dev/null +++ b/LiteDB.Tests/Issues/Issue2471_Test.cs @@ -0,0 +1,49 @@ +using FluentAssertions; + +using System; +using System.Collections.Generic; +using System.IO; +using System.Linq; +using System.Text; +using System.Threading; +using System.Threading.Tasks; + +using Xunit; + +namespace LiteDB.Tests.Issues; + +public class Issue2471_Test +{ + [Fact] + public void TestFragmentDB_FindByIDException() + { + using var db = new LiteDatabase(":memory:"); + var collection = db.GetCollection<object>("fragtest"); + + var fragment = new object { }; + var id = collection.Insert(fragment); + + id.Should().BeGreaterThan(0); + + var frag2 = collection.FindById(id); + frag2.Should().NotBeNull(); + + Action act = () => db.Checkpoint(); + + act.Should().NotThrow(); + } + + [Fact] + public void MultipleReadCleansUpTransaction() + { + using var database = new LiteDatabase(":memory:"); + + var collection = database.GetCollection("test"); + collection.Insert(new BsonDocument { ["_id"] = 1 }); + + for (int i = 0; i < 500; i++) + { + collection.FindById(1); + } + } +} \ No newline at end of file diff --git a/LiteDB/Engine/Query/QueryExecutor.cs b/LiteDB/Engine/Query/QueryExecutor.cs index
10d8e034b..522eb44af 100644 --- a/LiteDB/Engine/Query/QueryExecutor.cs +++ b/LiteDB/Engine/Query/QueryExecutor.cs @@ -1,6 +1,9 @@ -using System; +using LiteDB.Utils.Extensions; + +using System; using System.Collections.Generic; using System.Linq; + using static LiteDB.Constants; namespace LiteDB.Engine @@ -71,8 +74,14 @@ internal BsonDataReader ExecuteQuery(bool executionPlan) transaction.OpenCursors.Add(_cursor); + var enumerable = RunQuery(); + if (isNew) + { + enumerable = enumerable.OnDispose(() => _monitor.ReleaseTransaction(transaction)); + } + // return new BsonDataReader with IEnumerable source - return new BsonDataReader(RunQuery(), _collection, _state); + return new BsonDataReader(enumerable, _collection, _state); IEnumerable<BsonDocument> RunQuery() { @@ -89,11 +98,6 @@ IEnumerable<BsonDocument> RunQuery() transaction.OpenCursors.Remove(_cursor); - if (isNew) - { - _monitor.ReleaseTransaction(transaction); - } - yield break; } @@ -111,11 +115,6 @@ IEnumerable<BsonDocument> RunQuery() transaction.OpenCursors.Remove(_cursor); - if (isNew) - { - _monitor.ReleaseTransaction(transaction); - } - yield break; } @@ -169,11 +168,6 @@ IEnumerable<BsonDocument> RunQuery() _cursor.Elapsed.Stop(); transaction.OpenCursors.Remove(_cursor); - - if (isNew) - { - _monitor.ReleaseTransaction(transaction); - } }; } diff --git a/LiteDB/Utils/Extensions/EnumerableExtensions.cs b/LiteDB/Utils/Extensions/EnumerableExtensions.cs new file mode 100644 index 000000000..b79c1af12 --- /dev/null +++ b/LiteDB/Utils/Extensions/EnumerableExtensions.cs @@ -0,0 +1,27 @@ +using System; +using System.Collections.Generic; +using System.Linq; +using System.Text; +using System.Threading.Tasks; + +namespace LiteDB.Utils.Extensions { + internal static class EnumerableExtensions + { + // calls method on dispose + public static IEnumerable<T> OnDispose<T>(this IEnumerable<T> source, Action onDispose) + { + try + { + foreach (var item in source) + { + yield return item; + } + } + finally + { + onDispose(); + } + } + } +} \ No newline at end of file From 72b1ac28d29100d0fcb380023dba4cff89015cd4 Mon Sep 17 00:00:00 2001 From: JKamsker <11245306+JKamsker@users.noreply.github.com> Date: Tue, 4 Jun 2024 17:48:42 +0200 Subject: [PATCH 07/18] Bump langversion to be able to use newer language features while all TargetFrameworks still build and run --- LiteDB/LiteDB.csproj | 1 + 1 file changed, 1 insertion(+) diff --git a/LiteDB/LiteDB.csproj b/LiteDB/LiteDB.csproj index 721395e91..4105be0f3 100644 --- a/LiteDB/LiteDB.csproj +++ b/LiteDB/LiteDB.csproj @@ -28,6 +28,7 @@ true LiteDB.snk true + <LangVersion>8.0</LangVersion>
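
Two behaviors in this series are easy to miss when reading the diffs alone. First, patches 02/03 make ENSURE cheap on the happy path: with the original ENSURE(cond, $"...") call every check paid for string interpolation even when it passed, while the "string format, params object[] args" overload only formats on failure (the arguments are still boxed on every call). Below is a minimal standalone sketch of that pattern, not part of the patch series; Ensure here is a stand-in, since LiteDB's ENSURE is internal:

    using System;

    internal static class EnsureSketch
    {
        // Stand-in for the ENSURE overload from patch 03: the format string
        // is only expanded via string.Format when the condition fails.
        public static void Ensure(bool condition, string format, params object[] args)
        {
            if (!condition)
            {
                throw new InvalidOperationException(string.Format(format, args));
            }
        }

        public static void Main()
        {
            var bytesRead = 8192;

            // Hot path: no string is built here, only the int argument is boxed.
            Ensure(bytesRead == 8192, "ReadFull must read PAGE_SIZE bytes [{0}]", bytesRead);
        }
    }

Second, the fix in patch 06: FindById enumerates only the first result, so the finally blocks inside RunQuery never ran and the read transaction leaked (#2471, #2435, #2483). Wrapping the enumerable in OnDispose ties the transaction release to enumerator disposal instead of full enumeration. A standalone sketch of why that works; Pages and the boolean flag are illustrative only, not LiteDB APIs:

    using System;
    using System.Collections.Generic;

    internal static class OnDisposeSketch
    {
        // Same shape as the extension added in patch 06: the finally block runs
        // when the sequence is fully enumerated or when the caller disposes the
        // enumerator early (e.g. by breaking out of a foreach).
        public static IEnumerable<T> OnDispose<T>(this IEnumerable<T> source, Action onDispose)
        {
            try
            {
                foreach (var item in source)
                {
                    yield return item;
                }
            }
            finally
            {
                onDispose();
            }
        }

        private static IEnumerable<int> Pages()
        {
            yield return 1;
            yield return 2;
            yield return 3;
        }

        public static void Main()
        {
            var released = false;

            // Partial enumeration, like FindById: take one item and stop.
            foreach (var page in Pages().OnDispose(() => released = true))
            {
                break; // foreach disposes the enumerator here, running the finally block
            }

            Console.WriteLine(released); // prints True
        }
    }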