diff --git a/LiteDB.Tests/Database/Contains_Tests.cs b/LiteDB.Tests/Database/Contains_Tests.cs
new file mode 100644
index 000000000..68dce243a
--- /dev/null
+++ b/LiteDB.Tests/Database/Contains_Tests.cs
@@ -0,0 +1,96 @@
+using LiteDB;
+using FluentAssertions;
+using Xunit;
+using System.IO;
+using System.Linq;
+using System.Collections.Generic;
+using System;
+
+namespace LiteDB.Tests.Database
+{
+    public class Contains_Tests
+    {
+        [Fact]
+        public void ArrayContains_ShouldHaveCount1()
+        {
+            var random = new Random();
+            var randomValue = random.Next();
+
+            using(var database = new LiteDatabase(new MemoryStream()))
+            {
+                var collection = database.GetCollection<ItemWithEnumerable>();
+                collection.Insert(new ItemWithEnumerable
+                {
+                    Array = new int[] { randomValue }
+                });
+
+                var result = collection.Find(i => i.Array.Contains(randomValue)).ToList();
+                result.Should().HaveCount(1);
+            }
+        }
+
+        [Fact]
+        public void EnumerableAssignedArrayContains_ShouldHaveCount1()
+        {
+            var random = new Random();
+            var randomValue = random.Next();
+
+            using(var database = new LiteDatabase(new MemoryStream()))
+            {
+                var collection = database.GetCollection<ItemWithEnumerable>();
+                collection.Insert(new ItemWithEnumerable
+                {
+                    Enumerable = new int[] { randomValue }
+                });
+
+                var result = collection.Find(i => i.Enumerable.Contains(randomValue)).ToList();
+                result.Should().HaveCount(1);
+            }
+        }
+
+        [Fact]
+        public void EnumerableAssignedListContains_ShouldHaveCount1()
+        {
+            var random = new Random();
+            var randomValue = random.Next();
+
+            using(var database = new LiteDatabase(new MemoryStream()))
+            {
+                var collection = database.GetCollection<ItemWithEnumerable>();
+                collection.Insert(new ItemWithEnumerable
+                {
+                    Enumerable = new List<int> { randomValue }
+                });
+
+                var result = collection.Find(i => i.Enumerable.Contains(randomValue)).ToList();
+                result.Should().HaveCount(1);
+            }
+        }
+
+        [Fact]
+        public void ListContains_ShouldHaveCount1()
+        {
+            var random = new Random();
+            var randomValue = random.Next();
+
+            using(var database = new LiteDatabase(new MemoryStream()))
+            {
+                var collection = database.GetCollection<ItemWithEnumerable>();
+                collection.Insert(new ItemWithEnumerable
+                {
+                    List = new List<int> { randomValue }
+                });
+
+                var result = collection.Find(i => i.List.Contains(randomValue)).ToList();
+                result.Should().HaveCount(1);
+            }
+        }
+
+        public class ItemWithEnumerable
+        {
+            public int[] Array { get; set; }
+            public IEnumerable<int> Enumerable { get; set; }
+            public IList<int> List { get; set; }
+        }
+    }
+}
\ No newline at end of file
diff --git a/LiteDB.Tests/Internals/Extensions_Test.cs b/LiteDB.Tests/Internals/Extensions_Test.cs
new file mode 100644
index 000000000..ef5243d27
--- /dev/null
+++ b/LiteDB.Tests/Internals/Extensions_Test.cs
@@ -0,0 +1,44 @@
+using LiteDB.Utils.Extensions;
+
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Text;
+using System.Threading.Tasks;
+
+using Xunit;
+
+namespace LiteDB.Tests.Internals;
+
+public class Extensions_Test
+{
+    // Asserts that IEnumerable.OnDispose(() => { }) calls the action on dispose, even when chained
+    [Fact]
+    public void EnumerableExtensions_OnDispose()
+    {
+        var disposed = false;
+        var disposed1 = false;
+        var enumerable = new[] { 1, 2, 3 }.OnDispose(() => disposed = true).OnDispose(() => disposed1 = true);
+
+        foreach (var item in enumerable)
+        {
+            // do nothing
+        }
+
+        Assert.True(disposed);
+        Assert.True(disposed1);
+    }
+
+    // tests IDisposable StartDisposable(this Stopwatch stopwatch)
+    [Fact]
+    public async Task StopWatchExtensions_StartDisposable()
+    {
+        var stopwatch = new System.Diagnostics.Stopwatch();
+        using (stopwatch.StartDisposable())
+        {
+            await Task.Delay(100);
+        }
+
+        Assert.True(stopwatch.ElapsedMilliseconds > 0);
+    }
+}
\ No newline at end of file
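Reviewer note: the two extensions exercised above live in `LiteDB.Utils.Extensions` and are not shown in this diff. A minimal sketch of what the tests assume — names and semantics inferred from the tests and from their use in `QueryExecutor` further down, not copied from the PR:

```csharp
using System;
using System.Collections.Generic;
using System.Diagnostics;

internal static class ExtensionsSketch
{
    // Wraps the source so that disposing the enumerator also runs the action.
    // Chained calls nest, so every registered action fires exactly once,
    // whether the sequence is fully drained or abandoned early.
    public static IEnumerable<T> OnDispose<T>(this IEnumerable<T> source, Action onDispose)
    {
        try
        {
            foreach (var item in source) yield return item;
        }
        finally
        {
            onDispose(); // runs on full enumeration or on early Dispose
        }
    }

    // Starts the stopwatch and returns a token that stops it when disposed.
    public static IDisposable StartDisposable(this Stopwatch stopwatch)
    {
        stopwatch.Start();
        return new StopToken(stopwatch);
    }

    private sealed class StopToken : IDisposable
    {
        private readonly Stopwatch _stopwatch;
        public StopToken(Stopwatch stopwatch) => _stopwatch = stopwatch;
        public void Dispose() => _stopwatch.Stop();
    }
}
```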
diff --git a/LiteDB.Tests/Issues/Issue2265_Tests.cs b/LiteDB.Tests/Issues/Issue2265_Tests.cs
new file mode 100644
index 000000000..347753348
--- /dev/null
+++ b/LiteDB.Tests/Issues/Issue2265_Tests.cs
@@ -0,0 +1,47 @@
+using System;
+
+using Xunit;
+
+namespace LiteDB.Tests.Issues;
+
+// issue 2265
+public class Issue2265_Tests
+{
+    public class Weights
+    {
+        public int Id { get; set; } = 0;
+
+        // comment out [BsonRef] and the test works
+        [BsonRef("weights")]
+        public Weights[] Parents { get; set; }
+
+        public Weights(int id, Weights[] parents)
+        {
+            Id = id;
+            Parents = parents;
+        }
+
+        public Weights()
+        {
+            Id = 0;
+            Parents = Array.Empty<Weights>();
+        }
+    }
+
+    [Fact]
+    public void Test()
+    {
+        using (var db = new LiteDatabase(":memory:"))
+        {
+            var c = db.GetCollection<Weights>("weights");
+            Weights? w = c.FindOne(x => true);
+            if (w == null)
+            {
+                w = new Weights();
+                c.Insert(w);
+            }
+
+            //return w;
+        }
+    }
+}
\ No newline at end of file
diff --git a/LiteDB.Tests/Issues/Issue2298_Tests.cs b/LiteDB.Tests/Issues/Issue2298_Tests.cs
new file mode 100644
index 000000000..c4d4c5a97
--- /dev/null
+++ b/LiteDB.Tests/Issues/Issue2298_Tests.cs
@@ -0,0 +1,67 @@
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Text;
+using System.Text.Json;
+using System.Threading.Tasks;
+
+using Xunit;
+
+namespace LiteDB.Tests.Issues;
+
+public class Issue2298_Tests
+{
+    public struct Mass
+    {
+        public enum Units
+        { Pound, Kilogram }
+
+        public Mass(double value, Units unit)
+        { Value = value; Unit = unit; }
+
+        public double Value { get; init; }
+        public Units Unit { get; init; }
+    }
+
+    public class QuantityRange<T>
+    {
+        public QuantityRange(double min, double max, Enum unit)
+        { Min = min; Max = max; Unit = unit; }
+
+        public double Min { get; init; }
+        public double Max { get; init; }
+        public Enum Unit { get; init; }
+    }
+
+    public static QuantityRange<Mass> MassRangeBuilder(BsonDocument document)
+    {
+        var doc = JsonDocument.Parse(document.ToString()).RootElement;
+        var min = doc.GetProperty(nameof(QuantityRange<Mass>.Min)).GetDouble();
+        var max = doc.GetProperty(nameof(QuantityRange<Mass>.Max)).GetDouble();
+        var unit = Enum.Parse<Mass.Units>(doc.GetProperty(nameof(QuantityRange<Mass>.Unit)).GetString());
+
+        var restored = new QuantityRange<Mass>(min, max, unit);
+        return restored;
+    }
+
+    [Fact]
+    public void We_Dont_Need_Ctor()
+    {
+        BsonMapper.Global.RegisterType<QuantityRange<Mass>>(
+            serialize: (range) => new BsonDocument
+            {
+                { nameof(QuantityRange<Mass>.Min), range.Min },
+                { nameof(QuantityRange<Mass>.Max), range.Max },
+                { nameof(QuantityRange<Mass>.Unit), range.Unit.ToString() }
+            },
+            deserialize: (document) => MassRangeBuilder(document as BsonDocument)
+        );
+
+        var range = new QuantityRange<Mass>(100, 500, Mass.Units.Pound);
+        var filename = "Demo.DB";
+        using var DB = new LiteDatabase(filename);
+        var collection = DB.GetCollection<QuantityRange<Mass>>("DEMO");
+        collection.Insert(range);
+        var restored = collection.FindAll().First();
+    }
+}
\ No newline at end of file
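Reviewer note: the test above round-trips the document through `System.Text.Json` only to read three fields back. A minimal alternative sketch (my suggestion, not part of the PR, assuming the same `QuantityRange<Mass>`/`Mass` types from the test) reads the `BsonDocument` directly via LiteDB's typed accessors:

```csharp
public static QuantityRange<Mass> MassRangeFromBson(BsonDocument doc)
{
    // BsonValue exposes AsDouble/AsString, so no JSON re-parse is needed
    var min = doc["Min"].AsDouble;
    var max = doc["Max"].AsDouble;
    var unit = (Mass.Units)Enum.Parse(typeof(Mass.Units), doc["Unit"].AsString);

    return new QuantityRange<Mass>(min, max, unit);
}
```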
diff --git a/LiteDB.Tests/Issues/Issue2471_Test.cs b/LiteDB.Tests/Issues/Issue2471_Test.cs
new file mode 100644
index 000000000..1f50e1aff
--- /dev/null
+++ b/LiteDB.Tests/Issues/Issue2471_Test.cs
@@ -0,0 +1,97 @@
+using FluentAssertions;
+
+using System;
+using System.Collections.Generic;
+using System.IO;
+using System.Linq;
+using System.Text;
+using System.Threading;
+using System.Threading.Tasks;
+
+using Xunit;
+
+namespace LiteDB.Tests.Issues;
+
+public class Issue2471_Test
+{
+    [Fact]
+    public void TestFragmentDB_FindByIDException()
+    {
+        using var db = new LiteDatabase(":memory:");
+        var collection = db.GetCollection<object>("fragtest");
+
+        var fragment = new object { };
+        var id = collection.Insert(fragment);
+
+        id.Should().BeGreaterThan(0);
+
+        var frag2 = collection.FindById(id);
+        frag2.Should().NotBeNull();
+
+        Action act = () => db.Checkpoint();
+
+        act.Should().NotThrow();
+    }
+
+    [Fact]
+    public void MultipleReadCleansUpTransaction()
+    {
+        using var database = new LiteDatabase(":memory:");
+
+        var collection = database.GetCollection("test");
+        collection.Insert(new BsonDocument { ["_id"] = 1 });
+
+        for (int i = 0; i < 500; i++)
+        {
+            collection.FindById(1);
+        }
+    }
+
+    #region Model
+
+    public class User
+    {
+        public int Id { get; set; }
+        public string Name { get; set; }
+        public int[] Phones { get; set; }
+        public List<Address> Addresses { get; set; }
+    }
+
+    public class Address
+    {
+        public string Street { get; set; }
+    }
+
+    #endregion Model
+
+    // Copied from IndexMultiKeyIndex, but this time we ensure that the lock is released by calling db.Checkpoint()
+    [Fact]
+    public void Ensure_Query_GetPlan_Releases_Lock()
+    {
+        using var db = new LiteDatabase(new MemoryStream());
+        var col = db.GetCollection<User>();
+
+        col.Insert(new User { Name = "John Doe", Phones = new int[] { 1, 3, 5 }, Addresses = new List<Address> { new Address { Street = "Av.1" }, new Address { Street = "Av.3" } } });
+        col.Insert(new User { Name = "Joana Mark", Phones = new int[] { 1, 4 }, Addresses = new List<Address> { new Address { Street = "Av.3" } } });
+
+        // create indexes
+        col.EnsureIndex(x => x.Phones);
+        col.EnsureIndex(x => x.Addresses.Select(z => z.Street));
+
+        // testing indexes expressions
+        var indexes = db.GetCollection("$indexes").FindAll().ToArray();
+
+        indexes[1]["expression"].AsString.Should().Be("$.Phones[*]");
+        indexes[2]["expression"].AsString.Should().Be("MAP($.Addresses[*]=>@.Street)");
+
+        // doing Phone query
+        var queryPhone = col.Query()
+            .Where(x => x.Phones.Contains(3));
+
+        var planPhone = queryPhone.GetPlan();
+
+        Action act = () => db.Checkpoint();
+
+        act.Should().NotThrow();
+    }
+}
\ No newline at end of file
diff --git a/LiteDB.Tests/Issues/Pull2468_Tests.cs b/LiteDB.Tests/Issues/Pull2468_Tests.cs
new file mode 100644
index 000000000..5e5f35244
--- /dev/null
+++ b/LiteDB.Tests/Issues/Pull2468_Tests.cs
@@ -0,0 +1,70 @@
+using FluentAssertions;
+
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Text;
+using System.Threading.Tasks;
+
+using Xunit;
+
+using static LiteDB.Tests.Issues.Issue1838_Tests;
+
+namespace LiteDB.Tests.Issues;
+
+public class Pull2468_Tests
+{
+    // tests if lowerinvariant works
+    [Fact]
+    public void Supports_LowerInvariant()
+    {
+        using var db = new LiteDatabase(":memory:");
+        var collection = db.GetCollection<TestType>(nameof(TestType));
+
+        collection.Insert(new TestType()
+        {
+            Foo = "Abc",
+            Timestamp = DateTimeOffset.UtcNow,
+        });
+
+        collection.Insert(new TestType()
+        {
+            Foo = "Def",
+            Timestamp = DateTimeOffset.UtcNow,
+        });
+
+        var result = collection.Query()
+            .Where(x => x.Foo.ToLowerInvariant() == "abc")
+            .ToList();
+
+        Assert.NotNull(result);
+        Assert.Single(result);
+    }
+
+    // tests if upperinvariant works
+    [Fact]
+    public void Supports_UpperInvariant()
+    {
+        using var db = new LiteDatabase(":memory:");
+        var collection = db.GetCollection<TestType>(nameof(TestType));
+
+        collection.Insert(new TestType()
+        {
+            Foo = "Abc",
+            Timestamp = DateTimeOffset.UtcNow,
+        });
+
+        collection.Insert(new TestType()
+        {
+            Foo = "Def",
+            Timestamp = DateTimeOffset.UtcNow,
+        });
+
+        var result = collection.Query()
+            .Where(x => x.Foo.ToUpperInvariant() == "ABC")
+            .ToList();
+
+        Assert.NotNull(result);
+        Assert.Single(result);
+    }
+}
\ No newline at end of file
diff --git a/LiteDB/Client/Mapper/BsonMapper.cs b/LiteDB/Client/Mapper/BsonMapper.cs
index 1b963f560..c613e7bc3 100644
--- a/LiteDB/Client/Mapper/BsonMapper.cs
+++ b/LiteDB/Client/Mapper/BsonMapper.cs
@@ -1,4 +1,4 @@
-using System;
+using System;
 using System.Collections;
 using System.Collections.Concurrent;
 using System.Collections.Generic;
@@ -240,9 +240,7 @@ internal EntityMapper GetEntityMapper(Type type)
             lock (_entities)
             {
                 if (!_entities.TryGetValue(type, out mapper))
-                {
-                    return _entities[type] = this.BuildEntityMapper(type);
-                }
+                    return this.BuildAddEntityMapper(type);
             }
         }
 
@@ -253,9 +251,10 @@ internal EntityMapper GetEntityMapper(Type type)
         /// Use this method to override how your class can be, by default, mapped from entity to Bson document.
        /// Returns an EntityMapper from each requested Type
        /// </summary>
-        protected virtual EntityMapper BuildEntityMapper(Type type)
+        protected virtual EntityMapper BuildAddEntityMapper(Type type)
        {
            var mapper = new EntityMapper(type);
+            _entities[type] = mapper; // add to _entities immediately to break the DbRef recursion (GetEntityMapper > BuildAddEntityMapper > RegisterDbRef > RegisterDbRefItem > GetEntityMapper): the nested call now stops at GetEntityMapper's _entities.TryGetValue
 
            var idAttr = typeof(BsonIdAttribute);
            var ignoreAttr = typeof(BsonIgnoreAttribute);
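Reviewer note: the early `_entities[type] = mapper` registration is what fixes the Issue2265 stack overflow. A self-referencing `[BsonRef]` entity makes the cycle concrete (a sketch based on the Issue2265 test above, assuming the call chain described in the comment):

```csharp
// Building the mapper for Weights reaches the [BsonRef] member, whose item
// type is Weights again; RegisterDbRefItem asks GetEntityMapper for that same
// type. Before this change the half-built mapper was not yet in _entities, so
// the call recursed into BuildEntityMapper forever. Now TryGetValue finds it.
public class Weights
{
    public int Id { get; set; }

    [BsonRef("weights")]
    public Weights[] Parents { get; set; }
}
```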
diff --git a/LiteDB/Client/Mapper/Linq/TypeResolver/StringResolver.cs b/LiteDB/Client/Mapper/Linq/TypeResolver/StringResolver.cs
index ca0bb9437..35b99c8fb 100644
--- a/LiteDB/Client/Mapper/Linq/TypeResolver/StringResolver.cs
+++ b/LiteDB/Client/Mapper/Linq/TypeResolver/StringResolver.cs
@@ -5,6 +5,7 @@
 using System.Linq.Expressions;
 using System.Reflection;
 using System.Text;
+
 using static LiteDB.Constants;
 
 namespace LiteDB
@@ -22,7 +23,9 @@ public string ResolveMethod(MethodInfo method)
                 case "TrimStart": return "LTRIM(#)";
                 case "TrimEnd": return "RTRIM(#)";
                 case "ToUpper": return "UPPER(#)";
+                case "ToUpperInvariant": return "UPPER(#)";
                 case "ToLower": return "LOWER(#)";
+                case "ToLowerInvariant": return "LOWER(#)";
                 case "Replace": return "REPLACE(#, @0, @1)";
                 case "PadLeft": return "LPAD(#, @0, @1)";
                 case "RightLeft": return "RPAD(#, @0, @1)";
diff --git a/LiteDB/Engine/Disk/DiskService.cs b/LiteDB/Engine/Disk/DiskService.cs
index d414da4e1..052ae18e6 100644
--- a/LiteDB/Engine/Disk/DiskService.cs
+++ b/LiteDB/Engine/Disk/DiskService.cs
@@ -1,10 +1,7 @@
 using System;
-using System.Collections.Concurrent;
 using System.Collections.Generic;
 using System.IO;
-using System.Text;
 using System.Threading;
-using System.Threading.Tasks;
 using static LiteDB.Constants;
 
 namespace LiteDB.Engine
@@ -29,7 +26,7 @@ internal class DiskService : IDisposable
         private long _logLength;
 
         public DiskService(
-            EngineSettings settings, 
+            EngineSettings settings,
             EngineState state,
             int[] memorySegmentSizes)
         {
@@ -261,7 +258,7 @@ public IEnumerable<PageBuffer> ReadFull(FileOrigin origin)
 
                     var bytesRead = stream.Read(buffer, 0, PAGE_SIZE);
 
-                    ENSURE(bytesRead == PAGE_SIZE, $"ReadFull must read PAGE_SIZE bytes [{bytesRead}]");
+                    ENSURE(bytesRead == PAGE_SIZE, "ReadFull must read PAGE_SIZE bytes [{0}]", bytesRead);
 
                     yield return new PageBuffer(buffer, 0, 0)
                     {
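Reviewer note: this `$"..."` → format-string-plus-args conversion repeats across the whole PR (DiskService above; AesStream, FileReaderV8, the page and index services below). A minimal sketch of the overload all these call sites assume — the real helper lives in `LiteDB.Constants` and may break into a debugger or throw a `LiteException` instead:

```csharp
using System;

internal static class ConstantsSketch
{
    // Formatting now happens only on failure, so hot loops no longer build an
    // interpolated string for every assertion that passes.
    public static void ENSURE(bool conditional, string format, params object[] args)
    {
        if (conditional == false)
        {
            throw new InvalidOperationException("ENSURE: " + string.Format(format, args));
        }
    }
}
```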
diff --git a/LiteDB/Engine/Disk/Streams/AesStream.cs b/LiteDB/Engine/Disk/Streams/AesStream.cs
index 90f4edc80..fce5cca3b 100644
--- a/LiteDB/Engine/Disk/Streams/AesStream.cs
+++ b/LiteDB/Engine/Disk/Streams/AesStream.cs
@@ -22,7 +22,7 @@ public class AesStream : Stream
 
         private readonly byte[] _decryptedZeroes = new byte[16];
 
-        private static readonly byte[] _emptyContent = new byte[PAGE_SIZE - 1 - 16]; // 1 for aes indicator + 16 for salt 
+        private static readonly byte[] _emptyContent = new byte[PAGE_SIZE - 1 - 16]; // 1 for aes indicator + 16 for salt
 
         public byte[] Salt { get; }
 
@@ -111,7 +111,7 @@ public AesStream(string password, Stream stream)
                 // check whether bytes 32 to 64 are empty. This indicates LiteDB was unable to write encrypted 1s during the last attempt.
                 _stream.Read(checkBuffer, 0, checkBuffer.Length);
                 isNew = checkBuffer.All(x => x == 0);
-                
+
                 // reset checkBuffer and stream position
                 Array.Clear(checkBuffer, 0, checkBuffer.Length);
                 _stream.Position = 32;
@@ -160,7 +160,7 @@ public AesStream(string password, Stream stream)
         /// </summary>
         public override int Read(byte[] array, int offset, int count)
         {
-            ENSURE(this.Position % PAGE_SIZE == 0, $"AesRead: position must be in PAGE_SIZE module. Position={this.Position}, File={_name}");
+            ENSURE(this.Position % PAGE_SIZE == 0, "AesRead: position must be in PAGE_SIZE module. Position={0}, File={1}", this.Position, _name);
 
             var r = _reader.Read(array, offset, count);
 
@@ -181,7 +181,7 @@ public override int Read(byte[] array, int offset, int count)
         public override void Write(byte[] array, int offset, int count)
         {
             ENSURE(count == PAGE_SIZE || count == 1, "buffer size must be PAGE_SIZE");
-            ENSURE(this.Position == HeaderPage.P_INVALID_DATAFILE_STATE || this.Position % PAGE_SIZE == 0, $"AesWrite: position must be in PAGE_SIZE module. Position={this.Position}, File={_name}");
+            ENSURE(this.Position == HeaderPage.P_INVALID_DATAFILE_STATE || this.Position % PAGE_SIZE == 0, "AesWrite: position must be in PAGE_SIZE module. Position={0}, File={1}", this.Position, _name);
 
             _writer.Write(array, offset, count);
         }
diff --git a/LiteDB/Engine/FileReader/FileReaderV8.cs b/LiteDB/Engine/FileReader/FileReaderV8.cs
index a53a90bcf..3230c9d57 100644
--- a/LiteDB/Engine/FileReader/FileReaderV8.cs
+++ b/LiteDB/Engine/FileReader/FileReaderV8.cs
@@ -117,7 +117,7 @@ public IEnumerable<BsonDocument> GetDocuments(string collection)
             var colID = _collections[collection];
 
             if (!_collectionsDataPages.ContainsKey(colID)) yield break;
-            
+
             var dataPages = _collectionsDataPages[colID];
             var uniqueIDs = new HashSet<BsonValue>();
 
@@ -156,8 +156,8 @@ public IEnumerable<BsonDocument> GetDocuments(string collection)
                     // empty slot
                     if (position == 0) continue;
 
-                    ENSURE(position > 0 && length > 0, $"Invalid footer ref position {position} with length {length}");
-                    ENSURE(position + length < PAGE_SIZE, $"Invalid footer ref position {position} with length {length}");
+                    ENSURE(position > 0 && length > 0, "Invalid footer ref position {0} with length {1}", position, length);
+                    ENSURE(position + length < PAGE_SIZE, "Invalid footer ref position {0} with length {1}", position, length);
 
                     // get segment slice
                     var segment = buffer.Slice(position, length);
@@ -183,8 +183,8 @@ public IEnumerable<BsonDocument> GetDocuments(string collection)
                         var nextBuffer = nextPage.Value.Buffer;
 
                         // make page validations
-                        ENSURE(nextPage.Value.PageType == PageType.Data, $"Invalid PageType (excepted Data, get {nextPage.Value.PageType})");
-                        ENSURE(nextPage.Value.ColID == colID, $"Invalid ColID in this page (expected {colID}, get {nextPage.Value.ColID})");
+                        ENSURE(nextPage.Value.PageType == PageType.Data, "Invalid PageType (expected Data, got {0})", nextPage.Value.PageType);
+                        ENSURE(nextPage.Value.ColID == colID, "Invalid ColID in this page (expected {0}, got {1})", colID, nextPage.Value.ColID);
                         ENSURE(nextPage.Value.ItemsCount > 0, "Page with no items count");
 
                         // read slot address
@@ -196,7 +196,7 @@ public IEnumerable<BsonDocument> GetDocuments(string collection)
                         length = nextBuffer.ReadUInt16(lengthAddr);
 
                         // empty slot
-                        ENSURE(length > 0, $"Last DataBlock request a next extend to {nextBlock}, but this block are empty footer");
+                        ENSURE(length > 0, "Last DataBlock requested a next extend at {0}, but this block has an empty footer", nextBlock);
 
                         // get segment slice
                         segment = nextBuffer.Slice(position, length);
@@ -204,7 +204,7 @@ public IEnumerable<BsonDocument> GetDocuments(string collection)
                         nextBlock = segment.ReadPageAddress(DataBlock.P_NEXT_BLOCK);
                         data = segment.Slice(DataBlock.P_BUFFER, segment.Count - DataBlock.P_BUFFER);
 
-                        ENSURE(extend == true, $"Next datablock always be an extend. Invalid data block {nextBlock}");
+                        ENSURE(extend == true, "Next datablock must always be an extend. Invalid data block {0}", nextBlock);
 
                         // write data on memorystream
 
@@ -219,8 +219,8 @@ public IEnumerable<BsonDocument> GetDocuments(string collection)
                     var docResult = r.ReadDocument();
                     var id = docResult.Value["_id"];
 
-                    ENSURE(!(id == BsonValue.Null || id == BsonValue.MinValue || id == BsonValue.MaxValue), $"Invalid _id value: {id}");
-                    ENSURE(uniqueIDs.Contains(id) == false, $"Duplicated _id value: {id}");
+                    ENSURE(!(id == BsonValue.Null || id == BsonValue.MinValue || id == BsonValue.MaxValue), "Invalid _id value: {0}", id);
+                    ENSURE(uniqueIDs.Contains(id) == false, "Duplicated _id value: {0}", id);
 
                     uniqueIDs.Add(id);
 
@@ -279,7 +279,7 @@ private void LoadDataPages()
             var header = this.ReadPage(0, out var pageInfo).GetValue();
             var lastPageID = header.Buffer.ReadUInt32(HeaderPage.P_LAST_PAGE_ID); // TODO: try not to use this value as a reference (scan everything)
 
-            ENSURE(lastPageID <= _maxPageID, $"LastPageID {lastPageID} should be less or equals to maxPageID {_maxPageID}");
+            ENSURE(lastPageID <= _maxPageID, "LastPageID {0} should be less than or equal to maxPageID {1}", lastPageID, _maxPageID);
 
             for (uint i = 0; i <= lastPageID; i++)
             {
@@ -398,8 +398,8 @@ private void LoadIndexes()
                 position += 15; // head 5 bytes, tail 5 bytes, reserved 1 byte, freeIndexPageList 4 bytes
 
-                ENSURE(!string.IsNullOrEmpty(name), $"Index name can't be empty (collection {collection.Key} - index: {i})");
-                ENSURE(!string.IsNullOrEmpty(expr), $"Index expression can't be empty (collection {collection.Key} - index: {i})");
+                ENSURE(!string.IsNullOrEmpty(name), "Index name can't be empty (collection {0} - index: {1})", collection.Key, i);
+                ENSURE(!string.IsNullOrEmpty(expr), "Index expression can't be empty (collection {0} - index: {1})", collection.Key, i);
 
                 var indexInfo = new IndexInfo
                 {
@@ -481,7 +481,7 @@ private void LoadIndexMap()
                 pageInfo.PageID = pageID;
                 pageInfo.ColID = buffer.ReadUInt32(BasePage.P_COL_ID);
 
-                ENSURE(read == PAGE_SIZE, $"Page position {_logStream} read only than {read} bytes (instead {PAGE_SIZE})");
+                ENSURE(read == PAGE_SIZE, "Page position {0} read only {1} bytes (instead of {2})", _logStream, read, PAGE_SIZE);
 
                 var position = new PagePosition(pageID, currentPosition);
 
@@ -515,7 +515,7 @@ private void LoadIndexMap()
             {
                 var mapIndexPages = transactions[transactionID];
 
-                // update 
+                // update
                 foreach (var page in mapIndexPages)
                 {
                     _logIndexMap[page.PageID] = page.Position;
@@ -532,7 +532,7 @@ private Result<BasePage> ReadPage(uint pageID, out PageInfo pageInfo)
             try
             {
-                ENSURE(pageID <= _maxPageID, $"PageID: {pageID} should be less then or equals to maxPageID: {_maxPageID}");
+                ENSURE(pageID <= _maxPageID, "PageID: {0} should be less than or equal to maxPageID: {1}", pageID, _maxPageID);
 
                 var pageBuffer = new PageBuffer(new byte[PAGE_SIZE], 0, PAGE_SIZE);
                 Stream stream;
@@ -556,13 +556,13 @@ private Result<BasePage> ReadPage(uint pageID, out PageInfo pageInfo)
 
                 read = stream.Read(pageBuffer.Array, pageBuffer.Offset, pageBuffer.Count);
 
-                ENSURE(read == PAGE_SIZE, $"Page position {stream.Position} read only than {read} bytes (instead {PAGE_SIZE})");
+                ENSURE(read == PAGE_SIZE, "Page position {0} read only {1} bytes (instead of {2})", stream.Position, read, PAGE_SIZE);
 
                 var page = new BasePage(pageBuffer);
 
                 pageInfo.ColID = page.ColID;
 
-                ENSURE(page.PageID == pageID, $"Expect read pageID: {pageID} but header contains pageID: {page.PageID}");
+                ENSURE(page.PageID == pageID, "Expected to read pageID: {0} but header contains pageID: {1}", pageID, page.PageID);
 
                 return page;
             }
diff --git a/LiteDB/Engine/Pages/BasePage.cs b/LiteDB/Engine/Pages/BasePage.cs
index 92b645206..09f87c851 100644
--- a/LiteDB/Engine/Pages/BasePage.cs
+++ b/LiteDB/Engine/Pages/BasePage.cs
@@ -102,8 +102,8 @@ internal class BasePage
         /// Get how many bytes are used in footer page at this moment
         /// ((HighestIndex + 1) * 4 bytes per slot: [2 for position, 2 for length])
         /// </summary>
-        public int FooterSize => 
-            (this.HighestIndex == byte.MaxValue ? 
+        public int FooterSize =>
+            (this.HighestIndex == byte.MaxValue ?
             0 :  // no items in page
             ((this.HighestIndex + 1) * SLOT_SIZE)); // 4 bytes PER item (2 to position + 2 to length) - need consider HighestIndex used
 
@@ -282,8 +282,8 @@ public BufferSlice Get(byte index)
             var position = _buffer.ReadUInt16(positionAddr);
             var length = _buffer.ReadUInt16(lengthAddr);
 
-            ENSURE(this.IsValidPos(position), $"invalid segment position in index footer: {ToString()}/{index}");
-            ENSURE(this.IsValidLen(length), $"invalid segment length in index footer: {ToString()}/{index}");
+            ENSURE(this.IsValidPos(position), "invalid segment position in index footer: {0}/{1}", this, index);
+            ENSURE(this.IsValidLen(length), "invalid segment length in index footer: {0}/{1}", this, index);
 
             // return buffer slice with content only data
             return _buffer.Slice(position, length);
@@ -408,7 +408,7 @@ public void Delete(byte index)
                 this.NextFreePosition = position;
             }
             else
-            {   
+            {
                 // if segment is in middle of the page, add this blocks as fragment block
                 this.FragmentedBytes += length;
             }
@@ -475,7 +475,7 @@ public BufferSlice Update(byte index, ushort bytesLength)
 
             if (isLastSegment)
             {
-                // if is at end of page, must get back unused blocks 
+                // if is at end of page, must get back unused blocks
                 this.NextFreePosition -= diff;
            }
            else
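Reviewer note: a worked example of the footer-size formula quoted in the doc comment above (a sketch; `SLOT_SIZE` stands in for BasePage's 4-byte slot constant):

```csharp
const int SLOT_SIZE = 4; // 2 bytes position + 2 bytes length per slot

// HighestIndex == byte.MaxValue (255) is the "no items in page" sentinel:
byte emptyPage = byte.MaxValue;
int emptyFooter = emptyPage == byte.MaxValue ? 0 : (emptyPage + 1) * SLOT_SIZE; // 0 bytes

// with three used slots (HighestIndex = 2) the footer occupies (2 + 1) * 4 = 12 bytes:
byte highestIndex = 2;
int footerSize = highestIndex == byte.MaxValue ? 0 : (highestIndex + 1) * SLOT_SIZE; // 12 bytes
```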
diff --git a/LiteDB/Engine/Pages/DataPage.cs b/LiteDB/Engine/Pages/DataPage.cs
index 842bb49d4..bbda32ba9 100644
--- a/LiteDB/Engine/Pages/DataPage.cs
+++ b/LiteDB/Engine/Pages/DataPage.cs
@@ -1,6 +1,4 @@
 using System.Collections.Generic;
-using System.IO;
-using System.Linq;
 using static LiteDB.Constants;
 
 namespace LiteDB.Engine
@@ -16,7 +14,7 @@ internal class DataPage : BasePage
         public DataPage(PageBuffer buffer)
             : base(buffer)
         {
-            ENSURE(this.PageType == PageType.Data, $"Page type must be data page: {PageType}");
+            ENSURE(this.PageType == PageType.Data, "Page type must be data page: {0}", PageType);
 
             if (this.PageType != PageType.Data) throw LiteException.InvalidPageType(PageType.Data, this);
         }
@@ -108,7 +106,7 @@ public IEnumerable<PageAddress> GetBlocks()
         /// <returns>A slot number between 0 and 4</returns>
         public static byte FreeIndexSlot(int freeBytes)
         {
-            ENSURE(freeBytes >= 0, $"FreeBytes must be positive: {freeBytes}");
+            ENSURE(freeBytes >= 0, "FreeBytes must be positive: {0}", freeBytes);
 
             for (var i = 0; i < _freePageSlots.Length; i++)
             {
diff --git a/LiteDB/Engine/Query/QueryExecutor.cs b/LiteDB/Engine/Query/QueryExecutor.cs
index 10d8e034b..d96956043 100644
--- a/LiteDB/Engine/Query/QueryExecutor.cs
+++ b/LiteDB/Engine/Query/QueryExecutor.cs
@@ -1,6 +1,9 @@
-using System;
+using LiteDB.Utils.Extensions;
+
+using System;
 using System.Collections.Generic;
 using System.Linq;
+
 using static LiteDB.Constants;
 
 namespace LiteDB.Engine
@@ -22,14 +25,14 @@ internal class QueryExecutor
         private readonly IEnumerable<BsonDocument> _source;
 
         public QueryExecutor(
-            LiteEngine engine, 
+            LiteEngine engine,
             EngineState state,
-            TransactionMonitor monitor, 
-            SortDisk sortDisk, 
+            TransactionMonitor monitor,
+            SortDisk sortDisk,
             DiskService disk,
-            EnginePragmas pragmas, 
-            string collection, 
-            Query query, 
+            EnginePragmas pragmas,
+            string collection,
+            Query query,
             IEnumerable<BsonDocument> source)
         {
             _engine = engine;
@@ -71,8 +74,17 @@ internal BsonDataReader ExecuteQuery(bool executionPlan)
 
             transaction.OpenCursors.Add(_cursor);
 
+            var enumerable = RunQuery();
+
+            enumerable = enumerable.OnDispose(() => transaction.OpenCursors.Remove(_cursor));
+
+            if (isNew)
+            {
+                enumerable = enumerable.OnDispose(() => _monitor.ReleaseTransaction(transaction));
+            }
+
             // return new BsonDataReader with IEnumerable source
-            return new BsonDataReader(RunQuery(), _collection, _state);
+            return new BsonDataReader(enumerable, _collection, _state);
 
             IEnumerable<BsonDocument> RunQuery()
             {
@@ -87,13 +99,6 @@ IEnumerable<BsonDocument> RunQuery()
                         yield return _query.Select.ExecuteScalar(_pragmas.Collation).AsDocument;
                     }
 
-                    transaction.OpenCursors.Remove(_cursor);
-
-                    if (isNew)
-                    {
-                        _monitor.ReleaseTransaction(transaction);
-                    }
-
                     yield break;
                 }
 
@@ -108,14 +113,6 @@ IEnumerable<BsonDocument> RunQuery()
                 if (executionPlan)
                 {
                     yield return queryPlan.GetExecutionPlan();
-
-                    transaction.OpenCursors.Remove(_cursor);
-
-                    if (isNew)
-                    {
-                        _monitor.ReleaseTransaction(transaction);
-                    }
-
                     yield break;
                 }
 
@@ -125,8 +122,8 @@ IEnumerable<BsonDocument> RunQuery()
                 // get current query pipe: normal or groupby pipe
                 var pipe = queryPlan.GetPipe(transaction, snapshot, _sortDisk, _pragmas, _disk.MAX_ITEMS_COUNT);
 
-                // start cursor elapsed timer
-                _cursor.Elapsed.Start();
+                // start cursor elapsed timer which stops on dispose
+                using var _ = _cursor.Elapsed.StartDisposable();
 
                 using (var enumerator = pipe.Pipe(nodes, queryPlan).GetEnumerator())
                 {
@@ -164,16 +161,6 @@ IEnumerable<BsonDocument> RunQuery()
                         }
                     }
                 }
-
-                // stop cursor elapsed
-                _cursor.Elapsed.Stop();
-
-                transaction.OpenCursors.Remove(_cursor);
-
-                if (isNew)
-                {
-                    _monitor.ReleaseTransaction(transaction);
-                }
             };
         }
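Reviewer note: this is the core of the Issue2471 fix. Cleanup placed at the end of an iterator body only runs if the consumer drains the whole sequence; a disposal callback runs either way. A self-contained sketch of the difference (assuming the `OnDispose` semantics from `LiteDB.Utils.Extensions` shown earlier):

```csharp
using System;
using System.Collections.Generic;
using System.Linq;

static IEnumerable<int> Numbers(Action cleanup)
{
    yield return 1;
    yield return 2;
    cleanup(); // skipped entirely if the caller stops after the first element
}

// First() disposes the enumerator after one item: the trailing cleanup above
// never runs, but an OnDispose-style callback would, because its finally block
// executes when the enumerator is disposed.
var first = Numbers(() => Console.WriteLine("drained")).First(); // prints nothing
```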
diff --git a/LiteDB/Engine/Services/DataService.cs b/LiteDB/Engine/Services/DataService.cs
index 499968b4e..9ef48772c 100644
--- a/LiteDB/Engine/Services/DataService.cs
+++ b/LiteDB/Engine/Services/DataService.cs
@@ -165,7 +165,7 @@ public IEnumerable<BufferSlice> Read(PageAddress address)
 
             while (address != PageAddress.Empty)
             {
-                ENSURE(counter++ < _maxItemsCount, $"Detected loop in data Read({address})");
+                ENSURE(counter++ < _maxItemsCount, "Detected loop in data Read({0})", address);
 
                 var dataPage = _snapshot.GetPage<DataPage>(address.PageID);
diff --git a/LiteDB/Engine/Services/IndexService.cs b/LiteDB/Engine/Services/IndexService.cs
index 9dcf6b771..6b8fb230d 100644
--- a/LiteDB/Engine/Services/IndexService.cs
+++ b/LiteDB/Engine/Services/IndexService.cs
@@ -79,10 +79,10 @@ public IndexNode AddNode(CollectionIndex index, BsonValue key, PageAddress dataB
         /// Insert a new node index inside an collection index.
         /// </summary>
         private IndexNode AddNode(
-            CollectionIndex index, 
-            BsonValue key, 
-            PageAddress dataBlock, 
-            byte insertLevels, 
+            CollectionIndex index,
+            BsonValue key,
+            PageAddress dataBlock,
+            byte insertLevels,
             IndexNode last)
         {
             // get a free index page for head note
@@ -108,7 +108,7 @@ private IndexNode AddNode(
             // while: scan from left to right
             while (right.IsEmpty == false && right != index.Tail)
             {
-                ENSURE(counter++ < _maxItemsCount, $"Detected loop in AddNode({node.Position})");
+                ENSURE(counter++ < _maxItemsCount, "Detected loop in AddNode({0})", node.Position);
 
                 var rightNode = this.GetNode(right);
 
@@ -206,7 +206,7 @@ public IEnumerable<IndexNode> GetNodeList(PageAddress nodeAddress)
 
             while (node != null)
             {
-                ENSURE(counter++ < _maxItemsCount, $"Detected loop in GetNodeList({nodeAddress})");
+                ENSURE(counter++ < _maxItemsCount, "Detected loop in GetNodeList({0})", nodeAddress);
 
                 yield return node;
 
@@ -225,7 +225,7 @@ public void DeleteAll(PageAddress pkAddress)
 
             while (node != null)
             {
-                ENSURE(counter++ < _maxItemsCount, $"Detected loop in DeleteAll({pkAddress})");
+                ENSURE(counter++ < _maxItemsCount, "Detected loop in DeleteAll({0})", pkAddress);
 
                 this.DeleteSingleNode(node, indexes[node.Slot]);
 
@@ -246,7 +246,7 @@ public IndexNode DeleteList(PageAddress pkAddress, HashSet<PageAddress> toDelete)
 
             while (node != null)
             {
-                ENSURE(counter++ < _maxItemsCount, $"Detected loop in DeleteList({pkAddress})");
+                ENSURE(counter++ < _maxItemsCount, "Detected loop in DeleteList({0})", pkAddress);
 
                 if (toDelete.Contains(node.Position))
                 {
@@ -333,7 +333,7 @@ public void DropIndex(CollectionIndex index)
         }
 
         #region Find
-        
+
         /// <summary>
         /// Return all index nodes from an index
         /// </summary>
@@ -344,7 +344,7 @@ public IEnumerable<IndexNode> FindAll(CollectionIndex index, int order)
 
             while (!cur.GetNextPrev(0, order).IsEmpty)
             {
-                ENSURE(counter++ < _maxItemsCount, $"Detected loop in FindAll({index.Name})");
+                ENSURE(counter++ < _maxItemsCount, "Detected loop in FindAll({0})", index.Name);
 
                 cur = this.GetNode(cur.GetNextPrev(0, order));
 
@@ -356,7 +356,7 @@ public IEnumerable<IndexNode> FindAll(CollectionIndex index, int order)
         }
 
         /// <summary>
-        /// Find first node that index match with value . 
+        /// Find first node that the index matches with value.
         /// If index are unique, return unique value - if index are not unique, return first found (can start, middle or end)
         /// If not found but sibling = true and key are not found, returns next value index node (if order = Asc) or prev node (if order = Desc)
         /// </summary>
         public IndexNode Find(CollectionIndex index, BsonValue value, bool sibling, int order)
@@ -371,7 +371,7 @@ public IndexNode Find(CollectionIndex index, BsonValue value, bool sibling, int order)
 
                 while (right.IsEmpty == false)
                 {
-                    ENSURE(counter++ < _maxItemsCount, $"Detected loop in Find({index.Name}, {value})");
+                    ENSURE(counter++ < _maxItemsCount, "Detected loop in Find({0}, {1})", index.Name, value);
 
                     var rightNode = this.GetNode(right);
diff --git a/LiteDB/Engine/Services/SnapShot.cs b/LiteDB/Engine/Services/SnapShot.cs
index 2f52af221..f8deaeba7 100644
--- a/LiteDB/Engine/Services/SnapShot.cs
+++ b/LiteDB/Engine/Services/SnapShot.cs
@@ -33,6 +33,8 @@ internal class Snapshot : IDisposable
         // local page cache - contains only pages about this collection (but do not contains CollectionPage - use this.CollectionPage)
         private readonly Dictionary<uint, BasePage> _localPages = new Dictionary<uint, BasePage>();
 
+        private bool _disposed;
+
         // expose
         public LockMode Mode => _mode;
         public string CollectionName => _collectionName;
@@ -89,6 +91,8 @@ public Snapshot(
         /// </summary>
         public IEnumerable<BasePage> GetWritablePages(bool dirty, bool includeCollectionPage)
         {
+            ENSURE(!_disposed, "the snapshot is disposed");
+
             // if snapshot is read only, just exit
             if (_mode == LockMode.Read) yield break;
 
@@ -110,6 +114,8 @@ public IEnumerable<BasePage> GetWritablePages(bool dirty, bool includeCollectionPage)
         /// </summary>
         public void Clear()
         {
+            ENSURE(!_disposed, "the snapshot is disposed");
+
             // release pages only if snapshot are read only
             if (_mode == LockMode.Read)
             {
@@ -128,9 +134,16 @@ public void Clear()
         /// </summary>
         public void Dispose()
         {
+            if (_disposed)
+            {
+                return;
+            }
+
             // release all data/index pages
             this.Clear();
 
+            _disposed = true;
+
             // release collection page (in read mode)
             if (_mode == LockMode.Read && _collectionPage != null)
             {
@@ -160,6 +173,7 @@ public T GetPage<T>(uint pageID)
         public T GetPage<T>(uint pageID, out FileOrigin origin, out long position, out int walVersion)
             where T : BasePage
         {
+            ENSURE(!_disposed, "the snapshot is disposed");
             ENSURE(pageID <= _header.LastPageID, "request page must be less or equals lastest page in data file");
 
             // check for header page (return header single instance)
@@ -259,6 +273,8 @@ private T ReadPage<T>(uint pageID, out FileOrigin origin, out long position, out int walVersion)
         /// </summary>
         public DataPage GetFreeDataPage(int bytesLength)
        {
+            ENSURE(!_disposed, "the snapshot is disposed");
+
            var length = bytesLength + BasePage.SLOT_SIZE; // add +4 bytes for footer slot
 
            // get minimum slot to check for free page. Returns -1 if need NewPage
@@ -292,6 +308,8 @@ public DataPage GetFreeDataPage(int bytesLength)
         /// </summary>
         public IndexPage GetFreeIndexPage(int bytesLength, ref uint freeIndexPageList)
         {
+            ENSURE(!_disposed, "the snapshot is disposed");
+
             IndexPage page;
 
             // if there is not page in list pages, create new page
@@ -318,6 +336,7 @@ public IndexPage GetFreeIndexPage(int bytesLength, ref uint freeIndexPageList)
         public T NewPage<T>()
             where T : BasePage
         {
+            ENSURE(!_disposed, "the snapshot is disposed");
             ENSURE(_collectionPage == null, typeof(T) == typeof(CollectionPage), "if no collection page defined yet, must be first request");
             ENSURE(typeof(T) == typeof(CollectionPage), _collectionPage == null, "there is no new collection page if page already exists");
 
@@ -392,6 +411,8 @@ public T NewPage<T>()
         /// </summary>
         public void AddOrRemoveFreeDataList(DataPage page)
         {
+            ENSURE(!_disposed, "the snapshot is disposed");
+
             var newSlot = DataPage.FreeIndexSlot(page.FreeBytes);
             var initialSlot = page.PageListSlot;
 
@@ -423,6 +444,8 @@ public void AddOrRemoveFreeDataList(DataPage page)
         /// </summary>
         public void AddOrRemoveFreeIndexList(IndexPage page, ref uint startPageID)
         {
+            ENSURE(!_disposed, "the snapshot is disposed");
+
             var newSlot = IndexPage.FreeIndexSlot(page.FreeBytes);
             var isOnList = page.PageListSlot == 0;
             var mustKeep = newSlot == 0;
@@ -567,6 +590,8 @@ private void DeletePage<T>(T page)
         /// </summary>
         public void DropCollection(Action safePoint)
         {
+            ENSURE(!_disposed, "the snapshot is disposed");
+
             var indexer = new IndexService(this, _header.Pragmas.Collation, _disk.MAX_ITEMS_COUNT);
 
             // CollectionPage will be last deleted page (there is no NextPageID from CollectionPage)
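Reviewer note: the `_disposed` guards make use-after-dispose fail fast instead of silently touching recycled page buffers. A minimal sketch of the lifecycle contract they enforce (assumed semantics — the real ENSURE may behave differently in DEBUG builds):

```csharp
using System;

internal sealed class GuardedResource : IDisposable
{
    private bool _disposed;

    public void DoWork()
    {
        // mirrors ENSURE(!_disposed, "the snapshot is disposed")
        if (_disposed) throw new ObjectDisposedException(nameof(GuardedResource));
        // ... safe to touch pages here ...
    }

    public void Dispose()
    {
        if (_disposed) return; // idempotent, like Snapshot.Dispose after this PR
        _disposed = true;
    }
}
```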
diff --git a/LiteDB/Engine/Services/TransactionService.cs b/LiteDB/Engine/Services/TransactionService.cs
index 8163e3643..31c2a6e42 100644
--- a/LiteDB/Engine/Services/TransactionService.cs
+++ b/LiteDB/Engine/Services/TransactionService.cs
@@ -113,7 +113,7 @@ public Snapshot CreateSnapshot(LockMode mode, string collection, bool addIfNotExists)
                 _snapshots[collection] = snapshot = create();
             }
 
-            // update transaction mode to write in first write snaphost request 
+            // update transaction mode to write in first write snapshot request
             if (mode == LockMode.Write) _mode = LockMode.Write;
 
             return snapshot;
@@ -250,7 +250,7 @@ IEnumerable<PageBuffer> source()
         /// </summary>
         public void Commit()
         {
-            ENSURE(_state == TransactionState.Active, $"transaction must be active to commit (current state: {_state})");
+            ENSURE(_state == TransactionState.Active, "transaction must be active to commit (current state: {0})", _state);
 
             LOG($"commit transaction ({_transPages.TransactionSize} pages)", "TRANSACTION");
 
@@ -281,7 +281,7 @@ public void Commit()
         /// </summary>
         public void Rollback()
         {
-            ENSURE(_state == TransactionState.Active, $"transaction must be active to rollback (current state: {_state})");
+            ENSURE(_state == TransactionState.Active, "transaction must be active to rollback (current state: {0})", _state);
 
             LOG($"rollback transaction ({_transPages.TransactionSize} pages with {_transPages.NewPages.Count} returns)", "TRANSACTION");
diff --git a/LiteDB/LiteDB.csproj b/LiteDB/LiteDB.csproj
index 1a6ca8561..2cd92c596 100644
--- a/LiteDB/LiteDB.csproj
+++ b/LiteDB/LiteDB.csproj
@@ -2,10 +2,10 @@
 
   <PropertyGroup>
     <TargetFrameworks>net4.5;netstandard1.3;netstandard2.0</TargetFrameworks>
-    <Version>5.0.19</Version>
-    <AssemblyVersion>5.0.19</AssemblyVersion>
-    <FileVersion>5.0.19</FileVersion>
-    <PackageVersion>5.0.19</PackageVersion>
+    <Version>5.0.20</Version>
+    <AssemblyVersion>5.0.20</AssemblyVersion>
+    <FileVersion>5.0.20</FileVersion>
+    <PackageVersion>5.0.20</PackageVersion>
     <Authors>Maurício David</Authors>
     <AssemblyName>LiteDB</AssemblyName>
     <Description>LiteDB - A lightweight embedded .NET NoSQL document store in a single datafile</Description>
@@ -28,7 +28,7 @@
     <SignAssembly>true</SignAssembly>
     <AssemblyOriginatorKeyFile>LiteDB.snk</AssemblyOriginatorKeyFile>
     <PublicSign>true</PublicSign>
-    <LangVersion>latest</LangVersion> 
+    <LangVersion>latest</LangVersion>
   </PropertyGroup>