Revert with foreign_key_enforcement_enabled #18625

Closed · wants to merge 5 commits
11 changes: 5 additions & 6 deletions chia/_tests/core/data_layer/test_data_store.py
@@ -591,11 +591,11 @@ async def test_ancestor_table_unique_inserts(data_store: DataStore, store_id: by
     await add_0123_example(data_store=data_store, store_id=store_id)
     hash_1 = bytes32.from_hexstr("0763561814685fbf92f6ca71fbb1cb11821951450d996375c239979bd63e9535")
     hash_2 = bytes32.from_hexstr("924be8ff27e84cba17f5bc918097f8410fab9824713a4668a21c8e060a8cab40")
-    await data_store._insert_ancestor_table(hash_1, hash_2, store_id, 2)
-    await data_store._insert_ancestor_table(hash_1, hash_2, store_id, 2)
+    await data_store._insert_ancestor_table([(hash_1, hash_2)], store_id, 2)
+    await data_store._insert_ancestor_table([(hash_1, hash_2)], store_id, 2)
     with pytest.raises(Exception, match="^Requested insertion of ancestor"):
-        await data_store._insert_ancestor_table(hash_1, hash_1, store_id, 2)
-        await data_store._insert_ancestor_table(hash_1, hash_2, store_id, 2)
+        await data_store._insert_ancestor_table([(hash_1, hash_1)], store_id, 2)
+        await data_store._insert_ancestor_table([(hash_1, hash_2)], store_id, 2)


 @pytest.mark.anyio
@@ -685,8 +685,7 @@ async def test_inserting_invalid_length_ancestor_hash_raises_original_exception(
     with pytest.raises(aiosqlite.IntegrityError):
         # casting since we are testing an invalid case
         await data_store._insert_ancestor_table(
-            left_hash=bytes32(b"\x01" * 32),
-            right_hash=bytes32(b"\x02" * 32),
+            hashes=[(bytes32(b"\x01" * 32), bytes32(b"\x02" * 32))],
             store_id=store_id,
             generation=0,
         )
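
Note: as the test changes above show, `_insert_ancestor_table` now takes a list of `(left_hash, right_hash)` pairs rather than a single pair. A minimal sketch of the new calling convention (the hash values are made up for illustration):

    # Hypothetical values; any two 32-byte hashes illustrate the shape of the call.
    pairs: List[Tuple[bytes32, bytes32]] = [
        (bytes32(b"\x01" * 32), bytes32(b"\x02" * 32)),
        (bytes32(b"\x03" * 32), bytes32(b"\x04" * 32)),
    ]
    # One call now covers every pair at the given generation.
    await data_store._insert_ancestor_table(pairs, store_id, 2)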
164 changes: 77 additions & 87 deletions chia/data_layer/data_store.py
@@ -350,58 +350,58 @@ async def _insert_internal_node(self, left_hash: bytes32, right_hash: bytes32) -

     async def _insert_ancestor_table(
         self,
-        left_hash: bytes32,
-        right_hash: bytes32,
+        hashes: List[Tuple[bytes32, bytes32]],
         store_id: bytes32,
         generation: int,
+        foreign_key_enforcement_enabled: bool = True,
     ) -> None:
-        node_hash = internal_hash(left_hash=left_hash, right_hash=right_hash)
-
-        async with self.db_wrapper.writer() as writer:
-            for hash in (left_hash, right_hash):
-                values = {
-                    "hash": hash,
-                    "ancestor": node_hash,
-                    "tree_id": store_id,
-                    "generation": generation,
-                }
-                try:
-                    await writer.execute(
-                        """
-                        INSERT INTO ancestors(hash, ancestor, tree_id, generation)
-                        VALUES (:hash, :ancestor, :tree_id, :generation)
-                        """,
-                        values,
-                    )
-                except aiosqlite.IntegrityError as e:
-                    if not e.args[0].startswith("UNIQUE constraint"):
-                        # UNIQUE constraint failed: ancestors.hash, ancestors.tree_id, ancestors.generation
-                        raise
-
-                    async with writer.execute(
-                        """
-                        SELECT *
-                        FROM ancestors
-                        WHERE hash == :hash AND generation == :generation AND tree_id == :tree_id
-                        LIMIT 1
-                        """,
-                        {"hash": hash, "generation": generation, "tree_id": store_id},
-                    ) as cursor:
-                        result = await cursor.fetchone()
-
-                    if result is None:
-                        # some ideas for causes:
-                        # an sqlite bug
-                        # bad queries in this function
-                        # unexpected db constraints
-                        raise Exception("Unable to find conflicting row") from e  # pragma: no cover
-
-                    result_dict = dict(result)
-                    if result_dict != values:
-                        raise Exception(
-                            "Requested insertion of ancestor, where ancestor differ, but other values are identical: "
-                            f"{hash} {generation} {store_id}"
-                        ) from None
+        async with self.db_wrapper.writer(foreign_key_enforcement_enabled=foreign_key_enforcement_enabled) as writer:
+            for left_hash, right_hash in hashes:
+                node_hash = internal_hash(left_hash=left_hash, right_hash=right_hash)
+                for hash in (left_hash, right_hash):
+                    values = {
+                        "hash": hash,
+                        "ancestor": node_hash,
+                        "tree_id": store_id,
+                        "generation": generation,
+                    }
+                    try:
+                        await writer.execute(
+                            """
+                            INSERT INTO ancestors(hash, ancestor, tree_id, generation)
+                            VALUES (:hash, :ancestor, :tree_id, :generation)
+                            """,
+                            values,
+                        )
+                    except aiosqlite.IntegrityError as e:
+                        if not e.args[0].startswith("UNIQUE constraint"):
+                            # UNIQUE constraint failed: ancestors.hash, ancestors.tree_id, ancestors.generation
+                            raise
+
+                        async with writer.execute(
+                            """
+                            SELECT *
+                            FROM ancestors
+                            WHERE hash == :hash AND generation == :generation AND tree_id == :tree_id
+                            LIMIT 1
+                            """,
+                            {"hash": hash, "generation": generation, "tree_id": store_id},
+                        ) as cursor:
+                            result = await cursor.fetchone()
+
+                        if result is None:
+                            # some ideas for causes:
+                            # an sqlite bug
+                            # bad queries in this function
+                            # unexpected db constraints
+                            raise Exception("Unable to find conflicting row") from e  # pragma: no cover
+
+                        result_dict = dict(result)
+                        if result_dict != values:
+                            raise Exception(
+                                "Requested insertion of ancestor, where ancestor differ, but other values are"
+                                f" identical: {hash} {generation} {store_id}"
+                            ) from None

     async def _insert_terminal_node(self, key: bytes, value: bytes) -> bytes32:
         # forcing type hint here for:
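
The rewritten method keeps the original recovery pattern for duplicate inserts: attempt the INSERT, and on a UNIQUE-constraint failure re-read the conflicting row, tolerating it only if it matches the values we tried to write. A self-contained sketch of that idempotent-insert idea against plain aiosqlite (the `kv` table and its columns are illustrative, not the real schema):

    import asyncio

    import aiosqlite


    async def insert_idempotent(db: aiosqlite.Connection, key: bytes, value: bytes) -> None:
        try:
            await db.execute("INSERT INTO kv(key, value) VALUES (?, ?)", (key, value))
        except aiosqlite.IntegrityError as e:
            if not e.args[0].startswith("UNIQUE constraint"):
                raise
            # A row with this key already exists; accept it only if it is byte-identical.
            async with db.execute("SELECT value FROM kv WHERE key = ?", (key,)) as cursor:
                row = await cursor.fetchone()
            if row is None or row[0] != value:
                raise Exception("conflicting row already present") from e


    async def main() -> None:
        async with aiosqlite.connect(":memory:") as db:
            await db.execute("CREATE TABLE kv(key BLOB PRIMARY KEY, value BLOB)")
            await insert_idempotent(db, b"k", b"v")
            await insert_idempotent(db, b"k", b"v")  # exact duplicate: silently accepted


    asyncio.run(main())

The batching itself is the point of the change: every `(left, right)` pair now shares one `db_wrapper.writer()` context, so the whole ancestor update can commit as one unit instead of opening a writer context per pair.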
@@ -1141,11 +1141,11 @@ async def update_ancestor_hashes_on_insert(
         root: Root,
     ) -> Root:
         # update ancestors after inserting root, to keep table constraints.
-        insert_ancestors_cache: List[Tuple[bytes32, bytes32, bytes32]] = []
+        insert_ancestors_cache: List[Tuple[bytes32, bytes32]] = []
         new_generation = root.generation + 1
         # create first new internal node
         new_hash = await self._insert_internal_node(left_hash=left, right_hash=right)
-        insert_ancestors_cache.append((left, right, store_id))
+        insert_ancestors_cache.append((left, right))

         # create updated replacements for the rest of the internal nodes
         for ancestor in ancestors:
@@ -1162,7 +1162,7 @@
             traversal_node_hash = ancestor.hash

             new_hash = await self._insert_internal_node(left_hash=left, right_hash=right)
-            insert_ancestors_cache.append((left, right, store_id))
+            insert_ancestors_cache.append((left, right))

         new_root = await self._insert_root(
             store_id=store_id,
@@ -1172,8 +1172,7 @@
         )

         if status == Status.COMMITTED:
-            for left_hash, right_hash, store_id in insert_ancestors_cache:
-                await self._insert_ancestor_table(left_hash, right_hash, store_id, new_generation)
+            await self._insert_ancestor_table(insert_ancestors_cache, store_id, new_generation)

         return new_root

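The same caller-side pattern appears here and in `delete` below: the cache now holds bare `(left_hash, right_hash)` pairs, since `store_id` is constant across the batch, and the per-pair loop at commit time collapses into a single batched call. Schematically (a sketch of the shape, not the full method):

    insert_ancestors_cache: List[Tuple[bytes32, bytes32]] = []
    for left, right in pairs_to_link:  # hypothetical iterable of sibling hashes
        await self._insert_internal_node(left_hash=left, right_hash=right)
        insert_ancestors_cache.append((left, right))
    if status == Status.COMMITTED:
        await self._insert_ancestor_table(insert_ancestors_cache, store_id, new_generation)
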
@@ -1306,7 +1305,7 @@ async def delete(
             else:
                 new_generation = root.generation + 1
                 # update ancestors after inserting root, to keep table constraints.
-                insert_ancestors_cache: List[Tuple[bytes32, bytes32, bytes32]] = []
+                insert_ancestors_cache: List[Tuple[bytes32, bytes32]] = []
                 # more parents to handle so let's traverse them
                 for ancestor in ancestors[1:]:
                     if ancestor.left_hash == old_child_hash:
@@ -1319,7 +1318,7 @@
                     raise Exception("Internal error.")

                 new_child_hash = await self._insert_internal_node(left_hash=left_hash, right_hash=right_hash)
-                insert_ancestors_cache.append((left_hash, right_hash, store_id))
+                insert_ancestors_cache.append((left_hash, right_hash))
                 old_child_hash = ancestor.hash

             new_root = await self._insert_root(
@@ -1329,8 +1328,7 @@
                 generation=new_generation,
             )
             if status == Status.COMMITTED:
-                for left_hash, right_hash, store_id in insert_ancestors_cache:
-                    await self._insert_ancestor_table(left_hash, right_hash, store_id, new_generation)
+                await self._insert_ancestor_table(insert_ancestors_cache, store_id, new_generation)

             return new_root

@@ -1737,40 +1735,32 @@ async def _get_one_ancestor_multiple_hashes(
         return [InternalNode.from_row(row=row) for row in rows]

     async def build_ancestor_table_for_latest_root(self, store_id: bytes32) -> None:
-        async with self.db_wrapper.writer() as writer:
+        async with self.db_wrapper.writer():
             root = await self.get_tree_root(store_id=store_id)
             if root.node_hash is None:
                 return
             previous_root = await self.get_tree_root(
                 store_id=store_id,
                 generation=max(root.generation - 1, 0),
             )

-            await writer.execute(
-                """
-                WITH RECURSIVE tree_from_root_hash AS (
-                    SELECT
-                        node.hash,
-                        node.left,
-                        node.right,
-                        NULL AS ancestor
-                    FROM node
-                    WHERE node.hash = :root_hash
-                    UNION ALL
-                    SELECT
-                        node.hash,
-                        node.left,
-                        node.right,
-                        tree_from_root_hash.hash AS ancestor
-                    FROM node
-                    JOIN tree_from_root_hash ON node.hash = tree_from_root_hash.left
-                    OR node.hash = tree_from_root_hash.right
-                )
-                INSERT OR REPLACE INTO ancestors (hash, ancestor, tree_id, generation)
-                SELECT
-                    tree_from_root_hash.hash,
-                    tree_from_root_hash.ancestor,
-                    :tree_id,
-                    :generation
-                FROM tree_from_root_hash
-                """,
-                {"root_hash": root.node_hash, "tree_id": store_id, "generation": root.generation},
-            )
+            if previous_root.node_hash is not None:
+                previous_internal_nodes: List[InternalNode] = await self.get_internal_nodes(
+                    store_id=store_id,
+                    root_hash=previous_root.node_hash,
+                )
+                known_hashes: Set[bytes32] = {node.hash for node in previous_internal_nodes}
+            else:
+                known_hashes = set()
+            internal_nodes: List[InternalNode] = await self.get_internal_nodes(
+                store_id=store_id,
+                root_hash=root.node_hash,
+            )
+            insert_ancestors_cache: List[Tuple[bytes32, bytes32]] = [
+                (node.left_hash, node.right_hash) for node in internal_nodes if node.hash not in known_hashes
+            ]
+            await self._insert_ancestor_table(
+                insert_ancestors_cache, store_id, root.generation, foreign_key_enforcement_enabled=False
+            )

     async def insert_root_with_ancestor_table(
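
Two things stand out in the `build_ancestor_table_for_latest_root` rewrite above. First, the recursive SQL CTE is replaced by a Python-side set difference: fetch the internal nodes reachable from the previous root, fetch those reachable from the current root, and insert ancestor rows only for the nodes that are new. A toy sketch of that idea on plain data (types simplified; `bytes` stands in for bytes32):

    from typing import List, Set, Tuple

    # Each node is (hash, left_hash, right_hash).
    Node = Tuple[bytes, bytes, bytes]


    def pairs_to_insert(prev_nodes: List[Node], curr_nodes: List[Node]) -> List[Tuple[bytes, bytes]]:
        known: Set[bytes] = {h for h, _, _ in prev_nodes}
        # Only nodes introduced by the new generation need ancestor rows.
        return [(left, right) for h, left, right in curr_nodes if h not in known]

Second, the bulk insert passes `foreign_key_enforcement_enabled=False` to the writer, via the new parameter threaded through `_insert_ancestor_table`. In plain SQLite this would correspond to `PRAGMA foreign_keys = OFF`, which only takes effect when no transaction is open; presumably the db_wrapper handles that ordering, which would explain why the flag is accepted by `db_wrapper.writer()` rather than being toggled mid-transaction.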