diff --git a/.idea/dataSources.xml b/.idea/dataSources.xml
index 4731705..8ef2342 100644
--- a/.idea/dataSources.xml
+++ b/.idea/dataSources.xml
@@ -11,5 +11,20 @@
$ProjectFileDir$
+
+ sqlite.xerial
+ true
+ org.sqlite.JDBC
+ jdbc:sqlite:$PROJECT_DIR$/tests/build/ddd.sqlite
+
+
+
+ $ProjectFileDir$
+
+
+ file://$APPLICATION_CONFIG_DIR$/jdbc-drivers/Xerial SQLiteJDBC/3.43.0/org/xerial/sqlite-jdbc/3.43.0.0/sqlite-jdbc-3.43.0.0.jar
+
+
+
\ No newline at end of file
diff --git a/src/kaso_mashin/common/base_types.py b/src/kaso_mashin/common/base_types.py
index 7d20c7b..920b237 100644
--- a/src/kaso_mashin/common/base_types.py
+++ b/src/kaso_mashin/common/base_types.py
@@ -34,11 +34,15 @@ class EntityInvariantException(KasoMashinException):
pass
+class EntityMaterialisationException(KasoMashinException):
+ pass
+
+
class ORMBase(DeclarativeBase):
"""
ORM base class for persisted entities
"""
- id: Mapped[str] = mapped_column(UUID(as_uuid=True).with_variant(String(32), 'sqlite'), primary_key=True)
+ uid: Mapped[str] = mapped_column(UUID(as_uuid=True).with_variant(String(32), 'sqlite'), primary_key=True)
@abc.abstractmethod
def merge(self, other: typing.Self):
@@ -49,93 +53,110 @@ def merge(self, other: typing.Self):
T_Model = typing.TypeVar('T_Model', bound=ORMBase)
-@dataclasses.dataclass
-class Entity(typing.Generic[T_Model]):
+class ValueObject(abc.ABC):
"""
- A domain entity
+ A domain value object
"""
- # TODO: Owner should be either T_AggregateRoot or T_AsyncAggregateRoot
- id: UniqueIdentifier = dataclasses.field(default_factory=lambda: uuid.uuid4())
- owner: typing.Optional[typing.Any] = dataclasses.field(default=None)
+ pass
-T_Entity = typing.TypeVar("T_Entity", bound=Entity)
+T_ValueObject = typing.TypeVar('T_ValueObject', bound=ValueObject)
-class ValueObject(abc.ABC):
+class Entity(object):
"""
- A domain value object
+ A domain entity
"""
- pass
+ def __init__(self, owner: 'AggregateRoot', uid: UniqueIdentifier | None = None) -> None:
+ self._uid = uid or uuid.uuid4()
+ self._owner = owner
-T_ValueObject = typing.TypeVar('T_ValueObject', bound=ValueObject)
+ @property
+ def uid(self) -> UniqueIdentifier:
+ return self._uid
+
+ def __eq__(self, other: object) -> bool:
+ return all([
+ isinstance(other, self.__class__),
+ self._uid == other._uid, # type: ignore[attr-defined]
+ self._owner == other._owner # type: ignore[attr-defined]
+ ])
-class AsyncAggregateRoot(typing.Generic[T_Entity, T_Model]):
+T_Entity = typing.TypeVar("T_Entity", bound=Entity)
+
+
+class AggregateRoot(typing.Generic[T_Entity, T_Model]):
def __init__(self, model: typing.Type[T_Model], session_maker: async_sessionmaker[AsyncSession]) -> None:
self._repository = AsyncRepository[T_Model](model=model, session_maker=session_maker)
self._identity_map: typing.Dict[UniqueIdentifier, T_Entity] = {}
- async def get(self, uid: UniqueIdentifier) -> T_Entity:
- model = await self._repository.get_by_id(str(uid))
- entity = await self.from_model(model)
- if not await self.validate(entity):
+ async def get(self, uid: UniqueIdentifier, force_reload: bool = False) -> T_Entity:
+ if not force_reload and uid in self._identity_map:
+ return self._identity_map[uid]
+ model = await self._repository.get_by_uid(str(uid))
+ entity = await self._from_model(model)
+ if not await self._validate(entity):
raise EntityInvariantException(code=500, msg='Restored entity fails validation')
- entity.owner = self
- self._identity_map[entity.id] = entity
- return self._identity_map[entity.id]
+ self._identity_map[entity.uid] = entity
+ return self._identity_map[entity.uid]
- async def list(self) -> typing.List[T_Entity]:
+ async def list(self, force_reload: bool = False) -> typing.List[T_Entity]:
+ if not force_reload:
+ return list(self._identity_map.values())
models = await self._repository.list()
- entities = [await self.from_model(model) for model in models]
+ entities = [await self._from_model(model) for model in models]
for entity in entities:
- if not await self.validate(entity):
- raise EntityInvariantException(code=500, msg='Entity fails validation')
- entity.owner = self
- self._identity_map.update({e.id: e for e in entities})
+ if not await self._validate(entity):
+ raise EntityInvariantException(code=400, msg='Entity fails validation')
+ self._identity_map.update({e.uid: e for e in entities})
return list(self._identity_map.values())
async def create(self, entity: T_Entity) -> T_Entity:
- if not self.validate(entity):
- raise EntityInvariantException(code=500, msg='Entity fails validation')
- model = await self._repository.create(await self.to_model(entity))
- self._identity_map[entity.id] = await self.from_model(model)
- self._identity_map[entity.id].owner = self
- return self._identity_map[entity.id]
-
- # TODO: Modify may make sense to be moved into the entity
- async def modify(self, entity: T_Entity) -> T_Entity:
- if entity.id not in self._identity_map:
+ if entity.uid in self._identity_map:
+ raise EntityInvariantException(code=400, msg='Entity already exists')
+ if not await self._validate(entity):
+ raise EntityInvariantException(code=400, msg='Entity fails validation')
+ model = await self._repository.create(await self._to_model(entity))
+ self._identity_map[entity.uid] = await self._from_model(model)
+ return self._identity_map[entity.uid]
+
+ # Only methods in the entity should call this
+ async def modify(self, entity: T_Entity):
+ if entity.uid not in self._identity_map:
raise EntityInvariantException(code=400, msg='Entity was not created by its aggregate root')
- if not self.validate(entity):
+ if not await self._validate(entity):
raise EntityInvariantException(code=400, msg='Entity fails validation')
- model = await self._repository.modify(await self.to_model(entity))
- self._identity_map[entity.id] = await self.from_model(model)
- self._identity_map[entity.id].owner = self
- return self._identity_map[entity.id]
+ await self._repository.modify(await self._to_model(entity))
- # TODO: Modify may make sense to be moved into the entity
+ # An entity should only be removed using this method
async def remove(self, uid: UniqueIdentifier):
if uid not in self._identity_map:
raise EntityInvariantException(code=400, msg='Entity was not created by its aggregate root')
await self._repository.remove(str(uid))
del self._identity_map[uid]
- async def validate(self, entity: T_Entity) -> bool:
+ async def _validate(self, entity: T_Entity) -> bool:
return True
@abc.abstractmethod
- async def to_model(self, entity: T_Entity) -> T_Model:
+ async def _to_model(self, entity: T_Entity) -> T_Model:
pass
@abc.abstractmethod
- async def from_model(self, model: T_Model) -> T_Entity:
+ async def _from_model(self, model: T_Model) -> T_Entity:
pass
-T_AsyncAggregateRoot = typing.TypeVar('T_AsyncAggregateRoot', bound=AsyncAggregateRoot)
+T_AggregateRoot = typing.TypeVar('T_AggregateRoot', bound=AggregateRoot)
+
+
+class DiskFormat(enum.StrEnum):
+ Raw = 'raw'
+ QCoW2 = 'qcow2'
+ VDI = 'vdi'
class BinaryScale(enum.StrEnum):
@@ -168,7 +189,7 @@ def __init__(self,
self._session_maker = session_maker
self._identity_map: typing.Dict[str, T_Model] = {}
- async def get_by_id(self, uid: str) -> T_Model:
+ async def get_by_uid(self, uid: str) -> T_Model:
if uid in self._identity_map:
return self._identity_map[uid]
async with self._session_maker() as session:
@@ -180,33 +201,28 @@ async def get_by_id(self, uid: str) -> T_Model:
async def list(self) -> typing.List[T_Model]:
async with self._session_maker() as session:
- # Alternative implementation
- # with await session.stream_scalars(select(self._model_clazz)) as result:
- # models = await result.all()
- # for model in models:
- # self._identity_map[UniqueIdentifier(model.id)] = self._aggregate_root_clazz.deserialise(model)
models = await session.scalars(select(self._model_clazz))
for model in models:
- self._identity_map[model.id] = model
+ self._identity_map[model.uid] = model
# Note: list() is semantically better but mypy complains about an incompatible arg
return [i for i in self._identity_map.values()]
async def create(self, model: T_Model) -> T_Model:
async with self._session_maker() as session:
- async with session.begin():
- session.add(model)
- self._identity_map[model.id] = model
+ session.add(model)
+ await session.commit()
+ self._identity_map[model.uid] = model
return model
async def modify(self, update: T_Model) -> T_Model:
async with self._session_maker() as session:
- current = await session.get(self._model_clazz, str(update.id))
+ current = await session.get(self._model_clazz, str(update.uid))
if current is None:
raise EntityNotFoundException(code=400, msg='No such entity')
current.merge(update)
session.add(current)
await session.commit()
- self._identity_map[update.id] = update
+ self._identity_map[update.uid] = update
return update
async def remove(self, uid: str) -> None:
@@ -216,3 +232,5 @@ async def remove(self, uid: str) -> None:
await session.delete(model)
await session.commit()
del self._identity_map[uid]
+
+
diff --git a/src/kaso_mashin/common/generics/disks.py b/src/kaso_mashin/common/generics/disks.py
index 328e207..6442589 100644
--- a/src/kaso_mashin/common/generics/disks.py
+++ b/src/kaso_mashin/common/generics/disks.py
@@ -1,14 +1,16 @@
-import dataclasses
-import os
import pathlib
import subprocess
+import uuid
from sqlalchemy import String, Integer, Enum
from sqlalchemy.orm import Mapped, mapped_column
-from kaso_mashin.common.base_types import Entity, BinarySizedValue, BinaryScale, KasoMashinException, ORMBase, \
- T_Entity, UniqueIdentifier, AsyncAggregateRoot
-from kaso_mashin.common.generics.images import ImageEntity
+from kaso_mashin.common.base_types import (
+ KasoMashinException,
+ ORMBase,
+ Entity, AggregateRoot,
+ BinarySizedValue,
+ UniqueIdentifier, BinaryScale, DiskFormat)
class DiskException(KasoMashinException):
@@ -24,90 +26,116 @@ class DiskModel(ORMBase):
path: Mapped[str] = mapped_column(String())
size: Mapped[int] = mapped_column(Integer, default=0)
size_scale: Mapped[str] = mapped_column(Enum(BinaryScale), default=BinaryScale.G)
+ format: Mapped[str] = mapped_column(Enum(DiskFormat), default=DiskFormat.Raw)
def merge(self, other: 'DiskModel'):
self.name = other.name
self.path = other.path
self.size = other.size
self.size_scale = other.size_scale
+ self.format = other.format
-@dataclasses.dataclass
-class DiskEntity(Entity[DiskModel]):
+class DiskEntity(Entity):
"""
Domain model entity for a disk
"""
- name: str = dataclasses.field(default=None)
- path: pathlib.Path = dataclasses.field(default=None)
- size: BinarySizedValue = dataclasses.field(default_factory=lambda: BinarySizedValue(5, BinaryScale.G))
- # TODO: This should create the disk and persist it in one 'transaction'
- @staticmethod
- def create(name: str, path: pathlib.Path, size: BinarySizedValue) -> 'DiskEntity':
- if path.exists():
- raise DiskException(code=400, msg=f'Disk at {path} already exists')
- path.parent.mkdir(parents=True, exist_ok=True)
- disk = DiskEntity(name=name, path=path, size=size)
- try:
- subprocess.run(['/opt/homebrew/bin/qemu-img',
- 'create',
- '-f', 'raw',
- disk.path,
- str(size)],
- check=True)
- except subprocess.CalledProcessError as e:
- raise DiskException(code=500, msg=f'Failed to create disk: {e.output}') from e
- return disk
+ def __init__(self,
+ owner: 'AggregateRoot',
+ name: str,
+ path: pathlib.Path,
+ size: BinarySizedValue = BinarySizedValue(2, BinaryScale.G),
+ disk_format: DiskFormat = DiskFormat.Raw,
+ uid: UniqueIdentifier | None = None) -> None:
+ super().__init__(owner=owner, uid=uid or uuid.uuid4())
+ self._name = name
+ self._path = path
+ self._size = size
+ self._disk_format = disk_format
+
+ @property
+ def name(self) -> str:
+ return self._name
+
+ @property
+ def path(self) -> pathlib.Path:
+ return self._path
+
+ @property
+ def size(self) -> BinarySizedValue:
+ return self._size
+
+ @property
+ def disk_format(self) -> DiskFormat:
+ return self._disk_format
+
+ def __eq__(self, other: 'DiskEntity') -> bool: # type: ignore[override]
+ return all([
+ super().__eq__(other),
+ self._name == other.name,
+ self._path == other.path,
+ self._size == other.size,
+ self._disk_format == other.disk_format])
@staticmethod
- def create_from_image(name: str, path: pathlib.Path, size: BinarySizedValue, image: ImageEntity):
+ async def create(owner: 'AggregateRoot',
+ name: str,
+ path: pathlib.Path,
+ size: BinarySizedValue = BinarySizedValue(2, BinaryScale.G),
+ disk_format: DiskFormat = DiskFormat.Raw) -> 'DiskEntity':
if path.exists():
raise DiskException(code=400, msg=f'Disk at {path} already exists')
path.parent.mkdir(parents=True, exist_ok=True)
- disk = DiskEntity(name=name, path=path, size=size)
try:
+ disk = DiskEntity(owner=owner,
+ name=name,
+ path=path,
+ size=size,
+ disk_format=disk_format)
subprocess.run(['/opt/homebrew/bin/qemu-img',
'create',
- '-f', 'qcow2',
- '-F', 'qcow2',
- '-b', image.path,
- '-o', 'compat=v3',
- disk.path,
+ '-f', str(disk_format),
+ path,
str(size)],
check=True)
+ return await owner.create(disk)
except subprocess.CalledProcessError as e:
- raise DiskException(code=500, msg=f'Failed to create disk from image: {e.output}') from e
- return disk
+ path.unlink(missing_ok=True)
+ raise DiskException(code=500, msg=f'Failed to create disk: {e.output}') from e
- def resize(self, new_size: BinarySizedValue):
+ async def resize(self, value: BinarySizedValue):
try:
subprocess.run(['/opt/homebrew/bin/qemu-img',
'resize',
+ '-f', str(self.disk_format),
self.path,
- str(new_size)],
+ str(value)],
check=True)
- self.size = new_size
+ self._size = value
+ await self._owner.modify(self)
except subprocess.CalledProcessError as e:
raise DiskException(code=500, msg=f'Failed to resize disk: {e.output}') from e
- def remove(self):
- if not self.path.exists():
- return
- os.unlink(self.path)
+ async def remove(self):
+ self.path.unlink(missing_ok=True)
+ await self._owner.remove(self.uid)
-class AsyncDiskAggregateRoot(AsyncAggregateRoot[DiskEntity, DiskModel]):
+class DiskAggregateRoot(AggregateRoot[DiskEntity, DiskModel]):
- async def to_model(self, entity: DiskEntity) -> DiskModel:
- return DiskModel(id=str(entity.id),
+ async def _to_model(self, entity: DiskEntity) -> DiskModel:
+ return DiskModel(uid=str(entity.uid),
name=entity.name,
path=str(entity.path),
size=entity.size.value,
- size_scale=entity.size.scale)
+ size_scale=entity.size.scale,
+ format=str(entity.disk_format))
- async def from_model(self, model: DiskModel) -> DiskEntity:
+ async def _from_model(self, model: DiskModel) -> DiskEntity:
return DiskEntity(owner=self,
- id=UniqueIdentifier(model.id),
+ uid=UniqueIdentifier(model.uid),
name=model.name,
path=pathlib.Path(model.path),
- size=BinarySizedValue(model.size, BinaryScale(model.size_scale)))
+ size=BinarySizedValue(model.size, BinaryScale(model.size_scale)),
+ disk_format=DiskFormat(model.format))
diff --git a/src/kaso_mashin/common/generics/images.py b/src/kaso_mashin/common/generics/images.py
index 620f06f..2a49e61 100644
--- a/src/kaso_mashin/common/generics/images.py
+++ b/src/kaso_mashin/common/generics/images.py
@@ -1,17 +1,40 @@
import dataclasses
+import subprocess
import pathlib
+import typing
+import uuid
-from sqlalchemy import String, Integer, Enum
-from sqlalchemy.orm import Mapped, mapped_column
+from sqlalchemy import String, Integer, Enum, ForeignKey
+from sqlalchemy.orm import Mapped, mapped_column, relationship
-from kaso_mashin.common.base_types import Entity, BinarySizedValue, BinaryScale, KasoMashinException, ORMBase, \
- AsyncAggregateRoot, UniqueIdentifier
+from kaso_mashin.common.base_types import (
+ KasoMashinException,
+ ORMBase,
+ Entity, AggregateRoot, ValueObject,
+ BinarySizedValue,
+ UniqueIdentifier, BinaryScale, DiskFormat)
class ImageException(KasoMashinException):
pass
+class OSDiskModel(ORMBase):
+ __tablename__ = 'os_disks'
+ path: Mapped[str] = mapped_column(String())
+ image_id: Mapped[str] = mapped_column(ForeignKey("images.uid"))
+
+ def merge(self, other: 'OSDiskModel'):
+ self.path = other.path
+ self.image_id = other.image_id
+
+
+@dataclasses.dataclass(frozen=True)
+class OSDiskValueObject(ValueObject):
+ path: pathlib.Path
+ image_id: UniqueIdentifier
+
+
class ImageModel(ORMBase):
"""
Representation of an image entity in the database
@@ -24,6 +47,7 @@ class ImageModel(ORMBase):
min_ram_scale: Mapped[str] = mapped_column(Enum(BinaryScale), default=BinaryScale.G)
min_disk: Mapped[int] = mapped_column(Integer, default=0)
min_disk_scale: Mapped[str] = mapped_column(Enum(BinaryScale), default=BinaryScale.G)
+ os_disks: Mapped[typing.List[OSDiskModel]] = relationship()
def merge(self, other: 'ImageModel'):
self.name = other.name
@@ -35,35 +59,142 @@ def merge(self, other: 'ImageModel'):
self.min_disk_scale = other.min_disk_scale
-@dataclasses.dataclass
-class ImageEntity(Entity[ImageModel]):
+class ImageEntity(Entity):
"""
Domain model for an image
"""
- name: str = dataclasses.field(default=None)
- path: pathlib.Path = dataclasses.field(default=None)
- min_vcpu: int = dataclasses.field(default=0)
- min_ram: BinarySizedValue = dataclasses.field(default_factory=lambda: BinarySizedValue(0, BinaryScale.G))
- min_disk: BinarySizedValue = dataclasses.field(default_factory=lambda: BinarySizedValue(0, BinaryScale.G))
+
+ def __init__(self,
+ owner: 'AggregateRoot',
+ name: str,
+ path: pathlib.Path,
+ min_vcpu: int = 0,
+ min_ram: BinarySizedValue = BinarySizedValue(0, BinaryScale.G),
+ min_disk: BinarySizedValue = BinarySizedValue(0, BinaryScale.G),
+ os_disks: typing.List[OSDiskValueObject] | None = None,
+ uid: UniqueIdentifier | None = None) -> None:
+ super().__init__(owner, uid or uuid.uuid4())
+ self._name = name
+ self._path = path
+ self._min_vcpu = min_vcpu
+ self._min_ram = min_ram
+ self._min_disk = min_disk
+ self._os_disks = os_disks or []
+ self._downloaded = 0
+
+ @property
+ def name(self) -> str:
+ return self._name
+
+ @property
+ def path(self) -> pathlib.Path:
+ return self._path
+
+ @property
+ def min_vcpu(self) -> int:
+ return self._min_vcpu
+
+ @property
+ def min_ram(self) -> BinarySizedValue:
+ return self._min_ram
+
+ @property
+ def min_disk(self) -> BinarySizedValue:
+ return self._min_disk
+
+ @property
+ def os_disks(self) -> typing.List[OSDiskValueObject]:
+ return self._os_disks
+
+ def __eq__(self, other: 'ImageEntity') -> bool: # type: ignore[override]
+ return all([
+ super().__eq__(other),
+ self._name == other.name,
+ self._path == other.path,
+ self._min_vcpu == other.min_vcpu,
+ self._min_ram == other.min_ram,
+ self._min_disk == other.min_disk])
+
+ @staticmethod
+ async def create(owner: 'AggregateRoot',
+ name: str,
+ path: pathlib.Path,
+ min_vcpu: int = 0,
+ min_ram: BinarySizedValue = BinarySizedValue(0, BinaryScale.G),
+ min_disk: BinarySizedValue = BinarySizedValue(0, BinaryScale.G)) -> 'ImageEntity':
+ if path.exists():
+ raise ImageException(code=400, msg=f'Image at {path} already exists')
+ path.parent.mkdir(parents=True, exist_ok=True)
+ try:
+ image = ImageEntity(owner=owner,
+ name=name,
+ path=path,
+ min_vcpu=min_vcpu,
+ min_ram=min_ram,
+ min_disk=min_disk)
+ return await owner.create(image)
+ finally:
+ pass
+
+ async def create_os_disk(self, path: pathlib.Path, size: BinarySizedValue):
+ if path.exists():
+ raise ImageException(code=400, msg=f'OS Disk at {path} already exists')
+ path.parent.mkdir(parents=True, exist_ok=True)
+ try:
+ subprocess.run(['/opt/homebrew/bin/qemu-img',
+ 'create',
+ '-f', str(DiskFormat.QCoW2),
+ '-F', str(DiskFormat.QCoW2),
+ '-b', str(self.path),
+ '-o', 'compat=v3',
+ path,
+ str(size)],
+ check=True)
+ self.os_disks.append(OSDiskValueObject(path=path, image_id=self.uid))
+ return await self._owner.modify(self)
+ except subprocess.CalledProcessError as e:
+ path.unlink(missing_ok=True)
+ raise ImageException(code=500, msg=f'Failed to create OS disk from image: {e.output}') from e
+
+ async def remove(self):
+ self.path.unlink(missing_ok=True)
+ await self._owner.remove(self.uid)
+
+ async def set_min_vcpu(self, value: int):
+ self._min_vcpu = value
+ await self._owner.modify(self)
+
+ async def set_min_ram(self, value: BinarySizedValue):
+ self._min_ram = value
+ await self._owner.modify(self)
+
+ async def set_min_disk(self, value: BinarySizedValue):
+ self._min_disk = value
+ await self._owner.modify(self)
-class AsyncImageAggregateRoot(AsyncAggregateRoot[ImageEntity, ImageModel]):
+class ImageAggregateRoot(AggregateRoot[ImageEntity, ImageModel]):
- async def to_model(self, entity: ImageEntity) -> ImageModel:
- return ImageModel(id=str(entity.id),
+ async def _to_model(self, entity: ImageEntity) -> ImageModel:
+ os_disks = [OSDiskModel(path=str(d.path), image_id=str(d.image_id)) for d in entity.os_disks]
+ return ImageModel(uid=str(entity.uid),
name=entity.name,
path=str(entity.path),
min_vcpu=entity.min_vcpu,
min_ram=entity.min_ram.value,
min_ram_scale=entity.min_ram.scale,
min_disk=entity.min_disk.value,
- min_disk_scale=entity.min_disk.scale)
+ min_disk_scale=entity.min_disk.scale,
+ os_disks=os_disks)
- async def from_model(self, model: ImageModel) -> ImageEntity:
+ async def _from_model(self, model: ImageModel) -> ImageEntity:
return ImageEntity(owner=self,
- id=UniqueIdentifier(model.id),
+ uid=UniqueIdentifier(model.uid),
name=model.name,
path=pathlib.Path(model.path),
min_vcpu=model.min_vcpu,
min_ram=BinarySizedValue(model.min_ram, BinaryScale(model.min_ram_scale)),
- min_disk=BinarySizedValue(model.min_disk, BinaryScale(model.min_disk_scale)))
+ min_disk=BinarySizedValue(model.min_disk, BinaryScale(model.min_disk_scale)),
+ os_disks=[OSDiskValueObject(path=pathlib.Path(d.path),
+ image_id=UniqueIdentifier(d.image_id))
+ for d in model.os_disks])
diff --git a/tests/test_generics.py b/tests/test_generics.py
index 9cfbd41..93f83c1 100644
--- a/tests/test_generics.py
+++ b/tests/test_generics.py
@@ -2,64 +2,54 @@
import pathlib
from kaso_mashin.common.base_types import BinaryScale, BinarySizedValue
-from kaso_mashin.common.generics.disks import DiskEntity, DiskModel, AsyncDiskAggregateRoot
+from kaso_mashin.common.generics.disks import DiskEntity, DiskModel, DiskAggregateRoot
+from kaso_mashin.common.generics.images import ImageEntity, ImageModel, ImageAggregateRoot
-@pytest.mark.asyncio(scope='module')
+@pytest.mark.asyncio
async def test_async_disks(generics_async_session_maker):
- aggregate_root = AsyncDiskAggregateRoot(model=DiskModel, session_maker=generics_async_session_maker)
- disk = await aggregate_root.create(DiskEntity(name='Test Disk',
- path=pathlib.Path(__file__).parent / 'build' / 'test.qcow2',
- size=BinarySizedValue(1, BinaryScale.G)))
try:
- loaded = await aggregate_root.get(disk.id)
+ disk_aggregate_root = DiskAggregateRoot(model=DiskModel, session_maker=generics_async_session_maker)
+ disk = await DiskEntity.create(owner=disk_aggregate_root,
+ name='Test Disk',
+ path=pathlib.Path(__file__).parent / 'build' / 'test-async-disks.qcow2',
+ size=BinarySizedValue(1, BinaryScale.M))
+
+ loaded = await disk_aggregate_root.get(disk.uid)
assert disk == loaded
- disk.size = BinarySizedValue(2, scale=BinaryScale.G)
- updated = await aggregate_root.modify(disk)
+ await disk.resize(BinarySizedValue(2, scale=BinaryScale.M))
+ updated = await disk_aggregate_root.get(disk.uid, force_reload=True)
+ assert disk.size == updated.size
assert disk == updated
- listed = await aggregate_root.list()
+ listed = await disk_aggregate_root.list()
assert len(listed) == 1
assert disk == listed[0]
finally:
- await aggregate_root.remove(disk.id)
- assert len(await aggregate_root.list()) == 0
+ await disk.remove()
+ assert len(await disk_aggregate_root.list()) == 0
assert not disk.path.exists()
-# def test_applied_disks_from_image(applied_session):
-# repo = DiskRepository(DiskModel, applied_session)
-# image = ImageEntity(name='jammy', path=pathlib.Path('/Users/imfeldma/var/kaso/images/jammy.qcow2'))
-# disk = DiskEntity.create_from_image('Test Disk',
-# path=pathlib.Path(__file__).parent / 'build' / 'test.qcow2',
-# size=BinarySizedValue(1, BinaryScale.G),
-# image=image)
-# try:
-# repo.create(disk)
-# disk.resize(BinarySizedValue(2, BinaryScale.G))
-# repo.modify(disk)
-# finally:
-# disk.remove()
-# assert not disk.path.exists()
-#
-# def test_applied_images(applied_session):
-# repo = ImageRepository(ImageModel, applied_session)
-# image = ImageEntity(name='Test Image',
-# path=pathlib.Path(__file__),
-# min_vcpu=2,
-# min_ram=BinarySizedValue(2, BinaryScale.G),
-# min_disk=BinarySizedValue(1, BinaryScale.G))
-# try:
-# repo.create(image)
-# loaded = repo.get_by_id(image.id)
-# assert image == loaded
-# image.min_vcpu = 99
-# image.min_ram = BinarySizedValue(99, BinaryScale.T)
-# image.min_disk = BinarySizedValue(99, BinaryScale.T)
-# updated = repo.modify(image)
-# assert image == updated
-# listed = repo.list()
-# assert len(listed) == 1
-# assert image == listed[0]
-# finally:
-# repo.remove(image.id)
-# assert repo.get_by_id(image.id) is None
+@pytest.mark.asyncio
+async def test_async_images(generics_async_session_maker):
+ try:
+ image_aggregate_root = ImageAggregateRoot(model=ImageModel, session_maker=generics_async_session_maker)
+ image = await ImageEntity.create(owner=image_aggregate_root,
+ name='Test Image',
+ path=pathlib.Path(__file__).parent / 'build' / 'test-async-images.qcow2')
+ loaded = await image_aggregate_root.get(image.uid)
+ assert image == loaded
+ await image.set_min_vcpu(10)
+ await image.set_min_ram(BinarySizedValue(2, BinaryScale.G))
+ await image.set_min_disk(BinarySizedValue(10, BinaryScale.G))
+ # await image.create_os_disk(path=pathlib.Path(__file__).parent / 'build' / 'os.qcow2',
+ # size=BinarySizedValue(10, BinaryScale.M))
+ updated = await image_aggregate_root.get(image.uid)
+ assert image == updated
+ listed = await image_aggregate_root.list()
+ assert len(listed) == 1
+ assert image == listed[0]
+ finally:
+ await image.remove()
+ assert len(await image_aggregate_root.list()) == 0
+ assert not image.path.exists()