Models Redesign #94

Merged · 30 commits · Feb 10, 2025

Commits
a37d350  refactor: update models (annehaley, Dec 30, 2024)
74e9eb5  fix: Update `admin.py` (annehaley, Dec 30, 2024)
05116b0  refactor: Update rest API (annehaley, Dec 31, 2024)
4c295fb  test: Update test suite (annehaley, Jan 2, 2025)
c2cd3e3  build: Update dockerfile to support conversion of `.nc` files (annehaley, Jan 5, 2025)
a126080  refactor: Update tasks (annehaley, Jan 5, 2025)
347056a  feat: Add query param filtering to vector tile endpoint (annehaley, Jan 6, 2025)
3afbfcd  fix: Update `LayerSerializer` (annehaley, Jan 6, 2025)
0fe6753  fix: Use `band_ref` JSON field on `LayerFrame` (annehaley, Jan 6, 2025)
b7ca918  feat: Add migration file (annehaley, Jan 6, 2025)
114f9bf  fix: Update populate process and sample data (annehaley, Jan 9, 2025)
7d899d7  refactor: Rename `band_ref` field to `source_filters` (annehaley, Jan 15, 2025)
161697a  fix: Add combine option for zipped vector datasets (annehaley, Jan 15, 2025)
d225d4a  fix: Adjust ingest process for New York Energy use case (annehaley, Jan 15, 2025)
bcaf361  fix: Update list of ignored filetypes (annehaley, Jan 15, 2025)
1df004d  refactor: Consolidate layer and frame creation logic to single function (annehaley, Jan 15, 2025)
a6a2a01  fix: Additional dataset conversion adjustments (annehaley, Jan 15, 2025)
0c8673e  fix: Protect admin page from null source files on data objects (annehaley, Jan 15, 2025)
7299af9  fix: Include access control logic for RasterData and VectorData (annehaley, Jan 30, 2025)
750c135  refactor: Rename `SourceRegion` -> `Region` (annehaley, Jan 31, 2025)
42aec47  fix: Update default layer name generation (annehaley, Jan 31, 2025)
11f7cde  refactor: Remove `dataset` field from `Network` and make `vector_data…` (annehaley, Jan 31, 2025)
3c24d8c  chore: Remove print statement (annehaley, Jan 31, 2025)
03ba49b  fix: Update populate test (annehaley, Jan 31, 2025)
afe2c7d  fix: Update tests with new Network-Dataset relationship (annehaley, Jan 31, 2025)
9560940  fix: Update API with new Network-Dataset relationship (annehaley, Feb 4, 2025)
81dffa1  fix: Update vector feature filtering to allow nested properties (annehaley, Feb 5, 2025)
a15e8da  feat: Allow additional filters specification in layer options (annehaley, Feb 5, 2025)
48c0a0e  fix: Update `get_filter_string` function (annehaley, Feb 5, 2025)
22bc1f6  fix: Update `data.py` for lint check (annehaley, Feb 5, 2025)
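
Several of these commits (347056a, 81dffa1, 48c0a0e, a15e8da) concern filtering vector features by their properties, including nested ones. In Django, nested filtering of this kind is usually expressed with chained JSONField lookups. A minimal sketch, assuming a `VectorFeature` model with a `properties` JSONField; the model name and import path are assumptions for illustration, not taken from this PR:

```python
# Hypothetical model/import path; the nested-lookup pattern is the point here.
from uvdat.core.models import VectorFeature

# Each `__` segment descends one level into the JSON, so a feature whose
# properties contain {"owner": {"name": "NYSEG"}} matches the second query.
in_albany = VectorFeature.objects.filter(properties__town='Albany')
by_owner = VectorFeature.objects.filter(properties__owner__name='NYSEG')
```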
dev/Dockerfile (2 changes: 1 addition & 1 deletion)

```diff
@@ -15,7 +15,7 @@ RUN python -m pip install ./tile2net
 COPY ./setup.py /opt/uvdat-server/setup.py
 COPY ./manage.py /opt/uvdat-server/manage.py
 COPY ./uvdat /opt/uvdat-server/uvdat
-RUN pip install large-image[gdal,pil] large-image-converter --find-links https://girder.github.io/large_image_wheels
+RUN pip install large-image[gdal,pil,mapnik] large-image-converter --find-links https://girder.github.io/large_image_wheels
 RUN pip install --editable /opt/uvdat-server[dev]
 
 # Use a directory name which will never be an import name, as isort considers this as first-party.
```
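
The lone Dockerfile change adds the `mapnik` extra, presumably so large-image can render projected raster tiles through Mapnik. A quick sanity check that the source installed correctly in the rebuilt image; this snippet is a sketch, not part of the PR:

```python
# Run inside the rebuilt container. The module is provided by the
# large-image[...,mapnik] extra; a failed install raises ImportError here.
import large_image_source_mapnik  # noqa: F401

print('mapnik tile source is available')
```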
sample_data/ingest_use_case.py (17 changes: 8 additions & 9 deletions)

```diff
@@ -28,16 +28,16 @@ def ingest_file(file_info, index=0, dataset=None, chart=None):
     file_location = Path(DOWNLOADS_FOLDER, file_path)
     file_type = file_path.split('.')[-1]
     if not file_location.exists():
-        print(f'\t Downloading data file {file_name}.')
+        print(f'\t\t Downloading data file {file_name}.')
         file_location.parent.mkdir(parents=True, exist_ok=True)
         with open(file_location, 'wb') as f:
             r = requests.get(file_url)
             r.raise_for_status()
             f.write(r.content)
 
-    existing = FileItem.objects.filter(name=file_name)
+    existing = FileItem.objects.filter(dataset=dataset, name=file_name)
     if existing.count():
-        print('\t', f'FileItem {file_name} already exists.')
+        print('\t\t', f'FileItem {file_name} already exists.')
     else:
         new_file_item = FileItem.objects.create(
             name=file_name,
@@ -51,7 +51,7 @@ def ingest_file(file_info, index=0, dataset=None, chart=None):
             ),
             index=index,
         )
-        print('\t', f'FileItem {new_file_item.name} created.')
+        print('\t\t', f'FileItem {new_file_item.name} created.')
         with file_location.open('rb') as f:
             new_file_item.file.save(file_path, ContentFile(f.read()))
```
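
The middle change above scopes the `FileItem` duplicate check to the parent dataset. A short sketch of the behavior this fixes, with illustrative file names:

```python
# Before: a name-only filter matched across all datasets, so a same-named file
# in a second dataset was reported as "already exists" and never ingested.
FileItem.objects.filter(name='network.geojson')                    # global match
# After: only a true duplicate within this dataset short-circuits the ingest.
FileItem.objects.filter(dataset=dataset, name='network.geojson')   # scoped match
```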

```diff
@@ -74,7 +74,7 @@ def ingest_projects(use_case):
             },
         )
         if created:
-            print('\t', f'Project {project_for_setting.name} created.')
+            print('\t\t', f'Project {project_for_setting.name} created.')
 
         project_for_setting.datasets.set(Dataset.objects.filter(name__in=project['datasets']))
         project_for_setting.set_permissions(owner=User.objects.filter(is_superuser=True).first())
@@ -100,7 +100,7 @@ def ingest_charts(use_case):
                 metadata=chart.get('metadata'),
                 editable=chart.get('editable', False),
             )
-            print('\t', f'Chart {new_chart.name} created.')
+            print('\t\t', f'Chart {new_chart.name} created.')
             for index, file_info in enumerate(chart.get('files', [])):
                 ingest_file(
                     file_info,
@@ -109,7 +109,7 @@ def ingest_charts(use_case):
                 )
             chart_for_conversion = new_chart
 
-        print('\t', f'Converting data for {chart_for_conversion.name}...')
+        print('\t\t', f'Converting data for {chart_for_conversion.name}.')
         chart_for_conversion.spawn_conversion_task(
             conversion_options=chart.get('conversion_options'),
             asynchronous=False,
@@ -124,6 +124,7 @@ def ingest_datasets(use_case, include_large=False, dataset_indexes=None):
         data = json.load(datasets_json)
         for index, dataset in enumerate(data):
             if dataset_indexes is None or index in dataset_indexes:
+                print('\t- ', dataset['name'])
                 existing = Dataset.objects.filter(name=dataset['name'])
                 if existing.count():
                     dataset_for_conversion = existing.first()
@@ -133,10 +134,8 @@ def ingest_datasets(use_case, include_large=False, dataset_indexes=None):
                     name=dataset['name'],
                     description=dataset['description'],
                     category=dataset['category'],
-                    dataset_type=dataset.get('type', 'vector').upper(),
                     metadata=dataset.get('metadata', {}),
                 )
-                print('\t', f'Dataset {new_dataset.name} created.')
                 for index, file_info in enumerate(dataset.get('files', [])):
                     ingest_file(
                         file_info,
```
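
Besides the print cleanup, `ingest_datasets` no longer passes `dataset_type` to `Dataset.objects.create`, consistent with the redesigned models carrying type information on the attached `RasterData`/`VectorData` objects instead. A minimal usage sketch of the entry points in this file, assuming a configured Django environment; the use-case name is a placeholder, not taken from the PR:

```python
from sample_data.ingest_use_case import ingest_charts, ingest_datasets, ingest_projects

# 'new_york_energy' is an illustrative use-case folder name.
ingest_datasets('new_york_energy', dataset_indexes=[0, 1])  # ingest only the first two datasets
ingest_charts('new_york_energy')
ingest_projects('new_york_energy')  # links datasets and grants superuser ownership
```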