Feature: Save .obs data as categorical dtype #478

Merged · 3 commits · Jan 26, 2022
Changes from all commits
16 changes: 11 additions & 5 deletions sfaira/data/store/io/io_dao.py
@@ -95,7 +95,16 @@ def write_dao(store: Union[str, Path], adata: anndata.AnnData, chunks: Union[boo
        # convert to dict to get rid of anndata OverloadedDict
        pickle.dump(obj=dict(adata.uns), file=f)
    # Write .obs and .var as a separate file as this can be easily interfaced with DataFrames.
-    adata.obs.iloc[perm].to_parquet(path=path_obs(store), engine='pyarrow', compression='snappy', index=None)
+    (
+        adata
+        .obs.iloc[perm]
+        # make sure all columns are dtype=str and are converted to categorical
+        # this has to change if we have numeric values in obs
+        # exclude 'permutation_original_data' column from this as it's numeric
+        .astype({col: str for col in adata.obs.columns if col not in ['permutation_original_data']})
+        .astype({col: 'category' for col in adata.obs.columns if col not in ['permutation_original_data']})
+        .to_parquet(path=path_obs(store), engine='pyarrow', compression='snappy', index=None)
+    )
    adata.var.to_parquet(path=path_var(store), engine='pyarrow', compression='snappy', index=None)
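The conversion chain added here can be sanity-checked in isolation. A minimal sketch of the same pattern (the toy frame and its values are illustrative, not from the PR):

```python
import pandas as pd

# Illustrative .obs table: string metadata plus the numeric
# permutation column that the cast above excludes.
obs = pd.DataFrame({
    "cell_type": ["T cell", "B cell", "T cell"],
    "organ": ["lung", "lung", "liver"],
    "permutation_original_data": [2, 0, 1],
})

# Cast everything except the numeric column to str, then to category.
skip = ["permutation_original_data"]
obs = (
    obs
    .astype({col: str for col in obs.columns if col not in skip})
    .astype({col: "category" for col in obs.columns if col not in skip})
)

print(obs.dtypes)
# cell_type                    category
# organ                        category
# permutation_original_data       int64
```

The double `astype` matters: casting to `str` first normalizes mixed object columns, so the subsequent `category` cast always produces string-valued levels.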


@@ -117,6 +126,7 @@ def read_dao(store: Union[str, Path], use_dask: bool = True, columns: Union[None
    :param use_dask: Whether to use lazy dask arrays where appropriate.
    :param columns: Which columns to read into the obs copy in the output, see pandas.read_parquet().
    :param obs_separate: Whether to return .obs as a separate return value or in the returned AnnData.
+    :param x_separate: Whether to return .X as a separate return value or in the returned AnnData.
    :return: Tuple of:
        - AnnData with .X as dask array.
        - obs table separately as dataframe
@@ -134,10 +144,6 @@ def read_dao(store: Union[str, Path], use_dask: bool = True, columns: Union[None
    # Read tables:
    obs = pd.read_parquet(path_obs(store), columns=columns, engine="pyarrow")
    var = pd.read_parquet(path_var(store), engine="pyarrow")
-    # Convert to categorical variables where possible to save memory:
-    # for k, dtype in zip(list(obs.columns), obs.dtypes):
-    #     if dtype == "object":
-    #         obs[k] = obs[k].astype(dtype="category")
    d = {"var": var, "uns": uns}
    # Assemble AnnData without obs to save memory:
    adata = anndata.AnnData(**d, shape=x.shape)
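The commented-out conversion loop can be deleted because parquet via pyarrow dictionary-encodes categorical columns and pandas restores the dtype on read, so no post-hoc cast is needed. A minimal round-trip sketch (the file path is illustrative):

```python
import pandas as pd

df = pd.DataFrame({"organ": pd.Categorical(["lung", "liver", "lung"])})
df.to_parquet("/tmp/obs.parquet", engine="pyarrow", compression="snappy")

# The category dtype survives the round trip, which is why the
# manual astype("category") pass above became redundant.
restored = pd.read_parquet("/tmp/obs.parquet", engine="pyarrow")
print(restored["organ"].dtype)  # category
```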
42 changes: 35 additions & 7 deletions sfaira/data/store/stores/single.py
@@ -1,17 +1,17 @@
import abc
+import os
+import pickle
+from typing import Dict, List, Tuple, Union
+
import anndata
import dask.array
import dask.dataframe
import numpy as np
-import os
import pandas as pd
-import pickle
-from typing import Dict, List, Tuple, Union

from sfaira.consts import AdataIdsSfaira, OCS
from sfaira.data.dataloaders.base.utils import is_child, UNS_STRING_META_IN_OBS
-from sfaira.data.store.stores.base import StoreBase
from sfaira.data.store.carts.single import CartAnndata, CartDask, CartSingle
+from sfaira.data.store.stores.base import StoreBase
from sfaira.versions.genomes.genomes import GenomeContainer, ReactiveFeatureContainer

"""
@@ -70,7 +70,7 @@ class StoreSingleFeatureSpace(StoreBase):

    def __init__(self, adata_by_key: Dict[str, anndata.AnnData], indices: Dict[str, np.ndarray],
                 obs_by_key: Union[None, Dict[str, dask.dataframe.DataFrame]] = None, data_source: str = "X"):
-        self.adata_by_key = adata_by_key
+        self.adata_by_key = self.__align_categorical_levels(adata_by_key)
        self.indices = indices
        self.obs_by_key = obs_by_key
        self.ontology_container = OCS
@@ -79,6 +79,34 @@ def __init__(self, adata_by_key: Dict[str, anndata.AnnData], indices: Dict[str,
        self.data_source = data_source
        self._celltype_universe = None

+    @staticmethod
+    def __align_categorical_levels(adata_by_key: Dict[str, anndata.AnnData]) -> Dict[str, anndata.AnnData]:
+        """
+        Align the categorical levels across all datasets.
+
+        :param adata_by_key: Dict[str, anndata.AnnData]
+        :return: Dict[str, anndata.AnnData]
+        """
+        datasets = list(adata_by_key.keys())
+        # get list of all categorical columns - using one dataset is enough as they all have the same columns
+        categorical_columns: List[str] = []
+        for col in adata_by_key[datasets[0]].obs.columns:
+            if isinstance(adata_by_key[datasets[0]].obs[col].dtype, pd.api.types.CategoricalDtype):
+                categorical_columns.append(col)
+        # union categorical levels across datasets for each column
+        categories_columns: Dict[str, pd.Index] = {}
+        for col in categorical_columns:
+            categories_columns[col] = pd.api.types.union_categoricals(
+                [pd.Categorical(v.obs[col].cat.categories) for v in adata_by_key.values()]
+            ).categories
+        # update categorical columns
+        for dataset in datasets:
+            for col, categories in categories_columns.items():
+                adata_by_key[dataset].obs[col] = pd.Categorical(adata_by_key[dataset].obs[col],
+                                                                categories=categories)
+
+        return adata_by_key
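
The per-column level union can be reproduced on plain DataFrames. A minimal sketch of what `__align_categorical_levels` does for a single column (frame and column names illustrative):

```python
import pandas as pd
from pandas.api.types import union_categoricals

# Two datasets whose 'organ' columns disagree on category levels.
a = pd.DataFrame({"organ": pd.Categorical(["lung", "lung"])})
b = pd.DataFrame({"organ": pd.Categorical(["liver"])})

# Union the levels across datasets, as in the method above.
levels = union_categoricals(
    [pd.Categorical(df["organ"].cat.categories) for df in (a, b)]
).categories

# Re-encode each dataset against the shared level set.
for df in (a, b):
    df["organ"] = pd.Categorical(df["organ"], categories=levels)

print(list(a["organ"].cat.categories))  # ['lung', 'liver']
```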

    @property
    def idx(self) -> np.ndarray:
        """
@@ -661,7 +689,7 @@ def _obs(self) -> pd.DataFrame:

        :return: .obs data frame.
        """
-        # TODO Using loc indexing here instead of iloc, this might be faster on larger tables?
+
        return pd.concat([
            self.adata_by_key[k].obs.loc[self.adata_by_key[k].obs.index[v], :]
            for k, v in self.indices.items()
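The alignment matters for this `pd.concat` in `_obs`: pandas keeps the category dtype across a concat only when all inputs share identical level sets, and otherwise silently upcasts to object. A minimal illustration (column name hypothetical):

```python
import pandas as pd

a = pd.DataFrame({"organ": pd.Categorical(["lung"])})
b = pd.DataFrame({"organ": pd.Categorical(["liver"])})

# Mismatched levels: concat falls back to object dtype.
print(pd.concat([a, b])["organ"].dtype)  # object

# Shared levels: the concatenated column stays categorical.
shared = ["lung", "liver"]
a["organ"] = pd.Categorical(a["organ"], categories=shared)
b["organ"] = pd.Categorical(b["organ"], categories=shared)
print(pd.concat([a, b])["organ"].dtype)  # category
```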