Merge pull request #103 from boeddeker/master
Fixes for numpy 2
boeddeker authored Jun 18, 2024
2 parents 44bac08 + 9e96ac0 commit cce6092
Showing 15 changed files with 177 additions and 79 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/tests.yml
@@ -12,7 +12,7 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
python-version: ['3.7', '3.8', '3.9', '3.10']
python-version: ['3.7', '3.8', '3.9', '3.10', '3.11', '3.12']

steps:
- uses: actions/checkout@v2
54 changes: 54 additions & 0 deletions .github/workflows/tests_numpy1.yml
@@ -0,0 +1,54 @@
name: 'Tests for Numpy < 2'

on:
push:
branches: [ master ]
pull_request:
branches: [ master ]

jobs:
build:

runs-on: ubuntu-latest
strategy:
matrix:
python-version: ['3.10']

steps:
- uses: actions/checkout@v2
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python-version }}


- name: Install linux dependencies
run: |
trap 'echo -e "$ $BASH_COMMAND"' DEBUG
sudo apt-get update
sudo apt-get install libsndfile1 sox
- name: Install python dependencies
run: |
trap 'echo -e "$ $BASH_COMMAND"' DEBUG
python -m pip install --upgrade pip
pip install 'numpy<2' scipy Cython
pip install flake8 pytest pytest-cov codecov
if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
pip install setuptools==57.5.0 # downgrade setuptools so pycallgraph can be installed (necessary for python >= 3.9)
pip install --editable .[all]
pip install git+https://github.com/fgnt/pb_bss.git#egg=pb_bss[all]
- name: Lint with flake8
run: |
# stop the build if there are Python syntax errors or undefined names
#flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
# exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
#flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
- name: Test with pytest
run: |
pytest -v
- name: Codecov
run: |
codecov
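The workflow above pins 'numpy<2' so the test suite keeps running against the old NumPy while the main workflow moves on to NumPy 2. Where library code itself has to distinguish the two, a runtime guard along these lines can be used (a minimal sketch, not part of this commit; the IS_NUMPY_2 name is made up for illustration):

import numpy as np
from numpy.lib import NumpyVersion

# Hypothetical guard, not from paderbox: compare the installed version
# against 2.0.0 and branch where behaviour differs.
IS_NUMPY_2 = NumpyVersion(np.__version__) >= '2.0.0'

if IS_NUMPY_2:
    pass  # NumPy >= 2.0 code path (e.g. new scalar reprs, see NEP 51)
else:
    pass  # NumPy < 2.0 code path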
4 changes: 2 additions & 2 deletions paderbox/array/interval/core.py
@@ -846,10 +846,10 @@ def __getitem__(self, item):
def sum(self, axis=None, out=None):
"""
>>> a = ArrayInterval([True, True, False, False])
>>> np.sum(a)
>>> print(np.sum(a))
2
>>> a = ArrayInterval([True, False, False, True])
>>> np.sum(a)
>>> print(np.sum(a))
2
>>> np.sum(zeros(10))
0
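The doctest edits above (and the similar ones in paderbox/array/sparse.py below) work around the NumPy 2 scalar-repr change from NEP 51: np.sum returns a NumPy scalar, whose repr now spells out the type, while str() is unchanged. A small illustration with plain NumPy, nothing paderbox-specific:

import numpy as np

a = np.array([True, True, False, False])
s = np.sum(a)  # a NumPy integer scalar, not a Python int

# repr changed with NEP 51:
#   NumPy < 2.0 : repr(s) == '2'
#   NumPy >= 2.0: repr(s) == 'np.int64(2)'
# str(s) is '2' on both, so wrapping the doctest value in print() keeps
# the expected output identical across versions.
print(s)  # -> 2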
20 changes: 10 additions & 10 deletions paderbox/array/sparse.py
@@ -320,8 +320,8 @@ class SparseArray:
dtype is inferred from segments:
>>> a.dtype
dtype('float64')
>>> a.pad_value, type(a.pad_value)
(0.0, <class 'numpy.float64'>)
>>> print(a.pad_value, type(a.pad_value))
0.0 <class 'numpy.float64'>
Can't add segments with differing dtypes
>>> a[:3] = np.ones(3, dtype=np.float32)
@@ -446,11 +446,11 @@ def pad_value(self):
>>> a = SparseArray(10)
>>> a.pad_value # is None
>>> a[:5] = np.arange(5)
>>> a.pad_value
>>> print(a.pad_value)
0
>>> zeros(10).pad_value
>>> print(zeros(10).pad_value)
0.0
>>> full(10, 42).pad_value
>>> print(full(10, 42).pad_value)
42.0
"""
# _pad_value must be an array, otherwise pt.data.batch.example_to_device
@@ -905,8 +905,8 @@ def __getitem__(self, item):
>>> a[15:] = np.arange(5, dtype=np.float64) + 1
# Integer getitem
>>> a[0], a[9], a[10], a[14], a[15], a[18], a[-1]
(1.0, 1.0, 0.0, 0.0, 1.0, 4.0, 5.0)
>>> print(a[0], a[9], a[10], a[14], a[15], a[18], a[-1])
1.0 1.0 0.0 0.0 1.0 4.0 5.0
>>> a[21]
Traceback (most recent call last):
...
@@ -957,16 +957,16 @@ def __getitem__(self, item):
# Unknown time length
>>> a = zeros((None,))
>>> a[10]
>>> print(a[10])
0.0
>>> a[:10], np.asarray(a[:10])
(SparseArray(shape=(10,)), array([0., 0., 0., 0., 0., 0., 0., 0., 0., 0.], dtype=float32))
>>> a[:5] = 1
>>> np.asarray(a[2:7])
array([1., 1., 1., 0., 0.], dtype=float32)
>>> a[0]
>>> print(a[0])
1.0
>>> a[10]
>>> print(a[10])
0.0
>>> a.persist_shape()
(5,)
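The same NEP 51 repr change is behind these sparse-array doctest edits; the extra wrinkle is that displaying several values as a tuple always goes through the elements' reprs, so the fix prints the values space-separated instead. A short sketch with plain NumPy scalars:

import numpy as np

x = np.float64(1.0)

# Displaying a tuple uses repr() of each element, which is version dependent:
#   NumPy < 2.0 : (1.0, 1.0)
#   NumPy >= 2.0: (np.float64(1.0), np.float64(1.0))
# Printing the values themselves uses str() and is stable:
print(x, x)  # -> 1.0 1.0 on both NumPy 1.x and 2.x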
20 changes: 11 additions & 9 deletions paderbox/io/hdf5.py
@@ -4,7 +4,7 @@

import numpy as np

from distutils.version import LooseVersion
from packaging.version import parse as _parse


__all__ = ['dump_hdf5', 'update_hdf5', 'load_hdf5']
@@ -120,9 +120,9 @@ def update_hdf5(
>>> update_hdf5('peter', file, '/name', allow_overwrite=True)
>>> pprint(load_hdf5(file))
{'name': 'peter'}
>>> update_hdf5({'name': 1}, file, '/', allow_overwrite=True)
>>> update_hdf5({'name': 'Alice'}, file, '/', allow_overwrite=True)
>>> pprint(load_hdf5(file))
{'name': 1}
{'name': 'Alice'}
"""
if not isinstance(obj, dict):
path_split = path.rsplit('/', 1)
@@ -179,8 +179,9 @@ def load_hdf5(filename, path='/'):
... }
>>> dump_hdf5(ex, file, True)
>>> ex_load = load_hdf5(file)
>>> from pprint import pprint
>>> ex_load['fav_tensors']['kronecker2d'][0, 0]
>>> # from pprint import pprint
>>> from paderbox.utils.pretty import pprint
>>> print(ex_load['fav_tensors']['kronecker2d'][0, 0])
1.0
>>> pprint(ex_load)
{'age': 24,
@@ -193,7 +194,7 @@
'fav_tensors': {'kronecker2d': array([[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]]),
'levi_civita3d': array([[[ 0, 0, 0],
'levi_civita3d': array([[[ 0, 0, 0],
[ 0, 0, 1],
[ 0, -1, 0]],
<BLANKLINE>
@@ -403,7 +404,7 @@ def ckeck_exists():
h5file[cur_path] = item
# This query is necessary since h5py changed string
# handling after version 3.0.0 to dumping strings as bytes
if LooseVersion(h5py.__version__) >= '3.0.0':
if _parse(h5py.__version__) >= _parse('3.0.0'):
test_item = item.encode('utf-8')
else:
test_item = item
@@ -477,12 +478,13 @@ def __load_dict_from_hdf5__(cls, filename, path='/'):
@classmethod
def __recursively_load_dict_contents_from_group__(cls, h5file, path):
"""
>>> from paderbox.utils.pretty import pprint
>>> from paderbox.io.cache_dir import get_cache_dir
>>> file = get_cache_dir() / 'tmp.hdf5'
>>> ex = {'key': [1, 2, 3]}
>>> dump_hdf5(ex, file, True)
>>> ex_load = load_hdf5(file)
>>> ex_load
>>> pprint(ex_load)
{'key': [1, 2, 3]}
"""
import h5py
@@ -508,7 +510,7 @@ def __recursively_load_dict_contents_from_group__(cls, h5file, path):

# This query is necessary since h5py changed string
# handling after version 3.0.0 to dumping strings as bytes
if LooseVersion(h5py.__version__) >= '3.0.0':
if _parse(h5py.__version__) >= _parse('3.0.0'):
ans[key] = ans[key].decode()
if isinstance(ans[key], str) and ans[key] == 'None':
ans[key] = None
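The driver for the hdf5.py change is that distutils, and with it distutils.version.LooseVersion, is removed in Python 3.12, which the main workflow now tests. packaging.version.parse is the usual replacement; unlike LooseVersion, a parsed Version does not compare against a bare string, which is why both sides are parsed. A minimal sketch (assumes the packaging and h5py packages are installed):

import h5py
from packaging.version import parse as _parse

# Old: LooseVersion(h5py.__version__) >= '3.0.0'
# New: parse both sides; comparing a Version against a plain str is not supported.
if _parse(h5py.__version__) >= _parse('3.0.0'):
    pass  # h5py >= 3.0 reads stored strings back as bytes; decode before comparing
else:
    pass  # older h5py returns str directly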
15 changes: 9 additions & 6 deletions paderbox/io/json_module.py
@@ -39,21 +39,24 @@ class SummaryEncoder(Encoder):
>>> example = dict(a=np.random.uniform(size=(3, 4)))
>>> print(json.dumps(example, cls=SummaryEncoder, indent=2))
{
"a": "ndarray: shape (3, 4), dtype float64"
"a": "array(shape=(3, 4), dtype=float64)"
}
alternative:
>>> np.set_string_function(lambda a: f'array(shape={a.shape}, dtype={a.dtype})')
>>> example
>>> from paderbox.utils.pretty import pprint
>>> # np.set_string_function(lambda a: f'array(shape={a.shape}, dtype={a.dtype})') # removed in numpy 2.0 without replacement, see https://github.com/napari/napari/issues/6752
>>> pprint(example, max_array_length=0)
{'a': array(shape=(3, 4), dtype=float64)}
>>> np.set_string_function(None) # needed for pytest. np.set_string_function is not properly reseted.
"""

def default(self, obj):
if isinstance(obj, np.ndarray):
return 'ndarray: shape {}, dtype {}'.format(obj.shape, obj.dtype)
return f'array(shape={obj.shape}, dtype={obj.dtype})'
else:
return super().default(obj)
try:
return super().default(obj)
except TypeError:
return str(obj)


def dumps_json(
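np.set_string_function was removed in NumPy 2.0 without a replacement, so the docstring now demonstrates the compact array display via paderbox's pprint(..., max_array_length=0), and SummaryEncoder.default builds the summary string itself, with a str() fallback for objects the base encoder rejects. A standalone approximation of that encoder behaviour (the class name here is made up; the real class derives from paderbox's Encoder):

import json
import numpy as np

class ArraySummaryEncoder(json.JSONEncoder):
    def default(self, obj):
        if isinstance(obj, np.ndarray):
            # Summarise arrays instead of serialising their contents.
            return f'array(shape={obj.shape}, dtype={obj.dtype})'
        try:
            return super().default(obj)
        except TypeError:
            # Last resort: stringify objects json cannot handle.
            return str(obj)

example = {'a': np.random.uniform(size=(3, 4))}
print(json.dumps(example, cls=ArraySummaryEncoder, indent=2))
# {
#   "a": "array(shape=(3, 4), dtype=float64)"
# }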
30 changes: 15 additions & 15 deletions paderbox/io/new_subdir.py
@@ -112,7 +112,7 @@ def get_new_subdir(
# ToDo: Make this working.
# Will fail when calling os.listdir
else:
basedir.mkdir(parents=True)
basedir.mkdir(parents=True, exist_ok=True)

if Path('/net') in basedir.parents:
# If nt filesystem, assert not in /net/home
@@ -121,13 +121,13 @@
prefix_ = f'{prefix}_' if prefix else ''
_suffix = f'_{suffix}' if suffix else ''

for i in range(200):
for i in range(1000):
if id_naming == 'index':
if prefix is None and suffix is None:
dir_nrs = [
int(d)
for d in os.listdir(str(basedir))
if (basedir / d).is_dir() and d.isdigit()
int(d.name)
for d in os.scandir(str(basedir))
if d.is_dir() and d.name.isdigit()
]
_id = max(dir_nrs + [0]) + 1
else:
@@ -138,11 +138,11 @@ def remove_pre_suf(d):
)

dir_nrs = [
int(remove_pre_suf(d))
for d in os.listdir(str(basedir))
if (basedir / d).is_dir()
if fnmatch.fnmatch(d, f'{prefix_}*{_suffix}')
if remove_pre_suf(d).isdigit()
int(remove_pre_suf(d.name))
for d in os.scandir(str(basedir))
if d.is_dir()
if fnmatch.fnmatch(d.name, f'{prefix_}*{_suffix}')
if remove_pre_suf(d.name).isdigit()
]
dir_nrs += [0]
_id = max(dir_nrs) + 1
@@ -177,7 +177,7 @@ def remove_pre_suf(d):
return simu_dir
except FileExistsError:
# Catch race conditions
if i > 100:
if i > 200:
# After some tries,
# expect that something other went wrong
raise
@@ -253,8 +253,8 @@ class NameGenerator:
>>> ng = NameGenerator()
>>> ng()
'nice_tomato_fox'
>>> ng.possibilities() # With 22 million a collision is unlikely
22188920
>>> f'{ng.possibilities():_}' # With 22 million a collision is unlikely
'22_188_920'
>>> ng = NameGenerator(['adjectives', 'animals'])
>>> ng()
'regional_prawn'
@@ -321,8 +321,8 @@ def cli(
Args:
basedir:
id_naming: e.g. 'index', 'time', 'adjective_color_animal'
mkdir:
id_naming: e.g. 'index', 'time', 'adjective_color_animal' (default 'index')
mkdir: (default True)
prefix:
suffix:
consider_mpi:
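Besides widening the retry limits, the new_subdir.py change swaps os.listdir plus per-entry Path checks for os.scandir, whose DirEntry objects expose .name and a cached is_dir(). A simplified sketch of the 'index' naming scheme (next_index is a hypothetical helper; the real get_new_subdir also handles prefixes, suffixes and race conditions):

import os

def next_index(basedir):
    # Collect existing purely numeric directory names and return the next free id.
    dir_nrs = [
        int(entry.name)  # DirEntry.name is the bare directory name
        for entry in os.scandir(str(basedir))
        if entry.is_dir() and entry.name.isdigit()
    ]
    return max(dir_nrs + [0]) + 1

# e.g. with subdirectories 1, 2 and 7 present, next_index(basedir) returns 8.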
