diff --git a/.github/workflows/process_l3_test.yml b/.github/workflows/process_l3_test.yml deleted file mode 100644 index 97f8a034..00000000 --- a/.github/workflows/process_l3_test.yml +++ /dev/null @@ -1,47 +0,0 @@ -on: - pull_request: - types: [opened, reopened, synchronize, edited] - workflow_dispatch: - -jobs: - build: - runs-on: ubuntu-latest - name: process_l3_test - steps: - - name: Install Python - uses: actions/setup-python@v4 - with: - python-version: "3.8" - - name: Checkout repo - uses: actions/checkout@v3 - with: - path: "main" - token: ${{ secrets.GITHUB_TOKEN }} - - name: Install dependencies - shell: bash - run: | - python -m pip install --upgrade pip - pip install wheel - python3 -m pip install --upgrade setuptools - cd $GITHUB_WORKSPACE/main - pip install . - - name: Clone AWS Level 0 data repo for testing - env: - GITLAB_TOKEN : ${{ secrets.GITLAB_TOKEN }} - run: | - cd $GITHUB_WORKSPACE - git clone --depth 1 https://oauth2:${{ env.GITLAB_TOKEN }}@geusgitlab.geus.dk/glaciology-and-climate/promice/aws-l0.git - - name: Run data processing - env: - TEST_STATION: KPC_U CEN2 - shell: bash - run: | - mkdir $GITHUB_WORKSPACE/out/ - for i in $(echo ${{ env.TEST_STATION }} | tr ' ' '\n'); do - python3 $GITHUB_WORKSPACE/main/src/pypromice/process/get_l3.py -c $GITHUB_WORKSPACE/aws-l0/raw/config/$i.toml -i $GITHUB_WORKSPACE/aws-l0/raw -o $GITHUB_WORKSPACE/out/ - done - - name: Upload test output - uses: actions/upload-artifact@v3 - with: - name: result - path: out diff --git a/.github/workflows/process_l2_test.yml b/.github/workflows/process_test.yml similarity index 69% rename from .github/workflows/process_l2_test.yml rename to .github/workflows/process_test.yml index 810086cb..2e938e2a 100644 --- a/.github/workflows/process_l2_test.yml +++ b/.github/workflows/process_test.yml @@ -31,20 +31,34 @@ jobs: run: | cd $GITHUB_WORKSPACE git clone --depth 1 https://oauth2:${{ env.GITLAB_TOKEN }}@geusgitlab.geus.dk/glaciology-and-climate/promice/aws-l0.git - - 
name: Run L0 to L3 processing + - name: Run L0 to L2 processing env: TEST_STATION: KAN_U HUM shell: bash run: | mkdir $GITHUB_WORKSPACE/out/ - mkdir $GITHUB_WORKSPACE/out/L2/ + mkdir $GITHUB_WORKSPACE/out/L0toL2/ for i in $(echo ${{ env.TEST_STATION }} | tr ' ' '\n'); do - python3 $GITHUB_WORKSPACE/main/src/pypromice/process/get_l2.py -c $GITHUB_WORKSPACE/aws-l0/tx/config/$i.toml -i $GITHUB_WORKSPACE/aws-l0/tx -o $GITHUB_WORKSPACE/out/L2/ + python3 $GITHUB_WORKSPACE/main/src/pypromice/process/get_l2.py -c $GITHUB_WORKSPACE/aws-l0/tx/config/$i.toml -i $GITHUB_WORKSPACE/aws-l0/tx -o $GITHUB_WORKSPACE/out/L0toL2/ done -# mkdir $GITHUB_WORKSPACE/out/L3/ +# - name: Run L2 to L3 processing +# env: +# TEST_STATION: KAN_U HUM +# shell: bash +# run: | +# mkdir $GITHUB_WORKSPACE/out/L2toL3/ # for i in $(echo ${{ env.TEST_STATION }} | tr ' ' '\n'); do -# python3 $GITHUB_WORKSPACE/main/src/pypromice/process/get_l2tol3.py -i $GITHUB_WORKSPACE/out/L2/$i/$i_hour.nc -o $GITHUB_WORKSPACE/out/ -t 60min +# python3 $GITHUB_WORKSPACE/main/src/pypromice/process/get_l2tol3.py -i $GITHUB_WORKSPACE/out/L0toL2/$i/${i}_hour.nc -o $GITHUB_WORKSPACE/out/L2toL3 -t 60min # done + - name: Run L0 to L3 processing + env: + TEST_STATION: KAN_U HUM + shell: bash + run: | + mkdir $GITHUB_WORKSPACE/out/L0toL3/ + for i in $(echo ${{ env.TEST_STATION }} | tr ' ' '\n'); do + python3 $GITHUB_WORKSPACE/main/src/pypromice/process/get_l2.py -c $GITHUB_WORKSPACE/aws-l0/tx/config/$i.toml -i $GITHUB_WORKSPACE/aws-l0/tx -o $GITHUB_WORKSPACE/out/L0toL3/ + done - name: Upload test output uses: actions/upload-artifact@v3 with: diff --git a/setup.py b/setup.py index 194d358e..552a7da6 100644 --- a/setup.py +++ b/setup.py @@ -36,7 +36,7 @@ "pypromice.qc.percentiles": ["thresholds.csv"], "pypromice.postprocess": ["station_configurations.toml", "positions_seed.csv"], }, - install_requires=['numpy>=1.23.0', 'pandas>=1.5.0', 'xarray>=2022.6.0', 'toml', 'scipy>=1.9.0', 'Bottleneck', 'netcdf4', 'pyDataverse==0.3.1', 'eccodes', 
'scikit-learn>=1.1.0'], + install_requires=['numpy>=1.23.0,<2.0.0', 'pandas>=1.5.0', 'xarray>=2022.6.0', 'toml', 'scipy>=1.9.0', 'Bottleneck', 'netcdf4', 'pyDataverse==0.3.1', 'eccodes', 'scikit-learn>=1.1.0'], # extras_require={'postprocess': ['eccodes','scikit-learn>=1.1.0']}, entry_points={ 'console_scripts': [ diff --git a/src/pypromice/process/L1toL2.py b/src/pypromice/process/L1toL2.py index 1a0087cb..c8b32bbc 100644 --- a/src/pypromice/process/L1toL2.py +++ b/src/pypromice/process/L1toL2.py @@ -86,7 +86,7 @@ def toL2( # filtering gps_lat, gps_lon and gps_alt based on the difference to a baseline elevation # right now baseline elevation is gapfilled monthly median elevation - baseline_elevation = (ds.gps_alt.to_series().resample('M').median() + baseline_elevation = (ds.gps_alt.to_series().resample('MS').median() .reindex(ds.time.to_series().index, method='nearest') .ffill().bfill()) mask = (np.abs(ds.gps_alt - baseline_elevation) < 100) & ds.gps_alt.notnull() @@ -327,7 +327,7 @@ def smoothTilt(da: xr.DataArray, threshold=0.2): # we calculate the moving standard deviation over a 3-day sliding window # hourly resampling is necessary to make sure the same threshold can be used # for 10 min and hourly data - moving_std_gap_filled = da.to_series().resample('H').median().rolling( + moving_std_gap_filled = da.to_series().resample('h').median().rolling( 3*24, center=True, min_periods=2 ).std().reindex(da.time, method='bfill').values # we select the good timestamps and gapfill assuming that @@ -354,7 +354,7 @@ def smoothRot(da: xr.DataArray, threshold=4): xarray.DataArray smoothed rotation measurements from inclinometer ''' - moving_std_gap_filled = da.to_series().resample('H').median().rolling( + moving_std_gap_filled = da.to_series().resample('h').median().rolling( 3*24, center=True, min_periods=2 ).std().reindex(da.time, method='bfill').values # same as for tilt with, in addition: diff --git a/src/pypromice/qc/persistence.py b/src/pypromice/qc/persistence.py index 
f59bf45c..5f5d55f4 100644 --- a/src/pypromice/qc/persistence.py +++ b/src/pypromice/qc/persistence.py @@ -161,6 +161,6 @@ def duration_consecutive_true( # assert series.dtype == bool cumsum = ((series.index - series.index[0]).total_seconds()/3600).to_series(index=series.index) is_first = series.astype("int").diff() == 1 - offset = (is_first * cumsum).replace(0, np.nan).fillna(method="ffill").fillna(0) + offset = (is_first * cumsum).replace(0, np.nan).ffill().fillna(0) return (cumsum - offset) * series