Extend r.watershed test suite for differences between ram and seg versions #2482
Status: Open. mwort wants to merge 5 commits into OSGeo:main from mwort:r_watershed_ram_seg_tests.
Commits (5):

- 739452e  Extend r.watershed test suite to check differences between the ram an…
- 7b7063b  Move ram-seg test to separate test case and file.
- 21947f2  Apply Black formatting to new r.watershed test file.
- 4d34323  Merge branch 'main' into r_watershed_ram_seg_tests (echoix)
- 2b6f28b  Merge branch 'main' into r_watershed_ram_seg_tests (echoix)
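For context, the check this PR automates can also be done by hand. A minimal sketch, assuming an active GRASS session with the North Carolina sample dataset; the map names accum_ram, accum_seg, and accum_diff are illustrative, not taken from the PR:

```python
# Sketch: compare r.watershed's in-memory (ram) and disk-swap (seg)
# modes by hand. Assumes a GRASS session with the NC sample dataset;
# the output map names below are made up for illustration.
import grass.script as gs

kw = dict(elevation="elevation", threshold=1000, overwrite=True)
gs.run_command("r.watershed", accumulation="accum_ram", **kw)  # default ram mode
gs.run_command("r.watershed", flags="m", accumulation="accum_seg", **kw)  # -m: seg mode

# Cell-by-cell absolute difference between the two results
gs.run_command(
    "r.mapcalc",
    expression="accum_diff = abs(accum_ram - accum_seg)",
    overwrite=True,
)
stats = gs.parse_command("r.univar", map="accum_diff", flags="g")
print("max |ram - seg| difference:", stats["max"])
```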
raster/r.watershed/testsuite/test_equal_ram_seg_output.py (160 additions, 0 deletions)
""" | ||
Name: test_equal_ram_seg_output.py | ||
Purpose: Ensure equal output produced by the in-memory (ram) and | ||
segmentation library (seg) versions of r.watershed. | ||
|
||
Author: Michel Wortmann | ||
Copyright: (C) 2022 by Michel Wortmann and the GRASS Development Team | ||
Licence: This program is free software under the GNU General Public | ||
License (>=v2). Read the file COPYING that comes with GRASS | ||
for details. | ||
""" | ||
|
||
from grass.gunittest.case import TestCase | ||
from grass.gunittest.main import test | ||
|
||
|
||
class TestEqualRamSegOutput(TestCase): | ||
"""Test case for watershed module""" | ||
|
||
elevation = "elevation" | ||
|
||
output_precision = { | ||
"accumulation": 1, | ||
"tci": 0.01, | ||
"spi": 0.01, | ||
"drainage": 0, | ||
"basin": 0, | ||
"stream": 0, | ||
"half_basin": 0, | ||
"length_slope": 0.01, | ||
"slope_steepness": 0.01, | ||
} | ||
|
||
tmp_input_rasters = ["random_fraction", "random_percent"] | ||
|
||
inputs = [ | ||
{}, # required only | ||
{"flags": "s"}, | ||
{"flags": "4"}, | ||
{"depression": "random_fraction"}, | ||
{"flow": "random_fraction"}, | ||
{"disturbed_land": "random_percent"}, | ||
{"blocking": "random_fraction"}, | ||
{"retention": "random_percent"}, | ||
] | ||
|
||
@property | ||
def outputs(self): | ||
return list(self.output_precision) | ||
|
||
@classmethod | ||
def setUpClass(cls): | ||
"""Ensures expected computational region and setup""" | ||
# Always use the computational region of the raster elevation | ||
cls.use_temp_region() | ||
cls.runModule("g.region", raster=cls.elevation) | ||
|
||
# random points raster | ||
cls.runModule( | ||
"r.random", | ||
input=cls.elevation, | ||
npoints=10000, | ||
raster="random_fraction", | ||
seed=1234, | ||
overwrite=True, | ||
) | ||
cls.runModule( | ||
"r.mapcalc", | ||
expression="random_percent=random_fraction*100", | ||
overwrite=True, | ||
) | ||
|
||
@classmethod | ||
def tearDownClass(cls): | ||
"""Remove the temporary region""" | ||
cls.runModule("g.remove", flags="f", type="raster", name=cls.tmp_input_rasters) | ||
cls.del_temp_region() | ||
|
||
def tearDown(self): | ||
"""Remove the outputs created from the watershed module | ||
|
||
This is executed after each test run. | ||
""" | ||
self.runModule( | ||
"g.remove", | ||
flags="f", | ||
type="raster", | ||
pattern=",".join([o + "__*" for o in self.outputs]), | ||
) | ||
|
||
def same_ram_seg_output(self, outputs=None, **input_args): | ||
"""Check if the output of the ram and seg version is the same.""" | ||
|
||
outputs = outputs or self.outputs | ||
|
||
flags = dict(ram="", seg="m") | ||
kw = dict( | ||
elevation=self.elevation, | ||
threshold=1000, | ||
overwrite=True, | ||
) | ||
kw.update(input_args) | ||
# run module with/without -m | ||
for n, f in flags.items(): | ||
# add outputs | ||
kw.update({o: "%s__%s" % (o, n) for o in outputs}) | ||
kw["flags"] = input_args.get("flags", "") + f | ||
self.assertModule("r.watershed", **kw) | ||
|
||
# check difference of outputs | ||
msg = "ram and seg version output %s is not the same" | ||
msg += " with input " + str(input_args) if input_args else "" | ||
passes = [] | ||
for o in outputs: | ||
# subTest unfortunately doesnt work here | ||
# with self.subTest("Testing difference in ram vs seg output", output=o): | ||
prec = self.output_precision.get(o, 0) | ||
try: | ||
self.assertRastersNoDifference( | ||
"%s__ram" % o, "%s__seg" % o, prec, msg=msg % o | ||
) | ||
passes.append(True) | ||
except AssertionError: | ||
passes.append(False) | ||
return passes | ||
|
||
def test_same_ram_seg_output(self): | ||
|
||
passes = [] | ||
for oi in self.inputs: | ||
passes.append(self.same_ram_seg_output(**oi)) | ||
|
||
# create nice markdown table of matches | ||
msg = ( | ||
"Output of ram and seg versions of r.watershed do not match:" | ||
+ "\n\n" | ||
+ self.md_table(passes) | ||
+ "\n" | ||
) | ||
|
||
self.assertTrue(all([all(p) for p in passes]), msg=msg) | ||
|
||
def md_table(self, passes): | ||
columns = lambda l: "| " + (" | ".join(map(str, l))) + " |" | ||
strinpts = columns( | ||
[", ".join(["%s=%s" % kw for kw in d.items()]) for d in self.inputs] | ||
) | ||
for ir in self.tmp_input_rasters: | ||
strinpts = strinpts.replace("=" + ir, "") | ||
msg = "| Output " + strinpts + "\n" | ||
msg += columns(["---"] * (len(self.inputs) + 1)) + "\n" | ||
symbols = {True: ":white_check_mark:", False: ":red_circle:"} | ||
for o, p in zip(self.outputs, zip(*passes)): | ||
sym = [symbols[b] for b in p] | ||
msg += ("| %s " % o) + columns(sym) + "\n" | ||
return msg | ||
|
||
|
||
if __name__ == "__main__": | ||
test() |
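If I read the gunittest conventions right, the new file can be run directly: inside a GRASS session using the North Carolina sample location, `python3 raster/r.watershed/testsuite/test_equal_ram_seg_output.py` executes the suite via the `test()` entry point at the bottom of the file.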
Review comment: I think this test looks good. The only thing I would change is the number of reported failures: currently a single AssertionError is raised if any tests fail. It would be useful to report the number of individual failed tests (red circles) somewhere.
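One way to implement that suggestion (a sketch against this PR's code, not something in the diff) would be to count the failed comparisons in test_same_ram_seg_output and fold the number into the assertion message:

```python
# Sketch of the suggestion above: count the individual failed ram/seg
# comparisons (the red circles) and report the count in the final
# assertion message, next to the markdown table.
n_failed = sum(not ok for p in passes for ok in p)
n_total = sum(len(p) for p in passes)
self.assertTrue(
    n_failed == 0,
    msg="%d of %d ram/seg output comparisons failed:\n\n%s\n"
    % (n_failed, n_total, self.md_table(passes)),
)
```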