Extend r.watershed test suite for differences between ram and seg versions #2482

Open: wants to merge 5 commits into base: main
raster/r.watershed/testsuite/test_equal_ram_seg_output.py (new file, 160 additions, 0 deletions)
"""
Name: test_equal_ram_seg_output.py
Purpose: Ensure equal output produced by the in-memory (ram) and
segmentation library (seg) versions of r.watershed.

Author: Michel Wortmann
Copyright: (C) 2022 by Michel Wortmann and the GRASS Development Team
Licence: This program is free software under the GNU General Public
License (>=v2). Read the file COPYING that comes with GRASS
for details.
"""

from grass.gunittest.case import TestCase
from grass.gunittest.main import test


class TestEqualRamSegOutput(TestCase):
"""Test case for watershed module"""

elevation = "elevation"

output_precision = {
"accumulation": 1,
"tci": 0.01,
"spi": 0.01,
"drainage": 0,
"basin": 0,
"stream": 0,
"half_basin": 0,
"length_slope": 0.01,
"slope_steepness": 0.01,
}

tmp_input_rasters = ["random_fraction", "random_percent"]

inputs = [
{}, # required only
{"flags": "s"},
{"flags": "4"},
{"depression": "random_fraction"},
{"flow": "random_fraction"},
{"disturbed_land": "random_percent"},
{"blocking": "random_fraction"},
{"retention": "random_percent"},
]

@property
def outputs(self):
return list(self.output_precision)

@classmethod
def setUpClass(cls):
"""Ensures expected computational region and setup"""
# Always use the computational region of the raster elevation
cls.use_temp_region()
cls.runModule("g.region", raster=cls.elevation)

        # random input rasters: elevation values at 10000 random cells
cls.runModule(
"r.random",
input=cls.elevation,
npoints=10000,
raster="random_fraction",
seed=1234,
overwrite=True,
)
cls.runModule(
"r.mapcalc",
expression="random_percent=random_fraction*100",
overwrite=True,
)

@classmethod
def tearDownClass(cls):
"""Remove the temporary region"""
cls.runModule("g.remove", flags="f", type="raster", name=cls.tmp_input_rasters)
cls.del_temp_region()

def tearDown(self):
"""Remove the outputs created from the watershed module

This is executed after each test run.
"""
self.runModule(
"g.remove",
flags="f",
type="raster",
pattern=",".join([o + "__*" for o in self.outputs]),
)

    def same_ram_seg_output(self, outputs=None, **input_args):
        """Run both versions and return a list of per-output pass flags."""

        outputs = outputs or self.outputs

flags = dict(ram="", seg="m")
kw = dict(
elevation=self.elevation,
threshold=1000,
overwrite=True,
)
kw.update(input_args)
# run module with/without -m
for n, f in flags.items():
# add outputs
kw.update({o: "%s__%s" % (o, n) for o in outputs})
kw["flags"] = input_args.get("flags", "") + f
self.assertModule("r.watershed", **kw)

        # check difference of outputs
        msg = "ram and seg version output %s is not the same"
        if input_args:
            msg += " with input " + str(input_args)
passes = []
for o in outputs:
            # subTest unfortunately doesn't work here
# with self.subTest("Testing difference in ram vs seg output", output=o):
prec = self.output_precision.get(o, 0)
try:
self.assertRastersNoDifference(
"%s__ram" % o, "%s__seg" % o, prec, msg=msg % o
)
passes.append(True)
except AssertionError:
passes.append(False)
return passes

    def test_same_ram_seg_output(self):
        """Compare ram and seg outputs for all input combinations."""
        passes = []
for oi in self.inputs:
passes.append(self.same_ram_seg_output(**oi))

# create nice markdown table of matches
msg = (
"Output of ram and seg versions of r.watershed do not match:"
+ "\n\n"
+ self.md_table(passes)
+ "\n"
)

        self.assertTrue(all(all(p) for p in passes), msg=msg)
Review comment (Member):
I think this test looks good. The only thing I would change is the number of reported failures. It's currently 1 from AssertionError if any tests fail. It would be useful to report the number of individual failed tests (red circles) somewhere.
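One way to address this, as a minimal sketch rather than part of the submitted patch (the names n_failed and n_total and the message wording are illustrative), is to count the False entries in the passes matrix and report that count in the final assertion, e.g. replacing the closing assertTrue of test_same_ram_seg_output:

        # sketch: report how many individual comparisons failed (red circles)
        n_failed = sum(not ok for p in passes for ok in p)
        n_total = sum(len(p) for p in passes)
        msg = "%d of %d ram/seg output comparisons differ:\n\n%s\n" % (
            n_failed,
            n_total,
            self.md_table(passes),
        )
        self.assertEqual(n_failed, 0, msg=msg)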


    def md_table(self, passes):
        """Format the pass/fail matrix as a markdown table."""

        def columns(cells):
            return "| " + " | ".join(map(str, cells)) + " |"

        header = columns(
            [", ".join("%s=%s" % kw for kw in d.items()) for d in self.inputs]
        )
        # drop the temporary raster names to keep the header short
        for ir in self.tmp_input_rasters:
            header = header.replace("=" + ir, "")
        msg = "| Output " + header + "\n"
        msg += columns(["---"] * (len(self.inputs) + 1)) + "\n"
        symbols = {True: ":white_check_mark:", False: ":red_circle:"}
        for o, p in zip(self.outputs, zip(*passes)):
            sym = [symbols[b] for b in p]
            msg += ("| %s " % o) + columns(sym) + "\n"
        return msg


if __name__ == "__main__":
test()
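Note: like other GRASS gunittest suites, this file can be run directly inside a GRASS session that provides the elevation raster (the North Carolina sample dataset does), for example with an illustrative dataset path:

grass ~/grassdata/nc_spm_08_grass7/PERMANENT --exec python3 raster/r.watershed/testsuite/test_equal_ram_seg_output.py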