wavelets: improved wavelet reconstruction with additional constraints
JoepVanlier committed Jul 28, 2023
1 parent ea13908 commit 5d084da
Showing 8 changed files with 137 additions and 2 deletions.
103 changes: 103 additions & 0 deletions lumicks/pylake/kymotracker/detail/denoising.py
@@ -289,6 +289,7 @@ def filter_image(self, image, false_detection_rate=0.1, verbose=False):
        verbose : bool
            Show extra output
        """
        image = np.array(image, dtype=float)  # integer arrays lose quality when convolved
        detail_coefficients, remainder = self._calculate_wavelets(image, stabilize=True)

        if verbose:
@@ -314,3 +315,105 @@ def filter_image(self, image, false_detection_rate=0.1, verbose=False):
        output_image[output_image < 0] = 0

        return output_image, significant

    def filter_regularized(
        self,
        image,
        false_detection_rate=0.1,
        num_iter=10,
        remove_background=True,
        verbose=False,
    ):
        """Filter the image using a regularized wavelet reconstruction.

        This reconstructs the image under the additional constraint that the resulting image
        has to be positive and sparse (L1 regularization). This regularization helps because
        the IUWT is overcomplete.

        The regularization (shrinkage) is performed via soft-thresholding: coefficients below a
        threshold are set to zero, while values above the threshold are shrunk towards zero. The
        threshold is then decreased each iteration (a requirement of the algorithm). Note that at
        each step, the significant structures are forced to be maintained.

        As a result, we obtain a sparse representation that fulfills positivity while keeping
        all the coefficients that were significant.

        Parameters
        ----------
        image : np.ndarray
            Image array
        false_detection_rate : float
            Represents the False Detection Rate when it comes to pixels with significant
            signal. The probability of erroneously detecting spots in spot-free homogeneous
            noise is upper bounded by this value.
        num_iter : int
            Number of iterations to run.
        remove_background : bool
            Remove the background after approximating the image? This amounts to not adding
            the final approximation layer.
        verbose : bool
            Show some output while it is running.
        """
        image = np.array(image, dtype=float)  # integer arrays lose quality when convolved

        # Grab the significant wavelet coefficients first.
        _, significant = self.filter_image(image, false_detection_rate=false_detection_rate)

        detailed_coeffs, remainder = self._calculate_wavelets(image, stabilize=False)

        def positivity_projector(coefficients):
            """Calculate the coefficients that will lead to a fully positive reconstruction.

            This corresponds to the operator Qs2 in the paper [1].

            Parameters
            ----------
            coefficients : list of np.ndarray
                Wavelet detail coefficients
            """
            img = sum(coefficients) + remainder
            positive_img = np.clip(img, 0, np.inf)
            coefficients, _ = self._calculate_wavelets(positive_img, stabilize=False)
            return coefficients

        def significance_enforcer(coefficients):
            """Force significant structures to remain.

            This corresponds to the operator Ps in the paper [1].

            Parameters
            ----------
            coefficients : list of np.ndarray
                Wavelet detail coefficients
            """
            for c, d, sig in zip(coefficients, detailed_coeffs, significant):
                c[sig] = d[sig]
            return coefficients

        current_coeffs = [np.copy(d) for d in detailed_coeffs]
        beta = 1.0

        for k in range(num_iter):
            # Alternate the two projections: enforce a positive reconstruction, then restore
            # the coefficients that were flagged as significant.
            positive = positivity_projector(current_coeffs)
            next_solution = significance_enforcer(positive)

            if verbose:
                print(f"Iter: {k}: Beta={beta}")

            def soft_threshold(detail_coeffs, threshold):
                """Soft-thresholding in the context of L1 optimization involves stepping towards
                zero, and truncating any values below the step size to zero."""
                detail_coeffs[abs(detail_coeffs) < threshold] = 0.0
                detail_coeffs[detail_coeffs > threshold] -= threshold
                detail_coeffs[detail_coeffs < -threshold] += threshold
                return detail_coeffs

            current_coeffs = [soft_threshold(d.copy(), beta) for d in next_solution]
            beta -= 1.0 / num_iter  # decrease the threshold linearly over the iterations

        reconstructed_img = sum(current_coeffs)
        if not remove_background:
            reconstructed_img += remainder

        return np.clip(reconstructed_img, 0.0, np.inf)
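
As an illustrative aside (not part of this commit): the soft-thresholding rule and the decreasing threshold schedule used above can be demonstrated in isolation. The array values below are made up, and the helper mirrors the shrinkage rule of soft_threshold in an equivalent vectorized form.

import numpy as np

def soft_threshold(values, threshold):
    # Shrink towards zero: anything within the threshold collapses to zero,
    # everything else moves closer to zero by exactly the threshold.
    return np.sign(values) * np.maximum(np.abs(values) - threshold, 0.0)

coefficients = np.array([-1.0, -0.2, 0.05, 0.3, 2.0])
num_iter = 4
for k in range(num_iter):
    beta = 1.0 - k / num_iter  # threshold decreases each iteration, as in filter_regularized
    print(f"beta={beta:.2f}", soft_threshold(coefficients, beta))
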
6 Git LFS files not shown
20 changes: 20 additions & 0 deletions lumicks/pylake/kymotracker/tests/test_denoise.py
@@ -161,3 +161,23 @@ def test_denoising_basic(two_dimensional, reference_data):
            output_image, file_name="output_image_2d" if two_dimensional else "output_image_1d"
        ),
    )


@pytest.mark.parametrize(
    "two_dimensional, remove_background",
    [(False, False), (False, True), (True, False), (True, True)],
)
def test_denoising_regularized(two_dimensional, remove_background, reference_data):
    data, truth = denoising_source_image()
    kernels = generate_bspline_kernels(3)
    msvst = MultiScaleVarianceStabilizingTransform(kernels, two_dimensional=two_dimensional)
    image = msvst.filter_regularized(
        data, false_detection_rate=0.1, remove_background=remove_background
    )
    np.testing.assert_allclose(
        image,
        reference_data(
            image,
            file_name=f"{1 + two_dimensional}d_{'no_' if remove_background else ''}background",
        ),
    )
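
Outside of pytest, the new method would be called with the same pattern as in the test above. A minimal sketch, assuming generate_bspline_kernels and MultiScaleVarianceStabilizingTransform are importable from lumicks.pylake.kymotracker.detail.denoising (as the test module suggests) and substituting a synthetic Poisson-noise image for the test fixture:

import numpy as np

from lumicks.pylake.kymotracker.detail.denoising import (
    MultiScaleVarianceStabilizingTransform,
    generate_bspline_kernels,
)

# Synthetic kymograph-like image: one bright line plus Poisson noise (illustrative only)
rng = np.random.default_rng(0)
truth = np.zeros((64, 64))
truth[32, 10:50] = 5.0
noisy = rng.poisson(truth + 0.1).astype(float)

kernels = generate_bspline_kernels(3)  # three wavelet scales, as in the tests
msvst = MultiScaleVarianceStabilizingTransform(kernels, two_dimensional=True)

# Regularized reconstruction; remove_background=True would drop the final approximation layer
denoised = msvst.filter_regularized(noisy, false_detection_rate=0.1, num_iter=10)
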
