
Commit

Merge pull request #174 from IMSY-DKFZ/T77-OffByOneError
T77 off-by-one error
kdreher authored Sep 23, 2022
2 parents 7e9558c + ab72fc7 commit 0529b45
Showing 8 changed files with 224 additions and 108 deletions.
--- Changed file 1 of 8 ---
@@ -40,14 +40,14 @@ def __init__(self, pitch_mm=0.5,
number_detector_elements * pitch_mm / 2,
0, 0, 0, 50])
super(LinearArrayDetectionGeometry, self).__init__(
number_detector_elements=number_detector_elements,
detector_element_width_mm=detector_element_width_mm,
detector_element_length_mm=detector_element_length_mm,
center_frequency_hz=center_frequency_hz,
bandwidth_percent=bandwidth_percent,
sampling_frequency_mhz=sampling_frequency_mhz,
device_position_mm=device_position_mm,
field_of_view_extent_mm=field_of_view_extent_mm)
self.pitch_mm = pitch_mm
self.probe_width_mm = (number_detector_elements - 1) * self.pitch_mm

@@ -67,8 +67,17 @@ def get_detector_element_positions_base_mm(self) -> np.ndarray:

detector_positions = np.zeros((self.number_detector_elements, 3))

-        det_elements = np.arange(-int(self.number_detector_elements / 2),
-                                 int(self.number_detector_elements / 2)) * self.pitch_mm + 0.5 * self.pitch_mm
+        if self.number_detector_elements > 1:
+            if self.number_detector_elements % 2 == 0:
+                # even number of detector elements
+                det_elements = np.arange(-int(self.number_detector_elements / 2),
+                                         int(self.number_detector_elements / 2)) * self.pitch_mm + 0.5 * self.pitch_mm
+            else:
+                # odd number of detector elements
+                det_elements = np.arange(-int(self.number_detector_elements / 2),
+                                         int(self.number_detector_elements / 2) + 1) * self.pitch_mm
+        else:
+            det_elements = 0

detector_positions[:, 0] = det_elements
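
The new branch keeps the element centers symmetric about the probe axis for both even and odd element counts, so their span again matches probe_width_mm = (number_detector_elements - 1) * pitch_mm; the old single formula produced one element too few when the count was odd. A minimal standalone sketch of that logic (the helper name element_centers is made up for illustration and is not part of the repository):

    import numpy as np

    def element_centers(n_elements: int, pitch_mm: float) -> np.ndarray:
        """Sketch of the element-center logic from the hunk above."""
        if n_elements > 1:
            if n_elements % 2 == 0:
                # even count: shift by half a pitch so the centers straddle zero
                return np.arange(-(n_elements // 2), n_elements // 2) * pitch_mm + 0.5 * pitch_mm
            # odd count: the middle element sits exactly at zero
            return np.arange(-(n_elements // 2), n_elements // 2 + 1) * pitch_mm
        return np.array([0.0])

    print(element_centers(4, 0.5))  # [-0.75 -0.25  0.25  0.75], span = (4 - 1) * 0.5
    print(element_centers(5, 0.5))  # [-1.  -0.5  0.   0.5  1. ], span = (5 - 1) * 0.5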

--- Changed file 2 of 8 ---
@@ -102,24 +102,9 @@

elem_pos = data.sensor_element_positions/1000;

- % In case some detectors are defined at zeros or with negative values out
- % of bounds, correct all of them with minimum needed correction of
- % spacing dx.
-
- min_x_pos = find(elem_pos(1, :) <= 0);
- min_y_pos = find(elem_pos(2, :) <= 0);
- x_correction = 0;
- y_correction = 0;
- if size(min_x_pos) > 0
-     x_correction = dx;
- end
-
- if size(min_y_pos) > 0
-     y_correction = dx;
- end
-
- elem_pos(1, :) = elem_pos(1, :) - 0.5 * kgrid.x_size + x_correction + dx * GEL_LAYER_HEIGHT;
- elem_pos(2, :) = elem_pos(2, :) - 0.5 * kgrid.y_size + y_correction;
+ elem_pos(1, :) = elem_pos(1, :) - 0.5 * kgrid.x_size + dx * GEL_LAYER_HEIGHT;
+ elem_pos(2, :) = elem_pos(2, :) - 0.5 * kgrid.y_size;

num_elements = size(elem_pos, 2);
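
For context, the deleted block shifted every element in a dimension by one extra grid spacing dx whenever any element in that dimension sat at or below zero, which moved the whole detector by up to one voxel relative to the intended geometry; the new lines apply only the uniform shift into k-Wave's grid-centered coordinate system. A rough Python illustration of that uniform shift (all values are made up; in the script dx, kgrid.x_size and GEL_LAYER_HEIGHT come from the k-Wave setup):

    import numpy as np

    dx = 1e-4                       # grid spacing in m (illustrative)
    x_size, y_size = 0.04, 0.04     # physical grid extent in m (illustrative)
    GEL_LAYER_HEIGHT = 0            # gel-layer thickness in voxels (illustrative)

    # element positions in m, one column per detector element
    elem_pos = np.array([[0.005, 0.010, 0.015],
                         [0.020, 0.020, 0.020]])

    # uniform shift into the grid-centered coordinate system, as in the new code path
    elem_pos[0, :] = elem_pos[0, :] - 0.5 * x_size + dx * GEL_LAYER_HEIGHT
    elem_pos[1, :] = elem_pos[1, :] - 0.5 * y_size
    print(elem_pos)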

--- Changed file 3 of 8 ---
@@ -100,32 +100,9 @@

elem_pos = data.sensor_element_positions/1000;

- % In case some detectors are defined at zeros or with negative values out
- % of bounds, correct all of them with minimum needed correction of the
- % spacing dx.
-
- min_x_pos = find(elem_pos(1, :) <= 0);
- min_y_pos = find(elem_pos(2, :) <= 0);
- min_z_pos = find(elem_pos(3, :) <= 0);
- x_correction = 0;
- y_correction = 0;
- z_correction = 0;
- if size(min_x_pos) > 0
-     x_correction = dx;
- end
-
- if size(min_y_pos) > 0
-     y_correction = dx;
- end
-
- if size(min_z_pos) > 0
-     z_correction = dx;
- end
-
- elem_pos(1, :) = elem_pos(1, :) - 0.5 * kgrid.x_size + x_correction + dx * GEL_LAYER_HEIGHT;
- elem_pos(2, :) = elem_pos(2, :) - 0.5 * kgrid.y_size + y_correction;
- elem_pos(3, :) = elem_pos(3, :) - 0.5 * kgrid.z_size + z_correction;
+ elem_pos(1, :) = elem_pos(1, :) - 0.5 * kgrid.x_size + dx * GEL_LAYER_HEIGHT;
+ elem_pos(2, :) = elem_pos(2, :) - 0.5 * kgrid.y_size;
+ elem_pos(3, :) = elem_pos(3, :) - 0.5 * kgrid.z_size;
num_elements = size(elem_pos, 2);

element_width = double(settings.detector_element_width_mm)/1000;
--- Changed file 4 of 8 ---
@@ -32,7 +32,7 @@ def reconstruction_algorithm(self, time_series_sensor_data, detection_geometry:
### ALGORITHM ITSELF ###

xdim, zdim, ydim, xdim_start, xdim_end, ydim_start, ydim_end, zdim_start, zdim_end = compute_image_dimensions(
-     detection_geometry, spacing_in_mm, speed_of_sound_in_m_per_s, self.logger)
+     detection_geometry, spacing_in_mm, self.logger)

if zdim == 1:
sensor_positions[:, 1] = 0 # Assume imaging plane
--- Changed file 5 of 8 ---
@@ -32,7 +32,7 @@ def reconstruction_algorithm(self, time_series_sensor_data, detection_geometry:
### ALGORITHM ITSELF ###

xdim, zdim, ydim, xdim_start, xdim_end, ydim_start, ydim_end, zdim_start, zdim_end = compute_image_dimensions(
-     detection_geometry, spacing_in_mm, speed_of_sound_in_m_per_s, self.logger)
+     detection_geometry, spacing_in_mm, self.logger)

if zdim == 1:
sensor_positions[:, 1] = 0 # Assume imaging plane
--- Changed file 6 of 8 ---
@@ -33,7 +33,7 @@ def reconstruction_algorithm(self, time_series_sensor_data, detection_geometry:
### ALGORITHM ITSELF ###

xdim, zdim, ydim, xdim_start, xdim_end, ydim_start, ydim_end, zdim_start, zdim_end = compute_image_dimensions(
-     detection_geometry, spacing_in_mm, speed_of_sound_in_m_per_s, self.logger)
+     detection_geometry, spacing_in_mm, self.logger)

if zdim == 1:
sensor_positions[:, 1] = 0 # Assume imaging plane
--- Changed file 7 of 8 ---
@@ -50,29 +50,31 @@ def get_apodization_factor(apodization_method: str = Tags.RECONSTRUCTION_APODIZA

return output


def bandpass_filter_with_settings(data: np.ndarray, global_settings: Settings, component_settings: Settings,
device: DetectionGeometryBase) -> np.ndarray:
"""
Applies corresponding bandpass filter which can be set in
`component_settings[Tags.BANDPASS_FILTER_METHOD]`, using Tukey window-based filter as default.
:param data: (numpy array) data to be filtered
:param global_settings: (Settings) settings for the whole simulation
:param component_settings: (Settings) settings for the reconstruction module
:param device:
:return: (numpy array) filtered data
"""

    # select corresponding filtering method depending on tag in settings
    if Tags.BANDPASS_FILTER_METHOD in component_settings:
        if component_settings[Tags.BANDPASS_FILTER_METHOD] == Tags.TUKEY_BANDPASS_FILTER:
            return tukey_bandpass_filtering_with_settings(data, global_settings, component_settings, device)
        elif component_settings[Tags.BANDPASS_FILTER_METHOD] == Tags.BUTTERWORTH_BANDPASS_FILTER:
            return butter_bandpass_filtering_with_settings(data, global_settings, component_settings, device)
        else:
            return tukey_bandpass_filtering_with_settings(data, global_settings, component_settings, device)
    else:
        return tukey_bandpass_filtering_with_settings(data, global_settings, component_settings, device)


def butter_bandpass_filtering(data: np.array, time_spacing_in_ms: float = None,
cutoff_lowpass: int = int(8e6), cutoff_highpass: int = int(0.1e6),
@@ -101,14 +103,15 @@ def butter_bandpass_filtering(data: np.array, time_spacing_in_ms: float = None,
high = 0.999999999
else:
high = (cutoff_highpass / nyquist)

b, a = butter(N=order, Wn=[high, low], btype='band')
y = lfilter(b, a, data)

return y
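
As a usage illustration of the call above, a self-contained Butterworth bandpass sketch with scipy; the Nyquist frequency computation and the default filter order are not visible in this excerpt, so those parts are assumptions:

    import numpy as np
    from scipy.signal import butter, lfilter

    def butter_bandpass_sketch(data, time_spacing_in_ms, cutoff_lowpass=int(8e6),
                               cutoff_highpass=int(0.1e6), order=1):
        # assumed: sampling rate is 1 / time spacing, Nyquist is half of that
        nyquist = 0.5 / (time_spacing_in_ms / 1000)
        low = min(cutoff_lowpass / nyquist, 0.999999999)    # normalized upper band edge
        high = min(cutoff_highpass / nyquist, 0.999999999)  # normalized lower band edge
        b, a = butter(N=order, Wn=[high, low], btype='band')
        return lfilter(b, a, data)

    # a 40 MHz sampled time series has a time spacing of 25 ns = 2.5e-5 ms
    filtered = butter_bandpass_sketch(np.random.randn(2048), time_spacing_in_ms=2.5e-5)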


def butter_bandpass_filtering_with_settings(data: np.ndarray, global_settings: Settings, component_settings: Settings,
device: DetectionGeometryBase) -> np.ndarray:
"""
Apply a butterworth bandpass filter of `order` with cutoff values at `cutoff_lowpass`
and `cutoff_highpass` MHz on the `data` using the scipy.signal.butter filter.
@@ -142,8 +145,8 @@ def butter_bandpass_filtering_with_settings(data: np.ndarray, global_settings: S


def tukey_bandpass_filtering(data: np.ndarray, time_spacing_in_ms: float = None,
cutoff_lowpass: int = int(8e6), cutoff_highpass: int = int(0.1e6),
- tukey_alpha: float = 0.5, resampling_for_fft: bool =False) -> np.ndarray:
+ tukey_alpha: float = 0.5, resampling_for_fft: bool = False) -> np.ndarray:
"""
Apply a tukey bandpass filter with cutoff values at `cutoff_lowpass` and `cutoff_highpass` MHz
and a tukey window with alpha value of `tukey_alpha` inbetween on the `data` in Fourier space.
@@ -165,7 +168,7 @@ def tukey_bandpass_filtering(data: np.ndarray, time_spacing_in_ms: float = None,
raise ValueError("The highpass cutoff value must be lower than the lowpass cutoff value.")

# no resampling by default
resampling_factor = 1
original_size = data.shape[-1]
target_size = original_size

@@ -175,9 +178,9 @@ def tukey_bandpass_filtering(data: np.ndarray, time_spacing_in_ms: float = None,
order = 0
mode = 'constant'

target_size = int(2**(np.ceil(np.log2(original_size)))) # compute next larger power of 2
resampling_factor = original_size/target_size
zoom_factors = [1]*data.ndim # resampling factor for each dimension
zoom_factors[-1] = 1.0/resampling_factor

data = zoom(data, zoom_factors, order=order, mode=mode)
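
The block above resamples the time axis to the next power of two because the FFT is typically fastest for power-of-two lengths; after filtering, zooming by resampling_factor restores the original length. A short illustration with made-up data (the restore call mirrors the resampling-back step that is only partly visible in this excerpt, so treat it as an assumption):

    import numpy as np
    from scipy.ndimage import zoom

    data = np.random.randn(8, 1000)                            # 8 channels, 1000 time samples
    original_size = data.shape[-1]
    target_size = int(2 ** np.ceil(np.log2(original_size)))    # next power of two: 1024
    resampling_factor = original_size / target_size            # 1000 / 1024
    zoom_factors = [1] * data.ndim
    zoom_factors[-1] = 1.0 / resampling_factor                 # stretch only the time axis

    resampled = zoom(data, zoom_factors, order=0, mode='constant')
    print(resampled.shape)                                     # (8, 1024)

    restored = zoom(resampled, [1, resampling_factor], order=0, mode='constant')
    print(restored.shape)                                      # (8, 1000)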
@@ -199,7 +202,7 @@ def tukey_bandpass_filtering(data: np.ndarray, time_spacing_in_ms: float = None,
# transform data into Fourier space, multiply filter and transform back
data_in_fourier_space = np.fft.fft(data)
filtered_data_in_fourier_space = data_in_fourier_space * np.broadcast_to(window, np.shape(data_in_fourier_space))
filtered_data = np.fft.ifft(filtered_data_in_fourier_space).real

# resample back to original size if necessary
if resampling_for_fft:
@@ -406,27 +409,52 @@ def preparing_reconstruction_and_obtaining_reconstruction_settings(
time_spacing_in_ms, torch_device)

def compute_image_dimensions(detection_geometry: DetectionGeometryBase, spacing_in_mm: float,
- speed_of_sound_in_m_per_s: float, logger: Logger) -> Tuple[int, int, int, int, int,
- int, int, int, int]:
+ logger: Logger) -> Tuple[int, int, int, np.float64, np.float64,
+ np.float64, np.float64, np.float64, np.float64]:
"""
- compute size of beamformed image from field of view of detection geometry
- Returns x,z,y dimensions of reconstructed image volume in pixels as well
- as the range for each dimension as start and end pixels.
+ Computes size of beamformed image from field of view of detection geometry given the spacing.
+ :param detection_geometry: detection geometry with specified field of view
+ :type detection_geometry: DetectionGeometryBase
+ :param spacing_in_mm: space between pixels in mm
+ :type spacing_in_mm: float
+ :param logger: logger for debugging purposes
+ :type logger: Logger
+ :returns: tuple with x,z,y dimensions of reconstructed image volume in pixels as well as the range for
+ each dimension as start and end pixels (xdim, zdim, ydim, xdim_start, xdim_end, ydim_start, ydim_end,
+ zdim_start, zdim_end)
+ :rtype: Tuple[int, int, int, np.float64, np.float64, np.float64, np.float64, np.float64, np.float64]
"""

field_of_view = detection_geometry.field_of_view_extent_mm
logger.debug(f"Field of view: {field_of_view}")

-     xdim_start = int(np.round(field_of_view[0] / spacing_in_mm))
-     xdim_end = int(np.round(field_of_view[1] / spacing_in_mm))
-     zdim_start = int(np.round(field_of_view[2] / spacing_in_mm))
-     zdim_end = int(np.round(field_of_view[3] / spacing_in_mm))
-     ydim_start = int(np.round(field_of_view[4] / spacing_in_mm))
-     ydim_end = int(np.round(field_of_view[5] / spacing_in_mm))
+     def compute_for_one_dimension(start_in_mm: float, end_in_mm: float) -> Tuple[int, np.float64, np.float64]:
+         """
+         Helper function to compute the image dimensions for a single dimension given a start and end point in mm.
+         Makes sure that the image dimension is an integer by flooring.
+         Spaces the pixels symmetrically between start and end.
+         :param start_in_mm: lower limit of the field of view in this dimension
+         :type start_in_mm: float
+         :param end_in_mm: upper limit of the field of view in this dimension
+         :type end_in_mm: float
+         :returns: tuple with number of pixels in dimension, lower and upper limit in pixels
+         :rtype: Tuple[int, np.float64, np.float64]
+         """
+
+         start_temp = start_in_mm / spacing_in_mm
+         end_temp = end_in_mm / spacing_in_mm
+         dim_temp = np.abs(end_temp - start_temp)
+         dim = int(np.floor(dim_temp))
+         diff = np.abs(dim_temp - dim)
+         start = start_temp - np.sign(start_temp) * diff/2
+         end = end_temp - np.sign(end_temp) * diff/2
+         return dim, start, end

-     xdim = (xdim_end - xdim_start)
-     ydim = (ydim_end - ydim_start)
-     zdim = (zdim_end - zdim_start)
+     xdim, xdim_start, xdim_end = compute_for_one_dimension(field_of_view[0], field_of_view[1])
+     zdim, zdim_start, zdim_end = compute_for_one_dimension(field_of_view[2], field_of_view[3])
+     ydim, ydim_start, ydim_end = compute_for_one_dimension(field_of_view[4], field_of_view[5])
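
The helper floors the pixel count and trims the remainder from both ends, so the reconstructed grid stays centered inside the field of view instead of growing by one pixel through rounding. A standalone copy with one worked example (the field-of-view numbers are chosen only for illustration; spacing_in_mm is passed explicitly here because the original helper is a closure):

    import numpy as np

    def compute_for_one_dimension_sketch(start_in_mm, end_in_mm, spacing_in_mm):
        start_temp = start_in_mm / spacing_in_mm
        end_temp = end_in_mm / spacing_in_mm
        dim_temp = np.abs(end_temp - start_temp)
        dim = int(np.floor(dim_temp))          # floor instead of rounding each limit
        diff = np.abs(dim_temp - dim)
        start = start_temp - np.sign(start_temp) * diff / 2
        end = end_temp - np.sign(end_temp) * diff / 2
        return dim, start, end

    # -19.7 mm .. 19.7 mm at 0.5 mm spacing: 78.8 pixels do not fit, so the helper
    # uses 78 pixels and trims 0.4 pixels from each side symmetrically
    print(compute_for_one_dimension_sketch(-19.7, 19.7, 0.5))  # (78, -39.0, 39.0)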

if xdim < 1:
xdim = 1
@@ -464,16 +492,19 @@ def compute_delay_and_sum_values(time_series_sensor_data: Tensor, sensor_positio
logger.debug(f'Number of pixels in X dimension: {xdim}, Y dimension: {ydim}, Z dimension: {zdim} '
f',number of sensor elements: {n_sensor_elements}')

+     x_offset = 0.5 if xdim % 2 == 0 else 0  # to ensure pixels are symmetrically arranged around the 0 like the
+                                             # sensor positions, add an offset of 0.5 pixels if the dimension is even
+
+     x = xdim_start + torch.arange(xdim, device=torch_device, dtype=torch.float32) + x_offset
+     y = ydim_start + torch.arange(ydim, device=torch_device, dtype=torch.float32)
      if zdim == 1:
-         xx, yy, zz, jj = torch.meshgrid(torch.arange(xdim_start, xdim_end, device=torch_device),
-                                         torch.arange(ydim_start, ydim_end, device=torch_device),
-                                         torch.arange(zdim, device=torch_device),
-                                         torch.arange(n_sensor_elements, device=torch_device))
+         z = torch.arange(zdim, device=torch_device, dtype=torch.float32)
      else:
-         xx, yy, zz, jj = torch.meshgrid(torch.arange(xdim_start, xdim_end, device=torch_device),
-                                         torch.arange(ydim_start, ydim_end, device=torch_device),
-                                         torch.arange(zdim_start, zdim_end, device=torch_device),
-                                         torch.arange(n_sensor_elements, device=torch_device))
+         z = zdim_start + torch.arange(zdim, device=torch_device, dtype=torch.float32)
+     j = torch.arange(n_sensor_elements, device=torch_device, dtype=torch.float32)
+
+     xx, yy, zz, jj = torch.meshgrid(x, y, z, j)
+     jj = jj.long()
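
The half-pixel offset exists because, for an even pixel count over a zero-centered field of view, integer pixel indices would be biased toward the negative side; adding 0.5 lines the pixel centers up symmetrically around 0, just like the sensor element positions. A small torch illustration with made-up numbers:

    import torch

    xdim, xdim_start = 4, -2.0   # e.g. 4 pixels over a field of view of -2..2 pixel units
    x_offset = 0.5 if xdim % 2 == 0 else 0
    without_offset = xdim_start + torch.arange(xdim, dtype=torch.float32)
    with_offset = without_offset + x_offset
    print(without_offset.tolist())  # [-2.0, -1.0, 0.0, 1.0]  -> biased to the negative side
    print(with_offset.tolist())     # [-1.5, -0.5, 0.5, 1.5]  -> symmetric around 0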

delays = torch.sqrt((yy * spacing_in_mm - sensor_positions[:, 2][jj]) ** 2 +
(xx * spacing_in_mm - sensor_positions[:, 0][jj]) ** 2 +
--- Changed file 8 of 8 (diff not loaded) ---
