denoiser

Base class and functions for all denoising algorithms.

@author: Nicola VIGANÒ, CEA-MEM, Grenoble, France

Classes:

  • DataScaleBias – Data scale and bias.
  • Denoiser – Base denoising class.

Functions:

  • compute_scaling_selfsupervised – Compute input data scaling and bias for self-supervised learning.
  • compute_scaling_supervised – Compute input and target data scaling and bias for supervised learning.
  • data_to_tensor – Convert a NumPy array to a PyTorch tensor.
  • get_flip_dims – Generate all possible combinations of dimensions to flip for a given number of dimensions.
  • get_normalization_range – Calculate the normalization range for a given volume.
  • get_random_image_indices – Return a list of random indices from 0 to num_imgs - 1.
  • get_random_pixel_mask – Generate a random pixel mask for a given data shape.
  • random_flips – Randomly flip images.
  • random_rotations – Randomly rotate images.

DataScaleBias dataclass

DataScaleBias(
    scale_inp: float | NDArray = 1.0,
    scale_out: float | NDArray = 1.0,
    scale_tgt: float | NDArray = 1.0,
    bias_inp: float | NDArray = 0.0,
    bias_out: float | NDArray = 0.0,
    bias_tgt: float | NDArray = 0.0,
)

Data scale and bias.
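
A short illustrative sketch of how these fields are applied, mirroring the pre/post-processing done in Denoiser.infer (values are arbitrary; the import path is assumed from the source layout shown below):

import numpy as np
from autoden.algorithms.denoiser import DataScaleBias  # import path assumed from the source layout

sb = DataScaleBias(scale_inp=0.5, bias_inp=0.1, scale_out=0.5, bias_out=0.1)
imgs = np.random.rand(4, 64, 64)

# Forward normalization, as applied to the network input
imgs_n = imgs * sb.scale_inp - sb.bias_inp
# Inverse transform, as applied to the network output
imgs_back = (imgs_n + sb.bias_out) / sb.scale_out  # recovers imgs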

Denoiser

Denoiser(
    model: int | str | NetworkParams | Module | Mapping,
    data_scale_bias: DataScaleBias | None = None,
    reg_val: float | LossRegularizer | None = None,
    device: str = "cuda" if is_available() else "cpu",
    batch_size: int | None = None,
    augmentation: str | Sequence[str] | None = None,
    save_epochs_dir: str | None = None,
    verbose: bool = True,
)

Bases: ABC

Base denoising class.

Parameters:

  • model (int | str | NetworkParams | Module | Mapping) –

    Type of neural network to use, or a specific network (or state) to use. If an int is given, it is the epoch number of a saved state to load from save_epochs_dir.

  • data_scale_bias (DataScaleBias | None, default: None ) –

    Scale and bias of the input data, by default None

  • reg_val (float | LossRegularizer | None, default: None ) –

    Regularization value or regularizer, by default None

  • device (str, default: 'cuda' if is_available() else 'cpu' ) –

    Device to use, by default "cuda" if cuda is available, otherwise "cpu"

  • batch_size (int | None, default: None ) –

    Size of the data batches, by default None

  • augmentation (str | Sequence[str] | None, default: None ) –

    Data augmentation(s) to apply, by default None

  • save_epochs_dir (str | None, default: None ) –

    Directory where to save network states at each epoch. Disabled if None, by default None

  • verbose (bool, default: True ) –

    Whether to produce verbose output, by default True

Methods:

  • infer

    Inference, given an initial stack of images.

  • train

    Training of the model, given the required input.

Attributes:

  • n_dims (int) –

    Returns the expected signal dimensions.

Source code in src/autoden/algorithms/denoiser.py
def __init__(
    self,
    model: int | str | NetworkParams | pt.nn.Module | Mapping,
    data_scale_bias: DataScaleBias | None = None,
    reg_val: float | LossRegularizer | None = None,
    device: str = "cuda" if pt.cuda.is_available() else "cpu",
    batch_size: int | None = None,
    augmentation: str | Sequence[str] | None = None,
    save_epochs_dir: str | None = None,
    verbose: bool = True,
) -> None:
    """Initialize the noise2noise method.

    Parameters
    ----------
    model : int | str | NetworkParams | pt.nn.Module | Mapping
        Type of neural network to use, or a specific network (or state) to use.
        If an int is given, it is the epoch number of a saved state to load from save_epochs_dir.
    data_scale_bias : DataScaleBias | None, optional
        Scale and bias of the input data, by default None
    reg_val : float | LossRegularizer | None, optional
        Regularization value or regularizer, by default None
    device : str, optional
        Device to use, by default "cuda" if cuda is available, otherwise "cpu"
    batch_size : int | None, optional
        Size of the data batches, by default None
    augmentation : str | Sequence[str] | None, optional
        Data augmentation(s) to apply, by default None
    save_epochs_dir : str | None, optional
        Directory where to save network states at each epoch.
        Disabled if None, by default None
    verbose : bool, optional
        Whether to produce verbose output, by default True
    """
    if isinstance(model, int):
        # Use the local parameter: the attribute is only assigned later in __init__
        if save_epochs_dir is None:
            raise ValueError("Directory for saving epochs not specified")

        model = load_model_state(save_epochs_dir, epoch_num=model)

    if isinstance(model, (str, NetworkParams, Mapping, pt.nn.Module)):
        self.model = create_network(model, device=device)
    else:
        raise ValueError(f"Invalid model {type(model)}")
    if verbose:
        get_num_parameters(self.model, verbose=True)

    if augmentation is None:
        augmentation = []
    elif isinstance(augmentation, str):
        augmentation = [augmentation.lower()]
    elif isinstance(augmentation, Sequence):
        augmentation = [str(a).lower() for a in augmentation]

    self.data_sb = data_scale_bias

    self.reg_val = reg_val
    self.device = device
    self.batch_size = batch_size
    self.augmentation = augmentation
    self.save_epochs_dir = save_epochs_dir
    self.verbose = verbose

n_dims property

n_dims: int

Returns the expected signal dimensions.

If the model is an instance of SerializableModel and has an init_params attribute containing the key "n_dims", this property returns the value associated with "n_dims". Otherwise, it defaults to 2.

Returns:

  • int

    The expected signal dimensions.

infer

infer(inp: NDArray) -> NDArray

Inference, given an initial stack of images.

Parameters:

  • inp (NDArray) –

    The input stack of images

Returns:

  • NDArray

    The denoised stack of images

Source code in src/autoden/algorithms/denoiser.py
def infer(self, inp: NDArray) -> NDArray:
    """Inference, given an initial stack of images.

    Parameters
    ----------
    inp : NDArray
        The input stack of images

    Returns
    -------
    NDArray
        The denoised stack of images
    """
    # Rescale input
    if self.data_sb is not None:
        inp = inp * self.data_sb.scale_inp - self.data_sb.bias_inp

    inp_t = data_to_tensor(inp, device=self.device, n_dims=self.n_dims)

    self.model.eval()
    with pt.inference_mode():
        out_t: pt.Tensor = self.model(inp_t)
        output = out_t.squeeze(dim=(0, 1)).to("cpu").numpy()

    # Rescale output
    if self.data_sb is not None:
        output = (output + self.data_sb.bias_out) / self.data_sb.scale_out

    return output

train abstractmethod

train(*args: Any, **kwds: Any) -> dict[str, NDArray]

Training of the model, given the required input.

Source code in src/autoden/algorithms/denoiser.py
@abstractmethod
def train(self, *args: Any, **kwds: Any) -> dict[str, NDArray]:
    """Training of the model, given the required input."""

compute_scaling_selfsupervised

compute_scaling_selfsupervised(
    inp: NDArray,
) -> DataScaleBias

Compute input data scaling and bias for self-supervised learning.

Parameters:

  • inp (NDArray) –

    Input data.

Returns:

  • DataScaleBias

    An instance of DataScaleBias containing the computed scaling and bias values.

Source code in src/autoden/algorithms/denoiser.py
def compute_scaling_selfsupervised(inp: NDArray) -> DataScaleBias:
    """
    Compute input data scaling and bias for self-supervised learning.

    Parameters
    ----------
    inp : NDArray
        Input data.

    Returns
    -------
    DataScaleBias
        An instance of DataScaleBias containing the computed scaling and bias values.
    """
    range_vals_inp = get_normalization_range(inp, percentile=0.001)

    sb = DataScaleBias()
    sb.scale_inp = 1 / (range_vals_inp[1] - range_vals_inp[0])
    sb.scale_out = sb.scale_tgt = sb.scale_inp

    sb.bias_inp = range_vals_inp[2] * sb.scale_inp
    sb.bias_out = sb.bias_tgt = sb.bias_inp

    return sb
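
A brief illustration of the resulting invariants (the 1D array is only a stand-in for a noisy image stack; the import path is assumed from the source layout):

import numpy as np
from autoden.algorithms.denoiser import compute_scaling_selfsupervised  # import path assumed

inp = np.linspace(0.0, 2.0, 1001)  # stand-in for a noisy image stack
sb = compute_scaling_selfsupervised(inp)

# scale_inp ~ 1 / (robust max - robust min), bias_inp ~ robust mean * scale_inp,
# and the same values are reused for the output and the target:
assert sb.scale_out == sb.scale_tgt == sb.scale_inp
assert sb.bias_out == sb.bias_tgt == sb.bias_inp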

compute_scaling_supervised

compute_scaling_supervised(
    inp: NDArray, tgt: NDArray
) -> DataScaleBias

Compute input and target data scaling and bias for supervised learning.

Parameters:

  • inp (NDArray) –

    Input data.

  • tgt (NDArray) –

    Target data.

Returns:

  • DataScaleBias

    An instance of DataScaleBias containing the computed scaling and bias values.

Source code in src/autoden/algorithms/denoiser.py
def compute_scaling_supervised(inp: NDArray, tgt: NDArray) -> DataScaleBias:
    """
    Compute input and target data scaling and bias for supervised learning.

    Parameters
    ----------
    inp : NDArray
        Input data.
    tgt : NDArray
        Target data.

    Returns
    -------
    DataScaleBias
        An instance of DataScaleBias containing the computed scaling and bias values.
    """
    range_vals_inp = get_normalization_range(inp, percentile=0.001)
    range_vals_tgt = get_normalization_range(tgt, percentile=0.001)

    sb = DataScaleBias()
    sb.scale_inp = 1 / (range_vals_inp[1] - range_vals_inp[0])
    sb.scale_tgt = 1 / (range_vals_tgt[1] - range_vals_tgt[0])
    sb.scale_out = sb.scale_tgt

    sb.bias_inp = range_vals_inp[2] * sb.scale_inp
    sb.bias_tgt = range_vals_tgt[2] * sb.scale_tgt
    sb.bias_out = sb.bias_tgt

    return sb
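
In the supervised variant, input and target are normalized independently, while the output scaling follows the target (illustrative data; the import path is assumed from the source layout):

import numpy as np
from autoden.algorithms.denoiser import compute_scaling_supervised  # import path assumed

inp = np.random.rand(8, 64, 64) * 10.0  # noisy stack with a wide value range
tgt = np.random.rand(8, 64, 64)         # reference stack in [0, 1]
sb = compute_scaling_supervised(inp, tgt)

# Input and target each get their own scale/bias; the output uses the target's:
assert sb.scale_out == sb.scale_tgt
assert sb.bias_out == sb.bias_tgt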

data_to_tensor

data_to_tensor(
    data: NDArray,
    device: str,
    n_dims: int = 2,
    spectral_axis: int | None = None,
    dtype: DTypeLike | None = float32,
) -> Tensor

Convert a NumPy array to a PyTorch tensor.

Parameters:

  • data (NDArray) –

    The input data to be converted to a tensor.

  • device (str) –

    The device to which the tensor should be moved (e.g., 'cpu', 'cuda').

  • n_dims (int, default: 2 ) –

    The number of dimensions to consider for the data shape, by default 2.

  • spectral_axis (int or None, default: None ) –

    The axis along which the spectral data is located, by default None.

  • dtype (DTypeLike or None, default: float32 ) –

    The data type to which the data should be converted, by default np.float32.

Returns:

  • Tensor

    The converted PyTorch tensor.

Notes

If spectral_axis is provided, that axis is moved to the channel position (just before the last n_dims axes); otherwise a singleton channel dimension is inserted. The data is then reshaped to (batch, channels, *spatial_shape), cast to the requested dtype, and converted to a PyTorch tensor on the specified device.

Source code in src/autoden/algorithms/denoiser.py
def data_to_tensor(
    data: NDArray, device: str, n_dims: int = 2, spectral_axis: int | None = None, dtype: DTypeLike | None = np.float32
) -> pt.Tensor:
    """
    Convert a NumPy array to a PyTorch tensor.

    Parameters
    ----------
    data : NDArray
        The input data to be converted to a tensor.
    device : str
        The device to which the tensor should be moved (e.g., 'cpu', 'cuda').
    n_dims : int, optional
        The number of dimensions to consider for the data shape, by default 2.
    spectral_axis : int or None, optional
        The axis along which the spectral data is located, by default None.
    dtype : DTypeLike or None, optional
        The data type to which the data should be converted, by default np.float32.

    Returns
    -------
    pt.Tensor
        The converted PyTorch tensor.

    Notes
    -----
    If `spectral_axis` is provided, that axis is moved to the channel position
    (just before the last `n_dims` axes); otherwise a singleton channel
    dimension is inserted. The data is then reshaped to
    (batch, channels, *spatial_shape), cast to the requested dtype, and
    converted to a PyTorch tensor on the specified device.
    """
    if spectral_axis is not None:
        num_channels = data.shape[spectral_axis]
        data = np.moveaxis(data, spectral_axis, -n_dims - 1)
    else:
        num_channels = 1
        data = np.expand_dims(data, -n_dims - 1)
    data_shape = data.shape[-n_dims:]
    data = data.reshape([-1, num_channels, *data_shape])
    if dtype is not None:
        data = data.astype(dtype)
    return pt.tensor(data, device=device)
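
Two illustrative shape conversions, for a plain grayscale stack and for a hypothetical hyperspectral cube (the import path is assumed from the source layout):

import numpy as np
from autoden.algorithms.denoiser import data_to_tensor  # import path assumed

# A stack of 8 grayscale 2D images: a singleton channel axis is inserted
imgs = np.random.rand(8, 64, 64)
print(data_to_tensor(imgs, device="cpu", n_dims=2).shape)  # torch.Size([8, 1, 64, 64])

# A hyperspectral image with 16 bands on the last axis: the bands become channels
cube = np.random.rand(64, 64, 16)
print(data_to_tensor(cube, device="cpu", n_dims=2, spectral_axis=-1).shape)  # torch.Size([1, 16, 64, 64])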

get_flip_dims

get_flip_dims(n_dims: int) -> Sequence[tuple[int, ...]]

Generate all possible combinations of dimensions to flip for a given number of dimensions.

Parameters:

  • n_dims (int) –

    The number of dimensions.

Returns:

  • Sequence[tuple[int, ...]]

    A sequence of tuples, where each tuple represents a combination of dimensions to flip. The dimensions are represented by negative indices, ranging from -n_dims to -1.

Examples:

>>> get_flip_dims(2)
[(), (-2,), (-1,), (-2, -1)]
Source code in src/autoden/algorithms/denoiser.py
def get_flip_dims(n_dims: int) -> Sequence[tuple[int, ...]]:
    """
    Generate all possible combinations of dimensions to flip for a given number of dimensions.

    Parameters
    ----------
    n_dims : int
        The number of dimensions.

    Returns
    -------
    Sequence[tuple[int, ...]]
        A sequence of tuples, where each tuple represents a combination of dimensions to flip.
        The dimensions are represented by negative indices, ranging from -n_dims to -1.

    Examples
    --------
    >>> get_flip_dims(2)
    [(), (-2,), (-1,), (-2, -1)]
    """
    return sum([[*combinations(range(-n_dims, 0), d)] for d in range(n_dims + 1)], [])

get_normalization_range

get_normalization_range(
    vol: NDArray, percentile: float | None = None
) -> tuple[float, float, float]

Calculate the normalization range for a given volume.

Parameters:

  • vol (NDArray) –

    The input volume as a NumPy array.

  • percentile (float | None, default: None ) –

    The percentile to use for calculating the normalization range. If None, the minimum, maximum, and mean of the entire volume are used. Default is None.

Returns:

  • tuple[float, float, float]

    A tuple containing the minimum, maximum, and mean values of the volume within the specified percentile range. If percentile is None, the minimum, maximum, and mean of the entire volume are returned.

Notes

If percentile is provided, the function calculates the indices for the minimum and maximum values based on the specified percentile. The mean value is then calculated from the range between these indices.

Source code in src/autoden/algorithms/denoiser.py
def get_normalization_range(vol: NDArray, percentile: float | None = None) -> tuple[float, float, float]:
    """
    Calculate the normalization range for a given volume.

    Parameters
    ----------
    vol : NDArray
        The input volume as a NumPy array.
    percentile : float, optional
        The percentile to use for calculating the normalization range. If None, the
        minimum, maximum, and mean of the entire volume are used. Default is None.

    Returns
    -------
    tuple[float, float, float]
        A tuple containing the minimum, maximum, and mean values of the volume within
        the specified percentile range. If `percentile` is None, the minimum, maximum,
        and mean of the entire volume are returned.

    Notes
    -----
    If `percentile` is provided, the function calculates the indices for the minimum
    and maximum values based on the specified percentile. The mean value is then
    calculated from the range between these indices.
    """
    if percentile is not None:
        vol_sort = np.sort(vol.flatten())
        ind_min = int(np.fmax(vol_sort.size * percentile, 0))
        ind_max = int(np.fmin(vol_sort.size * (1 - percentile), vol_sort.size - 1))
        return vol_sort[ind_min], vol_sort[ind_max], vol_sort[ind_min : ind_max + 1].mean()
    else:
        return vol.min(), vol.max(), vol.mean()
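
For example, with a small ramp and a mild percentile clip (the import path is assumed from the source layout):

import numpy as np
from autoden.algorithms.denoiser import get_normalization_range  # import path assumed

vol = np.arange(100.0)
lo, hi, mean = get_normalization_range(vol)                   # (0.0, 99.0, 49.5): full range
lo, hi, mean = get_normalization_range(vol, percentile=0.01)  # (1.0, 99.0, 50.0): 1% clipped at each end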

get_random_image_indices

get_random_image_indices(
    num_imgs: int, num_tst_ratio: float
) -> list

Return a list of random indices from 0 to num_imgs - 1.

Parameters:

  • num_imgs (int) –

    Total number of images.

  • num_tst_ratio (float) –

    Ratio of images to select.

Returns:

  • list

    List of random indices.

Source code in src/autoden/algorithms/denoiser.py
def get_random_image_indices(num_imgs: int, num_tst_ratio: float) -> list:
    """Return a list of random indices from 0 to num_imgs - 1.

    Parameters
    ----------
    num_imgs : int
        Total number of images.
    num_tst_ratio : float
        Ratio of images to select.

    Returns
    -------
    list
        List of random indices.
    """
    num_tst_imgs = int(num_imgs * num_tst_ratio)
    return list(np.random.choice(num_imgs, size=num_tst_imgs, replace=False))
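
For example, splitting off a random test subset from a stack of images (the import path is assumed from the source layout):

import numpy as np
from autoden.algorithms.denoiser import get_random_image_indices  # import path assumed

imgs = np.random.rand(10, 64, 64)
tst_inds = get_random_image_indices(num_imgs=10, num_tst_ratio=0.2)  # 2 random indices, e.g. [7, 2]
trn_inds = [i for i in range(10) if i not in tst_inds]

imgs_tst = imgs[tst_inds]  # shape (2, 64, 64)
imgs_trn = imgs[trn_inds]  # shape (8, 64, 64)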

get_random_pixel_mask

get_random_pixel_mask(
    data_shape: Sequence[int] | NDArray,
    mask_pixel_ratio: float,
) -> NDArray

Generate a random pixel mask for a given data shape.

This function creates a mask in which a specified ratio of pixels is set to True, effectively selecting those pixels for masking. The remaining pixels are set to False.

Parameters:

  • data_shape (Sequence[int] | NDArray) –

    The shape of the data array for which the mask is to be generated.

  • mask_pixel_ratio (float) –

    The ratio of pixels to be masked (set to True). Must be between 0 and 1.

Returns:

  • NDArray

    A boolean array of the same shape as data_shape with the specified ratio of pixels set to True.

Examples:

>>> data_shape = (10, 10)
>>> mask_pixel_ratio = 0.1
>>> mask = get_random_pixel_mask(data_shape, mask_pixel_ratio)
>>> print(mask)
Source code in src/autoden/algorithms/denoiser.py
def get_random_pixel_mask(data_shape: Sequence[int] | NDArray, mask_pixel_ratio: float) -> NDArray:
    """
    Generate a random pixel mask for a given data shape.

    This function creates a mask in which a specified ratio of pixels is set to True,
    effectively selecting those pixels for masking. The remaining pixels are set to False.

    Parameters
    ----------
    data_shape : Sequence[int] | NDArray
        The shape of the data array for which the mask is to be generated.
    mask_pixel_ratio : float
        The ratio of pixels to be masked (set to True). Must be between 0 and 1.

    Returns
    -------
    NDArray
        A boolean array of the same shape as `data_shape` with the specified ratio
        of pixels set to True.

    Examples
    --------
    >>> data_shape = (10, 10)
    >>> mask_pixel_ratio = 0.1
    >>> mask = get_random_pixel_mask(data_shape, mask_pixel_ratio)
    >>> print(mask)
    """
    data_mask = np.zeros(data_shape, dtype=bool)
    rnd_inds = np.random.randint(low=0, high=data_mask.size, size=int(data_mask.size * mask_pixel_ratio))
    data_mask.flat[rnd_inds] = True
    return data_mask
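
An illustrative use: restrict a pixel-wise loss to a small random subset of pixels, as done in blind-spot style self-supervised training (the loss shown here is only an example; the import path is assumed from the source layout):

import numpy as np
from autoden.algorithms.denoiser import get_random_pixel_mask  # import path assumed

img = np.random.rand(64, 64)
mask = get_random_pixel_mask(img.shape, mask_pixel_ratio=0.01)

# Roughly 1% of the pixels are selected (duplicate random draws can make the
# effective ratio slightly smaller)
print(int(mask.sum()), "of", mask.size)

# Example: evaluate a squared error only on the masked pixels
pred = np.random.rand(64, 64)
loss = np.mean((pred[mask] - img[mask]) ** 2)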

random_flips

random_flips(
    *imgs: Tensor,
    flips: Sequence[tuple[int, ...]] | None = None
) -> Sequence[Tensor]

Randomly flip images.

Parameters:

  • *imgs (Tensor, default: () ) –

    The input images

  • flips (Sequence[tuple[int, ...]] | None, default: None ) –

    The flips to select from; if None, get_flip_dims is called on the spatial ndim of the first image (ndim - 2), by default None.

Returns:

  • Sequence[Tensor]

    The flipped images.

Source code in src/autoden/algorithms/denoiser.py
def random_flips(*imgs: pt.Tensor, flips: Sequence[tuple[int, ...]] | None = None) -> Sequence[pt.Tensor]:
    """Randomly flip images.

    Parameters
    ----------
    *imgs : torch.Tensor
        The input images
    flips : Sequence[tuple[int, ...]] | None, optional
        The flips to select from; if None, get_flip_dims is called on the
        spatial ndim of the first image (ndim - 2), by default None.

    Returns
    -------
    Sequence[torch.Tensor]
        The flipped images.
    """
    if flips is None:
        flips = get_flip_dims(imgs[0].ndim - 2)
    rand_val = np.random.randint(len(flips))

    flip = flips[rand_val]
    return [pt.flip(im, flip) for im in imgs]
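
For example, applying the same randomly chosen flip to an input/target pair of batched images with shape (N, C, H, W) (the import path is assumed from the source layout):

import torch as pt
from autoden.algorithms.denoiser import random_flips  # import path assumed

inp = pt.rand(4, 1, 64, 64)  # (batch, channels, H, W)
tgt = pt.rand(4, 1, 64, 64)

# Both tensors receive the same randomly selected flip over the last two dims
inp_f, tgt_f = random_flips(inp, tgt)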

random_rotations

random_rotations(
    *imgs: Tensor, dims: tuple[int, int] = (-2, -1)
) -> Sequence[Tensor]

Randomly rotate images.

Parameters:

  • *imgs (Tensor, default: () ) –

    The input images

  • dims (tuple[int, int], default: (-2, -1) ) –

    The dimensions to rotate, by default (-2, -1)

Returns:

  • Sequence[Tensor]

    The rotated images.

Source code in src/autoden/algorithms/denoiser.py
def random_rotations(*imgs: pt.Tensor, dims: tuple[int, int] = (-2, -1)) -> Sequence[pt.Tensor]:
    """Randomly rotate images.

    Parameters
    ----------
    *imgs : torch.Tensor
        The input images
    dims : tuple[int, int], optional
        The dimensions to rotate, by default (-2, -1)

    Returns
    -------
    Sequence[torch.Tensor]
        The rotated images.
    """
    rand_val = np.random.randint(4)

    if rand_val > 0:
        return [pt.rot90(im, k=rand_val, dims=dims) for im in imgs]
    else:
        return imgs
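
A sketch of chaining both augmentations so that the input and target of a training pair always receive identical random transforms (the import path is assumed from the source layout):

import torch as pt
from autoden.algorithms.denoiser import random_flips, random_rotations  # import path assumed

inp = pt.rand(4, 1, 64, 64)
tgt = pt.rand(4, 1, 64, 64)

# Same random flip and same random 90-degree rotation applied to both tensors
inp_a, tgt_a = random_flips(inp, tgt)
inp_a, tgt_a = random_rotations(inp_a, tgt_a)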