
Commit bb095b69 authored by fische_r

use dask-image for gaussian filter

parent 8c6e0dd0
@@ -17,7 +17,8 @@ TODO: store git commit sha
 # necessary packages for feature stack
 import numpy as np
 from scipy import ndimage
-from skimage import filters
+# from skimage import filters
+import dask_image.ndfilters
 from skimage.morphology import ball
 import dask
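
For context: dask-image's ndfilters module mirrors the scipy.ndimage filtering API but evaluates lazily on chunked dask arrays, handling the chunk-boundary overlap internally. A minimal sketch of the call the diff switches to, with made-up array shape and chunking:

import numpy as np
import dask.array as da
import dask_image.ndfilters
from scipy import ndimage

# hypothetical 4D volume (z, y, x, t), chunked along every axis
vol = da.random.random((64, 64, 64, 8), chunks=(32, 32, 32, 4))

# lazy Gaussian blur; nothing is computed until .compute() is called
blurred = dask_image.ndfilters.gaussian_filter(vol, sigma=2.0, mode='nearest')

# for a small array the result should agree with plain scipy.ndimage
reference = ndimage.gaussian_filter(np.asarray(vol), sigma=2.0, mode='nearest')
assert np.allclose(blurred.compute(), reference)
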
@@ -104,65 +105,43 @@ class image_filter:
         self.computed = False
-    # TODO: currently loads the full dataset into memory; consider aligning the desired chunks with the original dataset to avoid rechunking
-    # .rechunk() causes problems downstream: "Assertion error", WTF?!
-    # if the original data does not fit in RAM, rechunk, store to disk and load again?
-    # rechunk apparently sometimes only adds an additional graph layer linking initial chunks
-    # def open_raw_data(self):
-    #     data = xr.open_dataset(self.data_path)
-    #     da = dask.array.from_array(data.tomo.data, chunks = self.chunks)
-    #     self.original_dataset = data
-    #     self.data = da
-    # def open_lazy_data(self, chunks=None):
-    #     if chunks is None:
-    #         chunks = self.chunks
-    #     data = xr.open_dataset(self.data_path, chunks = chunks)
-    #     da = dask.array.from_array(data.tomo)
-    #     # print('maybe re-introducing rechunking, but for large datasets auto might be ok')
-    #     # print('smaller chunks might be better for slicewise training')
-    #     # print('currently provided chunks are ignored')
-    #     self.original_dataset = data #.rechunk(self.chunks)
-    #     self.data = da
-    # def load_raw_data(self):
-    #     data = xr.load_dataset(self.data_path)
-    #     # da = dask.array.from_array(data.tomo).rechunk(chunks = self.chunks)
-    #     da = dask.array.from_array(data.tomo.data, chunks = self.chunks)
-    #     self.original_dataset = data
-    #     self.data = da
     def Gaussian_Blur_4D(self, sigma):
         # TODO: check on boundary mode
-        deptharray = np.ones(self.data.ndim)+4*sigma
-        deptharray = tuple(np.min([deptharray, self.data.shape], axis=0))
-        G = self.data.map_overlap(filters.gaussian, depth=deptharray, boundary='nearest', sigma = sigma)
+        # TODO: test dask-image Gaussian filter and use if it works
+        # deptharray = np.ones(self.data.ndim)+4*sigma
+        # deptharray = tuple(np.min([deptharray, self.data.shape], axis=0))
+        # G = self.data.map_overlap(filters.gaussian, depth=deptharray, boundary='nearest', sigma = sigma)
+        G = dask_image.ndfilters.gaussian_filter(self.data, mode='nearest', sigma = sigma)
         self.feature_names.append('Gaussian_4D_Blur_'+f'{sigma:.1f}')
         self.calculated_features.append(G)
         self.Gaussian_4D_dict[f'{sigma:.1f}'] = G
     def Gaussian_Blur_space(self, sigma):
-        deptharray = np.ones(self.data.ndim)+4*sigma
-        deptharray[-1] = 0
-        sigmas = np.ones(deptharray.shape)*sigma
-        deptharray = tuple(np.min([deptharray, self.data.shape], axis=0))
+        # deptharray = np.ones(self.data.ndim)+4*sigma
+        # deptharray[-1] = 0
+        # deptharray = tuple(np.min([deptharray, self.data.shape], axis=0))
+        sigmas = np.ones(self.data.ndim)*sigma
         sigmas[-1] = 0
-        G = self.data.map_overlap(filters.gaussian, depth=deptharray, boundary='nearest', sigma = sigmas)
+        # G = self.data.map_overlap(filters.gaussian, depth=deptharray, boundary='nearest', sigma = sigmas)
+        G = dask_image.ndfilters.gaussian_filter(self.data, mode='nearest', sigma = sigmas)
         self.feature_names.append('Gaussian_space_'+f'{sigma:.1f}')
         self.calculated_features.append(G)
         self.Gaussian_space_dict[f'{sigma:.1f}'] = G
     def Gaussian_Blur_time(self, sigma):
-        deptharray = np.ones(self.data.ndim)+4*sigma
-        deptharray[:-1] = 0
-        sigmas = np.ones(deptharray.shape)*sigma
-        deptharray = tuple(np.min([deptharray, self.data.shape], axis=0))
+        # deptharray = np.ones(self.data.ndim)+4*sigma
+        # deptharray[:-1] = 0
+        # deptharray = tuple(np.min([deptharray, self.data.shape], axis=0))
+        sigmas = np.ones(self.data.ndim)*sigma
         sigmas[:-1] = 0
-        G = self.data.map_overlap(filters.gaussian, depth=deptharray, boundary='nearest', sigma = sigmas)
+        # G = self.data.map_overlap(filters.gaussian, depth=deptharray, boundary='nearest', sigma = sigmas)
+        G = dask_image.ndfilters.gaussian_filter(self.data, mode='nearest', sigma = sigmas)
         self.feature_names.append('Gaussian_time_'+f'{sigma:.1f}')
         self.calculated_features.append(G)
         self.Gaussian_time_dict[f'{sigma:.1f}'] = G
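
For the record, here is what the commented-out map_overlap lines were doing: a truncated Gaussian kernel has a support of roughly 4*sigma, so each chunk needs a halo of about that many voxels from its neighbours for the blur to be seamless at chunk edges; dask-image now computes that overlap itself. A standalone sketch of the old approach (the shapes and the integer cast are illustrative, not from the repo):

import numpy as np
import dask.array as da
from skimage import filters

sigma = 2.0
data = da.random.random((64, 64, 64, 8), chunks=(32, 32, 32, 4))

# halo of ~4*sigma voxels per axis, capped at the array shape
depth = np.minimum(np.ones(data.ndim, dtype=int) + int(4 * sigma), data.shape)

# filter each chunk together with its halo, then trim the halo away again
blurred = data.map_overlap(filters.gaussian, depth=tuple(depth),
                           boundary='nearest', sigma=sigma)
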
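The space-only and time-only variants lean on the scipy.ndimage convention, preserved by dask-image, that sigma may be a per-axis sequence and that a 0 entry disables smoothing along that axis. With time as the last axis, as in this class, that gives for example:

import dask.array as da
import dask_image.ndfilters

data = da.random.random((64, 64, 64, 8), chunks=(32, 32, 32, 4))  # (z, y, x, t), illustrative
sigma = 2.0

# smooth within each time step only (sigma 0 along t)
space_only = dask_image.ndfilters.gaussian_filter(
    data, sigma=(sigma, sigma, sigma, 0), mode='nearest')

# smooth each voxel's time series only (sigma 0 along z, y, x)
time_only = dask_image.ndfilters.gaussian_filter(
    data, sigma=(0, 0, 0, sigma), mode='nearest')
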
@@ -443,7 +422,7 @@ class image_filter:
         self.diff_Gaussian('time')
         self.Gaussian_space_stack()
         self.diff_Gaussian('space')
-        # self.rank_filter_stack()  # you have to load the entire raw data set for the dynamic part of this filter --> not so good for many time steps
+        # self.rank_filter_stack()  # dask_image might provide rank-like filters soon; you have to load the entire raw data set for the dynamic part of this filter --> not so good for many time steps
         self.time_stats()  # does something similar to the dynamic rank filter, but using only one pixel in space
         if self.loc_features:
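
Downstream, as this hunk shows, the driver simply queues the filter features one after another; every entry in calculated_features remains a lazy dask array. A hedged sketch of how such a stack might be assembled and materialized — the constructor argument and the stacking step are assumptions, only the method and attribute names come from the diff:

import dask.array as da

fs = image_filter(data_path='tomo.nc')  # hypothetical constructor arguments
fs.Gaussian_Blur_4D(2.0)
fs.Gaussian_Blur_space(2.0)
fs.Gaussian_Blur_time(2.0)

# each feature is a lazy array with the shape of the data; stacking them
# yields one (n_features, z, y, x, t) array that dask evaluates in a single pass
feature_stack = da.stack(fs.calculated_features)
result = feature_stack.compute()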