
Commit 4be4147f authored by tekin_g
working copy with the simple model

parent 7cf81c6c
@@ -286,3 +286,48 @@ class RLN_model(Model):
        return (prediction, estimatation1, update1)


class RLN_model_simple(Model):
    def __init__(self, name):
        super().__init__(name=name)
        self.est_1 = Estimate(name="Estimate_1")
        self.est_1_norm = BatchNormalization()
        self.update_1 = Update(name="update_1")
        self.update_1_norm = BatchNormalization()
        self.u2 = Conv2D(8, (3, 3), (1, 1), padding="same", name="u2")
        self.u3 = Conv2D(8, (3, 3), (1, 1), padding="same", name="u3")
        self.final_norm = BatchNormalization()

    def call(self, inputs):
        original_image = inputs
        estimatation1 = self.est_1((original_image, original_image))
        estimation1_normed = s_sigmoid(self.est_1_norm(estimatation1))  # result_prelu_12_1
        # tile the normalized estimate to 10 channels; in the full model these
        # channels are the 8 outputs of U1 plus the h1 and h2 lines, here they
        # are simply 10 copies of the single-channel estimate
        result_conv_2_fine1_c = tf.tile(estimation1_normed, [1, 1, 1, 10])
        # U2: a convolution that reduces the 10 channels to 8
        result_conv_2_fine = self.u2(result_conv_2_fine1_c)
        act_2_fine = s_sigmoid1(result_conv_2_fine)
        # U3: final convolution mapping the 8 U2 channels to 8 output channels
        # (in the full model it reduces 16 concatenated U1+U2 channels to 8)
        result_conv_13 = self.u3(act_2_fine)  # +result_conv_2_fined
        normed_batch_13 = self.final_norm(result_conv_13)
        result_prelu_13 = s_sigmoid(normed_batch_13)  # tf.nn.leaky_relu(normed_batch_13,name='relu_1')
        # average over the remaining channels as the output
        prediction = tf.reduce_mean(result_prelu_13, axis=3,
                                    keepdims=True)  # /tf.reduce_max(result_prelu_14)#*tf.reduce_max(normed_batch) #tf.multiply(result_prelu_7,result_prelu_1)
        return (prediction, estimatation1)
\ No newline at end of file
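For orientation, a minimal smoke test of the simplified model's two-output forward pass (a sketch, assuming `RLN_single_Model` and its `Estimate`/`s_sigmoid` helpers are importable as in the training script below; the input is single-channel, matching the crops used for training):

import tensorflow as tf
from RLN_single_Model import RLN_model_simple

model = RLN_model_simple(name="smoke_test")
x = tf.random.uniform((1, 64, 64, 1))            # one single-channel image
prediction, estimate = model(x, training=False)  # two outputs now, not three
assert prediction.shape == (1, 64, 64, 1)        # channels averaged back to 1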
@@ -14,7 +14,7 @@ import time
import numpy as np
import tensorflow as tf
from RLN_single_Model import RLN_model
from RLN_single_Model import RLN_model, RLN_model_simple
import TF2ImageHelpers as images
@@ -38,12 +38,11 @@ import logging
logging.basicConfig(filemode="a",encoding='utf-8', level=logging.DEBUG)
input_crop_shape = (1000,1000,1) #set to None for whole image
number_of_images_to_load =100 #set to -1 for all images
epochs = 40
model = RLN_model(name="test")
model = RLN_model_simple(name="test")
@@ -53,7 +52,7 @@ i = tf.Variable(0, trainable=False, dtype=tf.int64)
ckpt = tf.train.Checkpoint(step=i, model=model)
manager = tf.train.CheckpointManager(ckpt, train_model_path, max_to_keep=5)
ckpt.restore(manager.latest_checkpoint)
logging.info("restore successful")
with tf.device('/CPU:0'):
    dataset_original, (_, _) = images.move_dataset_to_mem(input_dir, ground_truthdir, number_of_images_to_load)
    dataset_original = dataset_original.batch(1)
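One caveat worth knowing here (an observation about the TensorFlow API, not part of this commit): `ckpt.restore(manager.latest_checkpoint)` is a silent no-op when no checkpoint exists, because `manager.latest_checkpoint` is then `None`, so the "restore successful" log above fires either way. A hedged sketch of an explicit guard:

if manager.latest_checkpoint:
    ckpt.restore(manager.latest_checkpoint).expect_partial()
    logging.info("restored from %s", manager.latest_checkpoint)
else:
    logging.info("no checkpoint found, training from scratch")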
......
@@ -21,7 +21,7 @@ input_dir = base_dir + "/train/input/"
learning_rate = 0.008
timestr = time.strftime("%Y%m%d-%H%M%S")
run_name = "/RLN_Bigger_Kernel_{}_{}/".format(learning_rate,timestr)
run_name = "/RLN_Final_Model_{}_{}/".format(learning_rate,timestr)
train_model_path = base_dir + '/train/model_rl' + run_name
train_output = base_dir + '/train/output_rl' + run_name
@@ -44,15 +44,15 @@ logging.basicConfig(filename=log_dir+"out.log",filemode="a",encoding='utf-8', le
import TF2ImageHelpers as images
import tensorflow as tf
from RLN_single_Model import RLN_model
from RLN_single_Model import RLN_model, RLN_model_simple
input_crop_shape = (1000,1000,1) #set to None for whole image
number_of_images_to_load =100 #set to -1 for all images
epochs = 100
number_of_images_to_load =-1 #set to -1 for all images
epochs = 400
@tf.function
def get_mae(y_true, y_pred):
@@ -100,8 +100,25 @@ def loss(y_true, prediction, estimatation1, update1,step):
    return loss


model = RLN_model(name="test")


def loss_simple(y_true, prediction, estimatation1, step):
    mse = 1.0 * get_mse(prediction, y_true)  # + 0.5 * get_max_se(prediction, y_true)
    tf.summary.scalar("squared_error", mse, step)
    SSIM = get_SSIM(y_true, prediction)
    tf.summary.scalar("ssim", SSIM, step)
    estimation_mse = get_mse(estimatation1, y_true)
    tf.summary.scalar("estimation_mse", estimation_mse, step)
    #mse2 = estimation_mse # -tf.log((1+self.SSIM2)/2)#-tf.log((1+self.SSIM1)/2)
    # self.loss =0.1*self.mse2+1*self.mse-1.0*tf.log((1+self.SSIM)/2)#-k1*self.prediction_min
    loss = 1 * mse - 1 * tf.math.log((1 + SSIM) / 2) + 0.1 * estimation_mse
    tf.summary.scalar("loss", loss, step)
    return loss


#model = RLN_model(name="test")
model = RLN_model_simple(name="test")
with tf.device('/CPU:0'):
    dataset_original, (mean, var) = images.move_dataset_to_mem(input_dir, ground_truthdir, number_of_images_to_load)
@@ -125,9 +142,9 @@ for epoch in range(0, epochs):
    for (x_batch_train, y_batch_train) in dataset:
        # Open a GradientTape to record the operations run
        # during the forward pass, which enables auto-differentiation.
        normalizer = tf.keras.layers.Normalization(axis=None)
        normalizer.adapt(x_batch_train)
        denormalizer = tf.keras.layers.Normalization(mean=normalizer.mean, variance=normalizer.variance, axis=None, invert=True)
        #normalizer = tf.keras.layers.Normalization(axis=None)
        #normalizer.adapt(x_batch_train)
        #denormalizer = tf.keras.layers.Normalization(mean=normalizer.mean, variance=normalizer.variance, axis=None, invert=True)
        with train_summary_writer.as_default(), tf.GradientTape() as tape:
            # Run the forward pass of the layer.
            # The operations that the layer applies
@@ -136,11 +153,13 @@ for epoch in range(0, epochs):
            #x_batch_train = normalizer(x_batch_train)
            prediction, estimatation1, update1 = model(x_batch_train, training=True)  # Logits for this minibatch
            #prediction, estimatation1, update1 = model(x_batch_train, training=True)  # Logits for this minibatch
            prediction, estimatation1 = model(x_batch_train, training=True)  # Logits for this minibatch
            #prediction = denormalizer(prediction)
            #estimatation1 = denormalizer(estimatation1)
            #update1 = denormalizer(update1)
            loss_value = loss(y_batch_train, prediction, estimatation1, update1, i)
            #loss_value = loss(y_batch_train, prediction, estimatation1, update1, i)
            loss_value = loss_simple(y_batch_train, prediction, estimatation1, i)

        # Use the gradient tape to automatically retrieve
        # the gradients of the trainable variables with respect to the loss.
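The comment above describes the usual closing step of a TensorFlow custom training loop; the diff does not show those lines, but the canonical pattern (an assumption, with an `optimizer` presumably defined elsewhere in this file) is:

grads = tape.gradient(loss_value, model.trainable_weights)
optimizer.apply_gradients(zip(grads, model.trainable_weights))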
@@ -156,7 +175,7 @@ for epoch in range(0, epochs):
tf.summary.image("ground_truth", y_batch_train, i,max_outputs=3)
tf.summary.image("output", prediction, i,max_outputs=3)
tf.summary.image("estimation", estimatation1, i,max_outputs=3)
tf.summary.image("update", update1, i,max_outputs=3)
#tf.summary.image("update", update1, i,max_outputs=3)
if int(i) % 200 == 0:
save_path = manager.save()
......
import os
import sys
import time
import numpy as np
import tensorflow as tf
import TF2ImageHelpers as images
from RLN_single_Model import RLN_model_simple
base_dir = "/data"
ground_truthdir = base_dir + '/train/ground_truth/'
input_dir = base_dir + "/test/input/"
timestr = time.strftime("%Y%m%d-%H%M%S")
run_name = "/{}/".format(sys.argv[1])
train_model_path = base_dir + '/train/model_rl' + run_name
train_output = base_dir + '/train/output_rl' + run_name
test_output = base_dir + '/test/output_rl' + run_name
log_dir = base_dir + "/logs" + run_name
print(test_output)
if not os.path.exists(train_model_path) or not os.path.exists(train_output) or not os.path.exists(
        test_output) or not os.path.exists(log_dir):
    raise Exception("missing locations")
import logging
logging.basicConfig(filemode="a", encoding='utf-8', level=logging.DEBUG)
number_of_images_to_load = 100 # set to -1 for all images
epochs = 40
model = RLN_model_simple(name="test")
i = tf.Variable(0, trainable=False, dtype=tf.int64)
ckpt = tf.train.Checkpoint(step=i, model=model)
manager = tf.train.CheckpointManager(ckpt, train_model_path, max_to_keep=5)
ckpt.restore(manager.latest_checkpoint)
logging.info("restore sucessfull")
a = os.listdir(input_dir)
a = [i for i in a if i.split('.')[-1] == 'npz']
a = sorted(a, key=lambda x: int(x.split(".")[0]))
logging.debug(a)
for path_i in a:
    logging.debug(path_i)
    x = images.read_file(input_dir + path_i)
    st = time.time()
    x_i = model([x], training=False)
    et = time.time()
    dt = np.array(et - st)
    np.savez_compressed(test_output + path_i + ".npz", x=x, f_x=x_i[0], dt=dt)
    et2 = time.time()
    i.assign_add(1)
    logging.info("Run: {} calc time: {} save time: {} memory: {}".format(
        i.numpy(), dt, et2 - et, tf.config.experimental.get_memory_info('GPU:0')))
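The per-image archives written by this loop can be inspected offline; a minimal sketch (the file name is an assumption, following the `test_output + path_i + ".npz"` pattern above, which yields a doubled extension such as `0.npz.npz` for an input named `0.npz`):

import numpy as np

data = np.load(test_output + "0.npz.npz", allow_pickle=True)  # hypothetical file
print(data["x"].shape)    # network input
print(data["f_x"].shape)  # prediction, first element of the model outputs
print(float(data["dt"]))  # forward-pass wall time in seconds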
@@ -2,8 +2,8 @@ import os
import cv2
from pic_proc.helpers.image import display_y10
path = ("../data/focus1_1/1um/long/run1/")
from helpers.image import display_y10
path = ("/media/guney/9E79-C5A1/grid/3/")
print(os.getcwd())
a = os.listdir(path)
a = [i for i in a if i.split('.')[-1] == 'y10']
......
import os
import cv2
import matplotlib.pyplot as plt
import numpy as np
from helpers.image import show_image
path = "/home/guney/Project/Richardson-Lucy-Net/data from server/traind model inference/"
a = os.listdir(path)
print(a)
a = [path+i for i in a if i.split('.')[-1] == 'npz']
print(a)
display = True
mse_f_x = []
mse_x = []
max_se_f_x = []
max_se_x =[]
var_sim_f_x =[]
var_sim_x_ = []
def var_similarity_multiscale(x, y, s):
    sum = []
    for i in range(0, x.shape[0], x.shape[0]//s):
        for j in range(0, x.shape[1], x.shape[1]//s):
            x_i = x[i:i+x.shape[0]//s, j:j+x.shape[1]//s]
            y_i = y[i:i+x.shape[0]//s, j:j+x.shape[1]//s]
            sum_i = var_similarity(x_i, y_i)
            if sum_i is not None:
                sum.append(sum_i)
    return np.mean(sum)


def var_similarity(x, y):
    cov_m = np.cov(x.flatten(), y.flatten())
    if cov_m[0,0] == 0 or cov_m[1,1] == 0:
        return None
    return cov_m[0,1]/np.sqrt(cov_m[0,0]*cov_m[1,1])


def squared_error(x, y):
    delta = (x.flatten() - y.flatten())**2
    return delta
for data in a:
    print("==========================================================")
    print(data)
    print("==========================================================")
    data = np.load(data, allow_pickle=True)
    inference = data["f_x"][0]
    input = data["x"]
    print(inference.shape)
    if display:
        show_image(inference, "inference", norm=(0,1))
        show_image(input, "input")
        if cv2.waitKey() == ord('c'):
            print("exiting")
            cv2.destroyAllWindows()
            exit(0)
    print("==========================================================")
import os
import cv2
import matplotlib.pyplot as plt
import numpy as np
from helpers.image import show_image
@@ -10,16 +11,42 @@ a = os.listdir(path)
a = [path+i for i in a if i.split('.')[-1] == 'npz']
display = False
mse_f_x = []
mse_x = []
max_se_f_x = []
max_se_x =[]
var_sim_f_x =[]
var_sim_x_ = []
def var_similarity_multiscale(x, y, s):
    x = cv2.resize(x, (x.shape[1] // s, x.shape[0] // s))
    y = cv2.resize(y, (y.shape[1] // s, y.shape[0] // s))
    return var_similarity(x, y)
    sum = []
    for i in range(0, x.shape[0], x.shape[0]//s):
        for j in range(0, x.shape[1], x.shape[1]//s):
            x_i = x[i:i+x.shape[0]//s, j:j+x.shape[1]//s]
            y_i = y[i:i+x.shape[0]//s, j:j+x.shape[1]//s]
            sum_i = var_similarity(x_i, y_i)
            if sum_i is not None:
                sum.append(sum_i)
    return np.mean(sum)


def var_similarity(x, y):
    cov_m = np.cov(x.flatten(), y.flatten())
    if cov_m[0,0] == 0 or cov_m[1,1] == 0:
        return None
    return cov_m[0,1]/np.sqrt(cov_m[0,0]*cov_m[1,1])
for data in a:
def squared_error(x, y):
    delta = (x.flatten() - y.flatten())**2
    return delta
for data in a[:50]:
    print("==========================================================")
    print(data)
    print("==========================================================")
    data = np.load(data, allow_pickle=True)
    inference = data["f_x"][0]
    input = data["x"][0]
@@ -42,14 +69,60 @@ for data in a:
    thresholded_diff_bgr_2 = cv2.cvtColor(thresholded_diff, cv2.COLOR_GRAY2BGR)
    thresholded_diff_bgr[np.where((thresholded_diff_bgr_2 == [1.0, 1.0, 1.0]).all(axis=2))] += [0, 1.0, 0.0]
    highlighted_image = cv2.addWeighted(cv2.cvtColor(inference, cv2.COLOR_GRAY2BGR), 0.1, thresholded_diff_bgr, 0.9, 0)
    highlighted_image = cv2.addWeighted(cv2.cvtColor(inference, cv2.COLOR_GRAY2BGR), 0.3, thresholded_diff_bgr, 0.7, 0)
    show_image(highlighted_image, "diff")
    show_image(cv2.resize(inference, (inference.shape[1] // 2, inference.shape[0] // 2)), "rescaled")
    if cv2.waitKey() == ord('c'):
        print("exiting")
        cv2.destroyAllWindows()
        exit(0)
    print()
    for s in range(1,6):
        print("scale: {} s(gt,f(x)) = {}, s(gt,x) = {}".format(s, var_similarity_multiscale(gt, inference, s), var_similarity_multiscale(gt, input, s)))
\ No newline at end of file
    for s in range(1,2):
        print("scale: {} s(gt,f(x)) = {}, s(gt,x) = {}".format(s, var_similarity_multiscale(gt, inference, s), var_similarity_multiscale(gt, input, s)))
    print("==========================================================")
    delta1 = squared_error(gt, inference)
    delta2 = squared_error(gt, input)
    # fig, ax = plt.subplots(1, 2, figsize=(15, 10))
    # ax[0].hist(delta1, bins="auto")
    # ax[0].set_yscale("log")
    #
    # ax[1].hist(delta2, bins="auto")
    # ax[1].set_yscale("log")
    # fig.show()
    mse_f_x.append(np.mean(delta1))
    mse_x.append(np.mean(delta2))
    max_se_f_x.append(np.max(delta1))
    max_se_x.append(np.max(delta2))
    var_sim_x_.append(var_similarity(gt, input))
    var_sim_f_x.append(var_similarity(gt, inference))
    # print("mse(gt,f_x) = {}, mse(gt,x) = {}, max_se(gt,f_x) = {}, max_se(gt,x) = {}".format(np.mean(delta1),np.mean(delta2),np.max(delta1),np.max(delta2)))

fig, ax = plt.subplots(2, 3, figsize=(30,20))
ax[0,0].hist(mse_f_x, bins="auto")
ax[0,0].set_title("MSE f(x)")
ax[1,0].hist(mse_x, bins="auto")
ax[1,0].set_title("MSE x")
ax[0,1].hist(max_se_f_x, bins="auto")
ax[0,1].set_title("MAX SE f(x)")
ax[1,1].hist(max_se_x, bins="auto")
ax[1,1].set_title("MAX SE x")
ax[0,2].hist(var_sim_f_x, bins="auto")
ax[0,2].set_title("Similarity f(x)")
ax[1,2].hist(var_sim_x_, bins="auto")
ax[1,2].set_title("Similarity x")
fig.show()
\ No newline at end of file
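Note the semantic shift in this file's `var_similarity_multiscale`: instead of averaging per-tile correlations, it now downsamples both images by 1/s and correlates the results, so larger s compares coarse structure with fine detail averaged away. A toy illustration of that effect (a sketch with invented data; it uses `cv2.INTER_AREA` so the downsampling genuinely averages, whereas the script above relies on the cv2 default):

import numpy as np
import cv2

gt = np.random.rand(100, 100).astype(np.float32)
noisy = (gt + 0.3 * np.random.rand(100, 100)).astype(np.float32)
for s in (1, 2, 4):
    small_gt = cv2.resize(gt, (100 // s, 100 // s), interpolation=cv2.INTER_AREA)
    small_noisy = cv2.resize(noisy, (100 // s, 100 // s), interpolation=cv2.INTER_AREA)
    # correlation typically rises with s as downsampling averages out the noise
    print(s, np.corrcoef(small_gt.flatten(), small_noisy.flatten())[0, 1])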
import os
import cv2
import numpy as np
from helpers.image import read_y10, show_image
path = ("/media/guney/9E79-C5A1/grid/3/")
output= "test/"
if not os.path.exists(output):
    os.makedirs(output)
print(os.getcwd())
a = os.listdir(path)
a = [i for i in a if i.split('.')[-1] == 'y10']
a = sorted(a,key = lambda x : int(x.split(".")[0]))
for i, path_i in enumerate(a):
    print(path_i)
    datat = 1 - (read_y10(path+path_i) / ((2**10) - 1))
    cv2.normalize(datat, datat, 0, 1, cv2.NORM_MINMAX)
    print(np.max(datat), np.min(datat))
    np.savez_compressed(output+str(i) + ".npz", datat)
    #show_image(datat,"image")
    #cv2.waitKey(0)
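For readers of the conversion above: the 10-bit samples (0..1023) are scaled into [0, 1], inverted, then min-max normalized in place; note also that `np.savez_compressed(..., datat)` stores the array under numpy's default key `arr_0`. A worked sketch of the value mapping (assuming `read_y10` yields raw 10-bit values):

import numpy as np

raw = np.array([0, 511, 1023], dtype=np.float64)  # hypothetical 10-bit samples
inverted = 1 - raw / (2**10 - 1)                  # -> [1.0, 0.5005, 0.0]
# cv2.normalize(..., 0, 1, cv2.NORM_MINMAX) then stretches min..max to exactly [0, 1]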