Commit 352d7326 authored by feichtinger's avatar feichtinger
Browse files

investigation of outliers, question to Pavle and Jochem

parent 14f40f21
......@@ -29,3 +29,43 @@ round_epochs,val_loss,val_mean_squared_error,loss,mean_squared_error,mult_neuron
2000,0.0025868704594056956,0.0025868704594056956,0.0024322890632529955,0.0024322890632529955,4,elu,25,0.1
2000,0.002599074810559479,0.002599074810559479,0.002399700006917222,0.002399700006917222,4,tanh,25,0.01
2000,0.0026097255513872594,0.0026097255513872594,0.0024921591403834122,0.0024921591403834122,4,elu,10,0.01
2000,0.003166489454385648,0.003166489454385648,0.0033323422280049577,0.0033323422280049577,1,relu,10,0.01
2000,0.002881387112562995,0.002881387112562995,0.002893498278059069,0.002893498278059069,1,tanh,10,0.01
2000,0.002585706600870259,0.002585706600870259,0.0023360975306863903,0.0023360975306863903,4,relu,100,0.01
2000,0.0026586020729179463,0.0026586020729179463,0.0025653695892948994,0.0025653695892948994,2,relu,25,0.01
2000,0.0025980849490645935,0.0025980849490645935,0.0024614213280347813,0.0024614213280347813,2,tanh,10,0.01
2000,0.0026283658998621333,0.0026283658998621333,0.002492394493639298,0.002492394493639298,2,elu,25,0.01
2000,0.0028529011526337066,0.0028529011526337066,0.002762269386365047,0.002762269386365047,1,tanh,25,0.001
2000,0.002623069995465285,0.002623069995465285,0.002508934493741506,0.002508934493741506,4,tanh,10,0.1
2000,0.0025869440407055772,0.0025869440407055772,0.002401200404192278,0.002401200404192278,4,elu,50,0.1
2000,0.0025911404481652922,0.0025911404481652922,0.002378880360719155,0.002378880360719155,4,elu,100,0.01
2000,0.0038954336707959763,0.0038954336707959763,0.003988842025474785,0.003988842025474785,1,relu,25,0.001
2000,0.0034472997104273975,0.0034472997104273975,0.0035041030028415657,0.0035041030028415657,1,elu,25,0.1
2000,0.002674333525252464,0.002674333525252464,0.0025679578274717444,0.0025679578274717444,2,relu,10,0.1
2000,0.002563230611147473,0.002563230611147473,0.0023412639442283883,0.0023412639442283883,4,relu,100,0.1
2000,0.0025986896006025715,0.0025986896006025715,0.002383339228825887,0.002383339228825887,2,tanh,50,0.1
2000,0.0026154836645998028,0.0026154836645998028,0.002478169808484493,0.002478169808484493,4,tanh,10,0.001
2000,0.0025548947872573525,0.0025548947872573525,0.002358186761583891,0.002358186761583891,4,tanh,50,0.1
2000,0.0025509777254861697,0.0025509777254861697,0.002329890756961994,0.002329890756961994,4,tanh,100,0.001
2000,0.0030105544437472203,0.0030105544437472203,0.002964789998468565,0.002964789998468565,1,tanh,100,0.01
2000,0.0028226322895573063,0.0028226322895573063,0.0026990458713536812,0.0026990458713536812,1,tanh,100,0.1
2000,0.0025692494136889004,0.0025692494136889004,0.0023457782492653004,0.0023457782492653004,4,relu,50,0.01
2000,0.0026533417934577874,0.0026533417934577874,0.002542111934719737,0.002542111934719737,2,elu,10,0.1
2000,0.0026868248730241746,0.0026868248730241746,0.0025521459646884836,0.0025521459646884836,2,elu,50,0.01
2000,0.0032228536811818986,0.0032228536811818986,0.0032256931576869793,0.0032256931576869793,1,tanh,100,0.001
2000,0.005881815784127767,0.005881815784127767,0.006121427949228313,0.006121427949228313,1,relu,25,0.01
2000,0.002886747603453855,0.002886747603453855,0.003017354273579899,0.003017354273579899,1,elu,10,0.1
2000,0.009171747195624948,0.009171747195624948,0.009317878524902921,0.009317878524902921,1,relu,50,0.01
2000,0.005838221096985193,0.005838221096985193,0.006080712903848656,0.006080712903848656,1,relu,50,0.1
2000,0.0025822016419772427,0.0025822016419772427,0.0023815557695796084,0.0023815557695796084,4,relu,25,0.001
2000,0.0025643583847167217,0.0025643583847167217,0.0023549425316183267,0.0023549425316183267,4,relu,50,0.1
2000,0.0026881263312909984,0.0026881263312909984,0.0026630788356806472,0.0026630788356806472,2,relu,25,0.001
2000,0.005169429620522828,0.005169429620522828,0.0053236245773575315,0.0053236245773575315,1,elu,100,0.1
2000,0.0025726734568703993,0.0025726734568703993,0.0023285020361662684,0.0023285020361662684,4,tanh,100,0.01
2000,0.002668826950236401,0.002668826950236401,0.002454131856995559,0.002454131856995559,2,elu,100,0.001
2000,0.002611684623705211,0.002611684623705211,0.0024994517592844857,0.0024994517592844857,4,elu,10,0.1
2000,0.002997897956924411,0.002997897956924411,0.003150531323241222,0.003150531323241222,1,elu,10,0.01
2000,0.002641849092308062,0.002641849092308062,0.002415844355312176,0.002415844355312176,2,relu,100,0.1
2000,0.00678495609168548,0.00678495609168548,0.007442952361214802,0.007442952361214802,1,relu,10,0.1
2000,0.002581694619317881,0.002581694619317881,0.002333750266242213,0.002333750266242213,4,tanh,100,0.1
2000,0.0026340905024072334,0.0026340905024072334,0.0024286892183234252,0.0024286892183234252,2,relu,50,0.1
This diff is collapsed.
......@@ -15,7 +15,7 @@
# + {"toc": true, "cell_type": "markdown"}
# <h1>Table of Contents<span class="tocSkip"></span></h1>
# <div class="toc"><ul class="toc-item"><li><span><a href="#Configuration" data-toc-modified-id="Configuration-1"><span class="toc-item-num">1&nbsp;&nbsp;</span>Configuration</a></span></li><li><span><a href="#Support-Routines" data-toc-modified-id="Support-Routines-2"><span class="toc-item-num">2&nbsp;&nbsp;</span>Support Routines</a></span><ul class="toc-item"><li><span><a href="#Visualization" data-toc-modified-id="Visualization-2.1"><span class="toc-item-num">2.1&nbsp;&nbsp;</span>Visualization</a></span></li></ul></li><li><span><a href="#Dataset-creation" data-toc-modified-id="Dataset-creation-3"><span class="toc-item-num">3&nbsp;&nbsp;</span>Dataset creation</a></span><ul class="toc-item"><li><span><a href="#Dataset-reading-and-preprocessing-definition" data-toc-modified-id="Dataset-reading-and-preprocessing-definition-3.1"><span class="toc-item-num">3.1&nbsp;&nbsp;</span>Dataset reading and preprocessing definition</a></span></li><li><span><a href="#Make-dataset" data-toc-modified-id="Make-dataset-3.2"><span class="toc-item-num">3.2&nbsp;&nbsp;</span>Make dataset</a></span></li><li><span><a href="#Training/Test-Split" data-toc-modified-id="Training/Test-Split-3.3"><span class="toc-item-num">3.3&nbsp;&nbsp;</span>Training/Test Split</a></span></li><li><span><a href="#Data-scaling-for-DNN-training" data-toc-modified-id="Data-scaling-for-DNN-training-3.4"><span class="toc-item-num">3.4&nbsp;&nbsp;</span>Data scaling for DNN training</a></span></li></ul></li><li><span><a href="#DNN-Model-definitions" data-toc-modified-id="DNN-Model-definitions-4"><span class="toc-item-num">4&nbsp;&nbsp;</span>DNN Model definitions</a></span><ul class="toc-item"><li><span><a href="#L2reg-and-gaussian-noise" data-toc-modified-id="L2reg-and-gaussian-noise-4.1"><span class="toc-item-num">4.1&nbsp;&nbsp;</span>L2reg and gaussian noise</a></span></li><li><span><a href="#Model-with-Dropout" data-toc-modified-id="Model-with-Dropout-4.2"><span 
class="toc-item-num">4.2&nbsp;&nbsp;</span>Model with Dropout</a></span></li></ul></li><li><span><a href="#DNN-Training-runs" data-toc-modified-id="DNN-Training-runs-5"><span class="toc-item-num">5&nbsp;&nbsp;</span>DNN Training runs</a></span><ul class="toc-item"><li><span><a href="#Andi's-initial-DNN-using-gn" data-toc-modified-id="Andi's-initial-DNN-using-gn-5.1"><span class="toc-item-num">5.1&nbsp;&nbsp;</span>Andi's initial DNN using gn</a></span></li><li><span><a href="#without-any-regularization" data-toc-modified-id="without-any-regularization-5.2"><span class="toc-item-num">5.2&nbsp;&nbsp;</span>without any regularization</a></span></li><li><span><a href="#Trying-to-reproduce-best-hyperscan-run" data-toc-modified-id="Trying-to-reproduce-best-hyperscan-run-5.3"><span class="toc-item-num">5.3&nbsp;&nbsp;</span>Trying to reproduce best hyperscan run</a></span></li><li><span><a href="#Try-out-DNN-with-dropout" data-toc-modified-id="Try-out-DNN-with-dropout-5.4"><span class="toc-item-num">5.4&nbsp;&nbsp;</span>Try out DNN with dropout</a></span></li></ul></li><li><span><a href="#Hyperparameter-scans" data-toc-modified-id="Hyperparameter-scans-6"><span class="toc-item-num">6&nbsp;&nbsp;</span>Hyperparameter scans</a></span><ul class="toc-item"><li><span><a href="#Test:-Make-a-hyperparameter-scan-A" data-toc-modified-id="Test:-Make-a-hyperparameter-scan-A-6.1"><span class="toc-item-num">6.1&nbsp;&nbsp;</span>Test: Make a hyperparameter scan A</a></span></li><li><span><a href="#Offline-batch-parameter-scan" data-toc-modified-id="Offline-batch-parameter-scan-6.2"><span class="toc-item-num">6.2&nbsp;&nbsp;</span>Offline batch parameter scan</a></span></li><li><span><a href="#TODO-ModelB" data-toc-modified-id="TODO-ModelB-6.3"><span class="toc-item-num">6.3&nbsp;&nbsp;</span>TODO ModelB</a></span></li><li><span><a href="#TODO:-Model-C:-scan-regulatisation-and-noise" data-toc-modified-id="TODO:-Model-C:-scan-regulatisation-and-noise-6.4"><span 
class="toc-item-num">6.4&nbsp;&nbsp;</span>TODO: Model C: scan regulatisation and noise</a></span></li></ul></li><li><span><a href="#SVM-to-see-what-a-linear-model-can-do" data-toc-modified-id="SVM-to-see-what-a-linear-model-can-do-7"><span class="toc-item-num">7&nbsp;&nbsp;</span>SVM to see what a linear model can do</a></span></li></ul></div>
# <div class="toc"><ul class="toc-item"><li><span><a href="#Configuration" data-toc-modified-id="Configuration-1"><span class="toc-item-num">1&nbsp;&nbsp;</span>Configuration</a></span></li><li><span><a href="#Support-Routines" data-toc-modified-id="Support-Routines-2"><span class="toc-item-num">2&nbsp;&nbsp;</span>Support Routines</a></span><ul class="toc-item"><li><span><a href="#Visualization" data-toc-modified-id="Visualization-2.1"><span class="toc-item-num">2.1&nbsp;&nbsp;</span>Visualization</a></span></li></ul></li><li><span><a href="#Dataset-creation" data-toc-modified-id="Dataset-creation-3"><span class="toc-item-num">3&nbsp;&nbsp;</span>Dataset creation</a></span><ul class="toc-item"><li><span><a href="#Dataset-reading-and-preprocessing-definition" data-toc-modified-id="Dataset-reading-and-preprocessing-definition-3.1"><span class="toc-item-num">3.1&nbsp;&nbsp;</span>Dataset reading and preprocessing definition</a></span></li><li><span><a href="#Make-dataset" data-toc-modified-id="Make-dataset-3.2"><span class="toc-item-num">3.2&nbsp;&nbsp;</span>Make dataset</a></span></li><li><span><a href="#Training/Test-Split" data-toc-modified-id="Training/Test-Split-3.3"><span class="toc-item-num">3.3&nbsp;&nbsp;</span>Training/Test Split</a></span></li><li><span><a href="#Data-scaling-for-DNN-training" data-toc-modified-id="Data-scaling-for-DNN-training-3.4"><span class="toc-item-num">3.4&nbsp;&nbsp;</span>Data scaling for DNN training</a></span></li></ul></li><li><span><a href="#DNN-Model-definitions" data-toc-modified-id="DNN-Model-definitions-4"><span class="toc-item-num">4&nbsp;&nbsp;</span>DNN Model definitions</a></span><ul class="toc-item"><li><span><a href="#L2reg-and-gaussian-noise" data-toc-modified-id="L2reg-and-gaussian-noise-4.1"><span class="toc-item-num">4.1&nbsp;&nbsp;</span>L2reg and gaussian noise</a></span></li><li><span><a href="#Model-with-Dropout" data-toc-modified-id="Model-with-Dropout-4.2"><span 
class="toc-item-num">4.2&nbsp;&nbsp;</span>Model with Dropout</a></span></li></ul></li><li><span><a href="#DNN-Training-runs" data-toc-modified-id="DNN-Training-runs-5"><span class="toc-item-num">5&nbsp;&nbsp;</span>DNN Training runs</a></span><ul class="toc-item"><li><span><a href="#Andi's-initial-DNN-using-gn" data-toc-modified-id="Andi's-initial-DNN-using-gn-5.1"><span class="toc-item-num">5.1&nbsp;&nbsp;</span>Andi's initial DNN using gn</a></span></li><li><span><a href="#without-any-regularization" data-toc-modified-id="without-any-regularization-5.2"><span class="toc-item-num">5.2&nbsp;&nbsp;</span>without any regularization</a></span><ul class="toc-item"><li><span><a href="#Investigation-of-model-performance-errors" data-toc-modified-id="Investigation-of-model-performance-errors-5.2.1"><span class="toc-item-num">5.2.1&nbsp;&nbsp;</span>Investigation of model performance errors</a></span></li></ul></li><li><span><a href="#Trying-to-reproduce-best-hyperscan-run" data-toc-modified-id="Trying-to-reproduce-best-hyperscan-run-5.3"><span class="toc-item-num">5.3&nbsp;&nbsp;</span>Trying to reproduce best hyperscan run</a></span></li><li><span><a href="#Try-out-DNN-with-dropout" data-toc-modified-id="Try-out-DNN-with-dropout-5.4"><span class="toc-item-num">5.4&nbsp;&nbsp;</span>Try out DNN with dropout</a></span></li></ul></li><li><span><a href="#Hyperparameter-scans" data-toc-modified-id="Hyperparameter-scans-6"><span class="toc-item-num">6&nbsp;&nbsp;</span>Hyperparameter scans</a></span><ul class="toc-item"><li><span><a href="#Test:-Make-a-hyperparameter-scan-A" data-toc-modified-id="Test:-Make-a-hyperparameter-scan-A-6.1"><span class="toc-item-num">6.1&nbsp;&nbsp;</span>Test: Make a hyperparameter scan A</a></span></li><li><span><a href="#Offline-batch-parameter-scan" data-toc-modified-id="Offline-batch-parameter-scan-6.2"><span class="toc-item-num">6.2&nbsp;&nbsp;</span>Offline batch parameter scan</a></span></li><li><span><a href="#TODO-ModelB" 
data-toc-modified-id="TODO-ModelB-6.3"><span class="toc-item-num">6.3&nbsp;&nbsp;</span>TODO ModelB</a></span></li><li><span><a href="#TODO:-Model-C:-scan-regulatisation-and-noise" data-toc-modified-id="TODO:-Model-C:-scan-regulatisation-and-noise-6.4"><span class="toc-item-num">6.4&nbsp;&nbsp;</span>TODO: Model C: scan regulatisation and noise</a></span></li></ul></li><li><span><a href="#SVM-to-see-what-a-linear-model-can-do" data-toc-modified-id="SVM-to-see-what-a-linear-model-can-do-7"><span class="toc-item-num">7&nbsp;&nbsp;</span>SVM to see what a linear model can do</a></span></li></ul></div>
# +
import os
......@@ -31,6 +31,7 @@ import numpy as np
import pandas as pd
from pandas.plotting import scatter_matrix
import matplotlib.pyplot as plt
import seaborn as sns
import scipy.stats
import tensorflow
import keras
......@@ -47,6 +48,8 @@ import talos as ta
#matplotlib inline
# -
import itertools
# # Configuration
#topdir = "/psi/home/adelmann/data/ml-gasmon/"
......@@ -79,11 +82,13 @@ def densitycolor(x, y):
indices = density.argsort()
return x[indices], y[indices], density[indices]
def coloredscatter(x, y, alpha=1.0, cmap="Blues"):
    """Scatter plot of (x, y) colored by local point density, with a colorbar.

    Relies on densitycolor() to sort the points by density so that the
    densest points are drawn last (on top).
    """
    x, y, z = densitycolor(x, y)
    paths = plt.scatter(x, y, c=z, s=50, edgecolor='', alpha=alpha, cmap=cmap)
    plt.colorbar(paths)
def coloredscatter(x, y, alpha=1.0, cmap="Blues", ax=None):
    """Scatter plot of (x, y) colored by local point density, with a colorbar.

    Parameters
    ----------
    x, y : array-like
        Coordinates to plot.
    alpha : float
        Marker transparency.
    cmap : str
        Matplotlib colormap name.
    ax : matplotlib Axes, optional
        Axes to draw on; a new figure/axes pair is created when None.
    """
    if ax is None:
        # only the axes is needed below; discard the figure handle
        _, ax = plt.subplots()
    x, y, z = densitycolor(x, y)
    # 'none' (not the empty string) is the supported value for suppressing
    # marker edges in matplotlib scatter
    paths = ax.scatter(x, y, c=z, s=50, edgecolor='none', alpha=alpha, cmap=cmap)
    plt.colorbar(paths, ax=ax)
# +
......@@ -578,6 +583,108 @@ print_model_err(model_0_noreg, x_test, y_test)
# %%time
plotModelPerf2(model_0_noreg, 'Model0 No Regularization', x_test, y_test, test[var_dep].columns, transformer_y, '.')
# %%time
# Train a wider ("m4": 4x neurons per layer) feed-forward model without any
# regularization, save model + history, and report the test error.
model_0_noreg_m4 = build_ff_mdl_small(in_dim=x_train.shape[1], out_dim=y_train.shape[1],
                                      l1=4*8, l2=4*6, l3=4*4, l4=4*4,
                                      l2reg=0.0, gn=0.0)
#mc = keras.callbacks.ModelCheckpoint('best_model_1.h5', monitor='val_loss', mode='min', save_best_only=True)
es = keras.callbacks.EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=100)
# Bug fix: shuffle/verbose were the strings 'true'/'false'; Keras expects
# bool/int here and any non-empty string is truthy, so verbose='false'
# actually ENABLED per-epoch output.
hist_0_noreg_m4 = model_0_noreg_m4.fit(x=x_train, y=y_train,
                                       validation_data=(x_validate, y_validate),
                                       batch_size=250, shuffle=True, epochs=2000,
                                       verbose=0, callbacks=[plot_losses, es])
model_0_noreg_m4.save('model_0_noreg_m4.h5')
pd.DataFrame(hist_0_noreg_m4.history).to_csv('model_0_noreg_m4-hist.csv')
print_model_err(model_0_noreg_m4, x_test, y_test)
model_0_noreg_m4 = keras.models.load_model('model_0_noreg_m4.h5')
# ### Investigation of model performance errors
# The biggest departures of the predicted energy from the ground truth energies happen in series at certain discrete values of the predicted energy. Try to get some information about this systematic behavior.
# Predict on the test set and map predictions back to physical units.
y_predict = transformer_y.inverse_transform(model_0_noreg_m4.predict(x_test))
# Use the original data (`test`) that contains the untransformed values and also the file names.
y_predict
# Column of ground-truth energies as an (n, 1) array for comparison.
test['PHOTON-ENERGY-PER-PULSE'].values[np.newaxis].transpose()
# %matplotlib notebook
fig,ax = plt.subplots(figsize=(15,10))
# Predicted vs. true photon energy per pulse; outliers show as off-diagonal points.
ax.plot(y_predict, test['PHOTON-ENERGY-PER-PULSE'], marker='o', linestyle='', alpha=0.1)
# NOTE(review): the density-colored scatter (coloredscatter) crashes the
# notebook kernel at this data size, so it stays commented out.
# coloredscatter(y_predict, test['PHOTON-ENERGY-PER-PULSE'].values[np.newaxis].transpose(), ax=ax)
ax.set_xlabel('PEPP predicted')
ax.set_ylabel('PEPP')
def modelPerfPerFile(data, legend=True, ax=None):
    """Plot predicted vs. true photon energy, marker-coded per raw data file.

    Parameters
    ----------
    data : pandas.DataFrame
        Must contain the columns 'rawDataFile', 'PEPP_predict' and
        'PHOTON-ENERGY-PER-PULSE'.
    legend : bool
        Whether to draw the per-file legend (can be huge for many files).
    ax : matplotlib Axes, optional
        Axes to draw on; a new figure/axes pair is created when None.
    """
    if ax is None:
        fig, ax = plt.subplots(figsize=(15, 10))
    markerseq = itertools.cycle(('+', 'v', 'x', 's', 'D', 'X'))
    for name, group in data.groupby('rawDataFile'):
        # Bug fix: draw on the requested axes instead of plt.plot (which
        # targets the global current axes and ignored a caller-supplied ax).
        ax.plot(group['PEPP_predict'], group['PHOTON-ENERGY-PER-PULSE'],
                alpha=1.0, marker=next(markerseq), linestyle='', label=name)
    ax.set_xlabel('PEPP predicted')
    ax.set_ylabel('PEPP')
    if legend:
        ax.legend()
# Bug fix: `tst_df = test` only aliases the frame, so adding the prediction
# column silently mutated the shared `test` DataFrame; work on a copy.
tst_df = test.copy()
tst_df['PEPP_predict'] = y_predict
modelPerfPerFile(tst_df, legend=False)
# +
# Split test points into outliers (|error| > thresh) and well-predicted points,
# then overlay the per-file markers of the outliers on the bulk cloud.
# Bug fix: copy() so the shared `test` frame is not mutated by the new column.
tst_df = test.copy()
tst_df['PEPP_predict'] = y_predict
thresh = 40.0
tst1_df = tst_df[(tst_df['PHOTON-ENERGY-PER-PULSE'] - tst_df['PEPP_predict']).abs() > thresh]
tst2_df = tst_df[(tst_df['PHOTON-ENERGY-PER-PULSE'] - tst_df['PEPP_predict']).abs() <= thresh]
fig, ax = plt.subplots(figsize=(15, 10))
#ax.plot(y_predict, test['PHOTON-ENERGY-PER-PULSE'], marker='o', linestyle='', alpha=0.01)
ax.plot(tst2_df['PEPP_predict'], tst2_df['PHOTON-ENERGY-PER-PULSE'], marker='o', linestyle='',
        alpha=0.05, label='')
modelPerfPerFile(tst1_df, legend=True, ax=ax)
# -
tst2_df.shape
# Prepare a cut of the data containing interesting mispredicted areas
#
# I pick a region with a substantial number of errors
# Select a region with a substantial number of mispredictions.
cutfilter = (y_predict >= 110) & (y_predict <= 115)
# Bug fix: .copy() — assigning a column to a boolean-indexed slice is chained
# assignment (SettingWithCopyWarning; the values may not stick).
tst_df = test[cutfilter].copy()
tst_df['PEPP_predict'] = y_predict[cutfilter]
modelPerfPerFile(tst_df)
sns.pairplot(tst_df[['CALCT', 'CALCS', 'SPECTRUM_CENTER','XeMultVoltag','PHOTON-ENERGY-PER-PULSE','PEPP_predict']])
# Let's look at an area where we have good predictions
# Comparison region where the predictions are good.
cutfilter = (y_predict >= 130) & (y_predict <= 200)
# Bug fix: .copy() avoids chained assignment on the boolean-indexed slice.
tst_df = test[cutfilter].copy()
tst_df['PEPP_predict'] = y_predict[cutfilter]
modelPerfPerFile(tst_df)
sns.pairplot(tst_df[['CALCT', 'CALCS', 'SPECTRUM_CENTER','XeMultVoltag','PHOTON-ENERGY-PER-PULSE','PEPP_predict']])
# Higher-energy region, per-file view only (no pairplot here).
cutfilter = (y_predict >= 220) & (y_predict <= 330)
# Bug fix: .copy() avoids chained assignment on the boolean-indexed slice.
tst_df = test[cutfilter].copy()
tst_df['PEPP_predict'] = y_predict[cutfilter]
modelPerfPerFile(tst_df)
# ## Trying to reproduce best hyperscan run
# Based on offline batch run from the Hyperparameter scan chapter.
# Scan is still running. Best result as of Mon Jan 20 10:24h
......@@ -605,7 +712,8 @@ print_model_err(model_scanbest, x_test, y_test)
# %%time
#plotModelPerf2dens(model_0, 'Model 0', x_test[0:1000,:], y_test[0:1000,:], test[['PHOTON-ENERGY-PER-PULSE']].columns, transformer_y, '.')
plotModelPerf2dens(model_scanbest, 'Model from best scan', x_test, y_test, test[var_dep].columns, transformer_y, '.')
plotModelPerf2dens(model_scanbest, 'Model from best scan', x_test, y_test,
test[var_dep].columns, transformer_y, '.')
# The outliers show some systematic patterns. Actually they seem to be primarily confined to a mostly discrete number of predicted energies in the surrogate model. **Could there be some potential systematic errors in the measurements?**
......@@ -614,7 +722,7 @@ plotModelPerf2(model_scanbest, 'Model from best scan', x_test, y_test, test[var_
# %%time
#plotModelPerf2dens(model_0, 'Model 0', x_test[0:1000,:], y_test[0:1000,:], test[['PHOTON-ENERGY-PER-PULSE']].columns, transformer_y, '.')
plotModelPerf2(model_scanbest, 'Model from best scan', x_validate, y_validate, test[var_dep].columns, transformer_y, '.')
plotModelPerf2(model_scanbest, 'Model from best scan', x_train, y_train, test[var_dep].columns, transformer_y, '.')
# ## Try out DNN with dropout
# Since the model0 without any regularization shows just very minor overfitting within the tests, and also the hyperparameter scan points as of now to low gn being best, dropout probably makes no sense at all. Still, in order to get a bit more acquainted with the features, I ran a few tests.
......@@ -670,6 +778,17 @@ hist_0_Dout_3 = model_Dout_3.fit(x=x_train, y=y_train,
batch_size=250, shuffle='true',epochs=500, #2000,
verbose='false', callbacks=[plot_losses,es])
# Model with higher number of neurons and dropout
# Load the previously trained dropout model (larger "m4" architecture),
# inspect its layout, plot its training history, and report the test error.
model_dropout_m4 = keras.models.load_model('model-dropout_m4.h5')
model_dropout_m4.summary()
model_dropout_m4_hist = pd.read_csv("./model-dropout_m4-hist.csv")
model_dropout_m4_hist.plot(y=['loss', 'val_loss'])
print_model_err(model_dropout_m4, x_test, y_test)
# # Hyperparameter scans
# ## Test: Make a hyperparameter scan A
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment