From 33aabf45fa9b6adaba69867ba6687af5a8098577 Mon Sep 17 00:00:00 2001
From: Florez Ospina Juan Felipe <juan.florez-ospina@psi.ch>
Date: Sat, 8 Feb 2025 19:21:17 +0100
Subject: [PATCH 1/9] Combine dictionaries of ICAD_HONO.yaml and ICAD_NO2.yaml
 into ICAD.yaml

---
 instruments/dictionaries/{ICAD_HONO.yaml => ICAD.yaml} | 0
 1 file changed, 0 insertions(+), 0 deletions(-)
 rename instruments/dictionaries/{ICAD_HONO.yaml => ICAD.yaml} (100%)

diff --git a/instruments/dictionaries/ICAD_HONO.yaml b/instruments/dictionaries/ICAD.yaml
similarity index 100%
rename from instruments/dictionaries/ICAD_HONO.yaml
rename to instruments/dictionaries/ICAD.yaml
-- 
GitLab


From 131704dcf2d6d816e3067a94f2aa91d830ab931c Mon Sep 17 00:00:00 2001
From: Florez Ospina Juan Felipe <juan.florez-ospina@psi.ch>
Date: Sat, 8 Feb 2025 19:22:27 +0100
Subject: [PATCH 2/9] Add dictionary terms from ICAD_NO2.yaml

---
 instruments/dictionaries/ICAD.yaml | 17 +++++++++++++++++
 1 file changed, 17 insertions(+)

diff --git a/instruments/dictionaries/ICAD.yaml b/instruments/dictionaries/ICAD.yaml
index 868dce7..d768ce6 100644
--- a/instruments/dictionaries/ICAD.yaml
+++ b/instruments/dictionaries/ICAD.yaml
@@ -119,3 +119,20 @@ table_header:
     description: Sample source
     units: unspecified
     rename_as: sample_source
+    
+  # NO2 additional vocabulary terms
+
+  CHOCHO (ppb):
+    description: CHOCHO concentration
+    units: ppb
+    rename_as: chocho_concentration_ppb
+  CHOCHO Uncertainty (ppb):
+    description: Uncertainty in CHOCHO concentration
+    units: ppb
+    rename_as: chocho_uncertainty_ppb
+
+  10_#ICEDOAS iter.:
+    description: Number of ICEDOAS iterations
+    units: unspecified
+    rename_as: icedoas_iterations
+
-- 
GitLab

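Each vocabulary entry above maps a raw column header to a description, units, and a
normalized column name. A minimal sketch of how such a dictionary could be applied to a
parsed table (assuming PyYAML and pandas; the DataFrame here is hypothetical):

    import yaml
    import pandas as pd

    # Build a raw-header -> normalized-name map from the merged ICAD dictionary.
    with open('instruments/dictionaries/ICAD.yaml') as f:
        vocab = yaml.safe_load(f)['table_header']
    rename_map = {raw: entry['rename_as'] for raw, entry in vocab.items()}

    # Hypothetical table carrying the raw ICAD headers.
    df = pd.DataFrame(columns=['CHOCHO (ppb)', 'CHOCHO Uncertainty (ppb)'])
    print(df.rename(columns=rename_map).columns.tolist())
    # -> ['chocho_concentration_ppb', 'chocho_uncertainty_ppb']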

From cbf468f5ac76060f34216feedd4d21aab73f4b70 Mon Sep 17 00:00:00 2001
From: Florez Ospina Juan Felipe <juan.florez-ospina@psi.ch>
Date: Sat, 8 Feb 2025 19:23:37 +0100
Subject: [PATCH 3/9] Remove instruments/dictionaries/ICAD_NO2.yaml; its
 dictionary terms now live in ICAD.yaml.

---
 instruments/dictionaries/ICAD_NO2.yaml | 113 -------------------------
 1 file changed, 113 deletions(-)
 delete mode 100644 instruments/dictionaries/ICAD_NO2.yaml

diff --git a/instruments/dictionaries/ICAD_NO2.yaml b/instruments/dictionaries/ICAD_NO2.yaml
deleted file mode 100644
index 5e6e0db..0000000
--- a/instruments/dictionaries/ICAD_NO2.yaml
+++ /dev/null
@@ -1,113 +0,0 @@
-table_header:
-  Start Date/Time (UTC):
-    description: Start date and time of the measurement in UTC
-    units: YYYY-MM-DD HH:MM:SS
-    rename_as: start_datetime_utc
-  Duration (s):
-    description: Duration of the measurement in seconds
-    units: seconds
-    rename_as: duration_seconds
-  NO2 (ppb):
-    description: NO2 concentration
-    units: ppb
-    rename_as: no2_concentration_ppb
-  NO2 Uncertainty (ppb):
-    description: Uncertainty in NO2 concentration
-    units: ppb
-    rename_as: no2_uncertainty_ppb
-  H2O (ppb):
-    description: H2O concentration
-    units: ppb
-    rename_as: h2o_concentration_ppb
-  H2O Uncertainty (ppb):
-    description: Uncertainty in H2O concentration
-    units: ppb
-    rename_as: h2o_uncertainty_ppb
-  CHOCHO (ppb):
-    description: CHOCHO concentration
-    units: ppb
-    rename_as: chocho_concentration_ppb
-  CHOCHO Uncertainty (ppb):
-    description: Uncertainty in CHOCHO concentration
-    units: ppb
-    rename_as: chocho_uncertainty_ppb
-  File Number:
-    description: File number
-    units: unspecified
-    rename_as: file_number
-  Light Intensity:
-    description: Light intensity
-    units: unspecified
-    rename_as: light_intensity
-  10_#ICEDOAS iter.:
-    description: Number of ICEDOAS iterations
-    units: unspecified
-    rename_as: icedoas_iterations
-  Cell Pressure:
-    description: Cell pressure
-    units: unspecified
-    rename_as: cell_pressure
-  Ambient Pressure:
-    description: Ambient pressure
-    units: unspecified
-    rename_as: ambient_pressure
-  Cell Temp:
-    description: Cell temperature
-    units: unspecified
-    rename_as: cell_temperature
-  Spec Temp:
-    description: Spectrometer temperature
-    units: unspecified
-    rename_as: spec_temperature
-  Lat:
-    description: Latitude
-    units: unspecified
-    rename_as: latitude
-  Lon:
-    description: Longitude
-    units: unspecified
-    rename_as: longitude
-  Height:
-    description: Height
-    units: unspecified
-    rename_as: height
-  Speed:
-    description: Speed
-    units: unspecified
-    rename_as: speed
-  GPSQuality:
-    description: GPS quality
-    units: unspecified
-    rename_as: gps_quality
-  0-Air Ref. Time:
-    description: 0-air reference time
-    units: unspecified
-    rename_as: zero_air_ref_time
-  0-Air Ref. Duration:
-    description: 0-air reference duration
-    units: unspecified
-    rename_as: zero_air_ref_duration
-  0-Air Ref. File Number:
-    description: 0-air reference file number
-    units: unspecified
-    rename_as: zero_air_ref_file_number
-  0-Air Ref. Intensity:
-    description: 0-air reference intensity
-    units: unspecified
-    rename_as: zero_air_ref_intensity
-  0-Air Ref. Rel Intensity:
-    description: 0-air reference relative intensity
-    units: unspecified
-    rename_as: zero_air_ref_relative_intensity
-  0-Air Ref. Intensity valid:
-    description: 0-air reference intensity validity
-    units: unspecified
-    rename_as: zero_air_ref_intensity_valid
-  MeasMode:
-    description: Measurement mode
-    units: unspecified
-    rename_as: measurement_mode
-  SampleSource:
-    description: Sample source
-    units: unspecified
-    rename_as: sample_source
-- 
GitLab


From 790638727155b3513dd5defc22bd8bc7702c42d8 Mon Sep 17 00:00:00 2001
From: Florez Ospina Juan Felipe <juan.florez-ospina@psi.ch>
Date: Sat, 8 Feb 2025 19:45:16 +0100
Subject: [PATCH 4/9] Make file reader selection case-insensitive by using
 ext.lower() and update config_text_reader.yaml to point to the renamed
 dictionary.

---
 instruments/readers/config_text_reader.yaml | 4 ++--
 instruments/readers/filereader_registry.py  | 3 ++-
 2 files changed, 4 insertions(+), 3 deletions(-)

diff --git a/instruments/readers/config_text_reader.yaml b/instruments/readers/config_text_reader.yaml
index 555c429..a37bd72 100644
--- a/instruments/readers/config_text_reader.yaml
+++ b/instruments/readers/config_text_reader.yaml
@@ -30,7 +30,7 @@ HONO: #ICAD/HONO:
   file_encoding : 'latin-1'
   timestamp: ['Start Date/Time (UTC)'] 
   datetime_format: '%Y-%m-%d %H:%M:%S.%f'
-  link_to_description: 'dictionaries/ICAD_HONO.yaml'
+  link_to_description: 'dictionaries/ICAD.yaml'
 
 NO2: #ICAD/NO2:
   table_header : 'Start Date/Time (UTC)	Duration (s)	NO2 (ppb)	NO2 Uncertainty (ppb)	H2O (ppb)	H2O Uncertainty (ppb)	CHOCHO (ppb)	CHOCHO Uncertainty (ppb)	File Number	Light Intensity	#ICEDOAS iter.	Cell Pressure	Ambient Pressure	Cell Temp	Spec Temp	Lat	Lon	Height	Speed	GPSQuality	0-Air Ref. Time	0-Air Ref. Duration	0-Air Ref. File Number	0-Air Ref. Intensity	0-Air Ref. Rel Intensity	0-Air Ref. Intensity valid	MeasMode	SampleSource'
@@ -38,7 +38,7 @@ NO2: #ICAD/NO2:
   file_encoding : 'latin-1'
   timestamp: ['Start Date/Time (UTC)'] 
   datetime_format: '%Y-%m-%d %H:%M:%S.%f'
-  link_to_description: 'dictionaries/ICAD_NO2.yaml'
+  link_to_description: 'dictionaries/ICAD.yaml'
 
 Lopap:
   #table_header : 'Date;Time;Ch1;490.1;500.2;510.0;520.0;530.1;540.0;550.7;603.2;700.3;800.0;Ch2;500.5;510.3;520.5;530.7;540.8;550.5;550.8;560.9;570.9;581.2;586.2;591.2;596.1;601.1;606.4;611.3;'
diff --git a/instruments/readers/filereader_registry.py b/instruments/readers/filereader_registry.py
index 64b4fb5..fc4010d 100644
--- a/instruments/readers/filereader_registry.py
+++ b/instruments/readers/filereader_registry.py
@@ -15,7 +15,7 @@ default_instruments_dir = None  # or provide an absolute path
 file_readers = {
     'ibw': lambda a1: read_xps_ibw_file_as_dict(a1),
     'txt': lambda a1: read_txt_files_as_dict(a1, instruments_dir=default_instruments_dir, work_with_copy=False),
-    'TXT': lambda a1: read_txt_files_as_dict(a1, instruments_dir=default_instruments_dir, work_with_copy=False),
+#    'TXT': lambda a1: read_txt_files_as_dict(a1, instruments_dir=default_instruments_dir, work_with_copy=False),
     'dat': lambda a1: read_txt_files_as_dict(a1, instruments_dir=default_instruments_dir, work_with_copy=False),
     'csv': lambda a1: read_txt_files_as_dict(a1, instruments_dir=default_instruments_dir, work_with_copy=False)
 }
@@ -52,6 +52,7 @@ def compute_filereader_key_from_path(hdf5_file_path):
 
     # Extract the filename and its extension
     filename, file_extension = os.path.splitext(parts[-1])
+    file_extension = file_extension.lower()
     
     # Extract the first directory directly under the root directory '/' in the hdf5 file
     subfolder_name = parts[0] if len(parts) > 1 else ""
-- 
GitLab

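The fix above normalizes the extension before the reader lookup, so '.TXT' files resolve
to the same reader as '.txt' and the duplicate uppercase registry entry can be dropped.
The normalization in isolation (the path is hypothetical):

    import os

    filename, file_extension = os.path.splitext('ICAD/HONO/2025-02-08.TXT')
    file_extension = file_extension.lower()
    print(file_extension)  # '.txt' -- now matches the lowercase reader keys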

From 81be6b54c81082c0dba69983dfee0cf6d869518a Mon Sep 17 00:00:00 2001
From: Florez Ospina Juan Felipe <juan.florez-ospina@psi.ch>
Date: Sat, 22 Feb 2025 17:10:53 +0100
Subject: [PATCH 5/9] Wrap import statements in try/except to allow explicit
 imports of submodules through the dima package, avoiding conflicts with a
 parent project.

---
 instruments/instrument_registry.xlsx       | Bin 0 -> 12089 bytes
 instruments/readers/acsm_tofware_reader.py |   5 ++++-
 instruments/readers/filereader_registry.py |  10 +++++++---
 instruments/readers/flag_reader.py         |   5 ++++-
 pipelines/data_integration.py              |  12 +++++++++---
 pipelines/metadata_revision.py             |   7 ++++++-
 src/git_ops.py                             |   9 +++++++--
 src/hdf5_ops.py                            |  10 +++++++---
 src/hdf5_writer.py                         |  12 ++++++++----
 visualization/hdf5_vis.py                  |   5 ++++-
 10 files changed, 56 insertions(+), 19 deletions(-)
 create mode 100644 instruments/instrument_registry.xlsx

diff --git a/instruments/instrument_registry.xlsx b/instruments/instrument_registry.xlsx
new file mode 100644
index 0000000000000000000000000000000000000000..283283bbe21aaa4cb7a490118e9275dc3db07ffc
GIT binary patch
literal 12089
[... base85-encoded binary payload of instruments/instrument_registry.xlsx (12089 bytes) omitted ...]

literal 0
HcmV?d00001

diff --git a/instruments/readers/acsm_tofware_reader.py b/instruments/readers/acsm_tofware_reader.py
index a519c18..c45039c 100644
--- a/instruments/readers/acsm_tofware_reader.py
+++ b/instruments/readers/acsm_tofware_reader.py
@@ -6,7 +6,10 @@ import yaml
 
 #root_dir = os.path.abspath(os.curdir)
 #sys.path.append(root_dir)
-import utils.g5505_utils as utils
+try:
+    from dima.utils import g5505_utils as utils
+except ModuleNotFoundError:
+    import utils.g5505_utils as utils
 
 
 
diff --git a/instruments/readers/filereader_registry.py b/instruments/readers/filereader_registry.py
index fc4010d..ca745a2 100644
--- a/instruments/readers/filereader_registry.py
+++ b/instruments/readers/filereader_registry.py
@@ -3,9 +3,13 @@ import sys
 #root_dir = os.path.abspath(os.curdir)
 #sys.path.append(root_dir)
 
-from instruments.readers.xps_ibw_reader import read_xps_ibw_file_as_dict
-from instruments.readers.g5505_text_reader import read_txt_files_as_dict
-
+try:
+    from dima.instruments.readers.xps_ibw_reader import read_xps_ibw_file_as_dict
+    from dima.instruments.readers.g5505_text_reader import read_txt_files_as_dict
+    
+except ModuleNotFoundError:
+    from instruments.readers.xps_ibw_reader import read_xps_ibw_file_as_dict
+    from instruments.readers.g5505_text_reader import read_txt_files_as_dict
 
 file_extensions = ['.ibw','.txt','.dat','.h5','.TXT','.csv','.pkl','.json','.yaml']
 
diff --git a/instruments/readers/flag_reader.py b/instruments/readers/flag_reader.py
index 8534bb4..46cc249 100644
--- a/instruments/readers/flag_reader.py
+++ b/instruments/readers/flag_reader.py
@@ -8,7 +8,10 @@ import json
 #from instruments.readers import set_dima_path as configpath
 #configpath.set_dima_path()
 
-from utils import g5505_utils
+try:
+    from dima.utils import g5505_utils as utils
+except ModuleNotFoundError:
+    import utils.g5505_utils as utils
 
 
 def read_jsonflag_as_dict(path_to_file):
diff --git a/pipelines/data_integration.py b/pipelines/data_integration.py
index cd453c0..5bb5bb7 100644
--- a/pipelines/data_integration.py
+++ b/pipelines/data_integration.py
@@ -22,9 +22,15 @@ from datetime import datetime
 from itertools import chain 
 
 # Import DIMA modules
-import src.hdf5_writer as hdf5_lib
-import utils.g5505_utils as utils
-from instruments.readers import filereader_registry
+try:
+    from dima.src import hdf5_writer as hdf5_lib
+    from dima.utils import g5505_utils as utils
+    from dima.instruments.readers import filereader_registry
+except ModuleNotFoundError:
+    print('[Notice] dima package not found; falling back to repo-relative imports.')
+    import src.hdf5_writer as hdf5_lib
+    import utils.g5505_utils as utils
+    from instruments.readers import filereader_registry
 
 allowed_file_extensions = filereader_registry.file_extensions
 
diff --git a/pipelines/metadata_revision.py b/pipelines/metadata_revision.py
index 0089dc6..199c886 100644
--- a/pipelines/metadata_revision.py
+++ b/pipelines/metadata_revision.py
@@ -15,7 +15,12 @@ if dimaPath not in sys.path:  # Avoid duplicate entries
 
 import h5py
 import yaml
-import src.hdf5_ops as hdf5_ops
+
+try:
+    from dima.src import hdf5_ops as hdf5_ops
+except ModuleNotFoundError:
+    import src.hdf5_ops as hdf5_ops
+
 
 
 def load_yaml(review_yaml_file):
diff --git a/src/git_ops.py b/src/git_ops.py
index b7395dc..1c55231 100644
--- a/src/git_ops.py
+++ b/src/git_ops.py
@@ -1,7 +1,12 @@
 import subprocess
 import os
-import utils.g5505_utils as utils
-from pipelines.metadata_revision import update_hdf5_file_with_review
+
+try: 
+    from dima.utils import g5505_utils as utils
+    from dima.pipelines.metadata_revision import update_hdf5_file_with_review
+except ModuleNotFoundError:
+    import utils.g5505_utils as utils
+    from pipelines.metadata_revision import update_hdf5_file_with_review
 
 def perform_git_operations(hdf5_upload):
     status_command = ['git', 'status']
diff --git a/src/hdf5_ops.py b/src/hdf5_ops.py
index ba71f73..9b8575d 100644
--- a/src/hdf5_ops.py
+++ b/src/hdf5_ops.py
@@ -17,9 +17,6 @@ if dimaPath not in sys.path:  # Avoid duplicate entries
 import h5py
 import pandas as pd
 import numpy as np
-
-import utils.g5505_utils as utils
-import src.hdf5_writer as hdf5_lib
 import logging
 import datetime
 
@@ -29,6 +26,13 @@ import yaml
 import json
 import copy
 
+try:
+    from dima.utils import g5505_utils as utils
+    from dima.src import hdf5_writer as hdf5_lib
+except ModuleNotFoundError:
+    import utils.g5505_utils as utils
+    import src.hdf5_writer as hdf5_lib
+
 class HDF5DataOpsManager():
 
     """
diff --git a/src/hdf5_writer.py b/src/hdf5_writer.py
index 3006b7e..c8f1475 100644
--- a/src/hdf5_writer.py
+++ b/src/hdf5_writer.py
@@ -1,15 +1,19 @@
 import sys
 import os
-root_dir = os.path.abspath(os.curdir)
-sys.path.append(root_dir)
+#root_dir = os.path.abspath(os.curdir)
+#sys.path.append(root_dir)
 
 import pandas as pd
 import numpy as np
 import h5py
 import logging
 
-import utils.g5505_utils as utils
-import instruments.readers.filereader_registry as filereader_registry
+try:
+    from dima.utils import g5505_utils as utils
+    from dima.instruments.readers import filereader_registry as filereader_registry
+except ModuleNotFoundError:
+    import utils.g5505_utils as utils
+    import instruments.readers.filereader_registry as filereader_registry
 
  
    
diff --git a/visualization/hdf5_vis.py b/visualization/hdf5_vis.py
index c1d8a22..1fc4948 100644
--- a/visualization/hdf5_vis.py
+++ b/visualization/hdf5_vis.py
@@ -13,8 +13,11 @@ from plotly.subplots import make_subplots
 import plotly.graph_objects as go
 import plotly.express as px
 #import plotly.io as pio
-from src.hdf5_ops import get_parent_child_relationships
 
+try:
+    from dima.src.hdf5_ops import get_parent_child_relationships
+except ModuleNotFoundError:
+    from src.hdf5_ops import get_parent_child_relationships
  
 
 def display_group_hierarchy_on_a_treemap(filename: str):
-- 
GitLab

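The same two-step import guard is applied to every module touched above. Shown in
isolation, the pattern prefers the package-qualified path when DIMA is importable as the
'dima' package (e.g. vendored inside a parent project) and falls back to repo-relative
imports when running from the repository root:

    try:
        # DIMA installed or vendored as a package inside a parent project.
        from dima.utils import g5505_utils as utils
    except ModuleNotFoundError:
        # Running from the DIMA repository root itself.
        import utils.g5505_utils as utils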

From 02e926e0032467c6bad625b8f4cef23ec8fac2a9 Mon Sep 17 00:00:00 2001
From: Florez Ospina Juan Felipe <juan.florez-ospina@psi.ch>
Date: Sat, 22 Feb 2025 17:51:56 +0100
Subject: [PATCH 6/9] Move filereader_registry.py out of the readers folder.

---
 instruments/{readers => }/filereader_registry.py | 0
 1 file changed, 0 insertions(+), 0 deletions(-)
 rename instruments/{readers => }/filereader_registry.py (100%)

diff --git a/instruments/readers/filereader_registry.py b/instruments/filereader_registry.py
similarity index 100%
rename from instruments/readers/filereader_registry.py
rename to instruments/filereader_registry.py
-- 
GitLab


From e5fdc6fa316e59b4a16d76e07fcf09ad6f34228a Mon Sep 17 00:00:00 2001
From: Florez Ospina Juan Felipe <juan.florez-ospina@psi.ch>
Date: Mon, 24 Feb 2025 17:27:12 +0100
Subject: [PATCH 7/9] Update all file readers with a command-line interface so
 they can be run as subprocesses. Also add registry.yaml to decouple the code
 from user-defined instrument adaptations and extensions.

---
 instruments/filereader_registry.py         |  97 +++++++++++++----
 instruments/readers/acsm_flag_reader.py    | 101 ++++++++++++++++++
 instruments/readers/acsm_tofware_reader.py |  76 +++++++++++--
 instruments/readers/flag_reader.py         |  42 --------
 instruments/readers/g5505_text_reader.py   | 118 ++++++++++++++++++---
 instruments/readers/xps_ibw_reader.py      |  69 +++++++++++-
 instruments/registry.yaml                  |  75 +++++++++++++
 7 files changed, 494 insertions(+), 84 deletions(-)
 create mode 100644 instruments/readers/acsm_flag_reader.py
 delete mode 100644 instruments/readers/flag_reader.py
 create mode 100644 instruments/registry.yaml

diff --git a/instruments/filereader_registry.py b/instruments/filereader_registry.py
index ca745a2..1d21384 100644
--- a/instruments/filereader_registry.py
+++ b/instruments/filereader_registry.py
@@ -1,15 +1,20 @@
 import os
 import sys
+import subprocess
+import yaml
 #root_dir = os.path.abspath(os.curdir)
 #sys.path.append(root_dir)
 
-try:
-    from dima.instruments.readers.xps_ibw_reader import read_xps_ibw_file_as_dict
-    from dima.instruments.readers.g5505_text_reader import read_txt_files_as_dict
+#try:
+#    from dima.instruments.readers.xps_ibw_reader import read_xps_ibw_file_as_dict
+#    from dima.instruments.readers.g5505_text_reader import read_txt_files_as_dict
     
-except ModuleNotFoundError:
-    from instruments.readers.xps_ibw_reader import read_xps_ibw_file_as_dict
-    from instruments.readers.g5505_text_reader import read_txt_files_as_dict
+#except ModuleNotFoundError as e:
+#    print(e)    
+from instruments.readers.xps_ibw_reader import read_xps_ibw_file_as_dict
+from instruments.readers.g5505_text_reader import read_txt_files_as_dict
+from instruments.readers.acsm_tofware_reader import read_acsm_files_as_dict
+from instruments.readers.acsm_flag_reader import read_jsonflag_as_dict
 
 file_extensions = ['.ibw','.txt','.dat','.h5','.TXT','.csv','.pkl','.json','.yaml']
 
@@ -19,24 +24,37 @@ default_instruments_dir = None  # or provide an absolute path
 file_readers = {
     'ibw': lambda a1: read_xps_ibw_file_as_dict(a1),
     'txt': lambda a1: read_txt_files_as_dict(a1, instruments_dir=default_instruments_dir, work_with_copy=False),
-#    'TXT': lambda a1: read_txt_files_as_dict(a1, instruments_dir=default_instruments_dir, work_with_copy=False),
     'dat': lambda a1: read_txt_files_as_dict(a1, instruments_dir=default_instruments_dir, work_with_copy=False),
-    'csv': lambda a1: read_txt_files_as_dict(a1, instruments_dir=default_instruments_dir, work_with_copy=False)
-}
+    'csv': lambda a1: read_txt_files_as_dict(a1, instruments_dir=default_instruments_dir, work_with_copy=False),
+    'ACSM_TOFWARE_txt' : lambda x: read_acsm_files_as_dict(x, instruments_dir=default_instruments_dir, work_with_copy=False),
+    'ACSM_TOFWARE_csv' : lambda x: read_acsm_files_as_dict(x, instruments_dir=default_instruments_dir, work_with_copy=False),
+    'ACSM_TOFWARE_flags_json' : lambda x: read_jsonflag_as_dict(x)}
 
-# Add new "instrument reader (Data flagging app data)"
+REGISTRY_FILE = "registry.yaml" #os.path.join(os.path.dirname(__file__), "registry.yaml")
 
-from instruments.readers.acsm_tofware_reader import read_acsm_files_as_dict
-file_extensions.append('.txt') 
-file_readers.update({'ACSM_TOFWARE_txt' : lambda x: read_acsm_files_as_dict(x, instruments_dir=default_instruments_dir, work_with_copy=False)})
+def load_registry():
+
+    module_dir = os.path.dirname(__file__)
+    instruments_dir = os.path.join(module_dir, '..')
+
+    # Normalize the path (resolves any '..' in the path)
+    registry_path = os.path.abspath(os.path.join(module_dir,REGISTRY_FILE))
+
+    with open(registry_path, "r") as file:
+        return yaml.safe_load(file)["instruments"]
+    
+def find_reader(instrument_folder, file_extension):
 
-file_extensions.append('.csv') 
-file_readers.update({'ACSM_TOFWARE_csv' : lambda x: read_acsm_files_as_dict(x, instruments_dir=default_instruments_dir, work_with_copy=False)})
 
-from instruments.readers.flag_reader import read_jsonflag_as_dict
-file_extensions.append('.json') 
-file_readers.update({'ACSM_TOFWARE_flags_json' : lambda x: read_jsonflag_as_dict(x)})
 
+    registry = load_registry()
+    
+    for entry in registry:
+        if entry["instrumentFolderName"] == instrument_folder and entry["fileExtension"] == file_extension:
+            return entry["fileReaderPath"], entry["InstrumentDictionaryPath"]
+    
+    return None, None  # Not found
+    
 def compute_filereader_key_from_path(hdf5_file_path):
     """Constructs the key 'instrumentname_ext' based on hdf5_file_path, structured as
    /instrumentname/to/filename.ext, which identifies the file reader that should be used to read such a file. 
@@ -81,4 +99,45 @@ def select_file_reader(path):
         return file_readers[extension]
     
     # Default case if no reader is found
-    return lambda x : None
\ No newline at end of file
+    return lambda x : None
+
+
+
+def run_reader(hdf5_file_path, src_file_path, dst_group_name):
+    try:
+        thisFilePath = os.path.abspath(__file__)
+    except NameError:
+        print("Error: __file__ is not available. Ensure the script is being run from a file.")
+        print("[Notice] Path to DIMA package may not be resolved properly.")
+        thisFilePath = os.getcwd()  # Use current directory or specify a default
+
+    projectPath = os.path.normpath(os.path.join(thisFilePath, "..",'..'))  # Move up to project root
+
+    # 
+    full_string, file_extension = compute_filereader_key_from_path(dst_group_name)
+    full_string_parts = full_string.split("_")
+    full_string_parts.remove(file_extension)
+    instrument_folder = '_'.join(full_string_parts)
+
+
+
+    reader_path, dict_path = find_reader(instrument_folder, file_extension)
+    
+
+    if reader_path:
+        reader_path = os.path.normpath(os.path.join(projectPath, reader_path))
+        if not os.path.exists(reader_path):
+            raise FileNotFoundError(f"File reader {reader_path} not found for key {full_string}. Verify the reader is properly referenced in registry.yaml.")
+        else:
+            print(f'Attempting to run {reader_path}')
+
+
+        command = ["python", reader_path, hdf5_file_path, src_file_path, instrument_folder]
+        #if dict_path:
+        #    args.append(dict_path)        
+        print(f"Running: {command}")
+        output = subprocess.run(command, capture_output=True)#, check=True)
+        print('Subprocess output',output.stdout)
+    else:
+        print(f'There is no file reader available to process files in {instrument_folder}.')
+        # no reader registered for this instrument folder
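
Taken together, load_registry, find_reader, and run_reader replace the hard-coded reader
table with registry-driven dispatch: the instrument folder and extension are recovered
from the destination group name, looked up in registry.yaml, and the matching reader
script is launched as a subprocess. A usage sketch (HDF5 and source paths hypothetical):

    from instruments.filereader_registry import find_reader, run_reader

    # Resolve the reader and dictionary registered for ICAD .dat files.
    reader_path, dict_path = find_reader('ICAD', 'dat')
    # -> ('instruments/readers/g5505_text_reader.py',
    #     'instruments/dictionaries/ICAD.yaml')

    # Read the source file in a subprocess and append it to the HDF5 collection.
    run_reader('collection.h5', 'data/ICAD/HONO/run01.dat', 'ICAD/HONO/run01.dat')
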
diff --git a/instruments/readers/acsm_flag_reader.py b/instruments/readers/acsm_flag_reader.py
new file mode 100644
index 0000000..ab190ab
--- /dev/null
+++ b/instruments/readers/acsm_flag_reader.py
@@ -0,0 +1,101 @@
+import sys
+import os
+
+try:
+    thisFilePath = os.path.abspath(__file__)
+except NameError:
+    print("Error: __file__ is not available. Ensure the script is being run from a file.")
+    print("[Notice] Path to DIMA package may not be resolved properly.")
+    thisFilePath = os.getcwd()  # Use current directory or specify a default
+
+dimaPath = os.path.normpath(os.path.join(thisFilePath, "..",'..','..'))  # Move up to project root
+
+if dimaPath not in sys.path:  # Avoid duplicate entries
+    sys.path.insert(0,dimaPath)
+    
+import pandas as pd
+import collections
+import json
+import h5py
+import argparse
+import logging
+
+import utils.g5505_utils as utils
+
+
+
+def read_jsonflag_as_dict(path_to_file):
+
+
+    file_dict = {}
+    path_tail, path_head = os.path.split(path_to_file)
+
+    file_dict['name'] = path_head
+    # TODO: review this header dictionary, it may not be the best way to represent header data
+    file_dict['attributes_dict'] = {}
+    file_dict['datasets'] = []
+
+    try:
+        with open(path_to_file, 'r') as stream:
+            flag = json.load(stream)
+    except (FileNotFoundError, json.JSONDecodeError) as exc:
+        print(exc)
+
+    dataset = {}
+    dataset['name'] = 'data_table'
+    dataset['data'] = utils.convert_attrdict_to_np_structured_array(flag)
+    dataset['shape'] = dataset['data'].shape
+    dataset['dtype'] = type(dataset['data'])  
+
+    file_dict['datasets'].append(dataset)
+
+    return file_dict
+
+if __name__ == "__main__":
+
+    from src.hdf5_ops import save_file_dict_to_hdf5
+    from utils.g5505_utils import created_at
+
+    # Set up argument parsing
+    parser = argparse.ArgumentParser(description="Data ingestion process to HDF5 files.")
+    parser.add_argument('dst_file_path', type=str, help="Path to the target HDF5 file.")
+    parser.add_argument('src_file_path', type=str, help="Relative path to source file to be saved to target HDF5 file.")
+    parser.add_argument('dst_group_name', type=str, help="Group name '/instFolder/[category]/fileName' in the target HDF5 file.")
+
+    args = parser.parse_args()
+
+    hdf5_file_path = args.dst_file_path
+    src_file_path = args.src_file_path
+    dst_group_name = args.dst_group_name
+    default_mode = 'r+'
+
+    try:
+        # Read source file and return an internal dictionary representation
+        idr_dict = read_jsonflag_as_dict(src_file_path)
+
+        if not os.path.exists(hdf5_file_path):
+            default_mode = 'w'
+
+        print(f'Opening HDF5 file: {hdf5_file_path} in mode {default_mode}')
+        
+        with h5py.File(hdf5_file_path, mode=default_mode, track_order=True) as hdf5_file_obj:
+            try:
+                # Create group if it does not exist
+                if dst_group_name not in hdf5_file_obj:
+                    hdf5_file_obj.create_group(dst_group_name)
+                    hdf5_file_obj[dst_group_name].attrs['creation_date'] = created_at().encode('utf-8')
+                    print(f'Created new group: {dst_group_name}')
+                else:
+                    print(f'Group {dst_group_name} already exists. Proceeding with data transfer...')
+
+            except Exception as inst:
+                logging.error('Failed to create group %s in HDF5: %s', dst_group_name, inst)
+
+            # Save dictionary to HDF5
+            save_file_dict_to_hdf5(hdf5_file_obj, dst_group_name, idr_dict)
+            print(f'Completed saving file dict with keys: {idr_dict.keys()}')
+
+    except Exception as e:
+        logging.error('File reader failed to process %s: %s', src_file_path, e)
+        print(f'File reader failed to process {src_file_path}. See logs for details.')
+
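
Each reader's __main__ block exposes the same three positional arguments (destination
HDF5 file, source file, destination group name), which is exactly what run_reader passes
on the command line. An equivalent manual invocation (all paths hypothetical):

    import subprocess

    subprocess.run([
        'python', 'instruments/readers/acsm_flag_reader.py',
        'collection.h5',                 # dst_file_path
        'data/ACSM_TOFWARE/flags.json',  # src_file_path
        'ACSM_TOFWARE/flags.json',       # dst_group_name
    ], check=True)
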
diff --git a/instruments/readers/acsm_tofware_reader.py b/instruments/readers/acsm_tofware_reader.py
index c45039c..8579660 100644
--- a/instruments/readers/acsm_tofware_reader.py
+++ b/instruments/readers/acsm_tofware_reader.py
@@ -1,15 +1,26 @@
 import sys
 import os
+
+try:
+    thisFilePath = os.path.abspath(__file__)
+except NameError:
+    print("Error: __file__ is not available. Ensure the script is being run from a file.")
+    print("[Notice] Path to DIMA package may not be resolved properly.")
+    thisFilePath = os.getcwd()  # Use current directory or specify a default
+
+dimaPath = os.path.normpath(os.path.join(thisFilePath, "..",'..','..'))  # Move up to project root
+
+if dimaPath not in sys.path:  # Avoid duplicate entries
+    sys.path.insert(0,dimaPath)
+    
 import pandas as pd
 import collections
 import yaml
+import h5py
+import argparse
+import logging
 
-#root_dir = os.path.abspath(os.curdir)
-#sys.path.append(root_dir)
-try:
-    from dima.utils import g5505_utils as utils
-except ModuleNotFoundError:
-    import utils.g5505_utils as utils
+import utils.g5505_utils as utils
 
 
 
@@ -223,4 +234,55 @@ def read_acsm_files_as_dict(filename: str, instruments_dir: str = None, work_wit
     except:
         return {}
 
-    return file_dict
\ No newline at end of file
+    return file_dict
+
+
+
+if __name__ == "__main__":
+
+    from src.hdf5_ops import save_file_dict_to_hdf5
+    from utils.g5505_utils import created_at
+
+    # Set up argument parsing
+    parser = argparse.ArgumentParser(description="Data ingestion process to HDF5 files.")
+    parser.add_argument('dst_file_path', type=str, help="Path to the target HDF5 file.")
+    parser.add_argument('src_file_path', type=str, help="Relative path to source file to be saved to target HDF5 file.")
+    parser.add_argument('dst_group_name', type=str, help="Group name '/instFolder/[category]/fileName' in the target HDF5 file.")
+
+    args = parser.parse_args()
+
+    hdf5_file_path = args.dst_file_path
+    src_file_path = args.src_file_path
+    dst_group_name = args.dst_group_name
+    default_mode = 'r+'
+
+    try:
+        # Read source file and return an internal dictionary representation
+        idr_dict = read_acsm_files_as_dict(src_file_path)
+
+        if not os.path.exists(hdf5_file_path):
+            default_mode = 'w'
+
+        print(f'Opening HDF5 file: {hdf5_file_path} in mode {default_mode}')
+        
+        with h5py.File(hdf5_file_path, mode=default_mode, track_order=True) as hdf5_file_obj:
+            try:
+                # Create group if it does not exist
+                if dst_group_name not in hdf5_file_obj:
+                    hdf5_file_obj.create_group(dst_group_name)
+                    hdf5_file_obj[dst_group_name].attrs['creation_date'] = created_at().encode('utf-8')
+                    print(f'Created new group: {dst_group_name}')
+                else:
+                    print(f'Group {dst_group_name} already exists. Proceeding with data transfer...')
+
+            except Exception as inst:
+                logging.error('Failed to create group %s in HDF5: %s', dst_group_name, inst)
+
+            # Save dictionary to HDF5
+            save_file_dict_to_hdf5(hdf5_file_obj, dst_group_name, idr_dict)
+            print(f'Completed saving file dict with keys: {idr_dict.keys()}')
+
+    except Exception as e:
+        logging.error('File reader failed to process %s: %s', src_file_path, e)
+        print(f'File reader failed to process {src_file_path}. See logs for details.')
+
diff --git a/instruments/readers/flag_reader.py b/instruments/readers/flag_reader.py
deleted file mode 100644
index 46cc249..0000000
--- a/instruments/readers/flag_reader.py
+++ /dev/null
@@ -1,42 +0,0 @@
-import os
-import json
-
-#root_dir = os.path.abspath(os.curdir)
-#sys.path.append(root_dir)
-#print(__file__)
-
-#from instruments.readers import set_dima_path as configpath
-#configpath.set_dima_path()
-
-try:
-    from dima.utils import g5505_utils as utils
-except ModuleNotFoundError:
-    import utils.g5505_utils as utils
-
-
-def read_jsonflag_as_dict(path_to_file):
-
-
-    file_dict = {}
-    path_tail, path_head = os.path.split(path_to_file)
-
-    file_dict['name'] = path_head
-    # TODO: review this header dictionary, it may not be the best way to represent header data
-    file_dict['attributes_dict'] = {}
-    file_dict['datasets'] = []
-
-    try:
-        with open(path_to_file, 'r') as stream:
-            flag = json.load(stream)#, Loader=json.FullLoader)
-    except (FileNotFoundError, json.JSONDecodeError) as exc:
-        print(exc)
-
-    dataset = {}
-    dataset['name'] = 'data_table'#_numerical_variables'
-    dataset['data'] = g5505_utils.convert_attrdict_to_np_structured_array(flag) #df_numerical_attrs.to_numpy()
-    dataset['shape'] = dataset['data'].shape
-    dataset['dtype'] = type(dataset['data'])  
-
-    file_dict['datasets'].append(dataset)
-
-    return file_dict
\ No newline at end of file
diff --git a/instruments/readers/g5505_text_reader.py b/instruments/readers/g5505_text_reader.py
index 396e0e1..262959d 100644
--- a/instruments/readers/g5505_text_reader.py
+++ b/instruments/readers/g5505_text_reader.py
@@ -1,19 +1,40 @@
 import sys
 import os
+
+try:
+    thisFilePath = os.path.abspath(__file__)
+except NameError:
+    print("Error: __file__ is not available. Ensure the script is being run from a file.")
+    print("[Notice] Path to DIMA package may not be resolved properly.")
+    thisFilePath = os.getcwd()  # Use current directory or specify a default
+
+dimaPath = os.path.normpath(os.path.join(thisFilePath, "..",'..','..'))  # Move up to project root
+
+if dimaPath not in sys.path:  # Avoid duplicate entries
+    sys.path.insert(0,dimaPath)
+    
 import pandas as pd
 import collections
 import yaml
-
+import h5py
+import argparse
+import logging
 # Import project modules
-root_dir = os.path.abspath(os.curdir)
-sys.path.append(root_dir)
-
-import utils.g5505_utils as utils
+#root_dir = os.path.abspath(os.curdir)
+#sys.path.append(root_dir)
 
 
+#try:
+#    from dima.utils import g5505_utils as utils
+#except ModuleNotFoundError:
+#    import utils.g5505_utils as utils
+#    import src.hdf5_ops as hdf5_ops
+import utils.g5505_utils as utils
 
 
 def read_txt_files_as_dict(filename: str, instruments_dir: str = None, work_with_copy: bool = True):
+
+    filename = os.path.normpath(filename)
     # If instruments_dir is not provided, use the default path relative to the module directory
     if not instruments_dir:
         # Assuming the instruments folder is one level up from the source module directory
@@ -23,6 +44,8 @@ def read_txt_files_as_dict(filename: str, instruments_dir: str = None, work_with
     # Normalize the path (resolves any '..' in the path)
     instrument_configs_path = os.path.abspath(os.path.join(instruments_dir,'readers','config_text_reader.yaml'))
 
+    print(instrument_configs_path)
+
     with open(instrument_configs_path,'r') as stream:
         try:
             config_dict = yaml.load(stream, Loader=yaml.FullLoader)
@@ -44,7 +67,9 @@ def read_txt_files_as_dict(filename: str, instruments_dir: str = None, work_with
     description_dict = {}
 
     for instFolder in config_dict.keys():
+        
         if instFolder in filename.split(os.sep):
+            
             file_encoding = config_dict[instFolder].get('file_encoding',file_encoding)
             separator = config_dict[instFolder].get('separator',separator)
             table_header = config_dict[instFolder].get('table_header',table_header)
@@ -76,6 +101,7 @@ def read_txt_files_as_dict(filename: str, instruments_dir: str = None, work_with
     #with open(tmp_filename,'rb',encoding=file_encoding,errors='ignore') as f:
 
     if not isinstance(table_header, list):
+        
         table_header = [table_header]
         file_encoding = [file_encoding]
         separator = [separator]
@@ -87,14 +113,17 @@ def read_txt_files_as_dict(filename: str, instruments_dir: str = None, work_with
         with open(tmp_filename,'rb') as f:
             
             for line_number, line in enumerate(f):   
-
+                decoded_line = line.decode(file_encoding[0])  # decode once; tb_idx is not yet set on the first line
                 
+
                 for tb_idx, tb in enumerate(table_header):
-                    if tb in line.decode(file_encoding[tb_idx]):
+                    # check this candidate header against the current line
+                    if tb in decoded_line:                    
                         break
-
-                if tb in line.decode(file_encoding[tb_idx]):   
-                    list_of_substrings = line.decode(file_encoding[tb_idx]).split(separator[tb_idx].replace('\\t','\t'))  
+                
+                if tb in decoded_line:   
+                    
+                    list_of_substrings = decoded_line.split(separator[tb_idx].replace('\\t','\t'))  
 
                     # Count occurrences of each substring
                     substring_counts = collections.Counter(list_of_substrings)
@@ -109,9 +138,11 @@ def read_txt_files_as_dict(filename: str, instruments_dir: str = None, work_with
 
                     #print(line_number, len(column_names ),'\n')
                     break
+                else:
+                    pass  # header not on this line; keep scanning
                 # Subdivide line into words, and join them by single space. 
                # I assume this produces a cleaner line with no stray separator characters (\t, \r) or extra spaces.
-                list_of_substrings = line.decode(file_encoding[tb_idx]).split()
+                list_of_substrings = decoded_line.split()
                # TODO: ideally we should use a multiline string, but the yaml parser does not recognize \n as a special character
                 #line = ' '.join(list_of_substrings+['\n'])
                 #line = ' '.join(list_of_substrings)     
@@ -119,8 +150,13 @@ def read_txt_files_as_dict(filename: str, instruments_dir: str = None, work_with
 
   
     # TODO: it does not work with separator as none :(. fix for RGA
+    
     try:
+        print(column_names)
         if not 'infer' in table_header:
+            #print(table_header)
+            #print(file_encoding[tb_idx])
+            
             df = pd.read_csv(tmp_filename, 
                             delimiter = separator[tb_idx].replace('\\t','\t'), 
                             header=line_number, 
@@ -138,7 +174,7 @@ def read_txt_files_as_dict(filename: str, instruments_dir: str = None, work_with
         df_numerical_attrs = df.select_dtypes(include ='number')
         df_categorical_attrs = df.select_dtypes(exclude='number')
         numerical_variables = [item for item in df_numerical_attrs.columns]       
-
+        
         # Consolidate into single timestamp column the separate columns 'date' 'time' specified in text_data_source.yaml
         if timestamp_variables:
             #df_categorical_attrs['timestamps'] = [' '.join(df_categorical_attrs.loc[i,timestamp_variables].to_numpy()) for i in df.index]
@@ -148,7 +184,7 @@ def read_txt_files_as_dict(filename: str, instruments_dir: str = None, work_with
             #df_categorical_attrs['timestamps'] = df_categorical_attrs[timestamp_variables].astype(str).agg(' '.join, axis=1)
             timestamps_name = ' '.join(timestamp_variables)
             df_categorical_attrs[ timestamps_name] = df_categorical_attrs[timestamp_variables].astype(str).agg(' '.join, axis=1)
-
+        
             valid_indices = []
             if datetime_format:
                 df_categorical_attrs[ timestamps_name] = pd.to_datetime(df_categorical_attrs[ timestamps_name],format=datetime_format,errors='coerce')
@@ -249,7 +285,59 @@ def read_txt_files_as_dict(filename: str, instruments_dir: str = None, work_with
         #    if timestamps_name in categorical_variables:
         #        dataset['attributes'] = {timestamps_name: utils.parse_attribute({'unit':'YYYY-MM-DD HH:MM:SS.ffffff'})}
         #    file_dict['datasets'].append(dataset) 
-    except:
+    except Exception as e:
+        print(e)
         return {}
 
-    return file_dict
\ No newline at end of file
+    return file_dict
+
+
+
+if __name__ == "__main__":
+
+    from src.hdf5_ops import save_file_dict_to_hdf5
+    from utils.g5505_utils import created_at
+
+    # Set up argument parsing
+    parser = argparse.ArgumentParser(description="Data ingestion process to HDF5 files.")
+    parser.add_argument('dst_file_path', type=str, help="Path to the target HDF5 file.")
+    parser.add_argument('src_file_path', type=str, help="Relative path to source file to be saved to target HDF5 file.")
+    parser.add_argument('dst_group_name', type=str, help="Group name '/instFolder/[category]/fileName' in the target HDF5 file.")
+
+    args = parser.parse_args()
+
+    hdf5_file_path = args.dst_file_path
+    src_file_path = args.src_file_path
+    dst_group_name = args.dst_group_name
+    default_mode = 'r+'
+
+    try:
+        # Read source file and return an internal dictionary representation
+        idr_dict = read_txt_files_as_dict(src_file_path)
+
+        if not os.path.exists(hdf5_file_path):
+            default_mode = 'w'
+
+        print(f'Opening HDF5 file: {hdf5_file_path} in mode {default_mode}')
+        
+        with h5py.File(hdf5_file_path, mode=default_mode, track_order=True) as hdf5_file_obj:
+            try:
+                # Create group if it does not exist
+                if dst_group_name not in hdf5_file_obj:
+                    hdf5_file_obj.create_group(dst_group_name)
+                    hdf5_file_obj[dst_group_name].attrs['creation_date'] = created_at().encode('utf-8')
+                    print(f'Created new group: {dst_group_name}')
+                else:
+                    print(f'Group {dst_group_name} already exists. Proceeding with data transfer...')
+
+            except Exception as inst:
+                logging.error('Failed to create group %s in HDF5: %s', dst_group_name, inst)
+
+            # Save dictionary to HDF5
+            save_file_dict_to_hdf5(hdf5_file_obj, dst_group_name, idr_dict)
+            print(f'Completed saving file dict with keys: {idr_dict.keys()}')
+
+    except Exception as e:
+        logging.error('File reader failed to process %s: %s', src_file_path, e)
+        print(f'File reader failed to process {src_file_path}. See logs for details.')
+
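
The reworked loop above decodes each line once and scans it against all configured table
headers before handing the file to pandas. The core of that scan, isolated (encoding and
header string taken from config_text_reader.yaml; the data path is hypothetical):

    candidates = ['Start Date/Time (UTC)']  # per-instrument table_header entries
    with open('data/ICAD/HONO/run01.dat', 'rb') as f:
        for line_number, line in enumerate(f):
            decoded_line = line.decode('latin-1')
            if any(tb in decoded_line for tb in candidates):
                break  # line_number now indexes the header row
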
diff --git a/instruments/readers/xps_ibw_reader.py b/instruments/readers/xps_ibw_reader.py
index b3881c6..cdb022c 100644
--- a/instruments/readers/xps_ibw_reader.py
+++ b/instruments/readers/xps_ibw_reader.py
@@ -1,5 +1,10 @@
 import os
+import sys
+import h5py
+
 from igor2.binarywave import load as loadibw
+import logging
+import argparse
 
 def read_xps_ibw_file_as_dict(filename):
     """
@@ -76,4 +81,66 @@ def read_xps_ibw_file_as_dict(filename):
     file_dict['datasets'].append(dataset)
     
 
-    return file_dict
\ No newline at end of file
+    return file_dict
+
+if __name__ == "__main__":
+
+
+    try:
+        thisFilePath = os.path.abspath(__file__)
+    except NameError:
+        print("Error: __file__ is not available. Ensure the script is being run from a file.")
+        print("[Notice] Path to DIMA package may not be resolved properly.")
+        thisFilePath = os.getcwd()  # Use current directory or specify a default
+
+    dimaPath = os.path.normpath(os.path.join(thisFilePath, "..",'..','..'))  # Move up to project root
+
+    if dimaPath not in sys.path:  # Avoid duplicate entries
+        sys.path.insert(0,dimaPath)
+
+    from src.hdf5_ops import save_file_dict_to_hdf5
+    from utils.g5505_utils import created_at
+
+    # Set up argument parsing
+    parser = argparse.ArgumentParser(description="Data ingestion process to HDF5 files.")
+    parser.add_argument('dst_file_path', type=str, help="Path to the target HDF5 file.")
+    parser.add_argument('src_file_path', type=str, help="Relative path to source file to be saved to target HDF5 file.")
+    parser.add_argument('dst_group_name', type=str, help="Group name '/instFolder/[category]/fileName' in the target HDF5 file.")
+
+    args = parser.parse_args()
+
+    hdf5_file_path = args.dst_file_path
+    src_file_path = args.src_file_path
+    dst_group_name = args.dst_group_name
+    default_mode = 'r+'
+
+    try:
+        # Read source file and return an internal dictionary representation
+        idr_dict = read_xps_ibw_file_as_dict(src_file_path)
+
+        if not os.path.exists(hdf5_file_path):
+            default_mode = 'w'
+
+        print(f'Opening HDF5 file: {hdf5_file_path} in mode {default_mode}')
+        
+        with h5py.File(hdf5_file_path, mode=default_mode, track_order=True) as hdf5_file_obj:
+            try:
+                # Create group if it does not exist
+                if dst_group_name not in hdf5_file_obj:
+                    hdf5_file_obj.create_group(dst_group_name)
+                    hdf5_file_obj[dst_group_name].attrs['creation_date'] = created_at().encode('utf-8')
+                    print(f'Created new group: {dst_group_name}')
+                else:
+                    print(f'Group {dst_group_name} already exists. Proceeding with data transfer...')
+
+            except Exception as inst:
+                logging.error('Failed to create group %s in HDF5: %s', dst_group_name, inst)
+
+            # Save dictionary to HDF5
+            save_file_dict_to_hdf5(hdf5_file_obj, dst_group_name, idr_dict)
+            print(f'Completed saving file dict with keys: {idr_dict.keys()}')
+
+    except Exception as e:
+        logging.error('File reader failed to process %s: %s', src_file_path, e)
+        print(f'File reader failed to process {src_file_path}. See logs for details.')
+
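
Both reader CLIs added in this series share the same positional contract: dst_file_path, src_file_path, dst_group_name. The reader can also be used directly from Python; a minimal sketch, assuming the interpreter runs from the project root (the .ibw path and group name below are hypothetical placeholders):

    from instruments.readers.xps_ibw_reader import read_xps_ibw_file_as_dict

    # Read the Igor binary wave file into the internal dictionary representation
    idr_dict = read_xps_ibw_file_as_dict('data/SES/scan0001.ibw')  # placeholder path

    print(idr_dict['name'])                            # filename, used as the HDF5 subgroup name
    print([d['name'] for d in idr_dict['datasets']])   # datasets that would be written

    # Equivalent command-line invocation (writes into output.h5 instead):
    #   python instruments/readers/xps_ibw_reader.py output.h5 data/SES/scan0001.ibw /SES/scan0001.ibw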
diff --git a/instruments/registry.yaml b/instruments/registry.yaml
new file mode 100644
index 0000000..797226c
--- /dev/null
+++ b/instruments/registry.yaml
@@ -0,0 +1,75 @@
+instruments:
+  - instrumentFolderName: default
+    fileExtension: csv
+    fileReaderPath: instruments/readers/g5505_text_reader.py
+    InstrumentDictionaryPath: null
+
+  - instrumentFolderName: NEXAFS
+    fileExtension: h5
+    fileReaderPath: null
+    InstrumentDictionaryPath: null
+
+  - instrumentFolderName: SES
+    fileExtension: ibw
+    fileReaderPath: instruments/readers/xps_ibw_reader.py
+    InstrumentDictionaryPath: null
+
+  - instrumentFolderName: RGA
+    fileExtension: txt
+    fileReaderPath: instruments/readers/g5505_text_reader.py
+    InstrumentDictionaryPath: instruments/dictionaries/RGA.yaml
+
+  - instrumentFolderName: Pressure
+    fileExtension: dat
+    fileReaderPath: instruments/readers/g5505_text_reader.py
+    InstrumentDictionaryPath: instruments/dictionaries/Pressure.yaml
+
+  - instrumentFolderName: Humidity_Sensors
+    fileExtension: dat
+    fileReaderPath: instruments/readers/g5505_text_reader.py
+    InstrumentDictionaryPath: instruments/dictionaries/Humidity_Sensors.yaml
+
+  - instrumentFolderName: ICAD
+    fileExtension: dat
+    fileReaderPath: instruments/readers/g5505_text_reader.py
+    InstrumentDictionaryPath: instruments/dictionaries/ICAD.yaml
+
+  - instrumentFolderName: Lopap
+    fileExtension: dat
+    fileReaderPath: instruments/readers/g5505_text_reader.py
+    InstrumentDictionaryPath: instruments/dictionaries/Lopap.yaml
+
+  - instrumentFolderName: T200_NOx
+    fileExtension: dat
+    fileReaderPath: instruments/readers/g5505_text_reader.py
+    InstrumentDictionaryPath: instruments/dictionaries/T200_NOx.yaml
+
+  - instrumentFolderName: T360U_CO2
+    fileExtension: dat
+    fileReaderPath: instruments/readers/g5505_text_reader.py
+    InstrumentDictionaryPath: instruments/dictionaries/T360U_CO2.yaml
+
+  - instrumentFolderName: htof
+    fileExtension: h5
+    fileReaderPath: null
+    InstrumentDictionaryPath: null
+
+  - instrumentFolderName: smps
+    fileExtension: txt
+    fileReaderPath: instruments/readers/g5505_text_reader.py
+    InstrumentDictionaryPath: instruments/dictionaries/smps.yaml
+
+  - instrumentFolderName: gas
+    fileExtension: txt
+    fileReaderPath: instruments/readers/g5505_text_reader.py
+    InstrumentDictionaryPath: instruments/dictionaries/gas.yaml
+
+  - instrumentFolderName: ACSM_TOFWARE
+    fileExtension: txt
+    fileReaderPath: instruments/readers/acsm_tofware_reader.py
+    InstrumentDictionaryPath: instruments/dictionaries/ACSM_TOFWARE.yaml
+
+  - instrumentFolderName: ACSM_TOFWARE
+    fileExtension: csv
+    fileReaderPath: instruments/readers/acsm_tofware_reader.py
+    InstrumentDictionaryPath: instruments/dictionaries/ACSM_TOFWARE.yaml
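
Each entry above maps an instrument folder and a file extension to a reader script and an optional instrument dictionary. A minimal sketch of how such an entry might be resolved, assuming PyYAML; select_reader_entry is an illustrative helper, not part of the repository:

    import yaml

    def select_reader_entry(registry_path, inst_folder, file_ext):
        # Return the first entry matching both the instrument folder and the
        # file extension; fall back to the 'default' entry otherwise.
        with open(registry_path) as f:
            entries = yaml.safe_load(f)['instruments']
        for entry in entries:
            if (entry['instrumentFolderName'] == inst_folder
                    and entry['fileExtension'] == file_ext):
                return entry
        return next(e for e in entries if e['instrumentFolderName'] == 'default')

    entry = select_reader_entry('instruments/registry.yaml', 'ICAD', 'dat')
    print(entry['fileReaderPath'])            # instruments/readers/g5505_text_reader.py
    print(entry['InstrumentDictionaryPath'])  # instruments/dictionaries/ICAD.yaml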
-- 
GitLab


From 68344964acc38126465dc4e2976c0da89c9cf1ca Mon Sep 17 00:00:00 2001
From: Florez Ospina Juan Felipe <juan.florez-ospina@psi.ch>
Date: Mon, 24 Feb 2025 18:48:03 +0100
Subject: [PATCH 8/9] Implemented create_hdf5_file_from_filesystem_path_new()
 using the new instrument readers' CLI interface and subprocesses. This
 facilitates extension of file-reading capabilities by collaborators without
 requiring changes to filereader_registry.py; only additions to instrument
 folders and registry.yaml are needed.
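
The run_reader dispatch used below is not itself part of this patch; a minimal sketch of
the contract it appears to fulfil, assuming the reader script is resolved from
registry.yaml (the hard-coded path here is a placeholder; see the lookup sketch after
registry.yaml above):

    import subprocess
    import sys

    def run_reader(dst_file_path, src_file_path, dst_group_name):
        # Resolve the reader script for this file (placeholder below), then
        # invoke it through the shared CLI:
        #   python <reader.py> <dst_file_path> <src_file_path> <dst_group_name>
        reader_path = 'instruments/readers/g5505_text_reader.py'  # placeholder
        subprocess.run(
            [sys.executable, reader_path, dst_file_path, src_file_path, dst_group_name],
            check=True,
        )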

---
 src/hdf5_ops.py    |  76 +++++++++++-
 src/hdf5_writer.py | 285 ++++++++++++++++++++++++++++++++-------------
 2 files changed, 275 insertions(+), 86 deletions(-)

diff --git a/src/hdf5_ops.py b/src/hdf5_ops.py
index 9b8575d..4c4bc92 100644
--- a/src/hdf5_ops.py
+++ b/src/hdf5_ops.py
@@ -26,12 +26,12 @@ import yaml
 import json
 import copy
 
-try:
-    from dima.utils import g5505_utils as utils
-    from dima.src import hdf5_writer as hdf5_lib
-except ModuleNotFoundError:
-    import utils.g5505_utils as utils
-    import src.hdf5_writer as hdf5_lib
+#try:
+#    from dima.utils import g5505_utils as utils
+#    from dima.src import hdf5_writer as hdf5_lib
+#except ModuleNotFoundError:
+import utils.g5505_utils as utils
+import src.hdf5_writer as hdf5_lib
 
 class HDF5DataOpsManager():
 
@@ -706,3 +706,67 @@ if __name__ == "__main__":
 
     #run(sys.argv[2])
 
+
+def save_file_dict_to_hdf5(h5file, group_name, file_dict):
+    """
+    Transfers data from a file_dict to an HDF5 file.
+
+    Parameters
+    ----------
+    h5file : h5py.File
+        HDF5 file object where the data will be written.
+    group_name : str
+        Name of the HDF5 group where data will be stored.
+    file_dict : dict
+        Dictionary containing file data to be transferred. Required structure:
+        {
+            'name': str,
+            'attributes_dict': dict,
+            'datasets': [
+                {
+                    'name': str,
+                    'data': array-like,
+                    'shape': tuple,
+                    'attributes': dict (optional)
+                },
+                ...
+            ]
+        }
+
+    Returns
+    -------
+    int or None
+        0 on success, -1 on failure (the error is logged); None if file_dict is
+        empty, in which case nothing is written.
+    """
+
+    if not file_dict:
+        return
+
+    try:
+        # Create the group and add its attributes
+        filename = file_dict['name']
+        group = h5file[group_name].create_group(name=filename)
+        # Add group attributes                                
+        group.attrs.update(file_dict['attributes_dict'])
+        
+        # Add datasets to the just created group
+        for dataset in file_dict['datasets']:
+            dataset_obj = group.create_dataset(
+                name=dataset['name'], 
+                data=dataset['data'],
+                shape=dataset['shape']
+            )
+            
+            # Add dataset's attributes                                
+            attributes = dataset.get('attributes', {})
+            dataset_obj.attrs.update(attributes)
+        group.attrs['last_update_date'] = utils.created_at().encode('utf-8')
+
+        stdout = f'Completed transfer for /{group_name}/{filename}'
+        print(stdout)
+
+    except Exception as inst:         
+        logging.error('Failed to transfer data into HDF5: %s', inst)
+        return -1
+
+    return 0
+
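The file_dict structure documented above can be exercised directly. A minimal,
self-contained usage sketch (file, group, and dataset names are placeholders):

    import h5py
    import numpy as np

    from src.hdf5_ops import save_file_dict_to_hdf5

    file_dict = {
        'name': 'example.dat',
        'attributes_dict': {'instrument': 'ICAD'},
        'datasets': [
            {'name': 'no2_concentration_ppb',
             'data': np.arange(5, dtype=float),
             'shape': (5,),
             'attributes': {'units': 'ppb'}},
        ],
    }

    with h5py.File('example.h5', 'w', track_order=True) as h5file:
        h5file.create_group('ICAD')  # the parent group must already exist
        save_file_dict_to_hdf5(h5file, 'ICAD', file_dict)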
diff --git a/src/hdf5_writer.py b/src/hdf5_writer.py
index c8f1475..3113bea 100644
--- a/src/hdf5_writer.py
+++ b/src/hdf5_writer.py
@@ -8,93 +8,38 @@ import numpy as np
 import h5py
 import logging
 
-try:
-    from dima.utils import g5505_utils as utils
-    from dima.instruments.readers import filereader_registry as filereader_registry
-except ModuleNotFoundError:
-    import utils.g5505_utils as utils
-    import instruments.readers.filereader_registry as filereader_registry
+#try:
+#    from dima.utils import g5505_utils as utils
+#    from dima.src import hdf5_ops
+#    from dima.instruments import filereader_registry as filereader_registry
+#except ModuleNotFoundError:
+import utils.g5505_utils as utils
+import src.hdf5_ops as hdf5_ops
+import instruments.filereader_registry as filereader_registry
 
  
    
-def __transfer_file_dict_to_hdf5(h5file, group_name, file_dict):
-    """
-    Transfers data from a file_dict to an HDF5 file.
-
-    Parameters
-    ----------
-    h5file : h5py.File
-        HDF5 file object where the data will be written.
-    group_name : str
-        Name of the HDF5 group where data will be stored.
-    file_dict : dict
-        Dictionary containing file data to be transferred. Required structure:
-        {
-            'name': str,
-            'attributes_dict': dict,
-            'datasets': [
-                {
-                    'name': str,
-                    'data': array-like,
-                    'shape': tuple,
-                    'attributes': dict (optional)
-                },
-                ...
-            ]
-        }
-
-    Returns
-    -------
-    None
-    """
-
-    if not file_dict:
-        return
 
-    try:
-        # Create group and add their attributes
-        filename = file_dict['name']
-        group = h5file[group_name].create_group(name=filename)
-        # Add group attributes                                
-        group.attrs.update(file_dict['attributes_dict'])
-        
-        # Add datasets to the just created group
-        for dataset in file_dict['datasets']:
-            dataset_obj = group.create_dataset(
-                name=dataset['name'], 
-                data=dataset['data'],
-                shape=dataset['shape']
-            )
-            
-            # Add dataset's attributes                                
-            attributes = dataset.get('attributes', {})
-            dataset_obj.attrs.update(attributes)
-        group.attrs['last_update_date'] = utils.created_at().encode('utf-8')
 
-        stdout = f'Completed transfer for /{group_name}/{filename}'
-
-    except Exception as inst: 
-        stdout = inst
-        logging.error('Failed to transfer data into HDF5: %s', inst)
-
-    return stdout
+def __copy_file_in_group(path_to_output_file, source_file_path, dest_group_name, work_with_copy : bool = True):
 
-def __copy_file_in_group(source_file_path, dest_file_obj : h5py.File, dest_group_name, work_with_copy : bool = True):
     # Create copy of original file to avoid possible file corruption and work with it.
+    with h5py.File(path_to_output_file, mode='r+', track_order=True) as dest_file_obj:
 
-    if work_with_copy:
-        tmp_file_path = utils.make_file_copy(source_file_path)
-    else:
-        tmp_file_path = source_file_path
+        if work_with_copy:
+            tmp_file_path = utils.make_file_copy(source_file_path)
+        else:
+            tmp_file_path = source_file_path
 
-    # Open backup h5 file and copy complet filesystem directory onto a group in h5file
-    with h5py.File(tmp_file_path,'r') as src_file:
-        dest_file_obj.copy(source= src_file['/'], dest= dest_group_name)
+        # Open the backup h5 file and copy the complete filesystem directory onto a group in h5file
+        with h5py.File(tmp_file_path,'r') as src_file:
+            dest_file_obj.copy(source= src_file['/'], dest= dest_group_name)
 
-    if 'tmp_files' in tmp_file_path:
-        os.remove(tmp_file_path)
+        if 'tmp_files' in tmp_file_path:
+            os.remove(tmp_file_path)
 
-    stdout = f'Completed transfer for /{dest_group_name}'
+        stdout = f'Completed transfer for /{dest_group_name}'
+    
     return stdout
 
 def create_hdf5_file_from_filesystem_path(path_to_input_directory: str, 
@@ -220,16 +165,16 @@ def create_hdf5_file_from_filesystem_path(path_to_input_directory: str,
 
                     # hdf5 path to filename group 
                     dest_group_name = f'{group_name}/{filename}'
+                    source_file_path = os.path.join(dirpath,filename)
 
                     if not 'h5' in filename:
                         #file_dict = config_file.select_file_readers(group_id)[file_ext](os.path.join(dirpath,filename))
                         #file_dict = ext_to_reader_dict[file_ext](os.path.join(dirpath,filename))
-                        file_dict = filereader_registry.select_file_reader(dest_group_name)(os.path.join(dirpath,filename))
+                        file_dict = filereader_registry.select_file_reader(dest_group_name)(source_file_path)
 
-                        stdout = __transfer_file_dict_to_hdf5(h5file, group_name, file_dict)
+                        stdout = hdf5_ops.save_file_dict_to_hdf5(h5file, group_name, file_dict)
                         
                     else:
-                        source_file_path = os.path.join(dirpath,filename)
                         dest_file_obj = h5file
                         #group_name +'/'+filename
                         #ext_to_reader_dict[file_ext](source_file_path, dest_file_obj, dest_group_name)
@@ -258,6 +203,186 @@ def create_hdf5_file_from_filesystem_path(path_to_input_directory: str,
 
     return path_to_output_file #, output_yml_filename_path
 
+def create_hdf5_file_from_filesystem_path_new(path_to_input_directory: str, 
+                                          path_to_filenames_dict: dict = None,
+                                          select_dir_keywords : list = [],
+                                          root_metadata_dict : dict = {}, mode = 'w'):
+
+    """
+    Creates an .h5 file with name "output_filename" that preserves the directory tree (or folder structure)
+    of a given filesystem path.
+
+    The data integration capabilities are limited by our file reader, which can only access data from a list of
+    admissible file formats. These, however, can be extended. Directories are groups in the resulting HDF5 file.
+    Files are formatted as composite objects consisting of a group, file, and attributes.
+
+    Parameters
+    ----------
+    output_filename : str
+        Name of the output HDF5 file.
+    path_to_input_directory : str
+        Path to root directory, specified with forward slashes, e.g., path/to/root.
+
+    path_to_filenames_dict : dict, optional
+        A pre-processed dictionary where keys are directory paths on the input directory's tree and values are lists of files.
+        If provided, 'input_file_system_path' is ignored.
+
+    select_dir_keywords : list
+        List of string elements to consider or select only directory paths that contain
+                                a word in 'select_dir_keywords'. When empty, all directory paths are considered
+                                to be included in the HDF5 file group hierarchy.
+    root_metadata_dict : dict
+        Metadata to include at the root level of the HDF5 file.
+
+    mode : str
+        'w' create File, truncate if it exists, or 'r+' read/write, File must exists. By default, mode = "w".
+
+    Returns
+    -------
+    output_filename : str
+        Path to the created HDF5 file.
+    """
+
+
+    if mode not in ['w', 'r+']:
+        raise ValueError('Parameter mode must take values in ["w","r+"]')
+
+    if '/' not in path_to_input_directory:
+        raise ValueError('path_to_input_directory needs to be specified using forward slashes "/".')
+
+    #path_to_output_directory = os.path.join(path_to_input_directory,'..')
+    path_to_input_directory = os.path.normpath(path_to_input_directory).rstrip(os.sep)    
+
+    
+    for i, keyword in enumerate(select_dir_keywords):
+        select_dir_keywords[i] = keyword.replace('/',os.sep)          
+
+    if not path_to_filenames_dict:
+        # With dry_run=True, this returns the path-to-files dictionary of the output directory without making an actual copy of the input directory.
+        # Therefore, there won't be a copying conflict when the input and output directories are the same.
+        path_to_filenames_dict = utils.copy_directory_with_contraints(input_dir_path=path_to_input_directory, 
+                                                                      output_dir_path=path_to_input_directory,
+                                                                      dry_run=True)
+    # Use the input directory itself as the root directory (dry_run made no copy)
+    root_dir = path_to_input_directory
+    path_to_output_file = path_to_input_directory.rstrip(os.path.sep) + '.h5'
+
+    start_message = f'\n[Start] Data integration:\nSource: {path_to_input_directory}\nDestination: {path_to_output_file}\n'
+    
+    print(start_message)
+    logging.info(start_message)
+
+    # Check if the .h5 file already exists
+    if os.path.exists(path_to_output_file) and mode in ['w']:
+        message = (
+            f"[Notice] The file '{path_to_output_file}' already exists and will not be overwritten.\n"
+            "If you wish to replace it, please delete the existing file first and rerun the program."
+        )
+        print(message)
+        logging.error(message)
+    else:
+        with h5py.File(path_to_output_file, mode=mode, track_order=True) as h5file:
+            print(f'Created file {path_to_output_file}')
+
+        number_of_dirs = len(path_to_filenames_dict.keys())
+        dir_number = 1
+        for dirpath, filtered_filenames_list in path_to_filenames_dict.items():            
+        
+            # Check if filtered_filenames_list is nonempty. TODO: This is perhaps redundant by design of path_to_filenames_dict. 
+            if not filtered_filenames_list:
+                continue
+
+            group_name = dirpath.replace(os.sep,'/')
+            group_name = group_name.replace(root_dir.replace(os.sep,'/') + '/', '/')
+
+            # Flatten group name to two levels
+            if select_dir_keywords:
+                offset = sum([len(i.split(os.sep)) if i in dirpath else 0 for i in select_dir_keywords])
+            else:
+                offset = 2
+            tmp_list = group_name.split('/')
+            if len(tmp_list) > offset+1:
+                group_name = '/'.join(tmp_list[:offset+1])
+
+        #    try:
+        #        # Create group called "group_name". Hierarchy of nested groups can be implicitly defined by the forward slashes
+        #        if not group_name in h5file.keys():                    
+        #            h5file.create_group(group_name)
+        #            h5file[group_name].attrs['creation_date'] = utils.created_at().encode('utf-8')
+        #            #h5file[group_name].attrs.create(name='filtered_file_list',data=convert_string_to_bytes(filtered_filename_list))
+        #            #h5file[group_name].attrs.create(name='file_list',data=convert_string_to_bytes(filenames_list))
+        #        #else:                           
+        #            #print(group_name,' was already created.') 
+        #            instFoldermsgStart = f'Starting data transfer from instFolder: {group_name}'
+        #            print(instFoldermsgStart)
+
+        #    except Exception as inst: 
+        #        stdout = inst
+        #        logging.error('Failed to create group %s into HDF5: %s', group_name, inst)
+
+            for filenumber, filename in enumerate(filtered_filenames_list):
+                
+                #file_ext = os.path.splitext(filename)[1]
+                #try: 
+
+                # hdf5 path to filename group 
+                dest_group_name = f'{group_name}/{filename}'
+                source_file_path = os.path.join(dirpath,filename)
+
+                if 'h5' not in filename:
+                    #file_dict = config_file.select_file_readers(group_id)[file_ext](os.path.join(dirpath,filename))
+                    #file_dict = ext_to_reader_dict[file_ext](os.path.join(dirpath,filename))
+
+                    # TODO: Run save_file_dict_to_hdf5 from reader.py using command line interface 
+                    #file_dict = filereader_registry.select_file_reader(dest_group_name)(os.path.join(dirpath,filename))
+
+                    #stdout = hdf5_ops.save_file_dict_to_hdf5(h5file, group_name, file_dict)
+
+                    filereader_registry.run_reader(path_to_output_file, source_file_path, dest_group_name)
+                    
+                else:
+                
+                    #try:
+                    #    # Create group if it does not exist
+                    #    if dest_group_name not in dest_file_obj:
+                    #        dest_file_obj.create_group(dest_group_name)
+                    #        dest_file_obj[dest_group_name].attrs['creation_date'] = utils.created_at().encode('utf-8')
+                    #        print(f'Created new group: {dest_group_name}')
+                    #    else:
+                    #        print(f'Group {dest_group_name} already exists. Proceeding with data transfer...')
+
+                    #except Exception as inst:
+                    #    logging.error('Failed to create group %s in HDF5: %s', dest_group_name, inst)
+        
+                    
+                    #group_name +'/'+filename
+                    #ext_to_reader_dict[file_ext](source_file_path, dest_file_obj, dest_group_name)
+                    #g5505f_reader.select_file_reader(dest_group_name)(source_file_path, dest_file_obj, dest_group_name)
+                    stdout = __copy_file_in_group(path_to_output_file, source_file_path, dest_group_name, False)
+
+            # Update the progress bar and print/log the end message
+            instFoldermsgEnd = f'\nCompleted data transfer for instFolder: {group_name}\n'
+            utils.progressBar(dir_number, number_of_dirs, instFoldermsgEnd)
+            logging.info(instFoldermsgEnd)
+            dir_number = dir_number + 1
+
+        print('[End] Data integration')
+        logging.info('[End] Data integration')
+    
+        if root_metadata_dict:
+            with h5py.File(path_to_output_file, mode='r+', track_order=True) as h5file:
+                for key, value in root_metadata_dict.items():
+                    #if key in h5file.attrs:
+                    #    del h5file.attrs[key]
+                    h5file.attrs.create(key, value)
+            #annotate_root_dir(output_filename,root_metadata_dict)  
+
+    
+    #output_yml_filename_path = hdf5_vis.take_yml_snapshot_of_hdf5_file(output_filename)
+
+    return path_to_output_file #, output_yml_filename_path
+
 def create_hdf5_file_from_dataframe(ofilename, input_data, group_by_funcs: list, approach: str = None, extract_attrs_func=None):
     """
     Creates an HDF5 file with hierarchical groups based on the specified grouping functions or columns.
@@ -400,6 +525,6 @@ def save_processed_dataframe_to_hdf5(df, annotator, output_filename): # src_hdf5
     with h5py.File(output_filename, mode) as h5file:
         # Add project level attributes at the root/top level
         h5file.attrs.update(project_level_attributes)
-        __transfer_file_dict_to_hdf5(h5file, '/', file_dict)
+        hdf5_ops.save_file_dict_to_hdf5(h5file, '/', file_dict)
 
 #if __name__ == '__main__':
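
A minimal usage sketch for the new entry point; the input directory below is a
placeholder, and the output file is derived from it by appending '.h5':

    import src.hdf5_writer as hdf5_lib

    path_to_output_file = hdf5_lib.create_hdf5_file_from_filesystem_path_new(
        'data/campaign_2025',                    # placeholder input directory
        select_dir_keywords=['ICAD', 'Lopap'],   # keep only these instFolders
        root_metadata_dict={'project': 'demo'},  # written as root-level attributes
    )
    print(path_to_output_file)  # e.g., data/campaign_2025.h5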
-- 
GitLab


From 4f438f86fe778fbcb02aaa063384ffb56e9b3630 Mon Sep 17 00:00:00 2001
From: Florez Ospina Juan Felipe <juan.florez-ospina@psi.ch>
Date: Tue, 25 Feb 2025 09:21:52 +0100
Subject: [PATCH 9/9] Update import statements in
 pipelines/data_integration.py. from instruments.readers import ... -> from
 instruments import ...

---
 pipelines/data_integration.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pipelines/data_integration.py b/pipelines/data_integration.py
index 5bb5bb7..4cb5233 100644
--- a/pipelines/data_integration.py
+++ b/pipelines/data_integration.py
@@ -30,7 +30,7 @@ except ModuleNotFoundError:
     print(':)')
     import src.hdf5_writer as hdf5_lib
     import utils.g5505_utils as utils
-    from instruments.readers import filereader_registry
+    from instruments import filereader_registry
 
 allowed_file_extensions = filereader_registry.file_extensions
 
-- 
GitLab