' + str(os.path.basename(filename)) + " "
+ ''
+ + str(os.path.basename(filename))
+ + " "
"has been loaded !"
)
)
@@ -125,8 +127,12 @@ def process_data(self):
compact_lambda_2d[row_index, column_index] = self.lambda_hkl[_key]
lambda_2d[y0:y1, x0:x1] = self.lambda_hkl[_key]
- strain_mapping_2d[y0:y1, x0:x1] = self.strain_mapping[_key]["val"] # to go to microstrain
- compact_strain_mapping[row_index, column_index] = self.strain_mapping[_key]["val"]
+ strain_mapping_2d[y0:y1, x0:x1] = self.strain_mapping[_key][
+ "val"
+ ] # to go to microstrain
+ compact_strain_mapping[row_index, column_index] = self.strain_mapping[_key][
+ "val"
+ ]
d_2d[y0:y1, x0:x1] = self.d[_key]
@@ -208,7 +214,9 @@ def display_lambda(self):
fig = plt.figure(figsize=(4, 4), num="\u03bb (\u212b)")
self.ax0 = fig.add_subplot(111)
- self.ax0.imshow(self.integrated_normalized_radiographs, vmin=0, vmax=1, cmap="gray")
+ self.ax0.imshow(
+ self.integrated_normalized_radiographs, vmin=0, vmax=1, cmap="gray"
+ )
# self.im0 = self.ax0.imshow(self.compact_lambda_2d, cmap='jet', alpha=0.5)
self.im0 = self.ax0.imshow(self.lambda_hkl_2d, cmap="jet", alpha=0.5)
@@ -232,17 +240,29 @@ def plot_lambda(min_value, max_value, colormap, interpolation_method):
# data = self.compact_lambda_2d
self.ax0.cla()
- self.ax0.imshow(self.integrated_normalized_radiographs, vmin=0, vmax=1, cmap="gray")
+ self.ax0.imshow(
+ self.integrated_normalized_radiographs, vmin=0, vmax=1, cmap="gray"
+ )
self.im0 = self.ax0.imshow(
- data, interpolation=interpolation_method, cmap=colormap, vmin=min_value, vmax=max_value
+ data,
+ interpolation=interpolation_method,
+ cmap=colormap,
+ vmin=min_value,
+ vmax=max_value,
)
self.cb0 = plt.colorbar(self.im0, ax=self.ax0)
v = interactive(
plot_lambda,
- min_value=widgets.FloatSlider(min=minimum, max=maximum, value=minimum, step=step),
- max_value=widgets.FloatSlider(min=minimum, max=maximum, value=maximum, step=step),
- colormap=widgets.Dropdown(options=CMAPS, value=DEFAULT_CMAPS, layout=widgets.Layout(width="300px")),
+ min_value=widgets.FloatSlider(
+ min=minimum, max=maximum, value=minimum, step=step
+ ),
+ max_value=widgets.FloatSlider(
+ min=minimum, max=maximum, value=maximum, step=step
+ ),
+ colormap=widgets.Dropdown(
+ options=CMAPS, value=DEFAULT_CMAPS, layout=widgets.Layout(width="300px")
+ ),
interpolation_method=widgets.Dropdown(
options=INTERPOLATION_METHODS,
value=DEFAULT_INTERPOLATION,
@@ -256,7 +276,9 @@ def display_d(self):
fig = plt.figure(figsize=(4, 4), num="d")
self.ax1 = fig.add_subplot(111)
- self.ax1.imshow(self.integrated_normalized_radiographs, vmin=0, vmax=1, cmap="gray")
+ self.ax1.imshow(
+ self.integrated_normalized_radiographs, vmin=0, vmax=1, cmap="gray"
+ )
# self.im0 = self.ax0.imshow(self.compact_lambda_2d, cmap='jet', alpha=0.5)
self.im1 = self.ax1.imshow(self.d_2d, cmap="jet", alpha=0.5)
@@ -278,17 +300,29 @@ def plot_d(min_value, max_value, colormap, interpolation_method):
data = self.d_2d
self.ax1.cla()
- self.ax1.imshow(self.integrated_normalized_radiographs, vmin=0, vmax=1, cmap="gray")
+ self.ax1.imshow(
+ self.integrated_normalized_radiographs, vmin=0, vmax=1, cmap="gray"
+ )
self.im1 = self.ax1.imshow(
- data, interpolation=interpolation_method, cmap=colormap, vmin=min_value, vmax=max_value
+ data,
+ interpolation=interpolation_method,
+ cmap=colormap,
+ vmin=min_value,
+ vmax=max_value,
)
self.cb1 = plt.colorbar(self.im1, ax=self.ax1)
v = interactive(
plot_d,
- min_value=widgets.FloatSlider(min=minimum, max=maximum, value=minimum, step=step),
- max_value=widgets.FloatSlider(min=minimum, max=maximum, value=maximum, step=step),
- colormap=widgets.Dropdown(options=CMAPS, value=DEFAULT_CMAPS, layout=widgets.Layout(width="300px")),
+ min_value=widgets.FloatSlider(
+ min=minimum, max=maximum, value=minimum, step=step
+ ),
+ max_value=widgets.FloatSlider(
+ min=minimum, max=maximum, value=maximum, step=step
+ ),
+ colormap=widgets.Dropdown(
+ options=CMAPS, value=DEFAULT_CMAPS, layout=widgets.Layout(width="300px")
+ ),
interpolation_method=widgets.Dropdown(
options=INTERPOLATION_METHODS,
value=DEFAULT_INTERPOLATION,
@@ -302,7 +336,9 @@ def display_microstrain(self):
fig = plt.figure(figsize=(4, 4), num="microstrain")
self.ax2 = fig.add_subplot(111)
- self.ax2.imshow(self.integrated_normalized_radiographs, vmin=0, vmax=1, cmap="gray")
+ self.ax2.imshow(
+ self.integrated_normalized_radiographs, vmin=0, vmax=1, cmap="gray"
+ )
self.im2 = self.ax2.imshow(self.strain_2d, cmap="jet", alpha=0.5)
self.cb2 = plt.colorbar(self.im2, ax=self.ax2)
@@ -321,17 +357,29 @@ def plot_strain(min_value, max_value, colormap, interpolation_method):
data = self.strain_2d
self.ax2.cla()
- self.ax2.imshow(self.integrated_normalized_radiographs, vmin=0, vmax=1, cmap="gray")
+ self.ax2.imshow(
+ self.integrated_normalized_radiographs, vmin=0, vmax=1, cmap="gray"
+ )
self.im2 = self.ax2.imshow(
- data, interpolation=interpolation_method, cmap=colormap, vmin=min_value, vmax=max_value
+ data,
+ interpolation=interpolation_method,
+ cmap=colormap,
+ vmin=min_value,
+ vmax=max_value,
)
self.cb2 = plt.colorbar(self.im2, ax=self.ax2)
v = interactive(
plot_strain,
- min_value=widgets.FloatSlider(min=minimum, max=maximum, value=minimum, step=step),
- max_value=widgets.FloatSlider(min=minimum, max=maximum, value=maximum, step=step),
- colormap=widgets.Dropdown(options=CMAPS, value=DEFAULT_CMAPS, layout=widgets.Layout(width="300px")),
+ min_value=widgets.FloatSlider(
+ min=minimum, max=maximum, value=minimum, step=step
+ ),
+ max_value=widgets.FloatSlider(
+ min=minimum, max=maximum, value=maximum, step=step
+ ),
+ colormap=widgets.Dropdown(
+ options=CMAPS, value=DEFAULT_CMAPS, layout=widgets.Layout(width="300px")
+ ),
interpolation_method=widgets.Dropdown(
options=INTERPOLATION_METHODS,
value=DEFAULT_INTERPOLATION,
@@ -378,7 +426,9 @@ def display_microstrain_with_interpolation(self):
scale_factor = self.bin_size
out_dimensions = (grid.shape[0] * scale_factor, grid.shape[1] * scale_factor)
- fig1, axs = plt.subplots(nrows=4, num="microstrain interpolated", figsize=[5, 20])
+ fig1, axs = plt.subplots(
+ nrows=4, num="microstrain interpolated", figsize=[5, 20]
+ )
transform = Affine2D().scale(scale_factor, scale_factor)
# Have to get an image to be able to resample
@@ -400,9 +450,13 @@ def display_microstrain_with_interpolation(self):
[y0, x0] = self.top_left_corner_of_roi
inter_height, inter_width = np.shape(interpolated)
- interpolated_strain_mapping_2d[y0 : y0 + inter_height, x0 : x0 + inter_width] = interpolated
+ interpolated_strain_mapping_2d[
+ y0 : y0 + inter_height, x0 : x0 + inter_width
+ ] = interpolated
- axs[3].imshow(self.integrated_normalized_radiographs, vmin=0, vmax=1, cmap="gray")
+ axs[3].imshow(
+ self.integrated_normalized_radiographs, vmin=0, vmax=1, cmap="gray"
+ )
im = axs[3].imshow(interpolated_strain_mapping_2d, interpolation="gaussian")
self.cb = plt.colorbar(im, ax=axs[3])
@@ -417,26 +471,34 @@ def plot_interpolated(min_value, max_value, colormap, interpolation_method):
axs[0].imshow(grid, cmap=colormap)
axs[1].cla()
- img1 = axs[1].imshow(grid, interpolation=interpolation_method, cmap=colormap)
+ img1 = axs[1].imshow(
+ grid, interpolation=interpolation_method, cmap=colormap
+ )
interpolated = _resample(img1, grid, out_dimensions, transform=transform)
axs[2].cla()
axs[2].imshow(interpolated, vmin=min_value, vmax=max_value, cmap=colormap)
# with overlap
- interpolated_strain_mapping_2d = np.empty((self.image_height, self.image_width))
+ interpolated_strain_mapping_2d = np.empty(
+ (self.image_height, self.image_width)
+ )
interpolated_strain_mapping_2d[:] = np.nan
[y0, x0] = self.top_left_corner_of_roi
inter_height, inter_width = np.shape(interpolated)
- interpolated_strain_mapping_2d[y0 : y0 + inter_height, x0 : x0 + inter_width] = interpolated
+ interpolated_strain_mapping_2d[
+ y0 : y0 + inter_height, x0 : x0 + inter_width
+ ] = interpolated
if self.cb:
self.cb.remove()
axs[3].cla()
- axs[3].imshow(self.integrated_normalized_radiographs, vmin=0, vmax=1, cmap="gray")
+ axs[3].imshow(
+ self.integrated_normalized_radiographs, vmin=0, vmax=1, cmap="gray"
+ )
im = axs[3].imshow(
interpolated_strain_mapping_2d * 1e6,
interpolation=interpolation_method,
@@ -448,9 +510,23 @@ def plot_interpolated(min_value, max_value, colormap, interpolation_method):
v = interactive(
plot_interpolated,
- min_value=widgets.FloatSlider(min=minimum, max=maximum, value=minimum, step=step, description="min (x1e6)"),
- max_value=widgets.FloatSlider(min=minimum, max=maximum, value=maximum, step=step, description="min (x1e6)"),
- colormap=widgets.Dropdown(options=CMAPS, value=DEFAULT_CMAPS, layout=widgets.Layout(width="300px")),
+ min_value=widgets.FloatSlider(
+ min=minimum,
+ max=maximum,
+ value=minimum,
+ step=step,
+ description="min (x1e6)",
+ ),
+ max_value=widgets.FloatSlider(
+ min=minimum,
+ max=maximum,
+ value=maximum,
+ step=step,
+                description="max (x1e6)",
+ ),
+ colormap=widgets.Dropdown(
+ options=CMAPS, value=DEFAULT_CMAPS, layout=widgets.Layout(width="300px")
+ ),
interpolation_method=widgets.Dropdown(
options=INTERPOLATION_METHODS,
value=DEFAULT_INTERPOLATION,
diff --git a/notebooks/__code/icons/icons_rc.py b/notebooks/__code/icons/icons_rc.py
index 5283caac..6671cf60 100755
--- a/notebooks/__code/icons/icons_rc.py
+++ b/notebooks/__code/icons/icons_rc.py
@@ -133,11 +133,15 @@
def qInitResources():
- QtCore.qRegisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data)
+ QtCore.qRegisterResourceData(
+ rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data
+ )
def qCleanupResources():
- QtCore.qUnregisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data)
+ QtCore.qUnregisterResourceData(
+ rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data
+ )
qInitResources()
diff --git a/notebooks/__code/image_profile_interface_template/dual_energy.py b/notebooks/__code/image_profile_interface_template/dual_energy.py
index ca1294fe..b013d75c 100755
--- a/notebooks/__code/image_profile_interface_template/dual_energy.py
+++ b/notebooks/__code/image_profile_interface_template/dual_energy.py
@@ -19,7 +19,9 @@
from __code.bragg_edge.kropff import Kropff
from __code.bragg_edge.kropff_fitting_job_handler import KropffFittingJobHandler
from __code.bragg_edge.march_dollase import MarchDollase
-from __code.bragg_edge.march_dollase_fitting_job_handler import MarchDollaseFittingJobHandler
+from __code.bragg_edge.march_dollase_fitting_job_handler import (
+ MarchDollaseFittingJobHandler,
+)
from __code.bragg_edge.peak_fitting_initialization import PeakFittingInitialization
from __code.table_handler import TableHandler
from __code.utilities import find_nearest_index
@@ -35,7 +37,15 @@ def load_ob(self, folder_selected):
class Interface(QMainWindow):
fitting_parameters_init = {
- "kropff": {"a0": 1, "b0": 1, "ahkl": 1, "bhkl": 1, "ldahkl": 1e-8, "tau": 1, "sigma": [1e-7, 1e-6, 1e-5]}
+ "kropff": {
+ "a0": 1,
+ "b0": 1,
+ "ahkl": 1,
+ "bhkl": 1,
+ "ldahkl": 1e-8,
+ "tau": 1,
+ "sigma": [1e-7, 1e-6, 1e-5],
+ }
}
bragg_edge_range = [5, 20]
@@ -45,13 +55,17 @@ class Interface(QMainWindow):
selection_roi_rgb = (62, 13, 244)
roi_settings = {
- "color": QtGui.QColor(selection_roi_rgb[0], selection_roi_rgb[1], selection_roi_rgb[2]),
+ "color": QtGui.QColor(
+ selection_roi_rgb[0], selection_roi_rgb[1], selection_roi_rgb[2]
+ ),
"width": 0.01,
"position": [10, 10],
}
shrinking_roi_rgb = (13, 214, 244)
shrinking_roi_settings = {
- "color": QtGui.QColor(shrinking_roi_rgb[0], shrinking_roi_rgb[1], shrinking_roi_rgb[2]),
+ "color": QtGui.QColor(
+ shrinking_roi_rgb[0], shrinking_roi_rgb[1], shrinking_roi_rgb[2]
+ ),
"width": 0.01,
"dashes_pattern": [4, 2],
}
@@ -62,7 +76,11 @@ class Interface(QMainWindow):
previous_roi_selection = {"width": None, "height": None}
image_size = {"width": None, "height": None}
roi_id = None
- xaxis_label = {"index": "File index", "tof": "TOF (\u00b5s)", "lambda": "\u03bb (\u212b)"}
+ xaxis_label = {
+ "index": "File index",
+ "tof": "TOF (\u00b5s)",
+ "lambda": "\u03bb (\u212b)",
+ }
fitting_rois = {
"kropff": {
"step1": None,
@@ -74,7 +92,11 @@ class Interface(QMainWindow):
is_file_imported = False # True only when the import button has been used
bragg_edge_range_ui = None
- kropff_fitting_range = {"high": [None, None], "low": [None, None], "bragg_peak": [None, None]}
+ kropff_fitting_range = {
+ "high": [None, None],
+ "low": [None, None],
+ "bragg_peak": [None, None],
+ }
fitting_peak_ui = None # vertical line in fitting view (tab 2)
fitting_procedure_started = {"march-dollase": False, "kropff": False}
@@ -130,7 +152,9 @@ def update_time_spectra(self):
distance_source_detector_m = float(self.ui.distance_detector_sample.text())
self.ui.statusbar.showMessage("", 100) # 10s
except ValueError:
- self.ui.statusbar.showMessage("distance source detector input is WRONG", 120000) # 2mn
+ self.ui.statusbar.showMessage(
+ "distance source detector input is WRONG", 120000
+ ) # 2mn
self.ui.statusbar.setStyleSheet("color: red")
return
@@ -138,7 +162,9 @@ def update_time_spectra(self):
detector_offset_micros = float(self.ui.detector_offset.text())
self.ui.statusbar.showMessage("", 100) # 10s
except ValueError:
- self.ui.statusbar.showMessage("detector offset input is WRONG", 120000) # 2mn
+ self.ui.statusbar.showMessage(
+ "detector offset input is WRONG", 120000
+ ) # 2mn
self.ui.statusbar.setStyleSheet("color: red")
return
@@ -158,7 +184,9 @@ def get_live_image(self):
_data = self.o_norm.data["sample"]["data"]
nbr_images = len(_data)
- list_of_indexes_to_keep = random.sample(list(range(nbr_images)), nbr_data_to_use)
+ list_of_indexes_to_keep = random.sample(
+ list(range(nbr_images)), nbr_data_to_use
+ )
final_array = []
for _index in list_of_indexes_to_keep:
@@ -208,7 +236,10 @@ def bragg_edge_range_changed(self):
def reset_profile_of_bin_size_slider(self):
max_value = np.min(
- [int(str(self.ui.profile_of_bin_size_width.text())), int(str(self.ui.profile_of_bin_size_height.text()))]
+ [
+ int(str(self.ui.profile_of_bin_size_width.text())),
+ int(str(self.ui.profile_of_bin_size_height.text())),
+ ]
)
self.ui.profile_of_bin_size_slider.setMaximum(max_value)
self.ui.profile_of_bin_size_slider.setValue(max_value)
@@ -263,7 +294,11 @@ def update_dict_profile_to_fit(self):
profile_to_fit = {
"yaxis": yaxis,
- "xaxis": {"index": index_selected, "tof": tof_selected, "lambda": lambda_selected},
+ "xaxis": {
+ "index": index_selected,
+ "tof": tof_selected,
+ "lambda": lambda_selected,
+ },
}
self.dict_profile_to_fit = profile_to_fit
@@ -273,11 +308,16 @@ def fit_that_selection_pushed_by_program(self, initialize_region=True):
dict_regions = o_get.all_russian_doll_region_full_infos()
o_init = PeakFittingInitialization(parent=self)
- fitting_input_dictionary = o_init.fitting_input_dictionary(nbr_rois=len(dict_regions))
+ fitting_input_dictionary = o_init.fitting_input_dictionary(
+ nbr_rois=len(dict_regions)
+ )
o_init.set_top_keys_values(
- fitting_input_dictionary, {"xaxis": x_axis, "bragg_edge_range": self.bragg_edge_range}
+ fitting_input_dictionary,
+ {"xaxis": x_axis, "bragg_edge_range": self.bragg_edge_range},
+ )
+ self.append_dict_regions_to_fitting_input_dictionary(
+ dict_regions, fitting_input_dictionary
)
- self.append_dict_regions_to_fitting_input_dictionary(dict_regions, fitting_input_dictionary)
# fitting_input_dictionary['xaxis'] = x_axis
# fitting_input_dictionary['bragg_edge_range'] = self.bragg_edge_range
@@ -305,7 +345,9 @@ def fit_that_selection_pushed_by_program(self, initialize_region=True):
self.ui.actionExport.setEnabled(True)
self.select_first_row_of_all_fitting_table()
- def append_dict_regions_to_fitting_input_dictionary(self, dict_regions, fitting_input_dictionary):
+ def append_dict_regions_to_fitting_input_dictionary(
+ self, dict_regions, fitting_input_dictionary
+ ):
for _row in dict_regions.keys():
_entry = dict_regions[_row]
for _key in _entry.keys():
@@ -446,18 +488,27 @@ def update_profile_of_bin_slider_labels(self):
def change_profile_of_bin_slider_signal(self):
self.ui.profile_of_bin_size_slider.valueChanged.disconnect()
- self.ui.profile_of_bin_size_slider.valueChanged.connect(self.profile_of_bin_size_slider_changed_after_import)
+ self.ui.profile_of_bin_size_slider.valueChanged.connect(
+ self.profile_of_bin_size_slider_changed_after_import
+ )
def update_vertical_line_in_profile_plot(self):
o_get = Get(parent=self)
x_axis, x_axis_label = o_get.x_axis()
- bragg_edge_range = [x_axis[self.bragg_edge_range[0]], x_axis[self.bragg_edge_range[1]]]
+ bragg_edge_range = [
+ x_axis[self.bragg_edge_range[0]],
+ x_axis[self.bragg_edge_range[1]],
+ ]
if self.bragg_edge_range_ui:
self.ui.profile.removeItem(self.bragg_edge_range_ui)
self.bragg_edge_range_ui = pg.LinearRegionItem(
- values=bragg_edge_range, orientation=None, brush=None, movable=True, bounds=None
+ values=bragg_edge_range,
+ orientation=None,
+ brush=None,
+ movable=True,
+ bounds=None,
)
self.bragg_edge_range_ui.sigRegionChanged.connect(self.bragg_edge_range_changed)
self.bragg_edge_range_ui.setZValue(-10)
@@ -508,7 +559,12 @@ def export_button_clicked(self):
def roi_radiobuttons_changed(self):
if self.ui.square_roi_radiobutton.isChecked():
slider_visible = True
- new_width = np.min([int(str(self.ui.roi_width.text())), int(str(self.ui.roi_height.text()))])
+ new_width = np.min(
+ [
+ int(str(self.ui.roi_width.text())),
+ int(str(self.ui.roi_height.text())),
+ ]
+ )
mode = "square"
else:
slider_visible = False
@@ -581,30 +637,34 @@ def update_kropff_fit_table_graph(self, fit_region="high"):
:param fit_region: 'high', 'low' or 'bragg_peak'
"""
o_gui = GuiUtility(parent=self)
- fit_parameter_selected = o_gui.get_kropff_fit_parameter_selected(fit_region=fit_region)
+ fit_parameter_selected = o_gui.get_kropff_fit_parameter_selected(
+ fit_region=fit_region
+ )
parameter_array = []
parameter_error_array = []
fitting_input_dictionary = self.fitting_input_dictionary
for _index in fitting_input_dictionary["rois"].keys():
- _parameter = fitting_input_dictionary["rois"][_index]["fitting"]["kropff"][fit_region][
- fit_parameter_selected
- ]
- _error = fitting_input_dictionary["rois"][_index]["fitting"]["kropff"][fit_region][
- f"{fit_parameter_selected}_error"
- ]
+ _parameter = fitting_input_dictionary["rois"][_index]["fitting"]["kropff"][
+ fit_region
+ ][fit_parameter_selected]
+ _error = fitting_input_dictionary["rois"][_index]["fitting"]["kropff"][
+ fit_region
+ ][f"{fit_parameter_selected}_error"]
parameter_array.append(_parameter)
parameter_error_array.append(_error)
plot_ui = o_gui.get_kropff_fit_graph_ui(fit_region=fit_region)
x_array = np.arange(len(parameter_array))
- cleaned_parameter_array, cleaned_parameter_error_array = exclude_y_value_when_error_is_nan(
- parameter_array, parameter_error_array
+ cleaned_parameter_array, cleaned_parameter_error_array = (
+ exclude_y_value_when_error_is_nan(parameter_array, parameter_error_array)
)
plot_ui.axes.cla()
if fit_region == "bragg_peak":
plot_ui.axes.set_yscale("log")
- plot_ui.axes.errorbar(x_array, cleaned_parameter_array, cleaned_parameter_error_array, marker="s")
+ plot_ui.axes.errorbar(
+ x_array, cleaned_parameter_array, cleaned_parameter_error_array, marker="s"
+ )
plot_ui.axes.set_xlabel("Row # (see Table tab)")
plot_ui.draw()
@@ -640,14 +700,18 @@ def kropff_bragg_peak_right_click(self, position):
def march_dollase_table_state_changed(self, state=None, row=None, column=None):
o_march = MarchDollase(parent=self)
if row == 0:
- _widget = self.ui.march_dollase_user_input_table.cellWidget(row, column).children()[-1]
+ _widget = self.ui.march_dollase_user_input_table.cellWidget(
+ row, column
+ ).children()[-1]
if (column == 1) or (column == 2):
_textedit = _widget
_textedit.setText(o_march.get_initial_parameter_value(column=column))
_textedit.setVisible(not state)
elif column == 0:
_label = _widget
- _label.setText(f"{float(o_march.get_initial_parameter_value(column=column)):0.6f}")
+ _label.setText(
+ f"{float(o_march.get_initial_parameter_value(column=column)):0.6f}"
+ )
_label.setVisible(not state)
else:
_label = _widget
diff --git a/notebooks/__code/image_profile_interface_template/interface_initialization.py b/notebooks/__code/image_profile_interface_template/interface_initialization.py
index 8db79cb3..9e161b55 100755
--- a/notebooks/__code/image_profile_interface_template/interface_initialization.py
+++ b/notebooks/__code/image_profile_interface_template/interface_initialization.py
@@ -104,9 +104,15 @@ def _matplotlib(parent=None, widget=None):
widget.setLayout(layout)
return sc
- self.parent.kropff_high_plot = _matplotlib(parent=self.parent, widget=self.parent.ui.high_widget)
- self.parent.kropff_low_plot = _matplotlib(parent=self.parent, widget=self.parent.ui.low_widget)
- self.parent.kropff_bragg_peak_plot = _matplotlib(parent=self.parent, widget=self.parent.ui.bragg_peak_widget)
+ self.parent.kropff_high_plot = _matplotlib(
+ parent=self.parent, widget=self.parent.ui.high_widget
+ )
+ self.parent.kropff_low_plot = _matplotlib(
+ parent=self.parent, widget=self.parent.ui.low_widget
+ )
+ self.parent.kropff_bragg_peak_plot = _matplotlib(
+ parent=self.parent, widget=self.parent.ui.bragg_peak_widget
+ )
self.parent.march_dollase_plot = _matplotlib(
parent=self.parent, widget=self.parent.ui.march_dollase_graph_widget
@@ -122,7 +128,13 @@ def pyqtgraph_fitting(self):
def kropff_fitting_table(self):
## Kropff
# high lambda
- column_names = ["x\u2080; y\u2080; width; height", "a\u2080", "b\u2080", "a\u2080_error", "b\u2080_error"]
+ column_names = [
+ "x\u2080; y\u2080; width; height",
+ "a\u2080",
+ "b\u2080",
+ "a\u2080_error",
+ "b\u2080_error",
+ ]
column_sizes = [150, 100, 100, 100, 100]
o_high = TableHandler(table_ui=self.parent.ui.high_lda_tableWidget)
for _col_index, _col_name in enumerate(column_names):
@@ -131,7 +143,13 @@ def kropff_fitting_table(self):
o_high.set_column_sizes(column_sizes=column_sizes)
# low lambda
- column_names = ["x\u2080; y\u2080; width; height", "a_hkl", "b_hkl", "a_hkl_error", "b_hkl_error"]
+ column_names = [
+ "x\u2080; y\u2080; width; height",
+ "a_hkl",
+ "b_hkl",
+ "a_hkl_error",
+ "b_hkl_error",
+ ]
column_sizes = [150, 100, 100, 100, 100]
o_low = TableHandler(table_ui=self.parent.ui.low_lda_tableWidget)
for _col_index, _col_name in enumerate(column_names):
@@ -140,7 +158,15 @@ def kropff_fitting_table(self):
o_low.set_column_sizes(column_sizes=column_sizes)
# bragg edge
- column_names = ["x0; y0; width; height", "t_hkl", "tau", "sigma", "t_hkl_error", "tau_error", "sigma_error"]
+ column_names = [
+ "x0; y0; width; height",
+ "t_hkl",
+ "tau",
+ "sigma",
+ "t_hkl_error",
+ "tau_error",
+ "sigma_error",
+ ]
column_sizes = [150, 100, 100, 100, 100, 100, 100]
o_bragg = TableHandler(table_ui=self.parent.ui.bragg_edge_tableWidget)
for _col_index, _col_name in enumerate(column_names):
@@ -149,15 +175,23 @@ def kropff_fitting_table(self):
o_bragg.set_column_sizes(column_sizes=column_sizes)
def march_dollase(self):
- self.parent.march_dollase_history_state_full_reset = copy.deepcopy(self.march_dollase_history_state)
+ self.parent.march_dollase_history_state_full_reset = copy.deepcopy(
+ self.march_dollase_history_state
+ )
# init widgets
_file_path = os.path.dirname(__file__)
- up_arrow_file = os.path.abspath(os.path.join(_file_path, "../static/up_arrow_black.png"))
+ up_arrow_file = os.path.abspath(
+ os.path.join(_file_path, "../static/up_arrow_black.png")
+ )
self.parent.ui.march_dollase_user_input_up.setIcon(QtGui.QIcon(up_arrow_file))
- down_arrow_file = os.path.abspath(os.path.join(_file_path, "../static/down_arrow_black.png"))
- self.parent.ui.march_dollase_user_input_down.setIcon(QtGui.QIcon(down_arrow_file))
+ down_arrow_file = os.path.abspath(
+ os.path.join(_file_path, "../static/down_arrow_black.png")
+ )
+ self.parent.ui.march_dollase_user_input_down.setIcon(
+ QtGui.QIcon(down_arrow_file)
+ )
o_gui = GuiUtility(parent=self.parent)
o_gui.fill_march_dollase_table(
@@ -165,7 +199,9 @@ def march_dollase(self):
initial_parameters=self.parent.march_dollase_fitting_initial_parameters,
)
- self.parent.march_dollase_fitting_history_table = self.march_dollase_history_state
+ self.parent.march_dollase_fitting_history_table = (
+ self.march_dollase_history_state
+ )
self.parent.march_dollase_fitting_history_table_default_new_row = copy.deepcopy(
self.march_dollase_history_state[0]
)
@@ -187,16 +223,36 @@ def march_dollase(self):
"A\u2085_error",
"A\u2086_error",
]
- column_sizes = [150, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100]
+ column_sizes = [
+ 150,
+ 100,
+ 100,
+ 100,
+ 100,
+ 100,
+ 100,
+ 100,
+ 100,
+ 100,
+ 100,
+ 100,
+ 100,
+ 100,
+ 100,
+ ]
o_march = TableHandler(table_ui=self.parent.ui.march_dollase_result_table)
for _col_index, _col_name in enumerate(column_names):
o_march.insert_column(_col_index)
o_march.set_column_names(column_names=column_names)
o_march.set_column_sizes(column_sizes=column_sizes)
- state_advanced_columns = not self.parent.ui.march_dollase_advanced_mode_checkBox.isChecked()
+ state_advanced_columns = (
+ not self.parent.ui.march_dollase_advanced_mode_checkBox.isChecked()
+ )
o_gui.set_columns_hidden(
- table_ui=self.parent.ui.march_dollase_user_input_table, list_of_columns=[5, 6], state=state_advanced_columns
+ table_ui=self.parent.ui.march_dollase_user_input_table,
+ list_of_columns=[5, 6],
+ state=state_advanced_columns,
)
# table
@@ -213,17 +269,29 @@ def labels(self):
self.parent.ui.fitting_lambda_radiobutton.setText("\u03bb (\u212b)")
def text_fields(self):
- self.parent.ui.distance_detector_sample.setText(str(self.distance_detector_sample))
+ self.parent.ui.distance_detector_sample.setText(
+ str(self.distance_detector_sample)
+ )
self.parent.ui.detector_offset.setText(str(self.detector_offset))
- self.parent.ui.kropff_high_lda_a0_init.setText(str(self.parent.fitting_parameters_init["kropff"]["a0"]))
- self.parent.ui.kropff_high_lda_b0_init.setText(str(self.parent.fitting_parameters_init["kropff"]["b0"]))
- self.parent.ui.kropff_low_lda_ahkl_init.setText(str(self.parent.fitting_parameters_init["kropff"]["ahkl"]))
- self.parent.ui.kropff_low_lda_bhkl_init.setText(str(self.parent.fitting_parameters_init["kropff"]["bhkl"]))
+ self.parent.ui.kropff_high_lda_a0_init.setText(
+ str(self.parent.fitting_parameters_init["kropff"]["a0"])
+ )
+ self.parent.ui.kropff_high_lda_b0_init.setText(
+ str(self.parent.fitting_parameters_init["kropff"]["b0"])
+ )
+ self.parent.ui.kropff_low_lda_ahkl_init.setText(
+ str(self.parent.fitting_parameters_init["kropff"]["ahkl"])
+ )
+ self.parent.ui.kropff_low_lda_bhkl_init.setText(
+ str(self.parent.fitting_parameters_init["kropff"]["bhkl"])
+ )
self.parent.ui.kropff_bragg_peak_ldahkl_init.setText(
str(self.parent.fitting_parameters_init["kropff"]["ldahkl"])
)
- self.parent.ui.kropff_bragg_peak_tau_init.setText(str(self.parent.fitting_parameters_init["kropff"]["tau"]))
+ self.parent.ui.kropff_bragg_peak_tau_init.setText(
+ str(self.parent.fitting_parameters_init["kropff"]["tau"])
+ )
# list_sigma = self.parent.fitting_parameters_init['kropff']['sigma']
# list_sigma = [str(_value) for _value in list_sigma]
# str_list_sigma = ", ".join(list_sigma)
@@ -237,7 +305,9 @@ def widgets(self):
self.parent.ui.splitter.setSizes([500, 400])
self.parent.ui.roi_size_slider.setMinimum(1)
- max_value = np.min([self.parent.image_size["width"], self.parent.image_size["height"]])
+ max_value = np.min(
+ [self.parent.image_size["width"], self.parent.image_size["height"]]
+ )
self.parent.ui.roi_size_slider.setMaximum(max_value)
default_roi_size = int(max_value / 3)
self.parent.ui.roi_size_slider.setValue(default_roi_size)
diff --git a/notebooks/__code/images_and_metadata_extrapolation_matcher.py b/notebooks/__code/images_and_metadata_extrapolation_matcher.py
index f761e7cc..2594e532 100755
--- a/notebooks/__code/images_and_metadata_extrapolation_matcher.py
+++ b/notebooks/__code/images_and_metadata_extrapolation_matcher.py
@@ -40,8 +40,12 @@ def get_merged_dataframe(self):
return self.merged_dataframe
def load_ascii_files(self):
- self.ascii_file_1_dataframe = self.retrieve_dataframe(filename=self.ascii_file_1)
- self.ascii_file_2_dataframe = self.retrieve_dataframe(filename=self.ascii_file_2)
+ self.ascii_file_1_dataframe = self.retrieve_dataframe(
+ filename=self.ascii_file_1
+ )
+ self.ascii_file_2_dataframe = self.retrieve_dataframe(
+ filename=self.ascii_file_2
+ )
def retrieve_dataframe(self, filename=""):
_dataframe = pd.read_csv(filename)
@@ -49,7 +53,9 @@ def retrieve_dataframe(self, filename=""):
return _dataframe
def remove_white_space_in_column_names(self, dataframe):
- clean_column_names = [_old_col_name.strip() for _old_col_name in list(dataframe.columns.values)]
+ clean_column_names = [
+ _old_col_name.strip() for _old_col_name in list(dataframe.columns.values)
+ ]
dataframe.columns = clean_column_names
return dataframe
@@ -65,7 +71,9 @@ def format_timestamp(self, dataframe):
return dataframe
def merge_data(self):
- if (INDEX_SIMPLE_MERGE in self.ascii_file_1_dataframe) and (INDEX_SIMPLE_MERGE in self.ascii_file_2_dataframe):
+ if (INDEX_SIMPLE_MERGE in self.ascii_file_1_dataframe) and (
+ INDEX_SIMPLE_MERGE in self.ascii_file_2_dataframe
+ ):
self.simple_merge()
else:
@@ -76,7 +84,10 @@ def simple_merge(self):
self.set_index(self.ascii_file_2_dataframe)
self.merged_dataframe = pd.merge(
- self.ascii_file_1_dataframe, self.ascii_file_2_dataframe, on=INDEX_SIMPLE_MERGE, how="outer"
+ self.ascii_file_1_dataframe,
+ self.ascii_file_2_dataframe,
+ on=INDEX_SIMPLE_MERGE,
+ how="outer",
)
def set_index(self, dataframe, index=INDEX_SIMPLE_MERGE):
@@ -87,7 +98,10 @@ def merge_with_extrapolation(self):
self.set_index(self.ascii_file_2_dataframe, index=INDEX_EXTRAPOLATION_MERGE)
merged_dataframe = pd.merge(
- self.ascii_file_1_dataframe, self.ascii_file_2_dataframe, on=INDEX_EXTRAPOLATION_MERGE, how="outer"
+ self.ascii_file_1_dataframe,
+ self.ascii_file_2_dataframe,
+ on=INDEX_EXTRAPOLATION_MERGE,
+ how="outer",
)
merged_dataframe.sort_values(by=INDEX_EXTRAPOLATION_MERGE, inplace=True)
self.merged_dataframe = merged_dataframe.reset_index(drop=True)
@@ -96,11 +110,21 @@ def merge_with_extrapolation(self):
def select_metadata_to_extrapolate(self):
list_metadata = self.get_column_names(self.merged_dataframe)
- display(HTML('CTRL + Click to select multiple rows!'))
+ display(
+ HTML(
+ 'CTRL + Click to select multiple rows!'
+ )
+ )
box = widgets.HBox(
[
- widgets.Label("Select Metadata to Extrapolate:", layout=widgets.Layout(width="30%")),
- widgets.SelectMultiple(options=list_metadata, layout=widgets.Layout(width="70%", height="70%")),
+ widgets.Label(
+ "Select Metadata to Extrapolate:",
+ layout=widgets.Layout(width="30%"),
+ ),
+ widgets.SelectMultiple(
+ options=list_metadata,
+ layout=widgets.Layout(width="70%", height="70%"),
+ ),
],
layout=widgets.Layout(height="250px"),
)
@@ -125,7 +149,9 @@ def extrapolate_metadata(self, metadata_name=""):
_metadata_value = metadata_array[_index]
if np.isnan(_metadata_value):
_new_value = Extrapolate.calculate_extrapolated_metadata(
- global_index=_index, metadata_array=metadata_array, timestamp_array=timestamp_array
+ global_index=_index,
+ metadata_array=metadata_array,
+ timestamp_array=timestamp_array,
)
extrapolated_metadata_array.append(_new_value)
extrapolated_timestamp_array.append(timestamp_array[_index])
@@ -144,10 +170,15 @@ def metadata_to_display_init(self):
self.metadata_to_display_changed(value)
def metadata_to_display_changed(self, name_of_metadata_to_display):
- self.extract_known_and_unknown_axis_infos(metadata_name=name_of_metadata_to_display)
+ self.extract_known_and_unknown_axis_infos(
+ metadata_name=name_of_metadata_to_display
+ )
data_known = go.Scatter(
- x=self.timestamp_s_metadata_known, y=self.metadata_column, mode="markers", name="Original metadata"
+ x=self.timestamp_s_metadata_known,
+ y=self.metadata_column,
+ mode="markers",
+ name="Original metadata",
)
data_extrapolated = go.Scatter(
@@ -174,7 +205,9 @@ def extract_known_and_unknown_axis_infos(self, metadata_name=""):
# known metadata values
timestamp_metadata_known = self.ascii_file_2_dataframe["timestamp_user_format"]
self.timestamp_s_metadata_known = [
- TimestampFormatter.convert_to_second(_time, timestamp_format=TIMESTAMP_FORMAT)
+ TimestampFormatter.convert_to_second(
+ _time, timestamp_format=TIMESTAMP_FORMAT
+ )
for _time in timestamp_metadata_known
]
self.metadata_column = self.ascii_file_2_dataframe[metadata_name]
@@ -187,7 +220,9 @@ def extract_known_and_unknown_axis_infos(self, metadata_name=""):
# for _time in timestamp_metadata_unknown]
timestamp_metadata_unknown = self.extrapolated_timestamp_only[metadata_name]
self.timestamp_s_metadata_unknown = [
- TimestampFormatter.convert_to_second(_time, timestamp_format=TIMESTAMP_FORMAT)
+ TimestampFormatter.convert_to_second(
+ _time, timestamp_format=TIMESTAMP_FORMAT
+ )
for _time in timestamp_metadata_unknown
]
@@ -200,7 +235,9 @@ def get_column_names(self, dataframe):
clean_list_columns = [
_name
for _name in list_columns
- if not self._is_name_in_list(name=_name, list_name=[INDEX_EXTRAPOLATION_MERGE, INDEX_SIMPLE_MERGE])
+ if not self._is_name_in_list(
+ name=_name, list_name=[INDEX_EXTRAPOLATION_MERGE, INDEX_SIMPLE_MERGE]
+ )
]
return clean_list_columns
@@ -223,15 +260,21 @@ def make_and_inform_of_full_output_file_name(self, folder_name):
display_html_message(title_message="Output folder name:", message=folder_name)
output_file_name = self.get_output_file_name()
- display_html_message(title_message="Output file name:", message=output_file_name)
+ display_html_message(
+ title_message="Output file name:", message=output_file_name
+ )
return os.path.join(folder_name, output_file_name)
def export_ascii(self, folder_name):
- full_output_file_name = self.make_and_inform_of_full_output_file_name(folder_name)
+ full_output_file_name = self.make_and_inform_of_full_output_file_name(
+ folder_name
+ )
self.cleanup_merged_dataframe()
self.merged_dataframe.to_csv(full_output_file_name)
- display_html_message(title_message="File Created with Success!", message_type="ok")
+ display_html_message(
+ title_message="File Created with Success!", message_type="ok"
+ )
def cleanup_merged_dataframe(self):
# keeping only the raws with filename information defined
@@ -240,7 +283,9 @@ def cleanup_merged_dataframe(self):
class Extrapolate:
@staticmethod
- def get_first_metadata_and_index_value(index=-1, metadata_array=[], direction="left"):
+ def get_first_metadata_and_index_value(
+ index=-1, metadata_array=[], direction="left"
+ ):
if direction == "left":
coeff = -1
else:
@@ -256,18 +301,30 @@ def get_first_metadata_and_index_value(index=-1, metadata_array=[], direction="l
return [metadata_array[index], index]
@staticmethod
- def calculate_extrapolated_metadata(global_index=-1, metadata_array=[], timestamp_array=[]):
- [left_metadata_value, left_index] = Extrapolate.get_first_metadata_and_index_value(
- index=global_index, metadata_array=metadata_array, direction="left"
+ def calculate_extrapolated_metadata(
+ global_index=-1, metadata_array=[], timestamp_array=[]
+ ):
+ [left_metadata_value, left_index] = (
+ Extrapolate.get_first_metadata_and_index_value(
+ index=global_index, metadata_array=metadata_array, direction="left"
+ )
)
- [right_metadata_value, right_index] = Extrapolate.get_first_metadata_and_index_value(
- index=global_index, metadata_array=metadata_array, direction="right"
+ [right_metadata_value, right_index] = (
+ Extrapolate.get_first_metadata_and_index_value(
+ index=global_index, metadata_array=metadata_array, direction="right"
+ )
)
- left_timestamp_s_format = TimestampFormatter.convert_to_second(timestamp_array[left_index])
- right_timestamp_s_format = TimestampFormatter.convert_to_second(timestamp_array[right_index])
+ left_timestamp_s_format = TimestampFormatter.convert_to_second(
+ timestamp_array[left_index]
+ )
+ right_timestamp_s_format = TimestampFormatter.convert_to_second(
+ timestamp_array[right_index]
+ )
- x_timestamp_s_format = TimestampFormatter.convert_to_second(timestamp_array[global_index])
+ x_timestamp_s_format = TimestampFormatter.convert_to_second(
+ timestamp_array[global_index]
+ )
extra_value = Extrapolate.extrapolate_value(
x=x_timestamp_s_format,
diff --git a/notebooks/__code/images_registration_pystackreg/main.py b/notebooks/__code/images_registration_pystackreg/main.py
index 1fb7fda5..383f5d64 100755
--- a/notebooks/__code/images_registration_pystackreg/main.py
+++ b/notebooks/__code/images_registration_pystackreg/main.py
@@ -11,7 +11,10 @@
from pystackreg import StackReg
from tqdm import tqdm
-from __code._utilities.file import make_or_reset_folder, retrieve_list_of_most_dominant_extension_from_folder
+from __code._utilities.file import (
+ make_or_reset_folder,
+ retrieve_list_of_most_dominant_extension_from_folder,
+)
from __code._utilities.images import read_img_stack
from __code._utilities.json import save_json
from __code._utilities.time import get_current_time_in_special_file_name_format
@@ -61,7 +64,9 @@ def load_images(self, folder_name=None):
self.folder_name = folder_name
# retrieve list of files
- self.list_of_files, ext = retrieve_list_of_most_dominant_extension_from_folder(folder=folder_name)
+ self.list_of_files, ext = retrieve_list_of_most_dominant_extension_from_folder(
+ folder=folder_name
+ )
self.stack = read_img_stack(list_files=self.list_of_files, ext=ext)
@@ -70,20 +75,26 @@ def load_images(self, folder_name=None):
def display_unregistered(self):
def preview_unregistered(image_index, vmin=0.8, vmax=1.2):
- fig, ax = plt.subplots(ncols=3, nrows=1, num="Unregistered images", figsize=(15, 5))
+ fig, ax = plt.subplots(
+ ncols=3, nrows=1, num="Unregistered images", figsize=(15, 5)
+ )
ax[0].imshow(self.stack[0], vmin=0, vmax=1)
ax[0].set_title("First image")
ax[1].imshow(self.stack[image_index], vmin=0, vmax=1)
ax[1].set_title(f"Image #{image_index}")
- image = ax[2].imshow(np.divide(self.stack[image_index], self.stack[0]), vmin=vmin, vmax=vmax)
+ image = ax[2].imshow(
+ np.divide(self.stack[image_index], self.stack[0]), vmin=vmin, vmax=vmax
+ )
ax[2].set_title(f"Image[{image_index}] / First image")
cb = plt.colorbar(image, ax=ax[2])
v = interactive(
preview_unregistered,
- image_index=widgets.IntSlider(min=0, max=len(self.list_of_files) - 1, value=1),
+ image_index=widgets.IntSlider(
+ min=0, max=len(self.list_of_files) - 1, value=1
+ ),
vmin=widgets.FloatSlider(min=0, max=2, value=0.8),
vmax=widgets.FloatSlider(min=0, max=2, value=1.2),
)
@@ -142,7 +153,9 @@ def _get_crop_region(self, selector):
return x0, x1, y0, y1, data_have_been_cropped
def perform_cropping(self):
- x0, x1, y0, y1, data_have_been_cropped = self._get_crop_region(self.selector_unregistered)
+ x0, x1, y0, y1, data_have_been_cropped = self._get_crop_region(
+ self.selector_unregistered
+ )
self.crop["before registration"] = {"x0": x0, "x1": x1, "y0": y0, "y1": y1}
if data_have_been_cropped:
@@ -197,7 +210,9 @@ def preview_registered(image_index, vmin=0.8, vmax=1.2):
if self.fig2:
self.fig2.clear()
- self.fig2, ax2 = plt.subplots(ncols=3, nrows=1, num="Registered images", figsize=(15, 5))
+ self.fig2, ax2 = plt.subplots(
+ ncols=3, nrows=1, num="Registered images", figsize=(15, 5)
+ )
ax2[0].imshow(self.registered_stack[0], vmin=0, vmax=1)
ax2[0].set_title("First image")
@@ -206,7 +221,9 @@ def preview_registered(image_index, vmin=0.8, vmax=1.2):
ax2[1].set_title(f"Image #{image_index}")
image = ax2[2].imshow(
- np.divide(self.registered_stack[image_index], self.registered_stack[0]), vmin=vmin, vmax=vmax
+ np.divide(self.registered_stack[image_index], self.registered_stack[0]),
+ vmin=vmin,
+ vmax=vmax,
)
ax2[2].set_title(f"Image[{image_index}] / First image")
cb = plt.colorbar(image, ax=ax2[2])
@@ -214,7 +231,9 @@ def preview_registered(image_index, vmin=0.8, vmax=1.2):
v2 = interactive(
preview_registered,
- image_index=widgets.IntSlider(min=0, max=len(self.list_of_files) - 1, value=1),
+ image_index=widgets.IntSlider(
+ min=0, max=len(self.list_of_files) - 1, value=1
+ ),
vmin=widgets.FloatSlider(min=0, max=2, value=0.8),
vmax=widgets.FloatSlider(min=0, max=2, value=1.2),
)
@@ -252,7 +271,9 @@ def _select_callback(eclick, erelease):
ax.set_title("Click and drag to select region to crop")
def perform_cropping_for_export(self):
- x0, x1, y0, y1, data_have_been_cropped = self._get_crop_region(self.selector_registered)
+ x0, x1, y0, y1, data_have_been_cropped = self._get_crop_region(
+ self.selector_registered
+ )
self.crop["after registration"] = {"x0": x0, "x1": x1, "y0": y0, "y1": y1}
if data_have_been_cropped:
@@ -295,12 +316,16 @@ def export_images(self, output_folder):
# create output folder
source_folder = os.path.basename(os.path.dirname(list_file_names[0]))
time_stamp = get_current_time_in_special_file_name_format()
- full_output_folder_name = os.path.join(output_folder, f"{source_folder}_{time_stamp}")
+ full_output_folder_name = os.path.join(
+ output_folder, f"{source_folder}_{time_stamp}"
+ )
make_or_reset_folder(full_output_folder_name)
for i, file_name in tqdm(enumerate(list_file_names)):
short_file_name = os.path.basename(file_name)
- full_output_file_name = os.path.join(full_output_folder_name, short_file_name)
+ full_output_file_name = os.path.join(
+ full_output_folder_name, short_file_name
+ )
_image = Image.fromarray(registered_crop_stack[i])
_image.save(full_output_file_name)
@@ -309,10 +334,17 @@ def export_images(self, output_folder):
"input folder": source_folder,
"number of files": len(list_file_names),
"crop": self.crop,
- "registration": {"type": self.algo_options.value, "image of reference": self.reference_options.value},
+ "registration": {
+ "type": self.algo_options.value,
+ "image of reference": self.reference_options.value,
+ },
}
json_file_name = os.path.join(full_output_folder_name, "config.json")
save_json(json_file_name, metadata)
- self.output_label.value = f"DONE! (Registered files have been created in {full_output_folder_name})"
- display(HTML(f"Registered files have been created in {full_output_folder_name}"))
+ self.output_label.value = (
+ f"DONE! (Registered files have been created in {full_output_folder_name})"
+ )
+ display(
+ HTML(f"Registered files have been created in {full_output_folder_name}")
+ )
diff --git a/notebooks/__code/integrated_roi_counts_vs_file_name_and_time_stamp.py b/notebooks/__code/integrated_roi_counts_vs_file_name_and_time_stamp.py
index 72adf801..e52441a4 100755
--- a/notebooks/__code/integrated_roi_counts_vs_file_name_and_time_stamp.py
+++ b/notebooks/__code/integrated_roi_counts_vs_file_name_and_time_stamp.py
@@ -25,7 +25,9 @@ def _fromUtf8(s):
from __code._utilities.color import Color
from __code.decorators import wait_cursor
from __code.file_handler import make_ascii_file, retrieve_time_stamp
-from __code.ui_integrated_roi_counts_vs_file_name_and_time_stamp import Ui_MainWindow as UiMainWindow
+from __code.ui_integrated_roi_counts_vs_file_name_and_time_stamp import (
+ Ui_MainWindow as UiMainWindow,
+)
class IntegratedRoiUi(QMainWindow):
@@ -166,7 +168,9 @@ def update_all_plots(self):
nbr_profile = len(list_index_profile_selected)
nbr_file_selected = len(list_index_file_selected)
color = Color()
- list_rgb_profile_color = color.get_list_rgb(nbr_color=(nbr_profile * nbr_file_selected))
+ list_rgb_profile_color = color.get_list_rgb(
+ nbr_color=(nbr_profile * nbr_file_selected)
+ )
self.ui.all_plots_view.clear()
if nbr_profile == 0:
return
@@ -180,10 +184,16 @@ def update_all_plots(self):
for _color_index_file, _index_file in enumerate(list_index_file_selected):
_data = self.data_dict["data"][_index_file]
- for _color_index_profile, _index_profile in enumerate(list_index_profile_selected):
+ for _color_index_profile, _index_profile in enumerate(
+ list_index_profile_selected
+ ):
legend = f"File #{_index_file} - Profile #{_index_profile}"
- _color = list_rgb_profile_color[_color_index_file + _color_index_profile * nbr_file_selected]
- [x_axis, y_axis] = self.get_profile(image=np.transpose(_data), profile_roi_row=_index_profile)
+ _color = list_rgb_profile_color[
+ _color_index_file + _color_index_profile * nbr_file_selected
+ ]
+ [x_axis, y_axis] = self.get_profile(
+ image=np.transpose(_data), profile_roi_row=_index_profile
+ )
self.ui.all_plots_view.plot(x_axis, y_axis, name=legend, pen=_color)
def display_image(self, recalculate_image=False):
@@ -252,7 +262,9 @@ def is_row_enabled(self, row=-1):
def update_guide_table_using_guide_rois(self):
for _row, _roi in enumerate(self.list_guide_pyqt_roi):
if self.is_row_enabled(row=_row):
- region = _roi.getArraySlice(self.live_image, self.ui.image_view.imageItem)
+ region = _roi.getArraySlice(
+ self.live_image, self.ui.image_view.imageItem
+ )
x0 = region[0][0].start
x1 = region[0][0].stop
@@ -320,7 +332,9 @@ def rename_all_plots_profiles_table(self):
"""rename all the profile name"""
nbr_row = self.ui.tableWidget.rowCount()
for _row in np.arange(nbr_row):
- self.ui.all_plots_profiles_table.item(_row, 0).setText(f"Profile # {_row+1}")
+ self.ui.all_plots_profiles_table.item(_row, 0).setText(
+ f"Profile # {_row+1}"
+ )
# setter
def set_item_all_plots_profile_table(self, row=0):
@@ -346,12 +360,16 @@ def set_item_all_plots_profile_table(self, row=0):
def set_item_main_table(self, row=0, col=0, value=""):
if col == 0:
- spacerItem_left = QtGui.QSpacerItem(408, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
+ spacerItem_left = QtGui.QSpacerItem(
+ 408, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding
+ )
widget = QtGui.QCheckBox()
widget.blockSignals(True)
self.list_table_widget_checkbox.insert(row, widget)
widget.stateChanged.connect(self.guide_state_changed)
- spacerItem_right = QtGui.QSpacerItem(408, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
+ spacerItem_right = QtGui.QSpacerItem(
+ 408, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding
+ )
hori_layout = QtGui.QHBoxLayout()
hori_layout.addItem(spacerItem_left)
hori_layout.addWidget(widget)
@@ -377,7 +395,9 @@ def get_profile_dimensions(self, row=-1):
y_top = int(y0)
y_bottom = int(y0) + int(height)
- Profile = collections.namedtuple("Profile", ["x_left", "x_right", "y_top", "y_bottom"])
+ Profile = collections.namedtuple(
+ "Profile", ["x_left", "x_right", "y_top", "y_bottom"]
+ )
result = Profile(x_left, x_right, y_top, y_bottom)
return result
@@ -403,7 +423,9 @@ def get_profile(self, profile_roi_row=-1):
x_axis = np.arange(len(self.data_dict["file_name"]))
for _data in self.data_dict["data"]:
- _roi_counts = _data[y_top:y_bottom, x_left:x_right] # because pyqtgrpah display transpose images
+ _roi_counts = _data[
+ y_top:y_bottom, x_left:x_right
+            ] # because pyqtgraph displays transposed images
if inte_algo == "add":
_counts = np.sum(_roi_counts)
elif inte_algo == "mean":
@@ -470,7 +492,9 @@ def highlight_guide_profile_pyqt_rois(self, row=-1):
return
try:
- self._highlights_guide_profile_pyqt_roi(row=previous_active_row, status="deactivated")
+ self._highlights_guide_profile_pyqt_roi(
+ row=previous_active_row, status="deactivated"
+ )
self._highlights_guide_profile_pyqt_roi(row=row, status="activated")
except:
pass
@@ -624,7 +648,10 @@ def profile_along_axis_changed(self):
def export_button_clicked(self):
_export_folder = QFileDialog.getExistingDirectory(
- self, directory=self.working_dir, caption="Select Output Folder", options=QFileDialog.ShowDirsOnly
+ self,
+ directory=self.working_dir,
+ caption="Select Output Folder",
+ options=QFileDialog.ShowDirsOnly,
)
if _export_folder:
o_export = ExportProfiles(parent=self, export_folder=_export_folder)
@@ -658,7 +685,10 @@ def __init__(self, parent=None, export_folder=""):
def _create_output_file_name(self):
base_name = os.path.basename(self.parent.working_dir)
nbr_profile = self.parent.ui.tableWidget.rowCount()
- output_file_name = os.path.join(self.export_folder, f"{base_name}_{nbr_profile}_integrated_counts_regions.txt")
+ output_file_name = os.path.join(
+ self.export_folder,
+ f"{base_name}_{nbr_profile}_integrated_counts_regions.txt",
+ )
return output_file_name
def _create_metadata(self):
@@ -675,7 +705,9 @@ def _create_metadata(self):
x_right = profile_dimension.x_right
y_top = profile_dimension.y_top
y_bottom = profile_dimension.y_bottom
- metadata.append(f"#ROI #{_profile_index}: [x0, y0, x1, y1] = [{x_left}, {y_top}, {x_right}, {y_bottom}]")
+ metadata.append(
+ f"#ROI #{_profile_index}: [x0, y0, x1, y1] = [{x_left}, {y_top}, {x_right}, {y_bottom}]"
+ )
axis.append(f"ROI #{_profile_index}")
metadata.append("#")
@@ -686,7 +718,9 @@ def _create_data(self, profile_index=0):
all_profiles = []
x_axis = []
for _data in self.parent.data_dict["data"]:
- [x_axis, profile] = self.parent.get_profile(image=np.transpose(_data), profile_roi_row=profile_index)
+ [x_axis, profile] = self.parent.get_profile(
+ image=np.transpose(_data), profile_roi_row=profile_index
+ )
all_profiles.append(list(profile))
data = []
@@ -710,7 +744,9 @@ def _format_data(self):
_time_stamp = str(self.parent.ui.summary_table.item(_row, 1).text())
_formated_row_value = " ,".join(_row_value)
- _formated_row = f"{_file_index}, {_file_name}, {_time_stamp}, " + _formated_row_value
+ _formated_row = (
+ f"{_file_index}, {_file_name}, {_time_stamp}, " + _formated_row_value
+ )
_data.append(_formated_row)
return _data
@@ -721,7 +757,9 @@ def run(self):
# create output file name
_output_file_name = self._create_output_file_name()
- make_ascii_file(metadata=metadata, data=data, output_file_name=_output_file_name, dim="1d")
+ make_ascii_file(
+ metadata=metadata, data=data, output_file_name=_output_file_name, dim="1d"
+ )
class GuideAndProfileRoisHandler:
@@ -740,14 +778,19 @@ def add(self):
self.parent.list_profile_pyqt_roi.insert(self.row, self.__profile)
def update(self):
- self.parent.ui.image_view.removeItem(self.parent.list_profile_pyqt_roi[self.row])
+ self.parent.ui.image_view.removeItem(
+ self.parent.list_profile_pyqt_roi[self.row]
+ )
self.parent.list_profile_pyqt_roi[self.row] = self.__profile
def _define_guide(self):
"""define the guide"""
guide_roi = pg.RectROI(
[self.parent.default_guide_roi["x0"], self.parent.default_guide_roi["y0"]],
- [self.parent.default_guide_roi["width"], self.parent.default_guide_roi["height"]],
+ [
+ self.parent.default_guide_roi["width"],
+ self.parent.default_guide_roi["height"],
+ ],
pen=self.parent.default_guide_roi["color_activated"],
)
guide_roi.addScaleHandle([1, 1], [0, 0])
@@ -770,15 +813,21 @@ def timestamp_dict(self):
def table(self):
# init the summary table
list_files_full_name = self.parent.data_dict["file_name"]
- list_files_short_name = [os.path.basename(_file) for _file in list_files_full_name]
+ list_files_short_name = [
+ os.path.basename(_file) for _file in list_files_full_name
+ ]
list_time_stamp = self.parent.timestamp_dict["list_time_stamp"]
- list_time_stamp_user_format = self.parent.timestamp_dict["list_time_stamp_user_format"]
+ list_time_stamp_user_format = self.parent.timestamp_dict[
+ "list_time_stamp_user_format"
+ ]
time_0 = list_time_stamp[0]
for _row, _file in enumerate(list_files_short_name):
self.parent.ui.summary_table.insertRow(_row)
self.set_item_summary_table(row=_row, col=0, value=_file)
- self.set_item_summary_table(row=_row, col=1, value=list_time_stamp_user_format[_row])
+ self.set_item_summary_table(
+ row=_row, col=1, value=list_time_stamp_user_format[_row]
+ )
_offset = list_time_stamp[_row] - time_0
self.set_item_summary_table(row=_row, col=2, value=f"{_offset:0.2f}")
@@ -789,7 +838,9 @@ def parameters(self):
self.parent.default_guide_roi["height"] = int(height / 5)
self.parent.default_guide_roi["x0"] = int(width / 2)
self.parent.default_guide_roi["y0"] = int(height / 2)
- self.parent.default_profile_width_values = [str(_value) for _value in self.parent.default_profile_width_values]
+ self.parent.default_profile_width_values = [
+ str(_value) for _value in self.parent.default_profile_width_values
+ ]
def widgets(self):
self.parent.ui.splitter_2.setSizes([250, 50])
@@ -801,7 +852,9 @@ def widgets(self):
# update size of summary table
nbr_columns = self.parent.ui.summary_table.columnCount()
for _col in range(nbr_columns):
- self.parent.ui.summary_table.setColumnWidth(_col, self.parent.summary_table_width[_col])
+ self.parent.ui.summary_table.setColumnWidth(
+ _col, self.parent.summary_table_width[_col]
+ )
self.parent.display_ui = [
self.parent.ui.display_size_label,
@@ -852,7 +905,8 @@ def get_image_selected(self, recalculate_image=False):
angle = self.parent.rotation_angle
# rotate all images
self.parent.data_dict["data"] = [
- transform.rotate(_image, angle) for _image in self.parent.data_dict_raw["data"]
+ transform.rotate(_image, angle)
+ for _image in self.parent.data_dict_raw["data"]
]
_image = self.parent.data_dict["data"][slider_index]
@@ -876,7 +930,9 @@ def display_images(self):
_view_box.setState(_state)
if not first_update:
- _histo_widget.setLevels(self.parent.histogram_level[0], self.parent.histogram_level[1])
+ _histo_widget.setLevels(
+ self.parent.histogram_level[0], self.parent.histogram_level[1]
+ )
def calculate_matrix_grid(self, grid_size=1, height=1, width=1):
"""calculate the matrix that defines the vertical and horizontal lines
@@ -926,12 +982,17 @@ def display_grid(self):
grid_size = self.parent.ui.grid_size_slider.value()
[width, height] = np.shape(self.parent.live_image)
- pos_adj_dict = self.calculate_matrix_grid(grid_size=grid_size, height=height, width=width)
+ pos_adj_dict = self.calculate_matrix_grid(
+ grid_size=grid_size, height=height, width=width
+ )
pos = pos_adj_dict["pos"]
adj = pos_adj_dict["adj"]
line_color = self.parent.grid_view["color"]
- _transparency_value = 255 - (float(str(self.parent.ui.transparency_slider.value())) / 100) * 255
+ _transparency_value = (
+ 255
+ - (float(str(self.parent.ui.transparency_slider.value())) / 100) * 255
+ )
_list_line_color = list(line_color)
_list_line_color[3] = _transparency_value
line_color = tuple(_list_line_color)
diff --git a/notebooks/__code/ipywe/_version.py b/notebooks/__code/ipywe/_version.py
index c2e98622..f99b78f7 100755
--- a/notebooks/__code/ipywe/_version.py
+++ b/notebooks/__code/ipywe/_version.py
@@ -6,7 +6,9 @@
version_info[0],
version_info[1],
version_info[2],
- "" if version_info[3] == "final" else _specifier_[version_info[3]] + str(version_info[4]),
+ ""
+ if version_info[3] == "final"
+ else _specifier_[version_info[3]] + str(version_info[4]),
)
__frontend_version__ = "^0.1.3-alpha.2"
diff --git a/notebooks/__code/ipywe/example.py b/notebooks/__code/ipywe/example.py
index b87da7a4..194887af 100755
--- a/notebooks/__code/ipywe/example.py
+++ b/notebooks/__code/ipywe/example.py
@@ -28,7 +28,9 @@ class HelloWorld(widgets.DOMWidget):
def get_js():
import os
- js = open(os.path.join(os.path.dirname(__file__), "..", "js", "src", "example.js")).read()
+ js = open(
+ os.path.join(os.path.dirname(__file__), "..", "js", "src", "example.js")
+ ).read()
return js.decode("UTF-8")
def run_js():
diff --git a/notebooks/__code/ipywe/fileselector.py b/notebooks/__code/ipywe/fileselector.py
index a72db518..1c505713 100755
--- a/notebooks/__code/ipywe/fileselector.py
+++ b/notebooks/__code/ipywe/fileselector.py
@@ -21,11 +21,18 @@ class FileSelectorPanel:
# statement should change the width of the file selector. "width="
# doesn't appear to work in earlier versions.
select_layout = ipyw.Layout(width="99%", height="260px")
- select_multiple_layout = ipyw.Layout(width="99%", height="360px") # , display="flex", flex_flow="column")
+ select_multiple_layout = ipyw.Layout(
+ width="99%", height="360px"
+ ) # , display="flex", flex_flow="column")
button_layout = ipyw.Layout(margin="5px 40px", border="1px solid gray")
- toolbar_button_layout = ipyw.Layout(margin="5px 10px", width="100px", border="1px solid gray")
+ toolbar_button_layout = ipyw.Layout(
+ margin="5px 10px", width="100px", border="1px solid gray"
+ )
toolbar_box_layout = ipyw.Layout(
- border="1px solid lightgrey", padding="3px", margin="5px 50px 5px 5px", width="100%"
+ border="1px solid lightgrey",
+ padding="3px",
+ margin="5px 50px 5px 5px",
+ width="100%",
)
label_layout = ipyw.Layout(width="100%")
layout = ipyw.Layout()
@@ -136,7 +143,9 @@ def createBody(self, curdir):
left_vbox = ipyw.VBox(left_widgets, layout=ipyw.Layout(width="80%"))
# right
# change directory button
- self.changedir = ipyw.Button(description="Change directory", layout=self.button_layout)
+ self.changedir = ipyw.Button(
+ description="Change directory", layout=self.button_layout
+ )
self.changedir.on_click(self.handle_changedir)
# select button
ok_layout = cloneLayout(self.button_layout)
@@ -147,7 +156,9 @@ def createBody(self, curdir):
right_vbox = ipyw.VBox(children=[self.changedir, self.ok])
select_panel = ipyw.HBox(
children=[left_vbox, right_vbox],
- layout=ipyw.Layout(border="1px solid lightgrey", margin="5px", padding="10px"),
+ layout=ipyw.Layout(
+ border="1px solid lightgrey", margin="5px", padding="10px"
+ ),
)
body = ipyw.VBox(children=[toolbar, select_panel], layout=self.layout)
self.footer.value = ""
@@ -158,20 +169,34 @@ def createToolbar(self):
# "jump to"
curdir = self.curdir
self.jumpto_input = jumpto_input = ipyw.Text(
- value=curdir, placeholder="", description="Location: ", layout=ipyw.Layout(width="100%")
+ value=curdir,
+ placeholder="",
+ description="Location: ",
+ layout=ipyw.Layout(width="100%"),
+ )
+ jumpto_button = ipyw.Button(
+ description="Jump", layout=self.toolbar_button_layout
)
- jumpto_button = ipyw.Button(description="Jump", layout=self.toolbar_button_layout)
jumpto_button.on_click(self.handle_jumpto)
- jumpto = ipyw.HBox(children=[jumpto_input, jumpto_button], layout=self.toolbar_box_layout)
+ jumpto = ipyw.HBox(
+ children=[jumpto_input, jumpto_button], layout=self.toolbar_box_layout
+ )
self.jumpto_button = jumpto_button
if self.newdir_toolbar_button:
# "new dir"
self.newdir_input = newdir_input = ipyw.Text(
- value="", placeholder="new dir name", description="New subdir: ", layout=ipyw.Layout(width="180px")
+ value="",
+ placeholder="new dir name",
+ description="New subdir: ",
+ layout=ipyw.Layout(width="180px"),
+ )
+ newdir_button = ipyw.Button(
+ description="Create", layout=self.toolbar_button_layout
)
- newdir_button = ipyw.Button(description="Create", layout=self.toolbar_button_layout)
newdir_button.on_click(self.handle_newdir)
- newdir = ipyw.HBox(children=[newdir_input, newdir_button], layout=self.toolbar_box_layout)
+ newdir = ipyw.HBox(
+ children=[newdir_input, newdir_button], layout=self.toolbar_box_layout
+ )
toolbar = ipyw.HBox(children=[jumpto, newdir])
else:
toolbar = ipyw.HBox(children=[jumpto])
@@ -185,7 +210,9 @@ def getEntries(self):
entries_files = self.getFilteredEntries()
else:
entries_files = sorted(os.listdir(curdir))
- entries_files = [_f for _f in entries_files if (self.searching_string in _f)]
+ entries_files = [
+ _f for _f in entries_files if (self.searching_string in _f)
+ ]
#
# if self.sort_by_alphabetical:
@@ -212,11 +239,19 @@ def createSelectWidget(self):
if self.multiple:
value = []
self.select = ipyw.SelectMultiple(
- value=value, options=entries, description="Select", layout=self.select_multiple_layout
+ value=value,
+ options=entries,
+ description="Select",
+ layout=self.select_multiple_layout,
)
else:
value = entries[0]
- self.select = ipyw.Select(value=value, options=entries, description="Select", layout=self.select_layout)
+ self.select = ipyw.Select(
+ value=value,
+ options=entries,
+ description="Select",
+ layout=self.select_layout,
+ )
"""When ipywidgets 7.0 is released, the old way that the select or select multiple
widget was set up (see below) should work so long as self.select_layout is changed
to include the display="flex" and flex_flow="column" statements. In ipywidgets 6.0,
@@ -231,13 +266,17 @@ def createSelectWidget(self):
def createSearchWidget(self):
label = ipyw.Label("Search:")
- self.search_text = ipyw.Text(self.searching_string, layout=ipyw.Layout(width="50%"))
+ self.search_text = ipyw.Text(
+ self.searching_string, layout=ipyw.Layout(width="50%")
+ )
self.search_text.on_submit(self.handle_search_changed)
elements = [label, self.search_text]
if self.filter_widget:
elements.append(self.filter_widget)
- self.search = ipyw.HBox(elements, layout=ipyw.Layout(justify_content="flex-end"))
+ self.search = ipyw.HBox(
+ elements, layout=ipyw.Layout(justify_content="flex-end")
+ )
def handle_search_changed(self, sender):
self.searching_string = sender.value.strip()
@@ -249,7 +288,9 @@ def createFilterWidget(self):
self.filters.update(All=["*.*"])
self.cur_filter = self.cur_filter or self.filters[self.default_filter or "All"]
self.filter_widget = ipyw.Dropdown(
- options=self.filters, value=self.cur_filter, layout=ipyw.Layout(align_self="flex-end", width="15%")
+ options=self.filters,
+ value=self.cur_filter,
+ layout=ipyw.Layout(align_self="flex-end", width="15%"),
)
self.filter_widget.observe(self.handle_filter_changed, names="value")
return
@@ -266,7 +307,9 @@ def getFilteredEntries(self):
# filter out dirs, they will be added below
list_files = filter(lambda o: not os.path.isdir(o), list_files)
list_files = list(map(os.path.basename, list_files))
- list_dirs = [o for o in os.listdir(curdir) if os.path.isdir(os.path.join(curdir, o))]
+ list_dirs = [
+ o for o in os.listdir(curdir) if os.path.isdir(os.path.join(curdir, o))
+ ]
        self.footer.value += "" + " ".join(list_dirs) + ""
entries = list_dirs + list_files
entries.sort()
diff --git a/notebooks/__code/ipywe/imagedisplay.py b/notebooks/__code/ipywe/imagedisplay.py
index 61201fad..774c63df 100755
--- a/notebooks/__code/ipywe/imagedisplay.py
+++ b/notebooks/__code/ipywe/imagedisplay.py
@@ -59,7 +59,11 @@ def __init__(self, image, width, height, init_roi=None):
def createImg(self):
if self._img_min >= self._img_max:
self._img_max = self._img_min + abs(self._img_max - self._img_min) * 1e-5
- img = ((self.curr_img_data - self._img_min) / (self._img_max - self._img_min) * (2**8 - 1)).astype("uint8")
+ img = (
+ (self.curr_img_data - self._img_min)
+ / (self._img_max - self._img_min)
+ * (2**8 - 1)
+ ).astype("uint8")
size = np.max(img.shape)
view_size = np.max((self.width, self.height))
if size > view_size:
@@ -129,7 +133,9 @@ def zoomImg(self, change):
self._extracols = 0
extrarows_top = np.full((addtop, self._ncols), 1)
extrarows_bottom = np.full((addbottom, self._ncols), 1)
- self.curr_img_data = np.vstack((extrarows_top, self.curr_img_data, extrarows_bottom))
+ self.curr_img_data = np.vstack(
+ (extrarows_top, self.curr_img_data, extrarows_bottom)
+ )
else:
diff = self._nrows - self._ncols
if diff % 2 == 0:
@@ -146,9 +152,15 @@ def zoomImg(self, change):
self._extracols = diff
extrarows_left = np.full((self._nrows, addleft), 1)
extrarows_right = np.full((self._nrows, addright), 1)
- self.curr_img_data = np.hstack((extrarows_left, self.curr_img_data, extrarows_right))
- self._xcoord_max_roi = self._xcoord_absolute + self._ncols_currimg - self._extracols
- self._ycoord_max_roi = self._ycoord_absolute + self._nrows_currimg - self._extrarows
+ self.curr_img_data = np.hstack(
+ (extrarows_left, self.curr_img_data, extrarows_right)
+ )
+ self._xcoord_max_roi = (
+ self._xcoord_absolute + self._ncols_currimg - self._extracols
+ )
+ self._ycoord_max_roi = (
+ self._ycoord_absolute + self._nrows_currimg - self._extrarows
+ )
self._b64value = self.createImg()
return
diff --git a/notebooks/__code/ipywe/imageslider.py b/notebooks/__code/ipywe/imageslider.py
index 3b56dead..0444444f 100755
--- a/notebooks/__code/ipywe/imageslider.py
+++ b/notebooks/__code/ipywe/imageslider.py
@@ -163,7 +163,11 @@ def getimg_bytes(self):
# apply intensity range
self.curr_img_data[self.curr_img_data < self._img_min] = self._img_min
self.curr_img_data[self.curr_img_data > self._img_max] = self._img_max
- img = ((self.curr_img_data - self._img_min) / (self._img_max - self._img_min) * (2**8 - 1)).astype("uint8")
+ img = (
+ (self.curr_img_data - self._img_min)
+ / (self._img_max - self._img_min)
+ * (2**8 - 1)
+ ).astype("uint8")
size = np.max(img.shape)
view_size = np.max((self.width, self.height))
# resample if necessary
@@ -323,7 +327,9 @@ def update_image_div_data_with_zoom(self):
self._extracols = 0
extrarows_top = np.full((addtop, self._ncols), 1)
extrarows_bottom = np.full((addbottom, self._ncols), 1)
- self.curr_img_data = np.vstack((extrarows_top, self.curr_img_data, extrarows_bottom))
+ self.curr_img_data = np.vstack(
+ (extrarows_top, self.curr_img_data, extrarows_bottom)
+ )
else:
diff = self._nrows - self._ncols
if diff % 2 == 0:
@@ -340,8 +346,14 @@ def update_image_div_data_with_zoom(self):
self._extracols = diff
extrarows_left = np.full((self._nrows, addleft), 1)
extrarows_right = np.full((self._nrows, addright), 1)
- self.curr_img_data = np.hstack((extrarows_left, self.curr_img_data, extrarows_right))
- self._xcoord_max_roi = self._xcoord_absolute + self._ncols_currimg - self._extracols
- self._ycoord_max_roi = self._ycoord_absolute + self._nrows_currimg - self._extrarows
+ self.curr_img_data = np.hstack(
+ (extrarows_left, self.curr_img_data, extrarows_right)
+ )
+ self._xcoord_max_roi = (
+ self._xcoord_absolute + self._ncols_currimg - self._extracols
+ )
+ self._ycoord_max_roi = (
+ self._ycoord_absolute + self._nrows_currimg - self._extrarows
+ )
self._b64value = self.getimg_bytes()
return
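
For context, the zoom handlers reformatted above pad a non-square view to a square canvas by stacking filler rows or columns of ones around the data before re-encoding it. A self-contained sketch of that padding step (the function name is illustrative):

import numpy as np

def pad_to_square(img: np.ndarray, fill: float = 1.0) -> np.ndarray:
    # illustrative helper: pad the shorter image dimension with `fill` so the result is square
    nrows, ncols = img.shape
    diff = abs(nrows - ncols)
    before, after = diff // 2, diff - diff // 2
    if nrows < ncols:  # add filler rows on top and bottom
        top = np.full((before, ncols), fill)
        bottom = np.full((after, ncols), fill)
        return np.vstack((top, img, bottom))
    if ncols < nrows:  # add filler columns on left and right
        left = np.full((nrows, before), fill)
        right = np.full((nrows, after), fill)
        return np.hstack((left, img, right))
    return img

square = pad_to_square(np.zeros((3, 5)))
assert square.shape == (5, 5)
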
diff --git a/notebooks/__code/ipywe/imgdatagraph.py b/notebooks/__code/ipywe/imgdatagraph.py
index e07f54dc..51f61785 100755
--- a/notebooks/__code/ipywe/imgdatagraph.py
+++ b/notebooks/__code/ipywe/imgdatagraph.py
@@ -63,7 +63,10 @@ def __init__(self, image, width, height, uformat="png"):
self.height = height
self._format = uformat
self._nrows, self._ncols = self.img_data.shape
- self._img_min, self._img_max = int(np.min(self.img_data)), int(np.max(self.img_data))
+ self._img_min, self._img_max = (
+ int(np.min(self.img_data)),
+ int(np.max(self.img_data)),
+ )
self._b64value = self.getimg_bytes()
super(ImageDataGraph, self).__init__()
return
@@ -71,7 +74,11 @@ def __init__(self, image, width, height, uformat="png"):
def getimg_bytes(self):
"""Encodes the image's data into Base64."""
- img = ((self.img_data - self._img_min) / (self._img_max - self._img_min) * (2**8 - 1)).astype("uint8")
+ img = (
+ (self.img_data - self._img_min)
+ / (self._img_max - self._img_min)
+ * (2**8 - 1)
+ ).astype("uint8")
size = np.max(img.shape)
view_size = np.max((self.width, self.height))
if size > view_size:
@@ -215,7 +222,9 @@ def width_graph(self):
elif p1y_abs != p2y_abs and p1x_abs == p2x_abs:
dists, vals, bar_width = self.get_data_vertical(p1x_abs, p1y_abs, p2y_abs)
else:
- dists, vals, bar_width = self.get_data_diagonal(p1x_abs, p1y_abs, p2x_abs, p2y_abs)
+ dists, vals, bar_width = self.get_data_diagonal(
+ p1x_abs, p1y_abs, p2x_abs, p2y_abs
+ )
plt.bar(dists, vals, width=bar_width)
plt.xlabel("Distance from Initial Point")
plt.ylabel("Value")
@@ -277,7 +286,9 @@ def get_data_horizontal(self, x_init, y_init, x_fin):
ind = bins.index(b)
if ind < len(bins) - 1:
if x >= b + x0 and x < bins[ind + 1] + x0:
- intensities[ind] = intensities[ind] + self.img_data[int(y), int(x)]
+ intensities[ind] = (
+ intensities[ind] + self.img_data[int(y), int(x)]
+ )
num_binvals[ind] = num_binvals[ind] + 1
break
for val, num in np.nditer([intensities, num_binvals]):
@@ -328,7 +339,9 @@ def get_data_vertical(self, x_init, y_init, y_fin):
ind = bins.index(b)
if ind < len(bins) - 1:
if y >= b + y0 and y < bins[ind + 1] + y0:
- intensities[ind] = intensities[ind] + self.img_data[int(y), int(x)]
+ intensities[ind] = (
+ intensities[ind] + self.img_data[int(y), int(x)]
+ )
num_binvals[ind] = num_binvals[ind] + 1
break
for val, num in np.nditer([intensities, num_binvals]):
@@ -413,7 +426,9 @@ def get_data_diagonal(self, x_init, y_init, x_fin, y_fin):
ind = bin_borders.index(b)
if ind < len(bin_borders) - 1:
if p >= b and p < bin_borders[ind + 1]:
- intensities[ind] = intensities[ind] + self.img_data[int(y), int(x)]
+ intensities[ind] = (
+ intensities[ind] + self.img_data[int(y), int(x)]
+ )
num_binvals[ind] = num_binvals[ind] + 1
break
for i, n in np.nditer([intensities, num_binvals]):
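
Several of the display widgets touched here (imagedisplay, imageslider, imgdatagraph) share the same intensity rescaling before Base64 encoding: the data are clipped to [img_min, img_max] and stretched to the full 8-bit range. A small standalone sketch of that step, assuming img_max is strictly greater than img_min:

import numpy as np

def to_uint8(data: np.ndarray, img_min: float, img_max: float) -> np.ndarray:
    # clip to the display range, then rescale to 0-255 and cast for encoding
    clipped = np.clip(data, img_min, img_max)
    return ((clipped - img_min) / (img_max - img_min) * (2**8 - 1)).astype("uint8")

frame = np.array([[0.0, 0.5], [1.0, 2.0]])
print(to_uint8(frame, img_min=0.0, img_max=1.0))  # first row -> 0, 127; second row -> 255, 255
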
diff --git a/notebooks/__code/ipywe/myfileselector.py b/notebooks/__code/ipywe/myfileselector.py
index 8b2afab0..120817e2 100755
--- a/notebooks/__code/ipywe/myfileselector.py
+++ b/notebooks/__code/ipywe/myfileselector.py
@@ -17,11 +17,18 @@ class MyFileSelectorPanel:
# statement should change the width of the file selector. "width="
# doesn't appear to work in earlier versions.
select_layout = ipyw.Layout(width="99%", height="260px")
- select_multiple_layout = ipyw.Layout(width="99%", height="260px", display="flex", flex_flow="column")
+ select_multiple_layout = ipyw.Layout(
+ width="99%", height="260px", display="flex", flex_flow="column"
+ )
button_layout = ipyw.Layout(margin="5px 40px", border="1px solid gray")
- toolbar_button_layout = ipyw.Layout(margin="5px 10px", width="100px", border="1px solid gray")
+ toolbar_button_layout = ipyw.Layout(
+ margin="5px 10px", width="100px", border="1px solid gray"
+ )
toolbar_box_layout = ipyw.Layout(
- border="1px solid lightgrey", padding="3px", margin="5px 50px 5px 5px", width="100%"
+ border="1px solid lightgrey",
+ padding="3px",
+ margin="5px 50px 5px 5px",
+ width="100%",
)
label_layout = ipyw.Layout(width="250px")
layout = ipyw.Layout()
@@ -99,20 +106,34 @@ def createBody(self, curdir):
# toolbar on the top
# "jump to"
self.jumpto_input = jumpto_input = ipyw.Text(
- value=curdir, placeholder="", description="Location: ", layout=ipyw.Layout(width="100%")
+ value=curdir,
+ placeholder="",
+ description="Location: ",
+ layout=ipyw.Layout(width="100%"),
+ )
+ jumpto_button = ipyw.Button(
+ description="Jump", layout=self.toolbar_button_layout
)
- jumpto_button = ipyw.Button(description="Jump", layout=self.toolbar_button_layout)
jumpto_button.on_click(self.handle_jumpto)
- jumpto = ipyw.HBox(children=[jumpto_input, jumpto_button], layout=self.toolbar_box_layout)
+ jumpto = ipyw.HBox(
+ children=[jumpto_input, jumpto_button], layout=self.toolbar_box_layout
+ )
self.jumpto_button = jumpto_button
if self.newdir_toolbar_button:
# "new dir"
self.newdir_input = newdir_input = ipyw.Text(
- value="", placeholder="new dir name", description="New subdir: ", layout=ipyw.Layout(width="180px")
+ value="",
+ placeholder="new dir name",
+ description="New subdir: ",
+ layout=ipyw.Layout(width="180px"),
+ )
+ newdir_button = ipyw.Button(
+ description="Create", layout=self.toolbar_button_layout
)
- newdir_button = ipyw.Button(description="Create", layout=self.toolbar_button_layout)
newdir_button.on_click(self.handle_newdir)
- newdir = ipyw.HBox(children=[newdir_input, newdir_button], layout=self.toolbar_box_layout)
+ newdir = ipyw.HBox(
+ children=[newdir_input, newdir_button], layout=self.toolbar_box_layout
+ )
toolbar = ipyw.HBox(children=[jumpto, newdir])
else:
toolbar = ipyw.HBox(children=[jumpto])
@@ -137,11 +158,19 @@ def createBody(self, curdir):
if self.multiple:
value = []
self.select = ipyw.SelectMultiple(
- value=value, options=entries, description="Select", layout=self.select_multiple_layout
+ value=value,
+ options=entries,
+ description="Select",
+ layout=self.select_multiple_layout,
)
else:
value = entries[0]
- self.select = ipyw.Select(value=value, options=entries, description="Select", layout=self.select_layout)
+ self.select = ipyw.Select(
+ value=value,
+ options=entries,
+ description="Select",
+ layout=self.select_layout,
+ )
"""When ipywidgets 7.0 is released, the old way that the select or select multiple
widget was set up (see below) should work so long as self.select_layout is changed
to include the display="flex" and flex_flow="column" statements. In ipywidgets 6.0,
@@ -169,7 +198,9 @@ def createBody(self, curdir):
left_vbox = ipyw.VBox(left_widgets, layout=ipyw.Layout(width="80%"))
# right
# change directory button
- self.changedir = ipyw.Button(description="Change directory", layout=self.button_layout)
+ self.changedir = ipyw.Button(
+ description="Change directory", layout=self.button_layout
+ )
self.changedir.on_click(self.handle_changedir)
# select button
ok_layout = cloneLayout(self.button_layout)
@@ -180,7 +211,9 @@ def createBody(self, curdir):
right_vbox = ipyw.VBox(children=[self.changedir, self.ok])
select_panel = ipyw.HBox(
children=[left_vbox, right_vbox],
- layout=ipyw.Layout(border="1px solid lightgrey", margin="5px", padding="10px"),
+ layout=ipyw.Layout(
+ border="1px solid lightgrey", margin="5px", padding="10px"
+ ),
)
body = ipyw.VBox(children=[toolbar, select_panel], layout=self.layout)
self.footer.value = ""
@@ -191,7 +224,9 @@ def createFilterWidget(self):
self.filters.update(All=["*.*"])
self.cur_filter = self.cur_filter or self.filters[self.default_filter or "All"]
self.filter_widget = ipyw.Dropdown(
- options=self.filters, value=self.cur_filter, layout=ipyw.Layout(align_self="flex-end", width="15%")
+ options=self.filters,
+ value=self.cur_filter,
+ layout=ipyw.Layout(align_self="flex-end", width="15%"),
)
self.filter_widget.observe(self.handle_filter_changed, names="value")
return
@@ -207,7 +242,9 @@ def getFilteredEntries(self):
# filter out dirs, they will be added below
list_files = filter(lambda o: not os.path.isdir(o), list_files)
list_files = list(map(os.path.basename, list_files))
- list_dirs = [o for o in os.listdir(curdir) if os.path.isdir(os.path.join(curdir, o))]
+ list_dirs = [
+ o for o in os.listdir(curdir) if os.path.isdir(os.path.join(curdir, o))
+ ]
        self.footer.value += "" + " ".join(list_dirs) + ""
entries = list_dirs + list_files
return entries
@@ -418,7 +455,15 @@ def del_ftime(file_label):
class FileSelection:
next = None
- def __init__(self, working_dir="./", filter="", default_filter=None, next=None, instructions=None, multiple=True):
+ def __init__(
+ self,
+ working_dir="./",
+ filter="",
+ default_filter=None,
+ next=None,
+ instructions=None,
+ multiple=True,
+ ):
self.working_dir = working_dir
self.instuctions = instructions
self.filter = filter
@@ -431,7 +476,9 @@ def __init__(self, working_dir="./", filter="", default_filter=None, next=None,
def select_file_help(self, value):
import webbrowser
- webbrowser.open("https://neutronimaging.pages.ornl.gov/en/tutorial/notebooks/file_selector/#select_profile")
+ webbrowser.open(
+ "https://neutronimaging.pages.ornl.gov/en/tutorial/notebooks/file_selector/#select_profile"
+ )
def load_files(self, files):
o_norm = Normalization()
@@ -471,7 +518,10 @@ def select_data(self, check_shape=True):
)
else:
self.files_ui = fileselector.FileSelectorPanel(
- instruction=instructions, start_dir=self.working_dir, next=next, multiple=self.multiple
+ instruction=instructions,
+ start_dir=self.working_dir,
+ next=next,
+ multiple=self.multiple,
)
else:
@@ -487,7 +537,10 @@ def select_data(self, check_shape=True):
)
else:
self.files_ui = fileselector.FileSelectorPanel(
- instruction=instructions, start_dir=self.working_dir, next=next, multiple=self.multiple
+ instruction=instructions,
+ start_dir=self.working_dir,
+ next=next,
+ multiple=self.multiple,
)
self.files_ui.show()
@@ -554,14 +607,18 @@ def display_file_selector_from_home(ev):
list_buttons = []
if show_jump_to_share:
share_button = widgets.Button(
- description=f"Jump to {ipts} Shared Folder", button_style="success", layout=button_layout
+ description=f"Jump to {ipts} Shared Folder",
+ button_style="success",
+ layout=button_layout,
)
share_button.on_click(display_file_selector_from_shared)
list_buttons.append(share_button)
if show_jump_to_home:
home_button = widgets.Button(
- description="Jump to My Home Folder", button_style="success", layout=button_layout
+ description="Jump to My Home Folder",
+ button_style="success",
+ layout=button_layout,
)
home_button.on_click(display_file_selector_from_home)
list_buttons.append(home_button)
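
For context, the call sites being rewrapped in this file all drive the same FileSelectorPanel pattern: build the panel with an instruction, a start directory, a multiple-selection flag and an optional `next` callback, then show it. A usage sketch along the lines of those call sites, runnable only inside this repository's notebooks environment (the callback body and argument name are illustrative):

from __code.ipywe import fileselector

def on_files_selected(selected_files):
    # `selected_files` is whatever the panel passes to its `next` callback (a list when multiple=True)
    print(f"{len(selected_files)} file(s) selected")

panel = fileselector.FileSelectorPanel(
    instruction="Select Images...",
    start_dir="./",
    multiple=True,
    next=on_files_selected,
)
panel.show()
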
diff --git a/notebooks/__code/list_metadata_and_time_with_oncat.py b/notebooks/__code/list_metadata_and_time_with_oncat.py
index 9acbe0a0..68f9f924 100755
--- a/notebooks/__code/list_metadata_and_time_with_oncat.py
+++ b/notebooks/__code/list_metadata_and_time_with_oncat.py
@@ -21,17 +21,31 @@ def __init__(self):
self.oncat_session = _oncat.authentication()
if self.oncat_session is None:
- display(HTML('Wrong Password!'))
+ display(
+ HTML('Wrong Password!')
+ )
else:
- display(HTML('Valid Password!'))
+ display(
+ HTML(
+ 'Valid Password!'
+ )
+ )
def select_metadata(self, system=None, list_of_files=[]):
if not list_of_files:
- display(HTML('You need to select at least one file!'))
+ display(
+ HTML(
+ 'You need to select at least one file!'
+ )
+ )
return
if not system:
- display(HTML('No input folder selected!'))
+ display(
+ HTML(
+ 'No input folder selected!'
+ )
+ )
return
self.instrument = system.System.get_instrument_selected()
@@ -40,12 +54,19 @@ def select_metadata(self, system=None, list_of_files=[]):
self.first_file = list_of_files[0]
self.list_metadata_with_examples = self.retrieve_list_metadata_with_examples()
- display(HTML('CTRL + Click to select multiple rows!'))
+ display(
+ HTML(
+ 'CTRL + Click to select multiple rows!'
+ )
+ )
box1 = widgets.HBox(
[
- widgets.Label("Select Metadata To Retrieve", layout=widgets.Layout(width="20%")),
+ widgets.Label(
+ "Select Metadata To Retrieve", layout=widgets.Layout(width="20%")
+ ),
widgets.SelectMultiple(
- options=self.list_metadata_with_examples, layout=widgets.Layout(width="80%", height="100%")
+ options=self.list_metadata_with_examples,
+ layout=widgets.Layout(width="80%", height="100%"),
),
],
layout=widgets.Layout(height="500px"),
@@ -56,7 +77,9 @@ def select_metadata(self, system=None, list_of_files=[]):
def retrieve_list_metadata_with_examples(self):
list_metadata = self.retrieve_list_metadata()
raw_data = self.raw_oncat_metadata
- list_metadata_with_examples = ListMetadata.format_list_metadata_with_examples(list_metadata, raw_data)
+ list_metadata_with_examples = ListMetadata.format_list_metadata_with_examples(
+ list_metadata, raw_data
+ )
return list_metadata_with_examples
@staticmethod
@@ -69,7 +92,10 @@ def format_list_metadata_with_examples(list_metadata, raw_data):
def retrieve_list_metadata(self):
_data = oncat.GetEverything(
- instrument=self.instrument, facility=self.facility, run=self.first_file, oncat=self.oncat_session
+ instrument=self.instrument,
+ facility=self.facility,
+ run=self.first_file,
+ oncat=self.oncat_session,
)
self.raw_oncat_data = _data.datafiles
@@ -89,7 +115,9 @@ def create_sorted_dict_metadata(self, _data):
def export_ascii(self, output_folder):
list_files = self.list_of_files
projection = self.create_projection()
- output_ascii_file_name = ListMetadata.create_output_ascii_name(list_files, output_folder)
+ output_ascii_file_name = ListMetadata.create_output_ascii_name(
+ list_files, output_folder
+ )
o_metadata_selected = oncat.GetProjection(
instrument=self.instrument,
@@ -103,7 +131,12 @@ def export_ascii(self, output_folder):
name_metadata = self.create_metadata_name_row()
value_metadata = self.create_metadata_value_rows(list_files, metadata_selected)
- make_ascii_file(metadata=name_metadata, data=value_metadata, output_file_name=output_ascii_file_name, dim="1d")
+ make_ascii_file(
+ metadata=name_metadata,
+ data=value_metadata,
+ output_file_name=output_ascii_file_name,
+ dim="1d",
+ )
print("Done!")
display(
HTML(
@@ -116,10 +149,14 @@ def export_ascii(self, output_folder):
def create_metadata_value_rows(self, list_files, metadata_selected):
value_metadata = []
for _file in list_files:
- time_stamp = self.unify_timestamp_format(metadata_selected[_file]["ingested"])
+ time_stamp = self.unify_timestamp_format(
+ metadata_selected[_file]["ingested"]
+ )
_metadata = []
for _metadata_name in self.get_list_metadata_selected():
- _metadata.append(str(metadata_selected[_file]["metadata"][_metadata_name]))
+ _metadata.append(
+ str(metadata_selected[_file]["metadata"][_metadata_name])
+ )
row_string = "{}, {}, {}".format(_file, time_stamp, ", ".join(_metadata))
value_metadata.append(row_string)
return value_metadata
@@ -130,12 +167,17 @@ def unify_timestamp_format(self, old_timestamp):
return new_timestamp
def create_metadata_name_row(self):
- name_metadata = ["#filename, timestamp_user_format, " + ", ".join(self.get_list_metadata_selected())]
+ name_metadata = [
+ "#filename, timestamp_user_format, "
+ + ", ".join(self.get_list_metadata_selected())
+ ]
return name_metadata
@staticmethod
def create_output_ascii_name(list_files, output_folder):
- output_ascii_file_name = os.path.basename(os.path.dirname(list_files[0]) + "_metadata_report_from_oncat.txt")
+ output_ascii_file_name = os.path.basename(
+ os.path.dirname(list_files[0]) + "_metadata_report_from_oncat.txt"
+ )
output_folder = os.path.abspath(output_folder)
return os.path.join(output_folder, output_ascii_file_name)
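
The export_ascii path above assembles a one-line header naming the selected metadata plus a comma-joined value row per file before handing both to make_ascii_file. A minimal sketch of that row construction, with made-up file names, metadata names and values:

def build_rows(files, timestamps, metadata_names, metadata_per_file):
    # return (header_row, value_rows) in the same comma-separated layout as export_ascii
    header = ["#filename, timestamp_user_format, " + ", ".join(metadata_names)]
    rows = []
    for _file, _time in zip(files, timestamps):
        values = [str(metadata_per_file[_file][name]) for name in metadata_names]
        rows.append("{}, {}, {}".format(_file, _time, ", ".join(values)))
    return header, rows

header, rows = build_rows(
    files=["run_0001.tif"],                       # illustrative values, not real data
    timestamps=["05/12/2020 10:15:00"],
    metadata_names=["BL3:Det:Temp"],
    metadata_per_file={"run_0001.tif": {"BL3:Det:Temp": 21.4}},
)
print(header[0])  # #filename, timestamp_user_format, BL3:Det:Temp
print(rows[0])    # run_0001.tif, 05/12/2020 10:15:00, 21.4
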
diff --git a/notebooks/__code/load_images.py b/notebooks/__code/load_images.py
index c1ea5cdd..6abc542e 100755
--- a/notebooks/__code/load_images.py
+++ b/notebooks/__code/load_images.py
@@ -37,7 +37,10 @@ def select_images(self, use_next=False, virtual_load=False):
# display(HTML('Select the images you want to work on!'))
self.list_images_ui = fileselector.FileSelectorPanel(
- instruction="Select Images...", multiple=True, next=next, start_dir=self.working_dir
+ instruction="Select Images...",
+ multiple=True,
+ next=next,
+ start_dir=self.working_dir,
)
self.list_images_ui.show()
diff --git a/notebooks/__code/match_images_shapes/load_images.py b/notebooks/__code/match_images_shapes/load_images.py
index 6ca060f3..9d43e33a 100755
--- a/notebooks/__code/match_images_shapes/load_images.py
+++ b/notebooks/__code/match_images_shapes/load_images.py
@@ -27,9 +27,16 @@ def select_images(self, use_next=False):
next = self.load_images
else:
next = None
- display(HTML('Select the images you want to work on!'))
+ display(
+ HTML(
+ 'Select the images you want to work on!'
+ )
+ )
self.list_images_ui = fileselector.FileSelectorPanel(
- instruction="Select Images...", multiple=True, next=next, start_dir=self.working_dir
+ instruction="Select Images...",
+ multiple=True,
+ next=next,
+ start_dir=self.working_dir,
)
self.list_images_ui.show()
diff --git a/notebooks/__code/match_images_shapes/main.py b/notebooks/__code/match_images_shapes/main.py
index 185b8be3..8e8bbd1d 100755
--- a/notebooks/__code/match_images_shapes/main.py
+++ b/notebooks/__code/match_images_shapes/main.py
@@ -33,7 +33,10 @@ def display_available_shapes(self):
self.dict_shapes[f"{_height}, {_width}"] = _shape
vertical_layout = widgets.VBox(
- [widgets.Label("Available shapes (height, width)"), widgets.RadioButtons(options=self.dict_shapes.keys())]
+ [
+ widgets.Label("Available shapes (height, width)"),
+ widgets.RadioButtons(options=self.dict_shapes.keys()),
+ ]
)
display(vertical_layout)
self.shape_dropdown_ui = vertical_layout.children[1]
@@ -46,7 +49,10 @@ def select_output_folder(self):
)
self.output_folder_ui = fileselector.FileSelectorPanel(
- instruction="Select Output Folder ...", start_dir=self.working_dir, type="directory", next=self.export
+ instruction="Select Output Folder ...",
+ start_dir=self.working_dir,
+ type="directory",
+ next=self.export,
)
self.output_folder_ui.show()
@@ -60,7 +66,9 @@ def export(self, output_folder):
format_selected = self.shape_dropdown_ui.value
height, width = self.dict_shapes[format_selected]
- new_output_folder = os.path.join(output_folder, f"{source_folder_name}_height{height}px_width{width}px")
+ new_output_folder = os.path.join(
+ output_folder, f"{source_folder_name}_height{height}px_width{width}px"
+ )
make_or_reset_folder(new_output_folder)
@@ -89,4 +97,10 @@ def export(self, output_folder):
w.value = _index + 1
w.close()
- display(HTML('Images created in ' + new_output_folder + ""))
+ display(
+ HTML(
+ 'Images created in '
+ + new_output_folder
+ + ""
+ )
+ )
diff --git a/notebooks/__code/math_images.py b/notebooks/__code/math_images.py
index 21bc8be1..7746a023 100755
--- a/notebooks/__code/math_images.py
+++ b/notebooks/__code/math_images.py
@@ -17,18 +17,24 @@ def __init__(self, working_dir=""):
def select_files(self):
self.files_list_widget = fileselector.FileSelectorPanel(
- instruction="select images to operate on", start_dir=self.working_dir, multiple=True
+ instruction="select images to operate on",
+ start_dir=self.working_dir,
+ multiple=True,
)
self.files_list_widget.show()
def select_target_image(self):
self.target_file = fileselector.FileSelectorPanel(
- instruction="select images to use in operation", start_dir=self.working_dir, multiple=False
+ instruction="select images to use in operation",
+ start_dir=self.working_dir,
+ multiple=False,
)
self.target_file.show()
def which_math(self):
- self.math_method = widgets.RadioButtons(options=["substract", "add"], value="substract")
+ self.math_method = widgets.RadioButtons(
+ options=["substract", "add"], value="substract"
+ )
display(self.math_method)
def recap(self):
diff --git a/notebooks/__code/mcp_chips_corrector/alignment.py b/notebooks/__code/mcp_chips_corrector/alignment.py
index bf59db86..37b095c6 100755
--- a/notebooks/__code/mcp_chips_corrector/alignment.py
+++ b/notebooks/__code/mcp_chips_corrector/alignment.py
@@ -3,7 +3,12 @@
import numpy as np
from scipy import interpolate
-from __code.mcp_chips_corrector import CHIP_CORRECTION, CHIP_GAP, MCP_LOW_MODE, NBR_OF_EDGES_PIXEL_TO_NOT_USE
+from __code.mcp_chips_corrector import (
+ CHIP_CORRECTION,
+ CHIP_GAP,
+ MCP_LOW_MODE,
+ NBR_OF_EDGES_PIXEL_TO_NOT_USE,
+)
class Alignment:
@@ -33,30 +38,65 @@ def fill_gaps(self, moved_image=None):
logging.info("--> filling gaps")
- self._fix_vertical_gap(moved_image=moved_image, first_chip_index=1, second_chip_index=2, image_mode="low")
-
- self._fix_vertical_gap(moved_image=moved_image, first_chip_index=3, second_chip_index=4, image_mode="low")
-
- self._fix_horizontal_gap(moved_image=moved_image, first_chip_index=1, second_chip_index=3, image_mode="low")
-
- self._fix_horizontal_gap(moved_image=moved_image, first_chip_index=2, second_chip_index=4, image_mode="low")
+ self._fix_vertical_gap(
+ moved_image=moved_image,
+ first_chip_index=1,
+ second_chip_index=2,
+ image_mode="low",
+ )
+
+ self._fix_vertical_gap(
+ moved_image=moved_image,
+ first_chip_index=3,
+ second_chip_index=4,
+ image_mode="low",
+ )
+
+ self._fix_horizontal_gap(
+ moved_image=moved_image,
+ first_chip_index=1,
+ second_chip_index=3,
+ image_mode="low",
+ )
+
+ self._fix_horizontal_gap(
+ moved_image=moved_image,
+ first_chip_index=2,
+ second_chip_index=4,
+ image_mode="low",
+ )
return moved_image
- def _fix_vertical_gap(self, moved_image=None, first_chip_index=1, second_chip_index=2, image_mode="low"):
- logging.info(f"---> working on vertical gap chip{first_chip_index}/chip{second_chip_index}")
+ def _fix_vertical_gap(
+ self,
+ moved_image=None,
+ first_chip_index=1,
+ second_chip_index=2,
+ image_mode="low",
+ ):
+ logging.info(
+ f"---> working on vertical gap chip{first_chip_index}/chip{second_chip_index}"
+ )
chip_a = self.get_chip(chip_index=first_chip_index)
chip_b = self.get_chip(chip_index=second_chip_index)
size_of_gap = CHIP_GAP[image_mode]
- x_axis_left = np.zeros(self.chip_height) + self.chip_width - 1 - NBR_OF_EDGES_PIXEL_TO_NOT_USE
+ x_axis_left = (
+ np.zeros(self.chip_height)
+ + self.chip_width
+ - 1
+ - NBR_OF_EDGES_PIXEL_TO_NOT_USE
+ )
y_axis_left = np.arange(self.chip_height)
if (first_chip_index == 1) and (second_chip_index == 2):
global_y_axis_left = y_axis_left
elif (first_chip_index == 3) and (second_chip_index == 4):
- global_y_axis_left = np.arange(self.chip_height) + self.chip_height + size_of_gap["yoffset"]
+ global_y_axis_left = (
+ np.arange(self.chip_height) + self.chip_height + size_of_gap["yoffset"]
+ )
x_axis_right = np.zeros(self.chip_height) + NBR_OF_EDGES_PIXEL_TO_NOT_USE
y_axis_right = y_axis_left
@@ -79,26 +119,45 @@ def _fix_vertical_gap(self, moved_image=None, first_chip_index=1, second_chip_in
# logging.debug(f"-----> x0:{x0}, x1:{x1}, value_x0:{intensity_left}, value_x1:{intensity_right}")
# logging.debug(f"-----> list_x_gap: {list_x_gap}")
list_intensity_gap = Alignment.get_interpolated_value(
- x0=x0, x1=x1, value_x0=intensity_left, value_x1=intensity_right, list_value_x=list_x_gap
+ x0=x0,
+ x1=x1,
+ value_x0=intensity_left,
+ value_x1=intensity_right,
+ list_value_x=list_x_gap,
)
# logging.debug(f"------> list_intensity_gap: {list_intensity_gap}")
for _x, _intensity in zip(list_x_gap, list_intensity_gap, strict=False):
moved_image[y, _x] = _intensity
- def _fix_horizontal_gap(self, moved_image=None, first_chip_index=1, second_chip_index=3, image_mode="low"):
- logging.info(f"---> working on horizontal gap chip{first_chip_index}/chip{second_chip_index}")
+ def _fix_horizontal_gap(
+ self,
+ moved_image=None,
+ first_chip_index=1,
+ second_chip_index=3,
+ image_mode="low",
+ ):
+ logging.info(
+ f"---> working on horizontal gap chip{first_chip_index}/chip{second_chip_index}"
+ )
chip_a = self.get_chip(chip_index=first_chip_index)
chip_b = self.get_chip(chip_index=second_chip_index)
size_of_gap = CHIP_GAP[image_mode]
x_axis_top = np.arange(self.chip_width)
- y_axis_top = np.zeros(self.chip_width) + self.chip_height - 1 - NBR_OF_EDGES_PIXEL_TO_NOT_USE
+ y_axis_top = (
+ np.zeros(self.chip_width)
+ + self.chip_height
+ - 1
+ - NBR_OF_EDGES_PIXEL_TO_NOT_USE
+ )
if (first_chip_index == 1) and (second_chip_index == 3):
global_x_axis_top = x_axis_top
elif (first_chip_index == 2) and (second_chip_index == 4):
- global_x_axis_top = np.arange(self.chip_width) + self.chip_width + size_of_gap["xoffset"]
+ global_x_axis_top = (
+ np.arange(self.chip_width) + self.chip_width + size_of_gap["xoffset"]
+ )
x_axis_bottom = x_axis_top
y_axis_bottom = np.zeros(self.chip_width) + NBR_OF_EDGES_PIXEL_TO_NOT_USE
@@ -118,10 +177,16 @@ def _fix_horizontal_gap(self, moved_image=None, first_chip_index=1, second_chip_
y1 = y_top + size_of_gap["yoffset"] + 1 + 2 * NBR_OF_EDGES_PIXEL_TO_NOT_USE
list_y_gap = np.arange(y0 + 1, y1)
- logging.debug(f"-----> y0:{y0}, y1:{y1}, value_y0:{intensity_top}, value_y1:{intensity_bottom}")
+ logging.debug(
+ f"-----> y0:{y0}, y1:{y1}, value_y0:{intensity_top}, value_y1:{intensity_bottom}"
+ )
logging.debug(f"-----> list_y_gap: {list_y_gap}")
list_intensity_gap = Alignment.get_interpolated_value(
- x0=y0, x1=y1, value_x0=intensity_top, value_x1=intensity_bottom, list_value_x=list_y_gap
+ x0=y0,
+ x1=y1,
+ value_x0=intensity_top,
+ value_x1=intensity_bottom,
+ list_value_x=list_y_gap,
)
logging.debug(f"------> list_intensity_gap: {list_intensity_gap}")
for _y, _intensity in zip(list_y_gap, list_intensity_gap, strict=False):
@@ -133,7 +198,10 @@ def move_chips(self, input_image=None):
if input_image is None:
raise ValueError("no input image provided to move_chips!")
- image_height, image_width = self.parent.image_size.height, self.parent.image_size.width
+ image_height, image_width = (
+ self.parent.image_size.height,
+ self.parent.image_size.width,
+ )
if image_height == MCP_LOW_MODE:
mode = "low"
@@ -150,26 +218,46 @@ def move_chips(self, input_image=None):
chip3 = self.get_chip(chip_index=3)
chip4 = self.get_chip(chip_index=4)
- list_xoffset = [CHIP_CORRECTION[mode][key]["xoffset"] for key in CHIP_CORRECTION[mode].keys()]
+ list_xoffset = [
+ CHIP_CORRECTION[mode][key]["xoffset"]
+ for key in CHIP_CORRECTION[mode].keys()
+ ]
max_xoffset = np.max(list_xoffset)
- list_yoffset = [CHIP_CORRECTION[mode][key]["yoffset"] for key in CHIP_CORRECTION[mode].keys()]
+ list_yoffset = [
+ CHIP_CORRECTION[mode][key]["yoffset"]
+ for key in CHIP_CORRECTION[mode].keys()
+ ]
max_yoffset = np.max(list_yoffset)
new_image = np.zeros((image_height + max_yoffset, image_width + max_xoffset))
new_image[0:chip_height, 0:chip_width] = chip1
new_image[
- CHIP_CORRECTION[mode][2]["yoffset"] : CHIP_CORRECTION[mode][2]["yoffset"] + chip_height,
- chip_width + CHIP_CORRECTION[mode][2]["xoffset"] : CHIP_CORRECTION[mode][2]["xoffset"] + 2 * chip_width,
+ CHIP_CORRECTION[mode][2]["yoffset"] : CHIP_CORRECTION[mode][2]["yoffset"]
+ + chip_height,
+ chip_width + CHIP_CORRECTION[mode][2]["xoffset"] : CHIP_CORRECTION[mode][2][
+ "xoffset"
+ ]
+ + 2 * chip_width,
] = chip2
new_image[
- CHIP_CORRECTION[mode][3]["yoffset"] + chip_height : CHIP_CORRECTION[mode][3]["yoffset"] + 2 * chip_height,
- CHIP_CORRECTION[mode][3]["xoffset"] : CHIP_CORRECTION[mode][3]["xoffset"] + chip_width,
+ CHIP_CORRECTION[mode][3]["yoffset"] + chip_height : CHIP_CORRECTION[mode][
+ 3
+ ]["yoffset"]
+ + 2 * chip_height,
+ CHIP_CORRECTION[mode][3]["xoffset"] : CHIP_CORRECTION[mode][3]["xoffset"]
+ + chip_width,
] = chip3
new_image[
- CHIP_CORRECTION[mode][4]["yoffset"] + chip_height : CHIP_CORRECTION[mode][4]["yoffset"] + 2 * chip_height,
- CHIP_CORRECTION[mode][4]["xoffset"] + chip_width : CHIP_CORRECTION[mode][4]["xoffset"] + 2 * chip_width,
+ CHIP_CORRECTION[mode][4]["yoffset"] + chip_height : CHIP_CORRECTION[mode][
+ 4
+ ]["yoffset"]
+ + 2 * chip_height,
+ CHIP_CORRECTION[mode][4]["xoffset"] + chip_width : CHIP_CORRECTION[mode][4][
+ "xoffset"
+ ]
+ + 2 * chip_width,
] = chip4
logging.debug(f"---> np.shape(new_image): {np.shape(new_image)}")
@@ -179,7 +267,10 @@ def move_chips(self, input_image=None):
def get_chip(self, chip_index=1):
raw_image = self.raw_image
- image_height, image_width = self.parent.image_size.height, self.parent.image_size.width
+ image_height, image_width = (
+ self.parent.image_size.height,
+ self.parent.image_size.width,
+ )
chip_width = self.chip_width
chip_height = self.chip_height
@@ -197,7 +288,10 @@ def get_chip(self, chip_index=1):
raise ValueError("chip index does not exist!")
def get_chip_size(self):
- image_height, image_width = self.parent.image_size.height, self.parent.image_size.width
+ image_height, image_width = (
+ self.parent.image_size.height,
+ self.parent.image_size.width,
+ )
mid_width = int(image_width / 2)
mid_height = int(image_height / 2)
@@ -205,7 +299,9 @@ def get_chip_size(self):
return {"width": mid_width, "height": mid_height}
@staticmethod
- def get_interpolated_value(x0=0, x1=1, value_x0=5, value_x1=10, list_value_x=[np.nan]):
+ def get_interpolated_value(
+ x0=0, x1=1, value_x0=5, value_x1=10, list_value_x=[np.nan]
+ ):
# logging.debug(f"x0:{x0}, x1:{x1}. value_x0:{value_x0}, value_x1:{value_x1}, list_value:{list_value_x}")
A = (x0, x1)
B = (value_x0, value_x1)
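
The gap-filling logic reformatted in this file reduces to a 1-D linear interpolation between the last good pixel on one chip and the first good pixel on the next; get_interpolated_value builds that interpolant from the two anchor points. A standalone sketch of the same idea, assuming scipy's interp1d is the interpolator behind it:

import numpy as np
from scipy import interpolate

def interpolate_gap(x0, x1, value_x0, value_x1, list_value_x):
    # assumption: a plain linear interp1d between the two anchor intensities
    f = interpolate.interp1d((x0, x1), (value_x0, value_x1))
    return f(list_value_x)

# intensity 100 at x=10 on one chip, 120 at x=15 on the next chip;
# fill the gap pixels 11..14 on the straight line between the two anchors
print(interpolate_gap(10, 15, 100.0, 120.0, np.arange(11, 15)))  # [104. 108. 112. 116.]
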
diff --git a/notebooks/__code/mcp_chips_corrector/event_handler.py b/notebooks/__code/mcp_chips_corrector/event_handler.py
index dee46bff..fd01cb7d 100755
--- a/notebooks/__code/mcp_chips_corrector/event_handler.py
+++ b/notebooks/__code/mcp_chips_corrector/event_handler.py
@@ -53,13 +53,16 @@ def mcp_alignment_correction(self):
if not first_update:
_histo_widget.setLevels(
- self.parent.alignment_view_histogram_level[0], self.parent.alignment_view_histogram_level[1]
+ self.parent.alignment_view_histogram_level[0],
+ self.parent.alignment_view_histogram_level[1],
)
_view_box.setState(_state)
def check_auto_fill_checkBox_widget(self):
- self.parent.ui.auto_fill_gaps_checkBox.setEnabled(self.parent.ui.apply_chips_alignment_correction.isChecked())
+ self.parent.ui.auto_fill_gaps_checkBox.setEnabled(
+ self.parent.ui.apply_chips_alignment_correction.isChecked()
+ )
def display_setup_image(self):
setup_image = self.parent.o_corrector.integrated_data
@@ -95,7 +98,13 @@ def display_chip_border(self, chip_index=0):
_pen = QPen()
_pen.setColor(COLOR_CONTOUR)
_pen.setWidthF(0.01)
- _roi_id = pg.ROI([x0, y0], [contour_width, contour_height], pen=_pen, scaleSnap=True, movable=False)
+ _roi_id = pg.ROI(
+ [x0, y0],
+ [contour_width, contour_height],
+ pen=_pen,
+ scaleSnap=True,
+ movable=False,
+ )
self.parent.setup_image_view.addItem(_roi_id)
self.parent.contour_id = _roi_id
@@ -140,7 +149,9 @@ def plot_profile(self):
y0 = self.parent.profile[profile_type]["y0"]
width = self.parent.profile[profile_type]["width"]
height = self.parent.profile[profile_type]["height"]
- nbr_pixels_to_exclude_on_each_side_of_chips_gap = self.parent.nbr_pixels_to_exclude_on_each_side_of_chips_gap
+ nbr_pixels_to_exclude_on_each_side_of_chips_gap = (
+ self.parent.nbr_pixels_to_exclude_on_each_side_of_chips_gap
+ )
data = self.parent.integrated_data[y0 : y0 + height, x0 : x0 + width]
if profile_type == "horizontal":
@@ -162,14 +173,24 @@ def plot_profile(self):
index_of_chip = self.o_get.get_index_of_chip_to_correct()
color_pen = Get.get_color_of_pen(
- gap_index=gap_index, index_of_chip=index_of_chip, profile_type=profile_type, x0=x0, y0=y0, x_axis=x_axis
+ gap_index=gap_index,
+ index_of_chip=index_of_chip,
+ profile_type=profile_type,
+ x0=x0,
+ y0=y0,
+ x_axis=x_axis,
)
self.coefficient_corrector_can_be_calculated = False
if len(where_is_gap_in_x_axis[0] > 0):
"the inter chips space falls within the profile selected"
- x_axis_other_chip, x_axis_working_chip, y_axis_other_chip, y_axis_working_chip = Get.get_x_y_ranges(
+ (
+ x_axis_other_chip,
+ x_axis_working_chip,
+ y_axis_other_chip,
+ y_axis_working_chip,
+ ) = Get.get_x_y_ranges(
index_of_chip,
profile_data,
profile_type,
@@ -183,19 +204,27 @@ def plot_profile(self):
self.x_axis_working_chip = x_axis_working_chip
self.x_axis_other_chip = x_axis_other_chip
- self.parent.profile_view.plot(x_axis_working_chip, y_axis_working_chip, pen=color_pen, symbol="o")
- self.parent.profile_view.plot(x_axis_other_chip, y_axis_other_chip, pen="w", symbol="o")
+ self.parent.profile_view.plot(
+ x_axis_working_chip, y_axis_working_chip, pen=color_pen, symbol="o"
+ )
+ self.parent.profile_view.plot(
+ x_axis_other_chip, y_axis_other_chip, pen="w", symbol="o"
+ )
if color_pen == "r":
self.coefficient_corrector_can_be_calculated = True
else:
- self.parent.profile_view.plot(x_axis, profile_data, pen=color_pen, symbol="o")
+ self.parent.profile_view.plot(
+ x_axis, profile_data, pen=color_pen, symbol="o"
+ )
pen = QPen()
pen.setColor(INTER_CHIPS)
pen.setWidthF(0.3)
- line = pg.InfiniteLine(pos=self.parent.image_size.width / 2, angle=90, pen=pen, label="Inter Chips")
+ line = pg.InfiniteLine(
+ pos=self.parent.image_size.width / 2, angle=90, pen=pen, label="Inter Chips"
+ )
self.parent.profile_view.addItem(line)
def calculate_coefficient_corrector(self):
@@ -243,7 +272,9 @@ def with_correction_tab(self):
else:
self.parent.ui.contrast_tabWidget.setTabEnabled(1, True)
- image_corrected = self.calculate_contrast_image(raw_image=self.parent.setup_live_image)
+ image_corrected = self.calculate_contrast_image(
+ raw_image=self.parent.setup_live_image
+ )
self.parent.corrected_live_image = image_corrected
self.display_contrast_image()
@@ -251,7 +282,9 @@ def calculate_contrast_image(self, raw_image=None):
setup_image = copy.deepcopy(raw_image)
if self.parent.ui.apply_contrast_correction_checkBox.isChecked():
- coefficient = float(str(self.parent.ui.coefficient_corrector_lineEdit.text()))
+ coefficient = float(
+ str(self.parent.ui.coefficient_corrector_lineEdit.text())
+ )
index_of_chip_to_correct = self.o_get.get_index_of_chip_to_correct()
gap_index = self.parent.image_size.gap_index
@@ -298,13 +331,18 @@ def display_contrast_image(self):
_view_box.setState(_state)
if not first_update:
- _histo_widget.setLevels(self.parent.corrected_histogram_level[0], self.parent.corrected_histogram_level[1])
+ _histo_widget.setLevels(
+ self.parent.corrected_histogram_level[0],
+ self.parent.corrected_histogram_level[1],
+ )
def update_result_tab(self):
if str(self.parent.ui.coefficient_corrector_lineEdit.text()) == "N/A":
image_corrected = self.parent.setup_live_image
else:
- image_corrected = self.calculate_contrast_image(raw_image=self.parent.setup_live_image)
+ image_corrected = self.calculate_contrast_image(
+ raw_image=self.parent.setup_live_image
+ )
o_align = Alignment(parent=self.parent, raw_image=image_corrected)
_image = o_align.correct()
_image = np.transpose(_image)
diff --git a/notebooks/__code/mcp_chips_corrector/export.py b/notebooks/__code/mcp_chips_corrector/export.py
index fc6f2975..0e8b3ce8 100755
--- a/notebooks/__code/mcp_chips_corrector/export.py
+++ b/notebooks/__code/mcp_chips_corrector/export.py
@@ -25,8 +25,12 @@ def correct_all_images(self):
QGuiApplication.processEvents() # to close QFileDialog
if export_folder:
- base_input_folder = os.path.basename(os.path.abspath(self.parent.o_corrector.input_working_folder))
- export_folder = os.path.join(export_folder, base_input_folder + "_corrected")
+ base_input_folder = os.path.basename(
+ os.path.abspath(self.parent.o_corrector.input_working_folder)
+ )
+ export_folder = os.path.join(
+ export_folder, base_input_folder + "_corrected"
+ )
make_or_reset_folder(export_folder)
logging.info("exporting all corrected images:")
@@ -48,12 +52,18 @@ def correct_all_images(self):
short_file_name = os.path.basename(working_list_files[_index_file])
file_extension = get_file_extension(short_file_name)
o_norm.data["sample"]["file_name"] = [short_file_name]
- o_norm.export(folder=export_folder, data_type="sample", file_type=file_extension)
+ o_norm.export(
+ folder=export_folder, data_type="sample", file_type=file_extension
+ )
self.parent.eventProgress.setValue(_index_file + 1)
QGuiApplication.processEvents()
- logging.info(f"-> exported file: {self.parent.o_corrector.working_list_files[_index_file]}")
+ logging.info(
+ f"-> exported file: {self.parent.o_corrector.working_list_files[_index_file]}"
+ )
- self.parent.ui.statusbar.showMessage(f"Corrected images are in folder {export_folder}", 10000)
+ self.parent.ui.statusbar.showMessage(
+ f"Corrected images are in folder {export_folder}", 10000
+ )
self.parent.ui.statusbar.setStyleSheet("color: blue")
self.parent.eventProgress.setVisible(False)
diff --git a/notebooks/__code/mcp_chips_corrector/get.py b/notebooks/__code/mcp_chips_corrector/get.py
index 76d581f8..4c8e81ca 100755
--- a/notebooks/__code/mcp_chips_corrector/get.py
+++ b/notebooks/__code/mcp_chips_corrector/get.py
@@ -80,10 +80,17 @@ def get_x_y_ranges(
x_axis_other_chip = x_axis[0 : where_is_gap - delta]
y_axis_other_chip = profile_data[0 : where_is_gap - delta]
- return x_axis_other_chip, x_axis_working_chip, y_axis_other_chip, y_axis_working_chip
+ return (
+ x_axis_other_chip,
+ x_axis_working_chip,
+ y_axis_other_chip,
+ y_axis_working_chip,
+ )
@staticmethod
- def get_color_of_pen(gap_index=0, index_of_chip=0, profile_type="horizontal", x0=0, y0=0, x_axis=None):
+ def get_color_of_pen(
+ gap_index=0, index_of_chip=0, profile_type="horizontal", x0=0, y0=0, x_axis=None
+ ):
"""
This method will give the color of the pen to use 'w' (white) or 'r' (red) according to the position of
the profile.
diff --git a/notebooks/__code/mcp_chips_corrector/interface.py b/notebooks/__code/mcp_chips_corrector/interface.py
index c8951578..68cec0fb 100755
--- a/notebooks/__code/mcp_chips_corrector/interface.py
+++ b/notebooks/__code/mcp_chips_corrector/interface.py
@@ -60,7 +60,8 @@ def __init__(self, parent=None, working_dir="", o_corrector=None):
super(Interface, self).__init__(parent)
ui_full_path = os.path.join(
- os.path.dirname(os.path.dirname(os.path.dirname(__file__))), os.path.join("ui", "ui_mcp_chips_corrector.ui")
+ os.path.dirname(os.path.dirname(os.path.dirname(__file__))),
+ os.path.join("ui", "ui_mcp_chips_corrector.ui"),
)
self.ui = load_ui(ui_full_path, baseinstance=self)
diff --git a/notebooks/__code/mcp_chips_corrector/mcp_chips_corrector.py b/notebooks/__code/mcp_chips_corrector/mcp_chips_corrector.py
index 3548ea34..9c21c0a2 100755
--- a/notebooks/__code/mcp_chips_corrector/mcp_chips_corrector.py
+++ b/notebooks/__code/mcp_chips_corrector/mcp_chips_corrector.py
@@ -31,7 +31,9 @@ def load_data(self, folder_selected):
full_list_files = glob.glob(os.path.join(folder_selected, "*.tif*"))
full_list_files.sort()
- working_list_files = [file for file in full_list_files if "_SummedImg.fits" not in file]
+ working_list_files = [
+ file for file in full_list_files if "_SummedImg.fits" not in file
+ ]
o_norm = Normalization()
o_norm.load(file=working_list_files, notebook=True)
diff --git a/notebooks/__code/metadata_ascii_parser.py b/notebooks/__code/metadata_ascii_parser.py
index b284f477..25f388f5 100755
--- a/notebooks/__code/metadata_ascii_parser.py
+++ b/notebooks/__code/metadata_ascii_parser.py
@@ -9,7 +9,11 @@
from IPython.display import HTML, display
from ipywidgets import widgets
-from __code.file_handler import force_file_extension, get_file_extension, make_ascii_file_from_string
+from __code.file_handler import (
+ force_file_extension,
+ get_file_extension,
+ make_ascii_file_from_string,
+)
from __code.ipywe import fileselector
@@ -21,7 +25,10 @@ def __init__(self, working_dir="./"):
def select_folder(self, instruction="Select Input Folder ...", next=None):
self.input_folder_ui = fileselector.FileSelectorPanel(
- instruction=instruction, start_dir=self.working_dir, type="directory", next=next
+ instruction=instruction,
+ start_dir=self.working_dir,
+ type="directory",
+ next=next,
)
self.input_folder_ui.show()
@@ -31,7 +38,9 @@ def save_metadata_file(self, filename):
def select_metadata_file(self):
_instruction = "Select Metadata File ..."
self.metadata_ui = fileselector.FileSelectorPanel(
- instruction=_instruction, start_dir=self.working_dir, next=self.save_metadata_file
+ instruction=_instruction,
+ start_dir=self.working_dir,
+ next=self.save_metadata_file,
)
self.metadata_ui.show()
@@ -46,17 +55,25 @@ def __init__(
end_of_metadata_after_how_many_lines_from_reference_line=1,
):
self.filename = filename
- self.reference_line_showing_end_of_metadata = reference_line_showing_end_of_metadata
+ self.reference_line_showing_end_of_metadata = (
+ reference_line_showing_end_of_metadata
+ )
self.end_of_metadata_after_how_many_lines_from_reference_line = (
end_of_metadata_after_how_many_lines_from_reference_line
)
def calculate_nbr_row_metadata(self):
- file_handler = codecs.open(self.filename, "r", encoding="utf_8", errors="ignore")
+ file_handler = codecs.open(
+ self.filename, "r", encoding="utf_8", errors="ignore"
+ )
for _row_index, _row in enumerate(file_handler.readlines()):
if self.reference_line_showing_end_of_metadata in _row:
- self.nbr_row_metadata = _row_index + self.end_of_metadata_after_how_many_lines_from_reference_line + 1
+ self.nbr_row_metadata = (
+ _row_index
+ + self.end_of_metadata_after_how_many_lines_from_reference_line
+ + 1
+ )
class TimeInfoColumn:
@@ -99,7 +116,9 @@ def __init__(
end_of_metadata_after_how_many_lines_from_reference_line=1,
):
self.filename = filename
- self.reference_line_showing_end_of_metadata = reference_line_showing_end_of_metadata
+ self.reference_line_showing_end_of_metadata = (
+ reference_line_showing_end_of_metadata
+ )
self.end_of_metadata_after_how_many_lines_from_reference_line = (
end_of_metadata_after_how_many_lines_from_reference_line
)
@@ -115,15 +134,22 @@ def __init__(
def add_acquisition_started_time_to_timestamp(self):
str_acquisition_time = self.metadata_dict["Acquisition started on"]["value"]
- timestamp = time.mktime(datetime.datetime.strptime(str_acquisition_time, "%m/%d/%Y %H:%M:%S").timetuple())
+ timestamp = time.mktime(
+ datetime.datetime.strptime(
+ str_acquisition_time, "%m/%d/%Y %H:%M:%S"
+ ).timetuple()
+ )
new_column_values = self.o_pd.index.values + timestamp
self.o_pd = self.o_pd.set_index(new_column_values)
# user friendly time stamp format
user_format = [
- datetime.datetime.fromtimestamp(_time).strftime("%m/%d/%Y %H:%M:%S") for _time in self.o_pd.index.values
+ datetime.datetime.fromtimestamp(_time).strftime("%m/%d/%Y %H:%M:%S")
+ for _time in self.o_pd.index.values
]
- self.o_pd["timestamp_user_format"] = pd.Series(user_format, index=self.o_pd.index)
+ self.o_pd["timestamp_user_format"] = pd.Series(
+ user_format, index=self.o_pd.index
+ )
def set_time_info_as_index(self):
time_info_column = self.time_info_column
@@ -209,11 +235,15 @@ def read_specific_metadata(self):
for _keys in self.metadata_dict.keys():
for _line in self.metadata:
if _keys in _line:
- result = _line.split(self.metadata_dict[_keys]["split1"]) # 1st split
+ result = _line.split(
+ self.metadata_dict[_keys]["split1"]
+ ) # 1st split
if not self.metadata_dict[_keys]["split2"]:
self.metadata_dict[_keys]["value"] = result[1].strip()
else: # 2nd split
- [value, units] = result[1].strip().split(self.metadata_dict[_keys]["split2"])
+ [value, units] = (
+ result[1].strip().split(self.metadata_dict[_keys]["split2"])
+ )
self.metadata_dict[_keys]["value"] = value.strip()
self.metadata_dict[_keys]["units"] = units.strip()
@@ -264,8 +294,12 @@ def get_list_columns(self):
def parse(self):
if self.meta_type == "mpt":
- time_info_column = TimeInfoColumn(label=self.time_label, index=self.time_index)
- o_mpt = MPTFileParser(filename=self.filename, time_info_column=time_info_column)
+ time_info_column = TimeInfoColumn(
+ label=self.time_label, index=self.time_index
+ )
+ o_mpt = MPTFileParser(
+ filename=self.filename, time_info_column=time_info_column
+ )
self.meta = o_mpt
else:
raise NotImplementedError("This file format is not supported!")
@@ -275,7 +309,9 @@ def keep_only_columns_of_data_of_interest(self, list_columns_names=[]):
list_columns_names = list(self.box.children[1].value)
if list_columns_names:
- self.data_to_keep = self.meta.keep_only_columns_of_data_of_interest(list_columns_names=list_columns_names)
+ self.data_to_keep = self.meta.keep_only_columns_of_data_of_interest(
+ list_columns_names=list_columns_names
+ )
else:
self.data_to_keep = []
@@ -310,7 +346,10 @@ def select_data_to_keep(self, default_selection=[-1]):
layout=widgets.Layout(width="30%"),
),
widgets.SelectMultiple(
- options=list_columns, value=default_value, rows=10, layout=widgets.Layout(width="30%")
+ options=list_columns,
+ value=default_value,
+ rows=10,
+ layout=widgets.Layout(width="30%"),
),
]
)
@@ -321,7 +360,9 @@ def select_output_location(self, default_filename=""):
[filename, ext] = os.path.splitext(self.short_filename)
[_, nbr_columns] = np.shape(self.data_to_keep)
p = inflect.engine()
- default_filename = filename + f"_{nbr_columns}" + p.plural("column", nbr_columns)
+ default_filename = (
+ filename + f"_{nbr_columns}" + p.plural("column", nbr_columns)
+ )
self.box2 = widgets.HBox(
[
@@ -333,7 +374,9 @@ def select_output_location(self, default_filename=""):
display(self.box2)
o_folder = MetadataAsciiParser(working_dir=self.working_dir)
- o_folder.select_folder(instruction="Select Output Folder:", next=self.__export_table)
+ o_folder.select_folder(
+ instruction="Select Output Folder:", next=self.__export_table
+ )
def __export_table(self, folder):
display(
@@ -344,7 +387,11 @@ def __export_table(self, folder):
)
)
- display(HTML('Work in progress! ... '))
+ display(
+ HTML(
+ 'Work in progress! ... '
+ )
+ )
output_filename = self.box2.children[1].value
output_filename = force_file_extension(output_filename, ".txt")
@@ -353,11 +400,15 @@ def __export_table(self, folder):
# record metadata selected
metadata_name_selected = np.array(self.box.children[1].value)
- metadata_name_selected = np.append(metadata_name_selected, "timestamp_user_format")
+ metadata_name_selected = np.append(
+ metadata_name_selected, "timestamp_user_format"
+ )
data = self.get_data()
self.data_to_export = data[metadata_name_selected]
- self.export_table(data=self.data_to_export, folder=folder, filename=output_filename)
+ self.export_table(
+ data=self.data_to_export, folder=folder, filename=output_filename
+ )
def export_table(self, data=None, folder="", filename=""):
full_output_filename = os.path.join(os.path.abspath(folder), filename)
@@ -375,7 +426,11 @@ def export_table(self, data=None, folder="", filename=""):
display(HTML('Done!'))
display(
- HTML('Output file created: ' + full_output_filename + "")
+ HTML(
+ 'Output file created: '
+ + full_output_filename
+ + ""
+ )
)
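
The acquisition-time handling reformatted above converts an "%m/%d/%Y %H:%M:%S" string into an epoch offset with time.mktime, then re-renders epoch values in the same user-friendly format for the exported table. A small sketch of that round trip (the example timestamp is arbitrary):

import datetime
import time

def to_epoch(acquisition_time: str) -> float:
    # parse an 'mm/dd/YYYY HH:MM:SS' string into a local-time epoch timestamp
    return time.mktime(
        datetime.datetime.strptime(acquisition_time, "%m/%d/%Y %H:%M:%S").timetuple()
    )

def to_user_format(epoch: float) -> str:
    # render an epoch timestamp back into the notebook's user-facing format
    return datetime.datetime.fromtimestamp(epoch).strftime("%m/%d/%Y %H:%M:%S")

stamp = to_epoch("05/12/2020 10:15:00")
print(to_user_format(stamp))  # 05/12/2020 10:15:00 (round trip in local time)
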
diff --git a/notebooks/__code/metadata_handler.py b/notebooks/__code/metadata_handler.py
index 4489882f..679132e4 100755
--- a/notebooks/__code/metadata_handler.py
+++ b/notebooks/__code/metadata_handler.py
@@ -21,7 +21,11 @@ def get_time_stamp(file_name="", ext="tif"):
except:
time_stamp = o_dict[65000]
- time_stamp = MetadataHandler._convert_epics_timestamp_to_rfc3339_timestamp(time_stamp)
+ time_stamp = (
+ MetadataHandler._convert_epics_timestamp_to_rfc3339_timestamp(
+ time_stamp
+ )
+ )
except:
time_stamp = os.path.getmtime(file_name)
@@ -103,7 +107,9 @@ def retrieve_metadata(list_files=[], list_metadata=[], using_enum_object=False):
_dict = OrderedDict()
for _file in list_files:
_meta = MetadataHandler.get_metadata(
- filename=_file, list_metadata=list_metadata, using_enum_object=using_enum_object
+ filename=_file,
+ list_metadata=list_metadata,
+ using_enum_object=using_enum_object,
)
_dict[_file] = _meta
@@ -129,7 +135,9 @@ def get_value_of_metadata_key(filename="", list_key=None):
return result
@staticmethod
- def retrieve_value_of_metadata_key(list_files=[], list_key=[], is_from_notebook=False):
+ def retrieve_value_of_metadata_key(
+ list_files=[], list_key=[], is_from_notebook=False
+ ):
if list_files == []:
return {}
@@ -139,7 +147,9 @@ def retrieve_value_of_metadata_key(list_files=[], list_key=[], is_from_notebook=
_dict = OrderedDict()
for _index, _file in enumerate(list_files):
- _meta = MetadataHandler.get_value_of_metadata_key(filename=_file, list_key=list_key)
+ _meta = MetadataHandler.get_value_of_metadata_key(
+ filename=_file, list_key=list_key
+ )
_dict[_file] = _meta
if is_from_notebook:
progress_bar.value = _index
diff --git a/notebooks/__code/metadata_overlapping_images/advanced_table_handler.py b/notebooks/__code/metadata_overlapping_images/advanced_table_handler.py
index 8c1b644a..538e8b62 100755
--- a/notebooks/__code/metadata_overlapping_images/advanced_table_handler.py
+++ b/notebooks/__code/metadata_overlapping_images/advanced_table_handler.py
@@ -38,7 +38,9 @@ def add_metadata(self):
try:
value = float(value_str)
except ValueError:
- self.ui.statusbar.showMessage("This metadata can not be used - not a float value!", 10000)
+ self.ui.statusbar.showMessage(
+ "This metadata can not be used - not a float value!", 10000
+ )
self.ui.statusbar.setStyleSheet("color: red")
return
else:
@@ -59,7 +61,9 @@ def coefficient_changed(self):
def update_tableWidget(self):
list_metadata_index_selected = self.list_metatata_index_selected
list_files = self.parent.data_dict["file_name"]
- list_lineedit_ui_in_formula_tableWidget = self.list_lineedit_ui_in_formula_tableWidget
+ list_lineedit_ui_in_formula_tableWidget = (
+ self.list_lineedit_ui_in_formula_tableWidget
+ )
o_table = TableHandler(table_ui=self.ui.tableWidget)
for _row, _file in enumerate(list_files):
@@ -79,21 +83,29 @@ def update_tableWidget(self):
try:
value = float(value_str)
except ValueError:
- self.ui.statusbar.showMessage("This metadata can not be used - not a float value!", 10000)
+ self.ui.statusbar.showMessage(
+ "This metadata can not be used - not a float value!", 10000
+ )
self.ui.statusbar.setStyleSheet("color: red")
return
value = float(value)
try:
- coefficient = float(list_lineedit_ui_in_formula_tableWidget[_column].text())
+ coefficient = float(
+ list_lineedit_ui_in_formula_tableWidget[_column].text()
+ )
except ValueError:
- self.ui.statusbar.showMessage(f"Coefficient in column {_column} is wrong!", 10000)
+ self.ui.statusbar.showMessage(
+ f"Coefficient in column {_column} is wrong!", 10000
+ )
self.ui.statusbar.setStyleSheet("color: red")
return
global_value += coefficient * value
if there_is_at_least_one_column:
- o_table.insert_item(row=_row, column=1, value=global_value, editable=False)
+ o_table.insert_item(
+ row=_row, column=1, value=global_value, editable=False
+ )
self.ui.statusbar.showMessage("Table refreshed with new formula!", 10000)
self.ui.statusbar.setStyleSheet("color: green")
@@ -174,7 +186,9 @@ def list_of_metadata(self):
def file_name_value_table(self):
o_table = TableHandler(table_ui=self.parent.ui.tableWidget)
list_files_full_name = self.top_parent.data_dict["file_name"]
- list_files_short_name = [os.path.basename(_file) for _file in list_files_full_name]
+ list_files_short_name = [
+ os.path.basename(_file) for _file in list_files_full_name
+ ]
o_table.insert_empty_column(0)
o_table.insert_empty_column(1)
diff --git a/notebooks/__code/metadata_overlapping_images/display.py b/notebooks/__code/metadata_overlapping_images/display.py
index 84715784..7f87e9e4 100755
--- a/notebooks/__code/metadata_overlapping_images/display.py
+++ b/notebooks/__code/metadata_overlapping_images/display.py
@@ -20,7 +20,8 @@ def get_image_selected(self, recalculate_image=False):
angle = self.parent.rotation_angle
# rotate all images
self.parent.data_dict["data"] = [
- transform.rotate(_image, angle) for _image in self.parent.data_dict_raw["data"]
+ transform.rotate(_image, angle)
+ for _image in self.parent.data_dict_raw["data"]
]
_image = self.parent.data_dict["data"][slider_index]
@@ -44,7 +45,9 @@ def images(self):
_view_box.setState(_state)
if not first_update:
- _histo_widget.setLevels(self.parent.histogram_level[0], self.parent.histogram_level[1])
+ _histo_widget.setLevels(
+ self.parent.histogram_level[0], self.parent.histogram_level[1]
+ )
class DisplayScalePyqtUi:
@@ -81,7 +84,10 @@ def run(self, save_it=True):
adj = []
x0 = self.parent.ui.scale_position_x.value()
- y0 = self.parent.ui.scale_position_y.maximum() - self.parent.ui.scale_position_y.value()
+ y0 = (
+ self.parent.ui.scale_position_y.maximum()
+ - self.parent.ui.scale_position_y.value()
+ )
one_edge = [x0, y0]
if self.parent.ui.scale_horizontal_orientation.isChecked():
@@ -109,7 +115,13 @@ def run(self, save_it=True):
line_color = tuple(list_line_color)
lines = np.array(
[line_color for n in np.arange(len(pos))],
- dtype=[("red", np.ubyte), ("green", np.ubyte), ("blue", np.ubyte), ("alpha", np.ubyte), ("width", float)],
+ dtype=[
+ ("red", np.ubyte),
+ ("green", np.ubyte),
+ ("blue", np.ubyte),
+ ("alpha", np.ubyte),
+ ("width", float),
+ ],
)
scale = pg.GraphItem()
@@ -125,7 +137,11 @@ def run(self, save_it=True):
legend = o_get.scale_legend()
color = o_get.color(source="scale", color_type="html")
         text = pg.TextItem(
-            html='' + legend + "",
+            html=''
+            + legend
+            + "",
angle=angle,
)
view.addItem(text)
@@ -190,7 +206,10 @@ def display_text(self, save_it=True, metadata_index=1):
font_size = self.list_ui[metadata_index]["font_size_slider"].value()
x0 = self.list_ui[metadata_index]["position_x"].value()
- y0 = self.list_ui[metadata_index]["position_y"].maximum() - self.list_ui[metadata_index]["position_y"].value()
+ y0 = (
+ self.list_ui[metadata_index]["position_y"].maximum()
+ - self.list_ui[metadata_index]["position_y"].value()
+ )
o_get = Get(parent=self.parent)
metadata_text = o_get.metadata_text(metadata_index=metadata_index)
@@ -221,18 +240,26 @@ def display_text(self, save_it=True, metadata_index=1):
def clean_and_format_x_axis(self, x_axis=None):
x_axis_column_index = self.parent.x_axis_column_index
metadata_operation = self.parent.metadata_operation[x_axis_column_index]
- x_axis = self.clean_and_format_axis(metadata_operation=metadata_operation, input_axis=x_axis)
+ x_axis = self.clean_and_format_axis(
+ metadata_operation=metadata_operation, input_axis=x_axis
+ )
return x_axis
def clean_and_format_y_axis(self, y_axis=None):
y_axis_column_index = self.parent.y_axis_column_index
metadata_operation = self.parent.metadata_operation[y_axis_column_index]
- y_axis = self.clean_and_format_axis(metadata_operation=metadata_operation, input_axis=y_axis)
+ y_axis = self.clean_and_format_axis(
+ metadata_operation=metadata_operation, input_axis=y_axis
+ )
return y_axis
def clean_and_format_axis(self, metadata_operation=None, input_axis=None):
- first_part_of_string_to_remove = metadata_operation["first_part_of_string_to_remove"]
- last_part_of_string_to_remove = metadata_operation["last_part_of_string_to_remove"]
+ first_part_of_string_to_remove = metadata_operation[
+ "first_part_of_string_to_remove"
+ ]
+ last_part_of_string_to_remove = metadata_operation[
+ "last_part_of_string_to_remove"
+ ]
math_1 = metadata_operation["math_1"]
math_2 = metadata_operation["math_2"]
value_1 = metadata_operation["value_1"]
@@ -250,7 +277,11 @@ def clean_and_format_axis(self, metadata_operation=None, input_axis=None):
return None
value_cleaned_math = linear_operation(
- input_parameter=value_cleaned, math_1=math_1, math_2=math_2, value_1=value_1, value_2=value_2
+ input_parameter=value_cleaned,
+ math_1=math_1,
+ math_2=math_2,
+ value_1=value_1,
+ value_2=value_2,
)
if value_cleaned_math == "":
@@ -315,9 +346,7 @@ def display_graph(self, save_it=True):
x_name = self.parent.ui.graph_x_axis_name.text()
x_unit = self.parent.ui.graph_x_axis_units.text()
         if x_unit:
-            x_axis_label = (
-                f'{x_name} ({x_unit})'
-            )
+            x_axis_label = f'{x_name} ({x_unit})'
         else:
-            x_axis_label = f'{x_name}'
@@ -344,11 +373,16 @@ def display_graph(self, save_it=True):
)
if self.parent.ui.display_red_vertical_marker_checkbox.isChecked():
- _inf_line = pg.InfiniteLine(clean_and_format_x_axis[current_index], pen=_pen)
+ _inf_line = pg.InfiniteLine(
+ clean_and_format_x_axis[current_index], pen=_pen
+ )
graph.addItem(_inf_line)
x0 = self.parent.ui.graph_position_x.value()
- y0 = self.parent.ui.graph_position_y.maximum() - self.parent.ui.graph_position_y.value()
+ y0 = (
+ self.parent.ui.graph_position_y.maximum()
+ - self.parent.ui.graph_position_y.value()
+ )
view.addItem(graph)
graph.setPos(x0, y0)
diff --git a/notebooks/__code/metadata_overlapping_images/event_handler.py b/notebooks/__code/metadata_overlapping_images/event_handler.py
index 3cfad8f8..b14b44c9 100755
--- a/notebooks/__code/metadata_overlapping_images/event_handler.py
+++ b/notebooks/__code/metadata_overlapping_images/event_handler.py
@@ -43,7 +43,9 @@ def right_click(self, position=None):
action = menu.exec_(QtGui.QCursor.pos())
if action == _set_new_metadata:
- o_selector = MetadataSelectorHandler(parent=self.parent, column=column_selected)
+ o_selector = MetadataSelectorHandler(
+ parent=self.parent, column=column_selected
+ )
o_selector.show()
elif action == _x_axis:
@@ -68,7 +70,9 @@ def metadata_list_changed(self, index, column):
o_dict = dict(o_image.tag_v2)
value = o_dict[float(key_selected)]
- new_value = self.perform_cleaning_and_math_on_metadata(column=column, value=value)
+ new_value = self.perform_cleaning_and_math_on_metadata(
+ column=column, value=value
+ )
self.parent.ui.tableWidget.item(row, column).setText(f"{new_value}")
self.parent.eventProgress.setValue(row + 1)
@@ -80,8 +84,12 @@ def metadata_list_changed(self, index, column):
def perform_cleaning_and_math_on_metadata(self, column=1, value=""):
metadata_operation = self.parent.metadata_operation
- first_part_of_string_to_remove = metadata_operation[column]["first_part_of_string_to_remove"]
- last_part_of_string_to_remove = metadata_operation[column]["last_part_of_string_to_remove"]
+ first_part_of_string_to_remove = metadata_operation[column][
+ "first_part_of_string_to_remove"
+ ]
+ last_part_of_string_to_remove = metadata_operation[column][
+ "last_part_of_string_to_remove"
+ ]
string_cleaned = string_cleaning(
first_part_of_string_to_remove=first_part_of_string_to_remove,
last_part_of_string_to_remove=last_part_of_string_to_remove,
@@ -90,11 +98,17 @@ def perform_cleaning_and_math_on_metadata(self, column=1, value=""):
value_1 = metadata_operation[column]["value_1"]
value_2 = metadata_operation[column]["value_2"]
- if is_linear_operation_valid(input_parameter=string_cleaned, value_1=value_1, value_2=value_2):
+ if is_linear_operation_valid(
+ input_parameter=string_cleaned, value_1=value_1, value_2=value_2
+ ):
math_1 = metadata_operation[column]["math_1"]
math_2 = metadata_operation[column]["math_2"]
result_linear_operation = linear_operation(
- input_parameter=string_cleaned, math_1=math_1, value_1=value_1, math_2=math_2, value_2=value_2
+ input_parameter=string_cleaned,
+ math_1=math_1,
+ value_1=value_1,
+ math_2=math_2,
+ value_2=value_2,
)
else:
return string_cleaned
diff --git a/notebooks/__code/metadata_overlapping_images/export_images.py b/notebooks/__code/metadata_overlapping_images/export_images.py
index 2c9eeb13..4cf00a61 100755
--- a/notebooks/__code/metadata_overlapping_images/export_images.py
+++ b/notebooks/__code/metadata_overlapping_images/export_images.py
@@ -36,8 +36,12 @@ def run(self):
exporter = pyqtgraph.exporters.ImageExporter(self.parent.ui.image_view.view)
- exporter.params.param("width").setValue(2024, blockSignal=exporter.widthChanged)
- exporter.params.param("height").setValue(2014, blockSignal=exporter.heightChanged)
+ exporter.params.param("width").setValue(
+ 2024, blockSignal=exporter.widthChanged
+ )
+ exporter.params.param("height").setValue(
+ 2014, blockSignal=exporter.heightChanged
+ )
exporter.export(output_file_name)
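
The exporter calls above follow pyqtgraph's ImageExporter pattern: set the pixel width and height on the exporter's parameters (blocking the resize signals), then export. A rough standalone sketch of that pattern, assuming a Qt-capable environment; the stand-in image and output file name are placeholders, not values from this repository.

    import numpy as np
    import pyqtgraph as pg
    import pyqtgraph.exporters

    app = pg.mkQApp()
    image_view = pg.ImageView()
    image_view.setImage(np.random.rand(128, 128))  # stand-in image data
    image_view.show()  # give the view a real size before exporting

    exporter = pg.exporters.ImageExporter(image_view.getView())
    exporter.params.param("width").setValue(2024, blockSignal=exporter.widthChanged)
    exporter.params.param("height").setValue(2014, blockSignal=exporter.heightChanged)
    exporter.export("overlay_preview.png")  # placeholder output file
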
diff --git a/notebooks/__code/metadata_overlapping_images/export_table.py b/notebooks/__code/metadata_overlapping_images/export_table.py
index 1add654a..2151d4a6 100755
--- a/notebooks/__code/metadata_overlapping_images/export_table.py
+++ b/notebooks/__code/metadata_overlapping_images/export_table.py
@@ -16,14 +16,21 @@ def _create_output_file_name(self):
working_dir = self.parent.working_dir
base_working_dir = os.path.basename(working_dir)
- full_file_name = os.path.join(self.export_folder, base_working_dir + "_metadata_table.txt")
+ full_file_name = os.path.join(
+ self.export_folder, base_working_dir + "_metadata_table.txt"
+ )
return full_file_name
def run(self):
full_output_file_name = self._create_output_file_name()
metadata = self.create_metadata_array()
data = self.create_data_array()
- make_ascii_file(metadata=metadata, data=data, output_file_name=full_output_file_name, dim="1d")
+ make_ascii_file(
+ metadata=metadata,
+ data=data,
+ output_file_name=full_output_file_name,
+ dim="1d",
+ )
show_status_message(
parent=self.parent,
@@ -69,9 +76,13 @@ def format_math(metadata_operation, metadata_axis="x_axis"):
metadata.append(f"# Metadata {metadata_axis} operation: None")
else:
if value_2 == "":
- metadata.append(f"# Metadata {metadata_axis} operation: {math_1} {value_1}")
+ metadata.append(
+ f"# Metadata {metadata_axis} operation: {math_1} {value_1}"
+ )
else:
- metadata.append(f"# Metadata {metadata_axis} operation: {math_1} {value_1} {math_2} {value_2}")
+ metadata.append(
+ f"# Metadata {metadata_axis} operation: {math_1} {value_1} {math_2} {value_2}"
+ )
format_math(x_axis_metadata_operation, metadata_axis="x_axis")
format_math(y_axis_metadata_operation, metadata_axis="y_axis")
diff --git a/notebooks/__code/metadata_overlapping_images/get.py b/notebooks/__code/metadata_overlapping_images/get.py
index ed91330c..e0c64e73 100755
--- a/notebooks/__code/metadata_overlapping_images/get.py
+++ b/notebooks/__code/metadata_overlapping_images/get.py
@@ -47,7 +47,9 @@ def metadata_column(self):
try:
_row_value = float(_row_str)
except:
- self.parent.ui.statusbar.showMessage("Error Displaying Metadata Graph!", 10000)
+ self.parent.ui.statusbar.showMessage(
+ "Error Displaying Metadata Graph!", 10000
+ )
self.parent.ui.statusbar.setStyleSheet("color: red")
return []
@@ -101,7 +103,9 @@ def metadata_text(self, metadata_index=1):
slider_index = self.parent.ui.file_slider.value()
index_of_y_axis = self.parent.y_axis_column_index
- metadata_value = str(self.parent.ui.tableWidget.item(slider_index, index_of_y_axis).text())
+ metadata_value = str(
+ self.parent.ui.tableWidget.item(slider_index, index_of_y_axis).text()
+ )
if metadata_name.strip() == "":
return f"{metadata_value} {metadata_units}"
else:
@@ -123,7 +127,9 @@ def raw_metadata_column(self):
def color(self, color_type="html", source="metadata"):
if source == "metadata":
- color_selected = self.parent.ui.metadata_color_combobox.currentText().lower()
+ color_selected = (
+ self.parent.ui.metadata_color_combobox.currentText().lower()
+ )
elif source == "graph":
color_selected = self.parent.ui.graph_color_combobox.currentText().lower()
else:
diff --git a/notebooks/__code/metadata_overlapping_images/initialization.py b/notebooks/__code/metadata_overlapping_images/initialization.py
index 81d4f385..ef37dacc 100755
--- a/notebooks/__code/metadata_overlapping_images/initialization.py
+++ b/notebooks/__code/metadata_overlapping_images/initialization.py
@@ -6,7 +6,10 @@
from qtpy.QtWidgets import QProgressBar, QTableWidgetItem, QVBoxLayout
from __code.file_handler import retrieve_time_stamp
-from __code.metadata_overlapping_images.general_classes import MetadataSettings, ScaleSettings
+from __code.metadata_overlapping_images.general_classes import (
+ MetadataSettings,
+ ScaleSettings,
+)
from .get import Get
@@ -33,7 +36,9 @@ def statusbar(self):
def table(self):
# init the summary table
list_files_full_name = self.parent.data_dict["file_name"]
- list_files_short_name = [os.path.basename(_file) for _file in list_files_full_name]
+ list_files_short_name = [
+ os.path.basename(_file) for _file in list_files_full_name
+ ]
self.parent.ui.tableWidget.blockSignals(True)
for _row, _file in enumerate(list_files_short_name):
@@ -62,7 +67,9 @@ def widgets(self):
# update size of table columns
nbr_columns = self.parent.ui.tableWidget.columnCount()
for _col in range(nbr_columns):
- self.parent.ui.tableWidget.setColumnWidth(_col, self.parent.guide_table_width[_col])
+ self.parent.ui.tableWidget.setColumnWidth(
+ _col, self.parent.guide_table_width[_col]
+ )
# populate list of metadata if file is a tiff
o_get = Get(parent=self.parent)
@@ -73,7 +80,9 @@ def widgets(self):
self.parent.ui.select_metadata_combobox.setVisible(False)
# list of scale available
- self.parent.ui.scale_units_combobox.addItems(self.parent.list_scale_units["string"])
+ self.parent.ui.scale_units_combobox.addItems(
+ self.parent.list_scale_units["string"]
+ )
# pixel size range
[height, width] = np.shape(self.parent.data_dict["data"][0])
diff --git a/notebooks/__code/metadata_overlapping_images/metadata_overlapping_images.py b/notebooks/__code/metadata_overlapping_images/metadata_overlapping_images.py
index b237fbdd..2d26006f 100755
--- a/notebooks/__code/metadata_overlapping_images/metadata_overlapping_images.py
+++ b/notebooks/__code/metadata_overlapping_images/metadata_overlapping_images.py
@@ -85,7 +85,10 @@ class MetadataOverlappingImagesUi(QMainWindow):
list_metadata = []
dict_list_metadata = OrderedDict() # {0: '10', 1: 'hfir', ...}
list_scale_units = ["mm", "\u00b5m", "nm"]
- list_scale_units = {"string": ["mm", "\u00b5m", "nm"], "html": ["mm", "µm", "nm"]}
+ list_scale_units = {
+ "string": ["mm", "\u00b5m", "nm"],
+ "html": ["mm", "µm", "nm"],
+ }
rgba_color = {
"white": (255, 255, 255, 255, None),
@@ -103,7 +106,13 @@ class MetadataOverlappingImagesUi(QMainWindow):
"black": (0, 0, 0),
}
- html_color = {"white": "#FFF", "red": "#F00", "green": "#0F0", "blue": "#00F", "black": "#000"}
+ html_color = {
+ "white": "#FFF",
+ "red": "#F00",
+ "green": "#0F0",
+ "blue": "#00F",
+ "black": "#000",
+ }
# ui of pop up window that allows to define metadata column value (format it)
metadata_string_format_ui = None
diff --git a/notebooks/__code/metadata_overlapping_images/metadata_selector_handler.py b/notebooks/__code/metadata_overlapping_images/metadata_selector_handler.py
index 302f0975..ee5caad7 100755
--- a/notebooks/__code/metadata_overlapping_images/metadata_selector_handler.py
+++ b/notebooks/__code/metadata_overlapping_images/metadata_selector_handler.py
@@ -32,14 +32,24 @@ def initialization(self):
self.ui.select_metadata_combobox.addItems(list_metadata)
metadata_operation = self.parent.metadata_operation[self.column]
- self.ui.select_metadata_combobox.setCurrentIndex(metadata_operation["index_of_metadata"])
- self.ui.first_part_lineEdit.setText(metadata_operation["first_part_of_string_to_remove"])
- self.ui.second_part_lineEdit.setText(metadata_operation["last_part_of_string_to_remove"])
+ self.ui.select_metadata_combobox.setCurrentIndex(
+ metadata_operation["index_of_metadata"]
+ )
+ self.ui.first_part_lineEdit.setText(
+ metadata_operation["first_part_of_string_to_remove"]
+ )
+ self.ui.second_part_lineEdit.setText(
+ metadata_operation["last_part_of_string_to_remove"]
+ )
self.ui.linear_operation_lineEdit_1.setText(metadata_operation["value_1"])
self.ui.linear_operation_lineEdit_2.setText(metadata_operation["value_2"])
- math_1_index = self.ui.linear_operation_comboBox_1.findText(metadata_operation["math_1"])
+ math_1_index = self.ui.linear_operation_comboBox_1.findText(
+ metadata_operation["math_1"]
+ )
self.ui.linear_operation_comboBox_1.setCurrentIndex(math_1_index)
- math_2_index = self.ui.linear_operation_comboBox_2.findText(metadata_operation["math_2"])
+ math_2_index = self.ui.linear_operation_comboBox_2.findText(
+ metadata_operation["math_2"]
+ )
self.ui.linear_operation_comboBox_2.setCurrentIndex(math_2_index)
def string_cleaning_changed(self, new_text=None):
@@ -74,7 +84,9 @@ def linear_operation_lineedit_changed(self, new_string=None):
self.ui.linear_operation_value_after.setText("N/A")
return
- input_parameter = float(str(self.ui.linear_operation_value_before.text()).strip())
+ input_parameter = float(
+ str(self.ui.linear_operation_value_before.text()).strip()
+ )
math_1 = str(self.ui.linear_operation_comboBox_1.currentText())
value_1 = str(self.ui.linear_operation_lineEdit_1.text()).strip()
math_2 = str(self.ui.linear_operation_comboBox_2.currentText())
@@ -95,7 +107,10 @@ def linear_operation_combobox_changed(self, new_string=None):
self.update_final_result()
def update_final_result(self):
- if self.is_before_linear_operation_is_valid() and self.is_linear_operation_valid():
+ if (
+ self.is_before_linear_operation_is_valid()
+ and self.is_linear_operation_valid()
+ ):
result = self.ui.linear_operation_value_after.text()
else:
result = self.ui.linear_operation_value_before.text()
@@ -161,8 +176,12 @@ def result_of_checking_operation(ui=None):
is_error_in_operation = True
return is_error_in_operation
- is_error_operation_1 = result_of_checking_operation(ui=self.ui.linear_operation_lineEdit_1)
- is_error_operation_2 = result_of_checking_operation(ui=self.ui.linear_operation_lineEdit_2)
+ is_error_operation_1 = result_of_checking_operation(
+ ui=self.ui.linear_operation_lineEdit_1
+ )
+ is_error_operation_2 = result_of_checking_operation(
+ ui=self.ui.linear_operation_lineEdit_2
+ )
self.ui.error_label_1.setVisible(is_error_operation_1)
self.ui.error_label_2.setVisible(is_error_operation_2)
diff --git a/notebooks/__code/metadata_overlapping_images/metadata_string_format_handler.py b/notebooks/__code/metadata_overlapping_images/metadata_string_format_handler.py
index 3de4eaf6..87b5810e 100755
--- a/notebooks/__code/metadata_overlapping_images/metadata_string_format_handler.py
+++ b/notebooks/__code/metadata_overlapping_images/metadata_string_format_handler.py
@@ -94,14 +94,18 @@ def init_table_size(self):
def init_table(self):
list_files_full_name = self.parent.data_dict["file_name"]
- list_files_short_name = [os.path.basename(_file) for _file in list_files_full_name]
+ list_files_short_name = [
+ os.path.basename(_file) for _file in list_files_full_name
+ ]
main_table_metadata_column = self.parent.get_raw_metadata_column()
for _row, _file in enumerate(list_files_short_name):
self.ui.tableWidget.insertRow(_row)
self.set_item_table(row=_row, col=0, value=_file)
- self.set_item_table(row=_row, col=1, value=main_table_metadata_column[_row], editable=True)
+ self.set_item_table(
+ row=_row, col=1, value=main_table_metadata_column[_row], editable=True
+ )
def set_item_table(self, row=0, col=0, value="", editable=False):
item = QtGui.QTableWidgetItem(str(value))
diff --git a/notebooks/__code/metadata_overlapping_images/table_loader.py b/notebooks/__code/metadata_overlapping_images/table_loader.py
index af2acd4b..2505f231 100755
--- a/notebooks/__code/metadata_overlapping_images/table_loader.py
+++ b/notebooks/__code/metadata_overlapping_images/table_loader.py
@@ -10,7 +10,9 @@ def __init__(self, parent=None, filename=""):
self.filename = filename
def load_table(self):
- table = pd.read_csv(self.filename, sep=",", comment="#", names=["filename", "metadata"])
+ table = pd.read_csv(
+ self.filename, sep=",", comment="#", names=["filename", "metadata"]
+ )
table_dict = {}
for _row in table.values:
_key, _value = _row
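
For reference, the load_table call above boils down to a two-column CSV read; a minimal sketch of the same pattern, with a placeholder file name rather than a path from this repository.

    import pandas as pd

    # Rows look like "filename,metadata"; lines starting with "#" are skipped.
    table = pd.read_csv(
        "metadata_table.txt",  # placeholder path
        sep=",",
        comment="#",
        names=["filename", "metadata"],
    )
    table_dict = {_key: _value for _key, _value in table.values}
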
diff --git a/notebooks/__code/metadata_overlapping_images/utilities.py b/notebooks/__code/metadata_overlapping_images/utilities.py
index e0575a51..2fd17794 100755
--- a/notebooks/__code/metadata_overlapping_images/utilities.py
+++ b/notebooks/__code/metadata_overlapping_images/utilities.py
@@ -3,7 +3,11 @@
from . import LIST_FUNNY_CHARACTERS
-def string_cleaning(first_part_of_string_to_remove="", last_part_of_string_to_remove="", string_to_clean=""):
+def string_cleaning(
+ first_part_of_string_to_remove="",
+ last_part_of_string_to_remove="",
+ string_to_clean="",
+):
first_part_of_string_to_remove = first_part_of_string_to_remove
_clean_first_part = ""
for _c in first_part_of_string_to_remove:
@@ -31,7 +35,9 @@ def string_cleaning(first_part_of_string_to_remove="", last_part_of_string_to_re
def linear_operation(input_parameter="", math_1="", value_1="", math_2="", value_2=""):
- if is_linear_operation_valid(input_parameter=input_parameter, value_1=value_1, value_2=value_2):
+ if is_linear_operation_valid(
+ input_parameter=input_parameter, value_1=value_1, value_2=value_2
+ ):
operation_to_eval = f"{input_parameter}"
if value_1:
operation_to_eval += f" {math_1} {float(value_1)}"
diff --git a/notebooks/__code/normalization.py b/notebooks/__code/normalization.py
index a85c7fa4..82c5429b 100755
--- a/notebooks/__code/normalization.py
+++ b/notebooks/__code/normalization.py
@@ -138,7 +138,10 @@ def __top_panel(self):
title_ui = widgets.HBox(
[
widgets.Label("Instructions:", layout=widgets.Layout(width="20%")),
- widgets.Label("Select Samples Images and click NEXT", layout=widgets.Layout(width="50%")),
+ widgets.Label(
+ "Select Samples Images and click NEXT",
+ layout=widgets.Layout(width="50%"),
+ ),
]
)
@@ -148,7 +151,9 @@ def __top_panel(self):
widgets.Label("None", layout=widgets.Layout(width="50%")),
]
)
- self.title = title_ui.children[1] # "Select [Samples/OB/DF] Images [and click NEXT]
+ self.title = title_ui.children[
+ 1
+ ] # "Select [Samples/OB/DF] Images [and click NEXT]
self.label = label_ui.children[1] # number of samples selected
self.top_panel = widgets.VBox(children=[title_ui, label_ui], layout=self.layout)
@@ -172,7 +177,9 @@ def __bottom_panel(self):
self.prev_button_ui.on_click(self.prev_button_clicked)
list_ui.append(self.prev_button_ui)
- self.current_state_label_ui = widgets.Label(" ", layout=widgets.Layout(width="70%"))
+ self.current_state_label_ui = widgets.Label(
+ " ", layout=widgets.Layout(width="70%")
+ )
list_ui.append(self.current_state_label_ui)
if self.next_button:
@@ -208,7 +215,9 @@ def nextStep(self):
class WizardPanel:
- label_layout = Layout(border="1px lighgray solide", height="35px", padding="8px", width="300px")
+ label_layout = Layout(
+ border="1px lighgray solide", height="35px", padding="8px", width="300px"
+ )
sample_panel = None
def __init__(self, sample_panel=None):
@@ -223,9 +232,14 @@ class SampleSelectionPanel(Panel):
files = None
o_norm = None
- def __init__(self, prev_button=False, next_button=True, working_dir="", top_object=None):
+ def __init__(
+ self, prev_button=False, next_button=True, working_dir="", top_object=None
+ ):
super(SampleSelectionPanel, self).__init__(
- prev_button=prev_button, next_button=next_button, working_dir=working_dir, top_object=top_object
+ prev_button=prev_button,
+ next_button=next_button,
+ working_dir=working_dir,
+ top_object=top_object,
)
# def __init__(self, prev_button=False, next_button=True, working_dir='', top_object=None, gamma_coefficient=None):
@@ -250,13 +264,17 @@ def __init__(self, working_dir="", top_object=None):
def next_button_clicked(self, event):
self.remove()
- _panel = DFSelectionPanel(working_dir=self.working_dir, top_object=self.top_object)
+ _panel = DFSelectionPanel(
+ working_dir=self.working_dir, top_object=self.top_object
+ )
_panel.init_ui(files=self.files)
_panel.show()
def prev_button_clicked(self, event):
self.remove()
- _panel = SampleSelectionPanel(working_dir=self.working_dir, top_object=self.top_object)
+ _panel = SampleSelectionPanel(
+ working_dir=self.working_dir, top_object=self.top_object
+ )
_panel.init_ui(files=self.files)
_panel.show()
@@ -265,19 +283,27 @@ class DFSelectionPanel(Panel):
def __init__(self, working_dir="", top_object=None):
self.working_dir = working_dir
super(DFSelectionPanel, self).__init__(
- prev_button=True, next_button=True, state="df", working_dir=working_dir, top_object=top_object
+ prev_button=True,
+ next_button=True,
+ state="df",
+ working_dir=working_dir,
+ top_object=top_object,
)
def prev_button_clicked(self, event):
self.remove()
- _panel = OBSelectionPanel(working_dir=self.working_dir, top_object=self.top_object)
+ _panel = OBSelectionPanel(
+ working_dir=self.working_dir, top_object=self.top_object
+ )
_panel.init_ui(files=self.files)
_panel.show()
def next_button_clicked(self, event):
self.remove()
o_norm_handler = NormalizationHandler(
- files=self.files, working_dir=self.working_dir, gamma_threshold=self.gamma_threshold
+ files=self.files,
+ working_dir=self.working_dir,
+ gamma_threshold=self.gamma_threshold,
)
o_norm_handler.load_data()
self.top_object.o_norm_handler = o_norm_handler
@@ -365,11 +391,17 @@ def with_or_without_roi(self):
"Do you want to select a region of interest (ROI) that will make sure that the "
+ "sample background matches the OB background"
)
- label2 = widgets.Label("-> Make sure your selection do not overlap your sample!")
+ label2 = widgets.Label(
+ "-> Make sure your selection do not overlap your sample!"
+ )
box = widgets.HBox(
[
widgets.Label("With or Without ROI?"),
- widgets.RadioButtons(options=["yes", "no"], value="yes", layout=widgets.Layout(width="50%")),
+ widgets.RadioButtons(
+ options=["yes", "no"],
+ value="yes",
+ layout=widgets.Layout(width="50%"),
+ ),
]
)
self.with_or_without_radio_button = box.children[1]
@@ -378,11 +410,15 @@ def with_or_without_roi(self):
def select_sample_roi(self):
if self.with_or_without_radio_button.value == "no":
- label2 = widgets.Label("-> You chose not to select any ROI! Next step: Normalization")
+ label2 = widgets.Label(
+ "-> You chose not to select any ROI! Next step: Normalization"
+ )
display(label2)
return
- label2 = widgets.Label("-> Make sure your selection do not overlap your sample!")
+ label2 = widgets.Label(
+ "-> Make sure your selection do not overlap your sample!"
+ )
display(label2)
if self.integrated_sample == []:
@@ -396,20 +432,51 @@ def plot_roi(x_left, y_top, width, height):
ax_img = plt.subplot(111)
ax_img.imshow(_integrated_sample, cmap="viridis", interpolation=None)
- _rectangle = patches.Rectangle((x_left, y_top), width, height, edgecolor="white", linewidth=2, fill=False)
+ _rectangle = patches.Rectangle(
+ (x_left, y_top),
+ width,
+ height,
+ edgecolor="white",
+ linewidth=2,
+ fill=False,
+ )
ax_img.add_patch(_rectangle)
return [x_left, y_top, width, height]
self.roi_selection = widgets.interact(
plot_roi,
- x_left=widgets.IntSlider(min=0, max=width, step=1, value=0, description="X Left", continuous_update=False),
- y_top=widgets.IntSlider(min=0, max=height, value=0, step=1, description="Y Top", continuous_update=False),
+ x_left=widgets.IntSlider(
+ min=0,
+ max=width,
+ step=1,
+ value=0,
+ description="X Left",
+ continuous_update=False,
+ ),
+ y_top=widgets.IntSlider(
+ min=0,
+ max=height,
+ value=0,
+ step=1,
+ description="Y Top",
+ continuous_update=False,
+ ),
width=widgets.IntSlider(
- min=0, max=width - 1, step=1, value=60, description="Width", continuous_update=False
+ min=0,
+ max=width - 1,
+ step=1,
+ value=60,
+ description="Width",
+ continuous_update=False,
),
height=widgets.IntSlider(
- min=0, max=height - 1, step=1, value=100, description="Height", continuous_update=False
+ min=0,
+ max=height - 1,
+ step=1,
+ value=100,
+ description="Height",
+ continuous_update=False,
),
)
@@ -478,9 +545,15 @@ def display_file_selector_from_home(ev):
hbox = widgets.HBox(
[
widgets.Button(
- description=f"Jump to {ipts} Shared Folder", button_style="success", layout=button_layout
+ description=f"Jump to {ipts} Shared Folder",
+ button_style="success",
+ layout=button_layout,
+ ),
+ widgets.Button(
+ description="Jump to My Home Folder",
+ button_style="success",
+ layout=button_layout,
),
- widgets.Button(description="Jump to My Home Folder", button_style="success", layout=button_layout),
]
)
go_to_shared_button_ui = hbox.children[0]
@@ -495,13 +568,20 @@ def display_file_selector_from_home(ev):
def display_file_selector(self, start_dir=""):
self.output_folder_ui = fileselector.FileSelectorPanel(
- instruction="Select Output Folder", start_dir=start_dir, multiple=False, type="directory"
+ instruction="Select Output Folder",
+ start_dir=start_dir,
+ multiple=False,
+ type="directory",
)
self.output_folder_ui.show()
def export(self):
- base_folder = os.path.basename(os.path.dirname(self.list_file_names[0])) + "_normalized"
- output_folder = os.path.abspath(os.path.join(self.output_folder_ui.selected, base_folder))
+ base_folder = (
+ os.path.basename(os.path.dirname(self.list_file_names[0])) + "_normalized"
+ )
+ output_folder = os.path.abspath(
+ os.path.join(self.output_folder_ui.selected, base_folder)
+ )
utilities.make_dir(dir=output_folder)
w = widgets.IntProgress()
@@ -536,7 +616,10 @@ def select_gamma_coefficient(self):
[
widgets.Label("Gamma Coefficient:", layout=widgets.Layout(width="20%")),
widgets.FloatSlider(
- value=gamma_filtering_coefficient, min=0, max=1, layout=widgets.Layout(width="50%")
+ value=gamma_filtering_coefficient,
+ min=0,
+ max=1,
+ layout=widgets.Layout(width="50%"),
),
]
)
diff --git a/notebooks/__code/normalization/__init__.py b/notebooks/__code/normalization/__init__.py
index 001fedd2..bf043927 100755
--- a/notebooks/__code/normalization/__init__.py
+++ b/notebooks/__code/normalization/__init__.py
@@ -1,6 +1,8 @@
LOG_FILENAME = ".normalization_with_simplify_selection.log"
-TEMPORARY_ROI_BUTTON_DESCRIPTION = "Check UI that popped up! (May be hidden behind this browser!)"
+TEMPORARY_ROI_BUTTON_DESCRIPTION = (
+ "Check UI that popped up! (May be hidden behind this browser!)"
+)
TEMPORARY_ROI_ICON = ""
ROI_BUTTON_DESCRIPTION = "Selection of region of interest (ROI) - OPTIONAL"
ROI_ICON = "gear"
diff --git a/notebooks/__code/normalization/get.py b/notebooks/__code/normalization/get.py
index 10cd67c3..530330ac 100755
--- a/notebooks/__code/normalization/get.py
+++ b/notebooks/__code/normalization/get.py
@@ -8,7 +8,11 @@
from . import ROI_BUTTON_DESCRIPTION, ROI_ICON
-LIST_METADATA_NOT_INSTRUMENT_RELATED = ["filename", "time_stamp", "time_stamp_user_format"]
+LIST_METADATA_NOT_INSTRUMENT_RELATED = [
+ "filename",
+ "time_stamp",
+ "time_stamp_user_format",
+]
class Get(TopGet):
@@ -36,15 +40,18 @@ def active_tab_config_key(self):
return current_config_tab.get_title(current_config_tab_index)
def time_before_and_after_of_this_config(self, current_config=None):
- [time_before_selected_ui, time_after_selected_ui] = self.time_before_and_after_ui_of_this_config(
- current_config=current_config
+ [time_before_selected_ui, time_after_selected_ui] = (
+ self.time_before_and_after_ui_of_this_config(current_config=current_config)
)
return [time_before_selected_ui.value, time_after_selected_ui.value]
def time_before_and_after_ui_of_this_config(self, current_config=None):
if current_config is None:
current_config = self.current_config_of_widgets_id()
- return [current_config["time_slider_before_experiment"], current_config["time_slider_after_experiment"]]
+ return [
+ current_config["time_slider_before_experiment"],
+ current_config["time_slider_after_experiment"],
+ ]
def time_before_and_after_message_ui_of_this_config(self):
current_config = self.current_config_of_widgets_id()
@@ -80,7 +87,9 @@ def max_time_elapse_before_experiment(self):
dict_for_this_config = final_full_master_dict[acquisition_key][config_key]
# retrieve first and last sample file for this config and for this acquisition
- first_sample_image_time_stamp = dict_for_this_config["first_images"]["sample"]["time_stamp"]
+ first_sample_image_time_stamp = dict_for_this_config["first_images"]["sample"][
+ "time_stamp"
+ ]
first_ob = dict_for_this_config["first_images"]["ob"]["time_stamp"]
if first_ob > first_sample_image_time_stamp:
@@ -101,7 +110,9 @@ def max_time_elapse_after_experiment(self):
dict_for_this_config = final_full_master_dict[acquisition_key][config_key]
# retrieve first and last sample file for this config and for this acquisition
- last_sample_images_time_stamp = dict_for_this_config["last_images"]["sample"]["time_stamp"]
+ last_sample_images_time_stamp = dict_for_this_config["last_images"]["sample"][
+ "time_stamp"
+ ]
last_ob = dict_for_this_config["last_images"]["ob"]["time_stamp"]
if last_ob < last_sample_images_time_stamp:
@@ -113,7 +124,10 @@ def full_layout_for_this_config(self, dict_config):
config_widgets_id_dict = {}
def _make_list_basename_file(list_name="list_sample"):
- return [os.path.basename(_entry["filename"]) for _entry in dict_config[list_name]]
+ return [
+ os.path.basename(_entry["filename"])
+ for _entry in dict_config[list_name]
+ ]
def _make_full_file_name(list_name="list_sample"):
return [_entry["filename"] for _entry in dict_config[list_name]]
@@ -124,20 +138,32 @@ def _make_full_file_name(list_name="list_sample"):
# normalize or not this configuration
use_this_config_widget = widgets.Checkbox(
- description="Normalize this configuration", value=True, layout=widgets.Layout(width="100%")
+ description="Normalize this configuration",
+ value=True,
+ layout=widgets.Layout(width="100%"),
+ )
+ use_this_config_widget.observe(
+ self.parent.update_use_this_config_widget, names="value"
)
- use_this_config_widget.observe(self.parent.update_use_this_config_widget, names="value")
config_widgets_id_dict["use_this_config"] = use_this_config_widget
# use custom time range check box
check_box_user_time_range = widgets.Checkbox(
- description="Use selected OB & DF from custom time range", value=False, layout=widgets.Layout(width="35%")
+ description="Use selected OB & DF from custom time range",
+ value=False,
+ layout=widgets.Layout(width="35%"),
+ )
+ config_widgets_id_dict["use_custom_time_range_checkbox"] = (
+ check_box_user_time_range
+ )
+ check_box_user_time_range.observe(
+ self.parent.update_config_widgets, names="value"
)
- config_widgets_id_dict["use_custom_time_range_checkbox"] = check_box_user_time_range
- check_box_user_time_range.observe(self.parent.update_config_widgets, names="value")
[max_time_elapse_before_experiment, max_time_elapse_after_experiment] = (
- self.parent.calculate_max_time_before_and_after_exp_for_this_config(dict_config)
+ self.parent.calculate_max_time_before_and_after_exp_for_this_config(
+ dict_config
+ )
)
hori_layout1 = widgets.HBox(
@@ -151,7 +177,10 @@ def _make_full_file_name(list_name="list_sample"):
readout=False,
layout=widgets.Layout(width="30%", visibility="hidden"),
),
- widgets.Label(" <<< EXPERIMENT >>> ", layout=widgets.Layout(width="20%", visibility="hidden")),
+ widgets.Label(
+ " <<< EXPERIMENT >>> ",
+ layout=widgets.Layout(width="20%", visibility="hidden"),
+ ),
widgets.FloatSlider(
value=max_time_elapse_before_experiment + 0.1,
min=0,
@@ -166,10 +195,18 @@ def _make_full_file_name(list_name="list_sample"):
self.parent.time_before_slider = hori_layout1.children[1]
self.parent.time_after_slider = hori_layout1.children[3]
self.parent.experiment_label = hori_layout1.children[2]
- self.parent.time_after_slider.observe(self.parent.update_time_range_event, names="value")
- self.parent.time_before_slider.observe(self.parent.update_time_range_event, names="value")
- config_widgets_id_dict["time_slider_before_experiment"] = hori_layout1.children[1]
- config_widgets_id_dict["time_slider_after_experiment"] = hori_layout1.children[3]
+ self.parent.time_after_slider.observe(
+ self.parent.update_time_range_event, names="value"
+ )
+ self.parent.time_before_slider.observe(
+ self.parent.update_time_range_event, names="value"
+ )
+ config_widgets_id_dict["time_slider_before_experiment"] = hori_layout1.children[
+ 1
+ ]
+ config_widgets_id_dict["time_slider_after_experiment"] = hori_layout1.children[
+ 3
+ ]
config_widgets_id_dict["experiment_label"] = hori_layout1.children[2]
nbr_sample = len(list_sample)
@@ -181,9 +218,13 @@ def get_html_table():
how_to_combine = how_to_ui.value
if force_combine == "yes":
- description = f"OBs will be combined using {how_to_combine}"
+ description = (
+ f"OBs will be combined using {how_to_combine}"
+ )
else:
- description = "OBs won't be combined! Each sample will use 1 OB"
+ description = (
+ "OBs won't be combined! Each sample will use 1 OB"
+ )
        html_table = (
            f""
@@ -216,25 +257,38 @@ def get_html_table():
force_ui_disabled = False
html_string = ""
force_ui = widgets.RadioButtons(
- options=["yes", "no"], value="yes", disabled=force_ui_disabled, layout=widgets.Layout(width="200px")
+ options=["yes", "no"],
+ value="yes",
+ disabled=force_ui_disabled,
+ layout=widgets.Layout(width="200px"),
)
force_ui.observe(self.parent.do_you_want_to_combine_changed, names="value")
combine_or_no_ui = widgets.VBox(
- [widgets.HTML("Do you want to combine the OBs?"), force_ui, widgets.HTML(html_string)]
+ [
+ widgets.HTML("Do you want to combine the OBs?"),
+ force_ui,
+ widgets.HTML(html_string),
+ ]
)
config_widgets_id_dict["force_combine"] = force_ui
config_widgets_id_dict["force_combine_message"] = combine_or_no_ui.children[2]
# how to combine widgets
how_to_ui = widgets.RadioButtons(
- options=["median", "mean"], value="median", layout=widgets.Layout(width="200px")
+ options=["median", "mean"],
+ value="median",
+ layout=widgets.Layout(width="200px"),
)
how_to_ui.observe(self.parent.how_to_combine_changed, names="value")
- how_to_combine_ui = widgets.VBox([widgets.HTML("How to combine the OBs?"), how_to_ui])
+ how_to_combine_ui = widgets.VBox(
+ [widgets.HTML("How to combine the OBs?"), how_to_ui]
+ )
config_widgets_id_dict["how_to_combine"] = how_to_ui
# table
- table_title = widgets.HTML("S U M M A R Y")
+ table_title = widgets.HTML(
+ "S U M M A R Y"
+ )
html_table = ""
table = widgets.HTML(value=html_table)
@@ -253,43 +307,65 @@ def get_html_table():
config_widgets_id_dict["time_slider_before_message"] = hori_layout2.children[1]
# table of metadata
- [metadata_table_label, metadata_table] = self.parent.populate_metadata_table(dict_config)
+ [metadata_table_label, metadata_table] = self.parent.populate_metadata_table(
+ dict_config
+ )
select_width = "100%"
sample_list_of_runs = widgets.VBox(
[
- widgets.HTML("List of Sample runs (ALL RUNS listed here will be " "used!"),
- widgets.Select(options=list_sample, layout=widgets.Layout(width=select_width, height="300px")),
+ widgets.HTML(
+ "List of Sample runs (ALL RUNS listed here will be " "used!"
+ ),
+ widgets.Select(
+ options=list_sample,
+ layout=widgets.Layout(width=select_width, height="300px"),
+ ),
],
layout=widgets.Layout(width="100%"),
)
# self.list_of_runs_ui = box0.children[1]
ob_list_of_runs = widgets.VBox(
[
- widgets.HTML("List of OBs. Only the selected images will be used!"),
+ widgets.HTML(
+ "List of OBs. Only the selected images will be used!"
+ ),
widgets.SelectMultiple(
- options=list_ob, value=list_ob, layout=widgets.Layout(width=select_width, height="300px")
+ options=list_ob,
+ value=list_ob,
+ layout=widgets.Layout(width=select_width, height="300px"),
),
],
layout=widgets.Layout(width="100%"),
)
- ob_list_of_runs.children[1].observe(self.parent.selection_of_ob_changed, names="value")
+ ob_list_of_runs.children[1].observe(
+ self.parent.selection_of_ob_changed, names="value"
+ )
df_list_of_runs = widgets.VBox(
[
- widgets.HTML("List of DCs.Only the selected images will be used!"),
+ widgets.HTML(
+ "List of DCs.Only the selected images will be used!"
+ ),
widgets.SelectMultiple(
- options=list_df, value=list_df, layout=widgets.Layout(width=select_width, height="300px")
+ options=list_df,
+ value=list_df,
+ layout=widgets.Layout(width=select_width, height="300px"),
),
],
layout=widgets.Layout(width="100%"),
)
        red_hr_line = widgets.HTML("")
-        black_hr_line = widgets.HTML("")
+        black_hr_line = widgets.HTML(
+            ""
+        )
# select ROI
select_roi_button = widgets.Button(
- description=ROI_BUTTON_DESCRIPTION, button_style="", layout=widgets.Layout(width="100%"), icon=ROI_ICON
+ description=ROI_BUTTON_DESCRIPTION,
+ button_style="",
+ layout=widgets.Layout(width="100%"),
+ icon=ROI_ICON,
)
select_roi_button.style.button_color = "lightgreen"
select_roi_button.style.font_weight = "bold"
@@ -330,11 +406,16 @@ def get_html_table():
]
)
- return {"verti_layout": verti_layout, "config_widgets_id_dict": config_widgets_id_dict}
+ return {
+ "verti_layout": verti_layout,
+ "config_widgets_id_dict": config_widgets_id_dict,
+ }
@staticmethod
def list_of_tiff_files(folder=""):
- list_of_tiff_files = file_handler.get_list_of_files(folder=folder, extension="tiff")
+ list_of_tiff_files = file_handler.get_list_of_files(
+ folder=folder, extension="tiff"
+ )
return list_of_tiff_files
@staticmethod
diff --git a/notebooks/__code/normalization/metadata_handler.py b/notebooks/__code/normalization/metadata_handler.py
index 0a1dcaa8..8d9cf35c 100755
--- a/notebooks/__code/normalization/metadata_handler.py
+++ b/notebooks/__code/normalization/metadata_handler.py
@@ -58,14 +58,21 @@ def retrieve_metadata(list_of_files=None, display_infos=False, label=""):
_dict = file_handler.retrieve_time_stamp(list_of_files, label=label)
_time_metadata_dict = MetadataHandler._reformat_dict(dictionary=_dict)
- _beamline_metadata_dict = MetadataHandler.retrieve_beamline_metadata(list_of_files)
+ _beamline_metadata_dict = MetadataHandler.retrieve_beamline_metadata(
+ list_of_files
+ )
_metadata_dict = combine_dictionaries(
- master_dictionary=_time_metadata_dict, servant_dictionary=_beamline_metadata_dict
+ master_dictionary=_time_metadata_dict,
+ servant_dictionary=_beamline_metadata_dict,
)
if display_infos:
            display(
-                HTML('Nbr of images: ' + str(len(_metadata_dict)) + "")
+                HTML(
+                    'Nbr of images: '
+                    + str(len(_metadata_dict))
+                    + ""
+                )
            )
-        display(HTML(f"{len(b)} Sample files selected"))
+        display(
+            HTML(
+                f"{len(b)} Sample files selected"
+            )
+        )
print(f"{len(b)} sample files selected")
def select_ob_runs(self):
@@ -74,7 +79,11 @@ def select_ob_runs(self):
def ob_next_function(self, b):
notebook_logging.info(f"OB files selected: {b}")
self.ob_runs = b
- display(HTML(f"{len(b)} OB files selected"))
+ display(
+ HTML(
+ f"{len(b)} OB files selected"
+ )
+ )
print(f"{len(b)} OB files selected")
def select_dc_runs(self):
@@ -89,15 +98,21 @@ def select_dc_runs(self):
def dc_next_function(self, b):
notebook_logging.info(f"DC files selected: {b}")
self.dc_runs = b
- display(HTML(f"{len(b)} DC files selected"))
+ display(
+ HTML(
+ f"{len(b)} DC files selected"
+ )
+ )
print(f"{len(b)} DC files selected")
- def select_data(self, instruction="Select data runs",
- next_function=None,
- start_dir=None,
- multiple=True,
- newdir_toolbar_button=False):
-
+ def select_data(
+ self,
+ instruction="Select data runs",
+ next_function=None,
+ start_dir=None,
+ multiple=True,
+ newdir_toolbar_button=False,
+ ):
self.list_input_folders_ui = MyFileSelectorPanel(
instruction=instruction,
start_dir=start_dir,
@@ -139,14 +154,15 @@ def normalization_settings(self):
list_files.ob = self.ob_runs
list_files.df = self.dc_runs
- self.o_norm_handler = NormalizationHandler(list_files=list_files,
- working_dir=self.working_dir,
- sample_data=self.data_array)
+ self.o_norm_handler = NormalizationHandler(
+ list_files=list_files,
+ working_dir=self.working_dir,
+ sample_data=self.data_array,
+ )
self.o_norm_handler.load_data()
self.o_norm_handler.settings()
def select_output_folder(self):
-
self.list_input_folders_ui = MyFileSelectorPanel(
instruction="Select output folder",
start_dir=self.working_dir,
@@ -161,7 +177,11 @@ def select_output_folder(self):
def normalized_and_export(self, output_folder):
notebook_logging.info(f"Output folder selected: {output_folder}")
- display(HTML(f"Output folder selected: {output_folder}"))
+ display(
+ HTML(
+ f"Output folder selected: {output_folder}"
+ )
+ )
self.output_folder = output_folder
def export_normalized_data(self):
@@ -181,22 +201,18 @@ def export_normalized_data(self):
def legend(cls) -> None:
        display(HTML(""))
        display(HTML("Legend"))
-        display(HTML(""
-                     "- Mandatory steps must be performed to ensure proper data processing."
-                     "- Optional but recommended steps are not mandatory but should be performed to ensure proper data processing."
-                     "- Optional steps are not mandatory but highly recommended to improve the quality of your data processing."
-                     ""))
+        display(
+            HTML(
+                ""
+                "- Mandatory steps must be performed to ensure proper data processing."
+                "- Optional but recommended steps are not mandatory but should be performed to ensure proper data processing."
+                "- Optional steps are not mandatory but highly recommended to improve the quality of your data processing."
+                ""
+            )
+        )
        display(HTML(""))
-
-
-
-
-
-
-
-
class NormalizationHandler:
data = None
integrated_sample = []
@@ -205,16 +221,18 @@ class NormalizationHandler:
normalized_data_array = []
- def __init__(self, list_files: ListFiles = None,
- working_dir: str = "",
- gamma_threshold: float = 0.9,
- sample_data: list = None):
-
+ def __init__(
+ self,
+ list_files: ListFiles = None,
+ working_dir: str = "",
+ gamma_threshold: float = 0.9,
+ sample_data: list = None,
+ ):
self.files = list_files
self.working_dir = working_dir
self.data = Data()
if sample_data is not None:
- self.data.sample = sample_data
+ self.data.sample = sample_data
self.gamma_threshold = gamma_threshold
@@ -379,7 +397,11 @@ def widgets_changed():
self.how_to_ui.disabled = False
else:
accordion_children = [self.force_ui, self.how_to_ui, table]
- accordion_title = [force_combine_title, how_to_combine_title, table_title]
+ accordion_title = [
+ force_combine_title,
+ how_to_combine_title,
+ table_title,
+ ]
self.how_to_ui.disabled = False
table.value = get_html_table()
accordion.children = accordion_children
@@ -392,9 +414,13 @@ def get_html_table():
how_to_combine = self.how_to_ui.value
if force_combine == "yes":
- description = f"OBs will be combined using {how_to_combine}"
+ description = (
+ f"OBs will be combined using {how_to_combine}"
+ )
else:
- description = "OBs won't be combined! Each sample will use 1 OB"
+ description = (
+ "OBs won't be combined! Each sample will use 1 OB"
+ )
html_table = (
f""
@@ -418,13 +444,18 @@ def get_html_table():
accordion_title = list()
self.force_ui = widgets.RadioButtons(
- options=["yes", "no"], value="yes", disabled=False, layout=widgets.Layout(width="200px")
+ options=["yes", "no"],
+ value="yes",
+ disabled=False,
+ layout=widgets.Layout(width="200px"),
)
accordion_children.append(self.force_ui)
self.force_ui.observe(force_combining_changed, names="value")
self.how_to_ui = widgets.RadioButtons(
- options=["median", "mean"], value="median", layout=widgets.Layout(width="200px")
+ options=["median", "mean"],
+ value="median",
+ layout=widgets.Layout(width="200px"),
)
accordion_children.append(self.how_to_ui)
self.how_to_ui.observe(how_to_combine_changed, names="value")
@@ -445,7 +476,9 @@ def get_html_table():
table.value = get_html_table()
- accordion = widgets.Accordion(children=accordion_children, title=accordion_title)
+ accordion = widgets.Accordion(
+ children=accordion_children, title=accordion_title
+ )
for _index, _title in enumerate(accordion_title):
accordion.set_title(_index, _title)
@@ -465,13 +498,18 @@ def run_normalization(self, dict_roi=None):
elif how_to_combine == "median":
force_median_ob = True
else:
- raise NotImplementedError(f"How to combine OB algorithm ({how_to_combine}) not implemented!")
+ raise NotImplementedError(
+ f"How to combine OB algorithm ({how_to_combine}) not implemented!"
+ )
if dict_roi is None:
# try:
self.o_norm.df_correction()
self.o_norm.normalization(
- notebook=True, force_median_ob=force_median_ob, force_mean_ob=force_mean_ob, force=True
+ notebook=True,
+ force_median_ob=force_median_ob,
+ force_mean_ob=force_mean_ob,
+ force=True,
)
self.normalized_data_array = self.o_norm.get_normalized_data()
self.normalized_metadata_array = self.o_norm.data["sample"]["metadata"]
@@ -514,7 +552,7 @@ def run_normalization(self, dict_roi=None):
force_median_ob=force_median_ob,
force_mean_ob=force_mean_ob,
force=True,
- )
+ )
# except ValueError:
# display(
# HTML(
@@ -551,9 +589,15 @@ def display_file_selector_from_home(ev):
hbox = widgets.HBox(
[
widgets.Button(
- description=f"Jump to {ipts} Shared Folder", button_style="success", layout=button_layout
+ description=f"Jump to {ipts} Shared Folder",
+ button_style="success",
+ layout=button_layout,
+ ),
+ widgets.Button(
+ description="Jump to My Home Folder",
+ button_style="success",
+ layout=button_layout,
),
- widgets.Button(description="Jump to My Home Folder", button_style="success", layout=button_layout),
]
)
go_to_shared_button_ui = hbox.children[0]
@@ -568,12 +612,17 @@ def display_file_selector_from_home(ev):
def display_file_selector(self, start_dir=""):
self.output_folder_ui = fileselector.FileSelectorPanel(
- instruction="Select Output Folder", start_dir=start_dir, multiple=False, type="directory"
+ instruction="Select Output Folder",
+ start_dir=start_dir,
+ multiple=False,
+ type="directory",
)
self.output_folder_ui.show()
def export(self, output_folder):
- base_folder = os.path.basename(os.path.dirname(self.list_file_names[0])) + "_normalized"
+ base_folder = (
+ os.path.basename(os.path.dirname(self.list_file_names[0])) + "_normalized"
+ )
output_folder = os.path.join(output_folder, base_folder)
output_folder = make_or_increment_folder_name(output_folder)
@@ -609,7 +658,10 @@ def select_gamma_coefficient(self):
[
widgets.Label("Gamma Coefficient:", layout=widgets.Layout(width="20%")),
widgets.FloatSlider(
- value=gamma_filtering_coefficient, min=0, max=1, layout=widgets.Layout(width="50%")
+ value=gamma_filtering_coefficient,
+ min=0,
+ max=1,
+ layout=widgets.Layout(width="50%"),
),
]
)
@@ -619,16 +671,6 @@ def get_coefficient(self):
return self.gamma_coeff_ui.children[1].value
-
-
-
-
-
-
-
-
-
-
def close(w):
"recursively close a widget"
if hasattr(w, "children"):
@@ -754,7 +796,10 @@ def __top_panel(self):
title_ui = widgets.HBox(
[
widgets.Label("Instructions:", layout=widgets.Layout(width="20%")),
- widgets.Label("Select Samples Images and click NEXT", layout=widgets.Layout(width="50%")),
+ widgets.Label(
+ "Select Samples Images and click NEXT",
+ layout=widgets.Layout(width="50%"),
+ ),
]
)
@@ -764,7 +809,9 @@ def __top_panel(self):
# widgets.Label("None", layout=widgets.Layout(width="50%")),
# ]
# )
- self.title = title_ui.children[1] # "Select [Samples/OB/DF] Images [and click NEXT]
+ self.title = title_ui.children[
+ 1
+ ] # "Select [Samples/OB/DF] Images [and click NEXT]
# self.label = label_ui.children[1] # number of samples selected
# self.top_panel = widgets.VBox(children=[title_ui, label_ui], layout=self.layout)
@@ -789,7 +836,9 @@ def __bottom_panel(self):
self.prev_button_ui.on_click(self.prev_button_clicked)
list_ui.append(self.prev_button_ui)
- self.current_state_label_ui = widgets.Label(" ", layout=widgets.Layout(width="70%"))
+ self.current_state_label_ui = widgets.Label(
+ " ", layout=widgets.Layout(width="70%")
+ )
list_ui.append(self.current_state_label_ui)
if self.next_button:
@@ -826,7 +875,9 @@ def nextStep(self):
class WizardPanel:
- label_layout = Layout(border="1px lighgray solide", height="35px", padding="8px", width="300px")
+ label_layout = Layout(
+ border="1px lighgray solide", height="35px", padding="8px", width="300px"
+ )
sample_panel = None
def __init__(self, sample_panel=None):
@@ -900,6 +951,3 @@ def __init__(self, sample_panel=None):
# o_norm_handler.load_data()
# self.top_object.o_norm_handler = o_norm_handler
# self.top_object.o_norm = o_norm_handler.o_norm
-
-
-
diff --git a/notebooks/__code/normalization/normalization_with_simplify_selection.py b/notebooks/__code/normalization/normalization_with_simplify_selection.py
index c0ce7beb..03b6cbd0 100755
--- a/notebooks/__code/normalization/normalization_with_simplify_selection.py
+++ b/notebooks/__code/normalization/normalization_with_simplify_selection.py
@@ -12,16 +12,29 @@
from __code.ipywe import myfileselector
from __code.normalization import utilities
from __code.normalization.get import Get
-from __code.normalization.metadata_handler import METADATA_KEYS, MetadataHandler, MetadataName
+from __code.normalization.metadata_handler import (
+ METADATA_KEYS,
+ MetadataHandler,
+ MetadataName,
+)
from __code.roi_selection_ui import Interface
-from . import ROI_BUTTON_DESCRIPTION, ROI_ICON, TEMPORARY_ROI_BUTTON_DESCRIPTION, TEMPORARY_ROI_ICON
+from . import (
+ ROI_BUTTON_DESCRIPTION,
+ ROI_ICON,
+ TEMPORARY_ROI_BUTTON_DESCRIPTION,
+ TEMPORARY_ROI_ICON,
+)
JSON_DEBUGGING = False
MAX_DF_COUNTS_ALLOWED = 900
METADATA_ERROR_ALLOWED = 1
-LIST_METADATA_NOT_INSTRUMENT_RELATED = ["filename", "time_stamp", "time_stamp_user_format"]
+LIST_METADATA_NOT_INSTRUMENT_RELATED = [
+ "filename",
+ "time_stamp",
+ "time_stamp_user_format",
+]
class NormalizationWithSimplifySelection:
@@ -95,14 +108,22 @@ def select_sample_folder(self):
def retrieve_sample_metadata_from_sample_folder(self, sample_folder):
logging.info(f"select sample folder: {sample_folder}")
- [list_of_images, _] = file_handler.retrieve_list_of_most_dominant_extension_from_folder(folder=sample_folder)
+ [list_of_images, _] = (
+ file_handler.retrieve_list_of_most_dominant_extension_from_folder(
+ folder=sample_folder
+ )
+ )
can_we_continue = self.images_files_found_in_list(list_of_images)
if can_we_continue:
logging.info(f"-> number of images found: {len(list_of_images)}")
self.retrieve_sample_metadata(list_of_images)
else:
logging.info("-> No images found!")
- display(HTML('No images found in the folder selected!'))
+ display(
+ HTML(
+ 'No images found in the folder selected!'
+ )
+ )
def images_files_found_in_list(self, list_of_images):
for _file in list_of_images:
@@ -127,19 +148,27 @@ def retrieve_sample_metadata(self, list_of_images):
# self.display_time_range_selection_widgets()
def select_ob_folder(self):
- self.select_folder(message="open beam", next_function=self.retrieve_ob_metadata())
+ self.select_folder(
+ message="open beam", next_function=self.retrieve_ob_metadata()
+ )
def retrieve_ob_metadata(self, selected_folder):
list_of_ob_files = Get.list_of_tiff_files(folder=selected_folder)
- self.ob_metadata_dict = MetadataHandler.retrieve_metadata(list_of_files=list_of_ob_files)
+ self.ob_metadata_dict = MetadataHandler.retrieve_metadata(
+ list_of_files=list_of_ob_files
+ )
def auto_retrieve_ob_metadata(self):
logging.info("> auto_retrieve_ob_metadata")
folder = os.path.join(self.working_dir, "raw", "ob")
logging.info(f"-> folder: {folder}")
- list_of_ob_files = file_handler.get_list_of_all_files_in_subfolders(folder=folder, extensions=["tiff", "tif"])
+ list_of_ob_files = file_handler.get_list_of_all_files_in_subfolders(
+ folder=folder, extensions=["tiff", "tif"]
+ )
logging.info(f"-> nbr of ob files found: {len(list_of_ob_files)}")
- self.ob_metadata_dict = MetadataHandler.retrieve_metadata(list_of_files=list_of_ob_files, label="ob")
+ self.ob_metadata_dict = MetadataHandler.retrieve_metadata(
+ list_of_files=list_of_ob_files, label="ob"
+ )
# logging.info(f"ob metadata dict")
# logging.info(f"-> {self.ob_metadata_dict}")
@@ -155,11 +184,15 @@ def select_folder(self, message="", next_function=None):
folder_widget.show()
def select_df_folder(self):
- self.select_folder(message="dark current", next_function=self.retrieve_df_metadata())
+ self.select_folder(
+ message="dark current", next_function=self.retrieve_df_metadata()
+ )
def retrieve_df_metadata(self, selected_folder):
list_of_df_files = Get.list_of_tiff_files(folder=selected_folder)
- self.df_metadata_dict = MetadataHandler.retrieve_metadata(list_of_files=list_of_df_files)
+ self.df_metadata_dict = MetadataHandler.retrieve_metadata(
+ list_of_files=list_of_df_files
+ )
def auto_retrieve_df_metadata(self):
folder_df = os.path.join(self.working_dir, "raw", "df")
@@ -172,7 +205,9 @@ def auto_retrieve_df_metadata(self):
)
list_of_files = list_of_df_files + list_of_dc_files
logging.info(f"-> nbr of dc files found: {len(list_of_files)}")
- self.df_metadata_dict = MetadataHandler.retrieve_metadata(list_of_files=list_of_files, label="df")
+ self.df_metadata_dict = MetadataHandler.retrieve_metadata(
+ list_of_files=list_of_files, label="df"
+ )
def match_files(self):
"""This is where the files will be associated with their respective OB, DC by using the metadata"""
@@ -201,14 +236,26 @@ def match_ob(self):
list_of_sample_acquisition = final_full_master_dict.keys()
for _index_ob in list_ob_dict.keys():
- _all_ob_instrument_metadata = Get.get_instrument_metadata_only(list_ob_dict[_index_ob])
- _ob_instrument_metadata = utilities.isolate_instrument_metadata(_all_ob_instrument_metadata)
- _acquisition_time = _all_ob_instrument_metadata[MetadataName.EXPOSURE_TIME.value]["value"]
+ _all_ob_instrument_metadata = Get.get_instrument_metadata_only(
+ list_ob_dict[_index_ob]
+ )
+ _ob_instrument_metadata = utilities.isolate_instrument_metadata(
+ _all_ob_instrument_metadata
+ )
+ _acquisition_time = _all_ob_instrument_metadata[
+ MetadataName.EXPOSURE_TIME.value
+ ]["value"]
if _acquisition_time in list_of_sample_acquisition:
for _config_id in final_full_master_dict[_acquisition_time].keys():
- _sample_metadata_infos = final_full_master_dict[_acquisition_time][_config_id]["metadata_infos"]
- if utilities.all_metadata_match(_sample_metadata_infos, _ob_instrument_metadata):
- final_full_master_dict[_acquisition_time][_config_id]["list_ob"].append(list_ob_dict[_index_ob])
+ _sample_metadata_infos = final_full_master_dict[_acquisition_time][
+ _config_id
+ ]["metadata_infos"]
+ if utilities.all_metadata_match(
+ _sample_metadata_infos, _ob_instrument_metadata
+ ):
+ final_full_master_dict[_acquisition_time][_config_id][
+ "list_ob"
+ ].append(list_ob_dict[_index_ob])
self.final_full_master_dict = final_full_master_dict
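The match_ob() hunk above (and the match_df() one that follows) encode the same rule: an open-beam or dark-field entry is attached to a sample configuration only when its acquisition (exposure) time is one of the sample acquisition times and its remaining instrument metadata agree within METADATA_ERROR_ALLOWED. A minimal, self-contained sketch of that rule follows; the helper names (metadata_match, attach_open_beams) and the flat dictionaries are illustrative only, not the module's Get/utilities API.

METADATA_ERROR_ALLOWED = 1  # same tolerance constant the module defines


def metadata_match(metadata_1: dict, metadata_2: dict) -> bool:
    """True when every shared metadata entry agrees within the allowed tolerance."""
    for _key, _entry in metadata_1.items():
        _other = metadata_2.get(_key)
        if _other is None:
            continue
        try:
            if abs(float(_entry["value"]) - float(_other["value"])) > METADATA_ERROR_ALLOWED:
                return False
        except ValueError:  # non-numeric values (e.g. strings) must match exactly
            if _entry["value"] != _other["value"]:
                return False
    return True


def attach_open_beams(final_full_master_dict: dict, list_ob: list) -> dict:
    """Append each OB entry to every config of its acquisition time whose metadata match."""
    for _ob in list_ob:
        _acquisition_time = _ob["exposure_time"]
        for _config in final_full_master_dict.get(_acquisition_time, {}).values():
            if metadata_match(_config["metadata_infos"], _ob["metadata"]):
                _config["list_ob"].append(_ob)
    return final_full_master_dict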
@@ -224,20 +271,30 @@ def match_df(self):
list_of_sample_acquisition = final_full_master_dict.keys()
for _index_df in list_df_dict.keys():
- _all_df_instrument_metadata = Get.get_instrument_metadata_only(list_df_dict[_index_df])
- _df_instrument_metadata = utilities.isolate_instrument_metadata(_all_df_instrument_metadata)
- _acquisition_time = _all_df_instrument_metadata[MetadataName.EXPOSURE_TIME.value]["value"]
+ _all_df_instrument_metadata = Get.get_instrument_metadata_only(
+ list_df_dict[_index_df]
+ )
+ _df_instrument_metadata = utilities.isolate_instrument_metadata(
+ _all_df_instrument_metadata
+ )
+ _acquisition_time = _all_df_instrument_metadata[
+ MetadataName.EXPOSURE_TIME.value
+ ]["value"]
if _acquisition_time in list_of_sample_acquisition:
for _config_id in final_full_master_dict[_acquisition_time].keys():
- _sample_metadata_infos = final_full_master_dict[_acquisition_time][_config_id]["metadata_infos"]
+ _sample_metadata_infos = final_full_master_dict[_acquisition_time][
+ _config_id
+ ]["metadata_infos"]
if utilities.all_metadata_match(
_sample_metadata_infos,
_df_instrument_metadata,
list_key_to_check=[METADATA_KEYS["df"][1].value],
):
- final_full_master_dict[_acquisition_time][_config_id]["list_df"].append(list_df_dict[_index_df])
+ final_full_master_dict[_acquisition_time][_config_id][
+ "list_df"
+ ].append(list_df_dict[_index_df])
self.final_full_master_dict = final_full_master_dict
@@ -253,8 +310,12 @@ def create_master_sample_dict(self):
_dict_file_index = sample_metadata_dict[_file_index]
_sample_file = _dict_file_index["filename"]
- _acquisition_time = _dict_file_index[MetadataName.EXPOSURE_TIME.value]["value"]
- _instrument_metadata = utilities.isolate_instrument_metadata(_dict_file_index)
+ _acquisition_time = _dict_file_index[MetadataName.EXPOSURE_TIME.value][
+ "value"
+ ]
+ _instrument_metadata = utilities.isolate_instrument_metadata(
+ _dict_file_index
+ )
_sample_time_stamp = _dict_file_index["time_stamp"]
# find which image was first and which image was last
@@ -264,7 +325,9 @@ def create_master_sample_dict(self):
last_sample_image = _dict_file_index
# first entry or first time seeing that acquisition time
- if (len(final_full_master_dict) == 0) or _acquisition_time not in final_full_master_dict.keys():
+ if (
+ len(final_full_master_dict) == 0
+ ) or _acquisition_time not in final_full_master_dict.keys():
_first_images_dict = {"sample": first_sample_image, "ob": {}, "df": {}}
_last_images_dict = {"sample": last_sample_image, "ob": {}, "df": {}}
_temp_dict = {
@@ -275,7 +338,9 @@ def create_master_sample_dict(self):
"list_df": [],
"time_range_s_selected": {"before": np.nan, "after": np.nan},
"time_range_s": {"before": np.nan, "after": np.nan},
- "metadata_infos": Get.get_instrument_metadata_only(_instrument_metadata),
+ "metadata_infos": Get.get_instrument_metadata_only(
+ _instrument_metadata
+ ),
}
final_full_master_dict[_acquisition_time] = {}
final_full_master_dict[_acquisition_time]["config0"] = _temp_dict
@@ -283,25 +348,44 @@ def create_master_sample_dict(self):
# check that all the metadata_infos match for the first group of that acquisition time,
# otherwise check the next one or create a group
if _acquisition_time in final_full_master_dict.keys():
- _dict_for_this_acquisition_time = final_full_master_dict[_acquisition_time]
+ _dict_for_this_acquisition_time = final_full_master_dict[
+ _acquisition_time
+ ]
_found_a_match = False
for _config_key in _dict_for_this_acquisition_time.keys():
_config = _dict_for_this_acquisition_time[_config_key]
if utilities.all_metadata_match(
- metadata_1=_config["metadata_infos"], metadata_2=_instrument_metadata
+ metadata_1=_config["metadata_infos"],
+ metadata_2=_instrument_metadata,
):
_config["list_sample"].append(_dict_file_index)
- _first_images_dict = {"sample": first_sample_image, "ob": {}, "df": {}}
- _last_images_dict = {"sample": last_sample_image, "ob": {}, "df": {}}
+ _first_images_dict = {
+ "sample": first_sample_image,
+ "ob": {},
+ "df": {},
+ }
+ _last_images_dict = {
+ "sample": last_sample_image,
+ "ob": {},
+ "df": {},
+ }
_config["first_images"] = _first_images_dict
_config["last_images"] = _last_images_dict
_found_a_match = True
if not _found_a_match:
- _first_images_dict = {"sample": first_sample_image, "ob": {}, "df": {}}
- _last_images_dict = {"sample": last_sample_image, "ob": {}, "df": {}}
+ _first_images_dict = {
+ "sample": first_sample_image,
+ "ob": {},
+ "df": {},
+ }
+ _last_images_dict = {
+ "sample": last_sample_image,
+ "ob": {},
+ "df": {},
+ }
_temp_dict = {
"list_sample": [_dict_file_index],
@@ -309,16 +393,31 @@ def create_master_sample_dict(self):
"last_images": _last_images_dict,
"list_ob": [],
"list_df": [],
- "time_range_s_selected": {"before": np.nan, "after": np.nan},
+ "time_range_s_selected": {
+ "before": np.nan,
+ "after": np.nan,
+ },
"time_range_s": {"before": np.nan, "after": np.nan},
- "metadata_infos": Get.get_instrument_metadata_only(_instrument_metadata),
+ "metadata_infos": Get.get_instrument_metadata_only(
+ _instrument_metadata
+ ),
}
nbr_config = len(_dict_for_this_acquisition_time.keys())
- _dict_for_this_acquisition_time[f"config{nbr_config}"] = _temp_dict
+ _dict_for_this_acquisition_time[f"config{nbr_config}"] = (
+ _temp_dict
+ )
else:
- _first_images_dict = {"sample": first_sample_image, "ob": {}, "df": {}}
- _last_images_dict = {"sample": last_sample_image, "ob": {}, "df": {}}
+ _first_images_dict = {
+ "sample": first_sample_image,
+ "ob": {},
+ "df": {},
+ }
+ _last_images_dict = {
+ "sample": last_sample_image,
+ "ob": {},
+ "df": {},
+ }
_temp_dict = {
"list_sample": [_dict_file_index],
@@ -328,7 +427,9 @@ def create_master_sample_dict(self):
"list_df": [],
"time_range_s_selected": {"before": np.nan, "after": np.nan},
"time_range_s": {"before": np.nan, "after": np.nan},
- "metadata_infos": Get.get_instrument_metadata_only(_instrument_metadata),
+ "metadata_infos": Get.get_instrument_metadata_only(
+ _instrument_metadata
+ ),
}
final_full_master_dict[_acquisition_time] = {}
final_full_master_dict[_acquisition_time]["config0"] = _temp_dict
@@ -375,19 +476,29 @@ def calculate_time_range(self):
for _config in current_acquisition_dict.keys():
current_acquisition_config_dict = current_acquisition_dict[_config]
- first_sample_image = current_acquisition_config_dict["first_images"]["sample"]
+ first_sample_image = current_acquisition_config_dict["first_images"][
+ "sample"
+ ]
first_ob_image = current_acquisition_config_dict["first_images"]["ob"]
- delta_time_before = first_sample_image.get("time_stamp", 0) - first_ob_image.get("time_stamp", 0)
+ delta_time_before = first_sample_image.get(
+ "time_stamp", 0
+ ) - first_ob_image.get("time_stamp", 0)
_time_range_s_before = delta_time_before if delta_time_before > 0 else 0
last_sample_image = current_acquisition_config_dict["last_images"]["sample"]
last_ob_image = current_acquisition_config_dict["last_images"]["ob"]
- delta_time_after = last_ob_image.get("time_stamp", 0) - last_sample_image.get("time_stamp", 0)
+ delta_time_after = last_ob_image.get(
+ "time_stamp", 0
+ ) - last_sample_image.get("time_stamp", 0)
_time_range_s_after = delta_time_after if delta_time_after > 0 else 0
- _final_full_master_dict[_acquisition][_config]["time_range_s"]["before"] = _time_range_s_before
- _final_full_master_dict[_acquisition][_config]["time_range_s"]["after"] = _time_range_s_after
+ _final_full_master_dict[_acquisition][_config]["time_range_s"]["before"] = (
+ _time_range_s_before
+ )
+ _final_full_master_dict[_acquisition][_config]["time_range_s"]["after"] = (
+ _time_range_s_after
+ )
def display_time_range_selection_widgets(self):
_final_full_master_dict = self.final_full_master_dict
@@ -396,7 +507,9 @@ def display_time_range_selection_widgets(self):
o_get = Get(parent=self)
- for _acquisition_index, _acquisition in enumerate(_final_full_master_dict.keys()):
+ for _acquisition_index, _acquisition in enumerate(
+ _final_full_master_dict.keys()
+ ):
_dict_of_this_acquisition = _final_full_master_dict[_acquisition]
_config_tab = widgets.Tab()
@@ -411,8 +524,12 @@ def display_time_range_selection_widgets(self):
_current_acquisition_tab_widgets_id[_index] = _config_widgets_id_dict
_config_tab_dict[_acquisition_index] = _current_acquisition_tab_widgets_id
- _acquisition_tabs.children += (_config_tab,) # add all the config tab to top acquisition tab
- _acquisition_tabs.set_title(_acquisition_index, f"Acquisition: {_acquisition}s")
+ _acquisition_tabs.children += (
+ _config_tab,
+ ) # add all the config tab to top acquisition tab
+ _acquisition_tabs.set_title(
+ _acquisition_index, f"Acquisition: {_acquisition}s"
+ )
_config_tab
display(_acquisition_tabs)
@@ -423,8 +540,12 @@ def display_time_range_selection_widgets(self):
def calculate_max_time_before_and_after_exp_for_this_config(self, dict_config):
max_time_before = 0
- first_sample_image_time_stamp = dict_config["first_images"]["sample"]["time_stamp"]
- first_ob_image_time_stamp = dict_config["first_images"]["ob"].get("time_stamp", 0)
+ first_sample_image_time_stamp = dict_config["first_images"]["sample"][
+ "time_stamp"
+ ]
+ first_ob_image_time_stamp = dict_config["first_images"]["ob"].get(
+ "time_stamp", 0
+ )
if first_ob_image_time_stamp > first_sample_image_time_stamp:
max_time_before = 0
@@ -433,7 +554,9 @@ def calculate_max_time_before_and_after_exp_for_this_config(self, dict_config):
max_time_after = 0
- last_sample_image_time_stamp = dict_config["last_images"]["sample"]["time_stamp"]
+ last_sample_image_time_stamp = dict_config["last_images"]["sample"][
+ "time_stamp"
+ ]
last_ob_image_time_stamp = dict_config["last_images"]["ob"].get("time_stamp", 0)
if last_ob_image_time_stamp < last_sample_image_time_stamp:
@@ -445,11 +568,16 @@ def calculate_max_time_before_and_after_exp_for_this_config(self, dict_config):
def populate_metadata_table(self, current_config):
metadata_config = current_config["metadata_infos"]
- table_label = widgets.Label("List of Metadata used to match data set", layout=widgets.Layout(width="30%"))
+ table_label = widgets.Label(
+ "List of Metadata used to match data set",
+ layout=widgets.Layout(width="30%"),
+ )
table_value = ""
for _key, _value in metadata_config.items():
- table_value += "| {} | {} |
".format(_value["name"], _value["value"])
+ table_value += "| {} | {} |
".format(
+ _value["name"], _value["value"]
+ )
table_value += "
"
table = widgets.HTML(value=table_value)
@@ -470,7 +598,9 @@ def update_config_widgets(self, state):
visibility = "visible"
o_get = Get(parent=self)
- [time_before_selected_ui, time_after_selected_ui] = o_get.time_before_and_after_ui_of_this_config()
+ [time_before_selected_ui, time_after_selected_ui] = (
+ o_get.time_before_and_after_ui_of_this_config()
+ )
experiment_label_ui = o_get.experiment_label_ui_of_this_config()
experiment_label_ui.layout.visibility = visibility
@@ -489,10 +619,16 @@ def show_or_not_before_and_after_sliders(self):
self.calculate_max_time_before_and_after_exp_for_this_config(current_config)
)
- slider_before_visibility = "visible" if max_time_elapse_before_experiment > 0 else "hidden"
- slider_after_visibility = "visible" if max_time_elapse_after_experiment > 0 else "hidden"
+ slider_before_visibility = (
+ "visible" if max_time_elapse_before_experiment > 0 else "hidden"
+ )
+ slider_after_visibility = (
+ "visible" if max_time_elapse_after_experiment > 0 else "hidden"
+ )
- [time_before_selected_ui, time_after_selected_ui] = o_get.time_before_and_after_ui_of_this_config()
+ [time_before_selected_ui, time_after_selected_ui] = (
+ o_get.time_before_and_after_ui_of_this_config()
+ )
time_before_selected_ui.layout.visibility = slider_before_visibility
time_after_selected_ui.layout.visibility = slider_after_visibility
@@ -515,7 +651,9 @@ def update_list_of_files_in_widgets_using_new_time_range(self):
# retrieve list of ob and df for this config for this acquisition
final_full_master_dict = self.final_full_master_dict
- dict_for_this_config = final_full_master_dict[float(acquisition_key)][config_key]
+ dict_for_this_config = final_full_master_dict[float(acquisition_key)][
+ config_key
+ ]
list_ob = dict_for_this_config["list_ob"]
# no need to do anything more if user wants to use all the files
@@ -524,22 +662,30 @@ def update_list_of_files_in_widgets_using_new_time_range(self):
else:
# retrieve first and last sample file for this config and for this acquisition
- first_sample_image_time_stamp = dict_for_this_config["first_images"]["sample"]["time_stamp"]
- last_sample_images_time_stamp = dict_for_this_config["last_images"]["sample"]["time_stamp"]
+ first_sample_image_time_stamp = dict_for_this_config["first_images"][
+ "sample"
+ ]["time_stamp"]
+ last_sample_images_time_stamp = dict_for_this_config["last_images"][
+ "sample"
+ ]["time_stamp"]
# retrieve time before and after selected
- [time_before_selected, time_after_selected] = o_get.time_before_and_after_of_this_config()
+ [time_before_selected, time_after_selected] = (
+ o_get.time_before_and_after_of_this_config()
+ )
# calculate list of ob that are within that time range
list_ob_to_keep = []
for _ob_file in list_ob:
_ob_time_stamp = _ob_file["time_stamp"]
if (_ob_time_stamp < first_sample_image_time_stamp) and (
- (first_sample_image_time_stamp - _ob_time_stamp) <= np.abs(time_before_selected)
+ (first_sample_image_time_stamp - _ob_time_stamp)
+ <= np.abs(time_before_selected)
):
list_ob_to_keep.append(_ob_file["filename"])
elif (_ob_time_stamp > last_sample_images_time_stamp) and (
- (_ob_time_stamp - last_sample_images_time_stamp) <= np.abs(time_after_selected)
+ (_ob_time_stamp - last_sample_images_time_stamp)
+ <= np.abs(time_after_selected)
):
list_ob_to_keep.append(_ob_file["filename"])
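The filter above keeps an OB only when it was taken inside the user-selected window before the first sample image or after the last one. The same test, on plain timestamp values and detached from the widget layer (function name and arguments are illustrative):

import numpy as np


def filter_ob_by_time_window(
    list_ob: list,
    first_sample_time_stamp: float,
    last_sample_time_stamp: float,
    time_before_selected_s: float,
    time_after_selected_s: float,
) -> list:
    """Keep OB files acquired within the selected window around the sample acquisition."""
    list_ob_to_keep = []
    for _ob_file in list_ob:
        _ob_time_stamp = _ob_file["time_stamp"]
        taken_just_before = (_ob_time_stamp < first_sample_time_stamp) and (
            (first_sample_time_stamp - _ob_time_stamp) <= np.abs(time_before_selected_s)
        )
        taken_just_after = (_ob_time_stamp > last_sample_time_stamp) and (
            (_ob_time_stamp - last_sample_time_stamp) <= np.abs(time_after_selected_s)
        )
        if taken_just_before or taken_just_after:
            list_ob_to_keep.append(_ob_file["filename"])
    return list_ob_to_keep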
@@ -549,9 +695,13 @@ def update_list_of_ob_for_current_config_tab(self, list_ob=[]):
o_get = Get(parent=self)
[active_acquisition, active_config] = o_get.active_tabs()
# short_version_list_ob = NormalizationWithSimplifySelection.keep_basename_only(list_files=list_ob)
- self.config_tab_dict[active_acquisition][active_config]["list_of_ob"].options = list_ob
+ self.config_tab_dict[active_acquisition][active_config][
+ "list_of_ob"
+ ].options = list_ob
# select everything by default
- self.config_tab_dict[active_acquisition][active_config]["list_of_ob"].value = list_ob
+ self.config_tab_dict[active_acquisition][active_config][
+ "list_of_ob"
+ ].value = list_ob
def update_time_range_message(self, value):
o_get = Get(parent=self)
@@ -564,7 +714,9 @@ def update_time_range_message(self, value):
# "OBs and DFs " \
# "matching the samples images"
else:
- [time_before_selected, time_after_selected] = o_get.time_before_and_after_of_this_config()
+ [time_before_selected, time_after_selected] = (
+ o_get.time_before_and_after_of_this_config()
+ )
time_before_selected = np.abs(time_before_selected)
@@ -585,7 +737,9 @@ def _format_time(_time_s):
str_time_before = _format_time(time_before_selected)
str_time_after = _format_time(time_after_selected)
- logging.info(f"str_time_before: {time_before_selected} -> {str_time_before}")
+ logging.info(
+ f"str_time_before: {time_before_selected} -> {str_time_before}"
+ )
_message = (
"Use OB taken up to " + str_time_before + " "
@@ -594,7 +748,9 @@ def _format_time(_time_s):
"after experiment!"
)
- time_before_and_after_message_ui = o_get.time_before_and_after_message_ui_of_this_config()
+ time_before_and_after_message_ui = (
+ o_get.time_before_and_after_message_ui_of_this_config()
+ )
time_before_and_after_message_ui.value = _message
def do_you_want_to_combine_changed(self, value):
@@ -606,7 +762,9 @@ def do_you_want_to_combine_changed(self, value):
o_get = Get(parent=self)
[active_acquisition, active_config] = o_get.active_tabs()
- self.config_tab_dict[active_acquisition][active_config]["how_to_combine"].disabled = disabled_how_to_combine
+ self.config_tab_dict[active_acquisition][active_config][
+ "how_to_combine"
+ ].disabled = disabled_how_to_combine
self.update_this_config_table()
def how_to_combine_changed(self, value):
@@ -617,20 +775,44 @@ def update_this_config_table(self):
[active_acquisition, active_config] = o_get.active_tabs()
table_ui = self.config_tab_dict[active_acquisition][active_config]["table"]
- nbr_ob = len(self.config_tab_dict[active_acquisition][active_config]["list_of_ob"].value)
- nbr_sample = len(self.config_tab_dict[active_acquisition][active_config]["list_of_sample_runs"].options)
- nbr_df = len(self.config_tab_dict[active_acquisition][active_config]["list_of_df"].value)
+ nbr_ob = len(
+ self.config_tab_dict[active_acquisition][active_config]["list_of_ob"].value
+ )
+ nbr_sample = len(
+ self.config_tab_dict[active_acquisition][active_config][
+ "list_of_sample_runs"
+ ].options
+ )
+ nbr_df = len(
+ self.config_tab_dict[active_acquisition][active_config]["list_of_df"].value
+ )
- force_combine_disabled_state = self.config_tab_dict[active_acquisition][active_config]["force_combine"].disabled
- force_combine_value = self.config_tab_dict[active_acquisition][active_config]["force_combine"].value
- how_to_combine_value = self.config_tab_dict[active_acquisition][active_config]["how_to_combine"].value
+ force_combine_disabled_state = self.config_tab_dict[active_acquisition][
+ active_config
+ ]["force_combine"].disabled
+ force_combine_value = self.config_tab_dict[active_acquisition][active_config][
+ "force_combine"
+ ].value
+ how_to_combine_value = self.config_tab_dict[active_acquisition][active_config][
+ "how_to_combine"
+ ].value
if force_combine_value == "yes":
- description = "OBs will be combined using " + how_to_combine_value + " method!"
+ description = (
+ "OBs will be combined using "
+ + how_to_combine_value
+ + " method!"
+ )
elif force_combine_disabled_state:
- description = "OBs will be combined using " + how_to_combine_value + " method!"
+ description = (
+ "OBs will be combined using "
+ + how_to_combine_value
+ + " method!"
+ )
else:
- description = "OBs won't be combined! Each sample will use only 1 OB!"
+ description = (
+ "OBs won't be combined! Each sample will use only 1 OB!"
+ )
html_table = (
f""
@@ -656,15 +838,25 @@ def selection_of_ob_changed(self, value):
nbr_ob = len(list_ob_selected)
o_get = Get(parent=self)
[active_acquisition, active_config] = o_get.active_tabs()
- list_sample = self.config_tab_dict[active_acquisition][active_config]["list_of_sample_runs"].options
+ list_sample = self.config_tab_dict[active_acquisition][active_config][
+ "list_of_sample_runs"
+ ].options
nbr_sample = len(list_sample)
if nbr_sample == nbr_ob:
- self.config_tab_dict[active_acquisition][active_config]["force_combine"].disabled = False
- self.config_tab_dict[active_acquisition][active_config]["force_combine_message"].value = ""
+ self.config_tab_dict[active_acquisition][active_config][
+ "force_combine"
+ ].disabled = False
+ self.config_tab_dict[active_acquisition][active_config][
+ "force_combine_message"
+ ].value = ""
else:
- self.config_tab_dict[active_acquisition][active_config]["force_combine"].disabled = True
- self.config_tab_dict[active_acquisition][active_config]["force_combine_message"].value = (
+ self.config_tab_dict[active_acquisition][active_config][
+ "force_combine"
+ ].disabled = True
+ self.config_tab_dict[active_acquisition][active_config][
+ "force_combine_message"
+ ].value = (
"INFO: the option to combine or not is disabled as the number of "
"sample "
"and "
@@ -681,20 +873,28 @@ def create_final_json(self):
_config_tab_dict = self.config_tab_dict
_final_json_dict = {}
- for _acquisition_index, _acquisition in enumerate(_final_full_master_dict.keys()):
+ for _acquisition_index, _acquisition in enumerate(
+ _final_full_master_dict.keys()
+ ):
_final_json_for_this_acquisition = {}
_config_of_this_acquisition = _config_tab_dict[_acquisition_index]
_dict_of_this_acquisition = _final_full_master_dict[_acquisition]
for _config_index, _config in enumerate(_dict_of_this_acquisition.keys()):
- this_config_tab_dict = _config_tab_dict[_acquisition_index][_config_index]
+ this_config_tab_dict = _config_tab_dict[_acquisition_index][
+ _config_index
+ ]
normalize_flag = this_config_tab_dict["use_this_config"]
list_sample = this_config_tab_dict["list_of_sample_runs"].options
list_ob = this_config_tab_dict["list_of_ob"].value
list_df = this_config_tab_dict["list_of_df"].value
- force_combine_disabled_state = this_config_tab_dict["force_combine"].disabled # True or false
- force_combine_value = this_config_tab_dict["force_combine"].value # 'yes' or 'no'
+ force_combine_disabled_state = this_config_tab_dict[
+ "force_combine"
+ ].disabled # True or false
+ force_combine_value = this_config_tab_dict[
+ "force_combine"
+ ].value # 'yes' or 'no'
how_to_combine_value = this_config_tab_dict["how_to_combine"].value
roi = this_config_tab_dict.get("roi_selected", None)
@@ -723,16 +923,24 @@ def create_final_json(self):
def roi_button_clicked(self, value):
o_get = Get(parent=self)
[active_acquisition, active_config] = o_get.active_tabs()
- list_sample = self.config_tab_dict[active_acquisition][active_config]["list_of_sample_runs"].options
+ list_sample = self.config_tab_dict[active_acquisition][active_config][
+ "list_of_sample_runs"
+ ].options
self.config_tab_dict[active_acquisition][active_config][
"select_roi_button"
].description = TEMPORARY_ROI_BUTTON_DESCRIPTION
- self.config_tab_dict[active_acquisition][active_config]["select_roi_button"].icon = TEMPORARY_ROI_ICON
- self.config_tab_dict[active_acquisition][active_config]["select_roi_button"].disabled = True
+ self.config_tab_dict[active_acquisition][active_config][
+ "select_roi_button"
+ ].icon = TEMPORARY_ROI_ICON
+ self.config_tab_dict[active_acquisition][active_config][
+ "select_roi_button"
+ ].disabled = True
o_gui = Interface(
- list_of_files=list_sample, callback=self.returning_from_roi_selection, display_info_message=False
+ list_of_files=list_sample,
+ callback=self.returning_from_roi_selection,
+ display_info_message=False,
)
o_gui.show()
QtGui.QGuiApplication.processEvents()
@@ -740,12 +948,18 @@ def roi_button_clicked(self, value):
def returning_from_roi_selection(self, roi_selected):
o_get = Get(parent=self)
[active_acquisition, active_config] = o_get.active_tabs()
- self.config_tab_dict[active_acquisition][active_config]["roi_selected"] = roi_selected
+ self.config_tab_dict[active_acquisition][active_config]["roi_selected"] = (
+ roi_selected
+ )
self.config_tab_dict[active_acquisition][active_config][
"select_roi_button"
].description = ROI_BUTTON_DESCRIPTION
- self.config_tab_dict[active_acquisition][active_config]["select_roi_button"].icon = ROI_ICON
- self.config_tab_dict[active_acquisition][active_config]["select_roi_button"].disabled = False
+ self.config_tab_dict[active_acquisition][active_config][
+ "select_roi_button"
+ ].icon = ROI_ICON
+ self.config_tab_dict[active_acquisition][active_config][
+ "select_roi_button"
+ ].disabled = False
def normalization_recap(self):
"""this will show all the config that will be run and if they have the minimum requirements or not,
@@ -798,7 +1012,7 @@ def normalization_recap(self):
display(table_ui)
def select_output_folder(self):
- #self.output_folder_ui = myfileselector.MyFileSelectorPanel(
+ # self.output_folder_ui = myfileselector.MyFileSelectorPanel(
self.output_folder_ui = myfileselector.FileSelectorPanelWithJumpFolders(
instruction="select where to create the " + "normalized folders",
start_dir=self.working_dir,
@@ -809,7 +1023,7 @@ def select_output_folder(self):
newdir_toolbar_button=True,
)
# display(self.output_folder_ui)
-
+
def normalization(self, output_folder):
display(
HTML(
@@ -825,8 +1039,14 @@ def normalization(self, output_folder):
horizontal_layout = widgets.HBox(
[
- widgets.Label("Normalization progress", layout=widgets.Layout(width="20%")),
- widgets.IntProgress(max=number_of_normalization + 1, value=0, layout=widgets.Layout(width="50%")),
+ widgets.Label(
+ "Normalization progress", layout=widgets.Layout(width="20%")
+ ),
+ widgets.IntProgress(
+ max=number_of_normalization + 1,
+ value=0,
+ layout=widgets.Layout(width="50%"),
+ ),
]
)
normalization_progress = horizontal_layout.children[1]
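The progress bar set up above is a standard ipywidgets pattern: an HBox holding a Label and an IntProgress whose value is bumped once per normalization and which is closed at the end. A minimal notebook-only sketch of that pattern, with a sleep standing in for one normalization run:

import time

import ipywidgets as widgets
from IPython.display import display

number_of_normalization = 5  # placeholder value

horizontal_layout = widgets.HBox(
    [
        widgets.Label("Normalization progress", layout=widgets.Layout(width="20%")),
        widgets.IntProgress(
            max=number_of_normalization + 1,
            value=0,
            layout=widgets.Layout(width="50%"),
        ),
    ]
)
display(horizontal_layout)
normalization_progress = horizontal_layout.children[1]

for _ in range(number_of_normalization):
    time.sleep(0.2)  # stands in for one normalization run
    normalization_progress.value += 1

horizontal_layout.close()  # remove the bar once every run has finished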
@@ -848,13 +1068,17 @@ def normalization(self, output_folder):
continue
list_sample = _current_config["list_sample"]
- full_output_normalization_folder_name = utilities.make_full_output_normalization_folder_name(
- output_folder=output_folder,
- first_sample_file_name=list_sample[0],
- name_acquisition=_name_acquisition,
- name_config=_name_config,
+ full_output_normalization_folder_name = (
+ utilities.make_full_output_normalization_folder_name(
+ output_folder=output_folder,
+ first_sample_file_name=list_sample[0],
+ name_acquisition=_name_acquisition,
+ name_config=_name_config,
+ )
+ )
+ list_full_output_normalization_folder_name.append(
+ full_output_normalization_folder_name
)
- list_full_output_normalization_folder_name.append(full_output_normalization_folder_name)
list_df = _current_config["list_df"]
o_load = Normalization()
@@ -872,7 +1096,12 @@ def normalization(self, output_folder):
list_roi = []
for _key in roi.keys():
_roi_item = roi[_key]
- _roi = ROI(x0=_roi_item["x0"], y0=_roi_item["y0"], x1=_roi_item["x1"], y1=_roi_item["y1"])
+ _roi = ROI(
+ x0=_roi_item["x0"],
+ y0=_roi_item["y0"],
+ x1=_roi_item["x1"],
+ y1=_roi_item["y1"],
+ )
list_roi.append(_roi)
else:
list_roi = None
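Only the calls visible in this hunk are reproduced in the sketch below: the ROI dictionary coming from the UI is turned into ROI objects and handed to the normalization/export calls. The NeuNorm import paths are an assumption (the module's imports are not part of this hunk), and the data-loading step is elided for the same reason.

from NeuNorm.normalization import Normalization  # assumed import path
from NeuNorm.roi import ROI  # assumed import path


def normalize_one_config(o_load: Normalization, roi: dict, output_folder: str) -> None:
    """Turn the ROI dict selected in the UI into ROI objects, then normalize and export."""
    if roi:
        list_roi = [
            ROI(
                x0=_roi_item["x0"],
                y0=_roi_item["y0"],
                x1=_roi_item["x1"],
                y1=_roi_item["y1"],
            )
            for _roi_item in roi.values()
        ]
    else:
        list_roi = None

    # force_median_ob=True corresponds to the "combine OBs with the median" branch above
    o_load.normalization(force_median_ob=True, roi=list_roi)
    o_load.export(folder=output_folder, file_type="tif")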
@@ -885,16 +1114,28 @@ def normalization(self, output_folder):
else:
o_load.normalization(force_median_ob=True, roi=list_roi)
- o_load.export(folder=full_output_normalization_folder_name, file_type="tif")
+ o_load.export(
+ folder=full_output_normalization_folder_name, file_type="tif"
+ )
del o_load
normalization_progress.value += 1
horizontal_layout.close()
- display(HTML('The following folders have been created:'))
+ display(
+ HTML(
+ 'The following folders have been created:'
+ )
+ )
for _folder in list_full_output_normalization_folder_name:
_folder = _folder if _folder else "None"
- display(HTML(' -> ' + _folder + ""))
+ display(
+ HTML(
+ ' -> '
+ + _folder
+ + ""
+ )
+ )
- print("Normalization is done!")
\ No newline at end of file
+ print("Normalization is done!")
diff --git a/notebooks/__code/normalization/utilities.py b/notebooks/__code/normalization/utilities.py
index 7b3142fd..6095574b 100755
--- a/notebooks/__code/normalization/utilities.py
+++ b/notebooks/__code/normalization/utilities.py
@@ -14,8 +14,12 @@ def make_full_output_normalization_folder_name(
):
basename_sample_folder = os.path.basename(os.path.dirname(first_sample_file_name))
basename_sample_folder += f"_{name_acquisition}_{name_config}"
- full_basename_sample_folder = os.path.abspath(os.path.join(output_folder, basename_sample_folder))
- full_basename_sample_folder = make_or_increment_folder_name(full_basename_sample_folder)
+ full_basename_sample_folder = os.path.abspath(
+ os.path.join(output_folder, basename_sample_folder)
+ )
+ full_basename_sample_folder = make_or_increment_folder_name(
+ full_basename_sample_folder
+ )
return full_basename_sample_folder
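make_full_output_normalization_folder_name() builds the output folder name from the first sample file's parent folder plus the acquisition and config labels, then defers to make_or_increment_folder_name(). That helper is not shown in this diff, so the stub below is a hypothetical stand-in (it assumes an incrementing _1, _2, ... suffix) used only to make the sketch runnable:

import os


def make_or_increment_folder_name(folder: str) -> str:
    """Hypothetical stand-in: create `folder`, or `folder_1`, `folder_2`, ... if it exists.

    The real helper lives elsewhere in utilities.py and may behave differently.
    """
    candidate = folder
    index = 1
    while os.path.exists(candidate):
        candidate = f"{folder}_{index}"
        index += 1
    os.makedirs(candidate)
    return candidate


def make_full_output_normalization_folder_name_sketch(
    output_folder: str,
    first_sample_file_name: str,
    name_acquisition: str,
    name_config: str,
) -> str:
    """Same naming rule as above: <sample folder>_<acquisition>_<config> under output_folder."""
    basename_sample_folder = os.path.basename(os.path.dirname(first_sample_file_name))
    basename_sample_folder += f"_{name_acquisition}_{name_config}"
    full_basename_sample_folder = os.path.abspath(
        os.path.join(output_folder, basename_sample_folder)
    )
    return make_or_increment_folder_name(full_basename_sample_folder)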
@@ -78,7 +82,12 @@ def all_metadata_match(metadata_1={}, metadata_2={}, list_key_to_check=None):
for _key in list_key:
try:
- if np.abs(float(metadata_1[_key]["value"]) - float(metadata_2[_key]["value"])) > METADATA_ERROR_ALLOWED:
+ if (
+ np.abs(
+ float(metadata_1[_key]["value"]) - float(metadata_2[_key]["value"])
+ )
+ > METADATA_ERROR_ALLOWED
+ ):
return False
except ValueError:
if metadata_1[_key]["value"] != metadata_2[_key]["value"]:
@@ -129,7 +138,10 @@ def isolate_infos_from_file_index(index=-1, dictionary=None, all_keys=False):
for _image in dictionary["list_images"].keys():
_time_image = dictionary["list_time_stamp"][index]
_user_format_time_image = dictionary["list_time_stamp_user_format"][index]
- result_dictionary[_image] = {"system_time": _time_image, "user_format_time": _user_format_time_image}
+ result_dictionary[_image] = {
+ "system_time": _time_image,
+ "user_format_time": _user_format_time_image,
+ }
else:
_image = dictionary["list_images"][index]
_time_image = dictionary["list_time_stamp"][index]
diff --git a/notebooks/__code/normalization_batch.py b/notebooks/__code/normalization_batch.py
index 30475cca..db1b2cdc 100755
--- a/notebooks/__code/normalization_batch.py
+++ b/notebooks/__code/normalization_batch.py
@@ -83,7 +83,14 @@ class Panel:
df_panel = None
top_object = None
- def __init__(self, prev_button=False, next_button=True, state="sample", working_dir="", top_object=None):
+ def __init__(
+ self,
+ prev_button=False,
+ next_button=True,
+ state="sample",
+ working_dir="",
+ top_object=None,
+ ):
self.prev_button = prev_button
self.next_button = next_button
self.state = state
@@ -124,7 +131,10 @@ def __top_panel(self):
title_ui = widgets.HBox(
[
widgets.Label("Instructions:", layout=widgets.Layout(width="20%")),
- widgets.Label("Select Samples Images and click NEXT", layout=widgets.Layout(width="50%")),
+ widgets.Label(
+ "Select Samples Images and click NEXT",
+ layout=widgets.Layout(width="50%"),
+ ),
]
)
@@ -134,7 +144,9 @@ def __top_panel(self):
widgets.Label("None", layout=widgets.Layout(width="50%")),
]
)
- self.title = title_ui.children[1] # "Select [Samples/OB/DF] Images [and click NEXT]
+ self.title = title_ui.children[
+ 1
+ ] # "Select [Samples/OB/DF] Images [and click NEXT]
self.label = label_ui.children[1] # number of samples selected
self.top_panel = widgets.VBox(children=[title_ui, label_ui], layout=self.layout)
@@ -158,7 +170,9 @@ def __bottom_panel(self):
self.prev_button_ui.on_click(self.prev_button_clicked)
list_ui.append(self.prev_button_ui)
- self.current_state_label_ui = widgets.Label(" ", layout=widgets.Layout(width="70%"))
+ self.current_state_label_ui = widgets.Label(
+ " ", layout=widgets.Layout(width="70%")
+ )
list_ui.append(self.current_state_label_ui)
if self.next_button:
@@ -194,7 +208,9 @@ def nextStep(self):
class WizardPanel:
- label_layout = Layout(border="1px lighgray solide", height="35px", padding="8px", width="300px")
+ label_layout = Layout(
+ border="1px lighgray solide", height="35px", padding="8px", width="300px"
+ )
sample_panel = None
def __init__(self, sample_panel=None):
@@ -210,9 +226,14 @@ class SampleSelectionPanel(Panel):
files = None
o_norm = None
- def __init__(self, prev_button=False, next_button=True, working_dir="", top_object=None):
+ def __init__(
+ self, prev_button=False, next_button=True, working_dir="", top_object=None
+ ):
super(SampleSelectionPanel, self).__init__(
- prev_button=prev_button, next_button=next_button, working_dir=working_dir, top_object=top_object
+ prev_button=prev_button,
+ next_button=next_button,
+ working_dir=working_dir,
+ top_object=top_object,
)
def next_button_clicked(self, event):
@@ -230,13 +251,17 @@ def __init__(self, working_dir="", top_object=None):
def next_button_clicked(self, event):
self.remove()
- _panel = DFSelectionPanel(working_dir=self.working_dir, top_object=self.top_object)
+ _panel = DFSelectionPanel(
+ working_dir=self.working_dir, top_object=self.top_object
+ )
_panel.init_ui(files=self.files)
_panel.show()
def prev_button_clicked(self, event):
self.remove()
- _panel = SampleSelectionPanel(working_dir=self.working_dir, top_object=self.top_object)
+ _panel = SampleSelectionPanel(
+ working_dir=self.working_dir, top_object=self.top_object
+ )
_panel.init_ui(files=self.files)
_panel.show()
@@ -245,18 +270,26 @@ class DFSelectionPanel(Panel):
def __init__(self, working_dir="", top_object=None):
self.working_dir = working_dir
super(DFSelectionPanel, self).__init__(
- prev_button=True, next_button=True, state="df", working_dir=working_dir, top_object=top_object
+ prev_button=True,
+ next_button=True,
+ state="df",
+ working_dir=working_dir,
+ top_object=top_object,
)
def prev_button_clicked(self, event):
self.remove()
- _panel = OBSelectionPanel(working_dir=self.working_dir, top_object=self.top_object)
+ _panel = OBSelectionPanel(
+ working_dir=self.working_dir, top_object=self.top_object
+ )
_panel.init_ui(files=self.files)
_panel.show()
def next_button_clicked(self, event):
self.remove()
- o_norm_handler = NormalizationHandler(files=self.files, working_dir=self.working_dir)
+ o_norm_handler = NormalizationHandler(
+ files=self.files, working_dir=self.working_dir
+ )
o_norm_handler.load_data()
self.top_object.o_norm_handler = o_norm_handler
self.top_object.o_norm = o_norm_handler.o_norm
@@ -324,9 +357,15 @@ def display_file_selector_from_home(ev):
self.hbox = widgets.HBox(
[
widgets.Button(
- description=f"Jump to {ipts} Shared Folder", button_style="success", layout=button_layout
+ description=f"Jump to {ipts} Shared Folder",
+ button_style="success",
+ layout=button_layout,
+ ),
+ widgets.Button(
+ description="Jump to My Home Folder",
+ button_style="success",
+ layout=button_layout,
),
- widgets.Button(description="Jump to My Home Folder", button_style="success", layout=button_layout),
]
)
go_to_shared_button_ui = self.hbox.children[0]
@@ -352,8 +391,12 @@ def remove_buttons(ev):
self.output_folder_ui.show()
def export(self, rois={}):
- base_folder = os.path.basename(os.path.dirname(self.list_file_names[0])) + "_normalized"
- output_folder = os.path.abspath(os.path.join(self.output_folder_ui.selected, base_folder))
+ base_folder = (
+ os.path.basename(os.path.dirname(self.list_file_names[0])) + "_normalized"
+ )
+ output_folder = os.path.abspath(
+ os.path.join(self.output_folder_ui.selected, base_folder)
+ )
utilities.make_dir(dir=output_folder)
self.normalized(rois=rois, output_folder=output_folder)
diff --git a/notebooks/__code/normalization_resonance/normalization_for_timepix.py b/notebooks/__code/normalization_resonance/normalization_for_timepix.py
index 245f8c22..c4d9b694 100644
--- a/notebooks/__code/normalization_resonance/normalization_for_timepix.py
+++ b/notebooks/__code/normalization_resonance/normalization_for_timepix.py
@@ -73,7 +73,9 @@ def _worker(fl):
return (imread(fl).astype(LOAD_DTYPE)).swapaxes(0, 1)
-def load_data_using_multithreading(list_tif: list = None, combine_tof: bool = False) -> np.ndarray:
+def load_data_using_multithreading(
+ list_tif: list = None, combine_tof: bool = False
+) -> np.ndarray:
"""load data using multithreading"""
with mp.Pool(processes=40) as pool:
data = pool.map(_worker, list_tif)
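A stand-alone version of the loading pattern above. Note that mp.Pool spawns worker processes rather than threads, despite the docstring. The imread source and LOAD_DTYPE are defined outside this hunk, so tifffile and float32 below are assumptions:

import multiprocessing as mp

import numpy as np
from tifffile import imread

LOAD_DTYPE = np.float32  # assumed dtype


def _worker(fl: str) -> np.ndarray:
    # swapaxes(0, 1) matches the transpose applied in the module's worker
    return imread(fl).astype(LOAD_DTYPE).swapaxes(0, 1)


def load_stack(list_tif: list, processes: int = 40) -> np.ndarray:
    """Load every tif in parallel and stack them into a (n_images, y, x) array."""
    with mp.Pool(processes=processes) as pool:
        data = pool.map(_worker, list_tif)
    return np.asarray(data)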
@@ -92,7 +94,9 @@ def retrieve_list_of_tif(folder: str) -> list:
def create_x_axis_file(
- lambda_array: np.ndarray = None, energy_array: np.ndarray = None, output_folder: str = "./"
+ lambda_array: np.ndarray = None,
+ energy_array: np.ndarray = None,
+ output_folder: str = "./",
) -> str:
"""create x axis file with lambda, energy and tof arrays"""
x_axis_data = {
@@ -160,10 +164,16 @@ def normalization_with_list_of_runs(
export_corrected_stack_of_sample_data = export_mode.get("sample_stack", False)
export_corrected_stack_of_ob_data = export_mode.get("ob_stack", False)
- export_corrected_stack_of_normalized_data = export_mode.get("normalized_stack", False)
- export_corrected_integrated_sample_data = export_mode.get("sample_integrated", False)
+ export_corrected_stack_of_normalized_data = export_mode.get(
+ "normalized_stack", False
+ )
+ export_corrected_integrated_sample_data = export_mode.get(
+ "sample_integrated", False
+ )
export_corrected_integrated_ob_data = export_mode.get("ob_integrated", False)
- export_corrected_integrated_normalized_data = export_mode.get("normalized_integrated", False)
+ export_corrected_integrated_normalized_data = export_mode.get(
+ "normalized_integrated", False
+ )
export_x_axis = export_mode.get("x_axis", True)
logging.info(f"{export_corrected_stack_of_sample_data = }")
@@ -181,13 +191,18 @@ def normalization_with_list_of_runs(
nexus_root_path=nexus_path,
)
ob_master_dict, ob_status_metadata = create_master_dict(
- list_run_numbers=ob_run_numbers, data_type=DataType.ob, instrument=instrument, nexus_root_path=nexus_path
+ list_run_numbers=ob_run_numbers,
+ data_type=DataType.ob,
+ instrument=instrument,
+ nexus_root_path=nexus_path,
)
# only for SNAP
if instrument == "SNAP":
for _run in sample_master_dict.keys():
- sample_master_dict[_run][MasterDictKeys.detector_delay_us] = detector_delay_us
+ sample_master_dict[_run][MasterDictKeys.detector_delay_us] = (
+ detector_delay_us
+ )
for _run in ob_master_dict.keys():
ob_master_dict[_run][MasterDictKeys.detector_delay_us] = detector_delay_us
@@ -197,25 +212,32 @@ def normalization_with_list_of_runs(
logging.info(f"loading ob# {_ob_run_number} ... ")
if verbose:
display(HTML(f"Loading ob# {_ob_run_number} ..."))
- ob_master_dict[_ob_run_number][MasterDictKeys.data] = load_data_using_multithreading(
- ob_master_dict[_ob_run_number][MasterDictKeys.list_tif], combine_tof=False
+ ob_master_dict[_ob_run_number][MasterDictKeys.data] = (
+ load_data_using_multithreading(
+ ob_master_dict[_ob_run_number][MasterDictKeys.list_tif],
+ combine_tof=False,
+ )
)
logging.info(f"ob# {_ob_run_number} loaded!")
logging.info(f"{ob_master_dict[_ob_run_number][MasterDictKeys.data].shape = }")
if verbose:
display(HTML(f"ob# {_ob_run_number} loaded!"))
- display(HTML(f"{ob_master_dict[_ob_run_number][MasterDictKeys.data].shape = }"))
+ display(
+ HTML(f"{ob_master_dict[_ob_run_number][MasterDictKeys.data].shape = }")
+ )
if proton_charge_flag:
normalized_by_proton_charge = (
- sample_status_metadata.all_proton_charge_found and ob_status_metadata.all_proton_charge_found
+ sample_status_metadata.all_proton_charge_found
+ and ob_status_metadata.all_proton_charge_found
)
else:
normalized_by_proton_charge = False
if shutter_counts_flag:
normalized_by_shutter_counts = (
- sample_status_metadata.all_shutter_counts_found and ob_status_metadata.all_shutter_counts_found
+ sample_status_metadata.all_shutter_counts_found
+ and ob_status_metadata.all_shutter_counts_found
)
else:
normalized_by_shutter_counts = False
@@ -248,7 +270,9 @@ def normalization_with_list_of_runs(
export_corrected_stack_of_ob_data,
export_corrected_integrated_ob_data,
ob_data_combined,
- spectra_file_name=ob_master_dict[_ob_run_number][MasterDictKeys.spectra_file_name],
+ spectra_file_name=ob_master_dict[_ob_run_number][
+ MasterDictKeys.spectra_file_name
+ ],
)
# load sample images
@@ -256,22 +280,33 @@ def normalization_with_list_of_runs(
logging.info(f"loading sample# {_sample_run_number} ... ")
if verbose:
display(HTML(f"Loading sample# {_sample_run_number} ..."))
- sample_master_dict[_sample_run_number][MasterDictKeys.data] = load_data_using_multithreading(
- sample_master_dict[_sample_run_number][MasterDictKeys.list_tif], combine_tof=False
+ sample_master_dict[_sample_run_number][MasterDictKeys.data] = (
+ load_data_using_multithreading(
+ sample_master_dict[_sample_run_number][MasterDictKeys.list_tif],
+ combine_tof=False,
+ )
)
logging.info(f"sample# {_sample_run_number} loaded!")
- logging.info(f"{sample_master_dict[_sample_run_number][MasterDictKeys.data].shape = }")
+ logging.info(
+ f"{sample_master_dict[_sample_run_number][MasterDictKeys.data].shape = }"
+ )
if verbose:
display(HTML(f"sample# {_sample_run_number} loaded!"))
- display(HTML(f"{sample_master_dict[_sample_run_number][MasterDictKeys.data].shape = }"))
+ display(
+ HTML(
+ f"{sample_master_dict[_sample_run_number][MasterDictKeys.data].shape = }"
+ )
+ )
if correct_chips_alignment_flag:
logging.info("Correcting chips alignment ...")
if verbose:
display(HTML("Correcting chips alignment ..."))
for _sample_run_number in sample_master_dict.keys():
- sample_master_dict[_sample_run_number][MasterDictKeys.data] = correct_chips_alignment(
- sample_master_dict[_sample_run_number][MasterDictKeys.data]
+ sample_master_dict[_sample_run_number][MasterDictKeys.data] = (
+ correct_chips_alignment(
+ sample_master_dict[_sample_run_number][MasterDictKeys.data]
+ )
)
logging.info("Chips alignment corrected!")
if verbose:
@@ -303,17 +338,25 @@ def normalization_with_list_of_runs(
logging.info("**********************************")
if normalized_by_proton_charge:
- proton_charge = sample_master_dict[_sample_run_number][MasterDictKeys.proton_charge]
+ proton_charge = sample_master_dict[_sample_run_number][
+ MasterDictKeys.proton_charge
+ ]
_sample_data = _sample_data / proton_charge
if normalized_by_shutter_counts:
list_shutter_values_for_each_image = produce_list_shutter_for_each_image(
- list_time_spectra=ob_master_dict[_ob_run_number][MasterDictKeys.list_spectra],
- list_shutter_counts=sample_master_dict[_sample_run_number][MasterDictKeys.shutter_counts],
+ list_time_spectra=ob_master_dict[_ob_run_number][
+ MasterDictKeys.list_spectra
+ ],
+ list_shutter_counts=sample_master_dict[_sample_run_number][
+ MasterDictKeys.shutter_counts
+ ],
)
sample_data = []
- for _sample, _shutter_value in zip(_sample_data, list_shutter_values_for_each_image, strict=False):
+ for _sample, _shutter_value in zip(
+ _sample_data, list_shutter_values_for_each_image, strict=False
+ ):
sample_data.append(_sample / _shutter_value)
_sample_data = np.array(sample_data)
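Both corrections above are plain scalings: the whole sample stack is divided by the run's proton charge, then each TOF image is divided by the shutter-count value assigned to its bin. A compact toy example of the two steps:

import numpy as np

# toy stack: 4 TOF images of 3x3 pixels
sample_data = np.ones((4, 3, 3), dtype=np.float32) * 100.0

# per-run correction: divide the whole stack by the run's proton charge
proton_charge = 2.5  # arbitrary value; in the module it comes from the NeXus file
sample_data = sample_data / proton_charge

# per-image correction: divide each TOF image by its shutter-count value
list_shutter_values_for_each_image = np.array([10.0, 10.0, 20.0, 20.0], dtype=np.float32)
sample_data = np.array(
    [
        _image / _shutter_value
        for _image, _shutter_value in zip(
            sample_data, list_shutter_values_for_each_image, strict=False
        )
    ]
)
print(sample_data[0, 0, 0], sample_data[-1, 0, 0])  # 4.0 2.0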
@@ -323,14 +366,19 @@ def normalization_with_list_of_runs(
logging.info(f"{ob_data_combined.dtype = }")
# export sample data after correction if requested
- if export_corrected_stack_of_sample_data or export_corrected_integrated_sample_data:
+ if (
+ export_corrected_stack_of_sample_data
+ or export_corrected_integrated_sample_data
+ ):
export_sample_images(
output_folder,
export_corrected_stack_of_sample_data,
export_corrected_integrated_sample_data,
_sample_run_number,
_sample_data,
- spectra_file_name=sample_master_dict[_sample_run_number][MasterDictKeys.spectra_file_name],
+ spectra_file_name=sample_master_dict[_sample_run_number][
+ MasterDictKeys.spectra_file_name
+ ],
)
# _sample_data = np.divide(_sample_data, ob_data_combined, out=np.zeros_like(_sample_data), where=ob_data_combined!=0)
@@ -350,8 +398,12 @@ def normalization_with_list_of_runs(
logging.info(f"{normalized_data[_sample_run_number].shape = }")
logging.info(f"{normalized_data[_sample_run_number].dtype = }")
- detector_delay_us = sample_master_dict[_sample_run_number][MasterDictKeys.detector_delay_us]
- time_spectra = sample_master_dict[_sample_run_number][MasterDictKeys.list_spectra]
+ detector_delay_us = sample_master_dict[_sample_run_number][
+ MasterDictKeys.detector_delay_us
+ ]
+ time_spectra = sample_master_dict[_sample_run_number][
+ MasterDictKeys.list_spectra
+ ]
lambda_array = convert_array_from_time_to_lambda(
time_array=time_spectra,
@@ -374,11 +426,15 @@ def normalization_with_list_of_runs(
if preview:
# display preview of normalized data
- fig, axs1 = plt.subplots(1, 2, figsize=(2 * PLOT_SIZE.width, PLOT_SIZE.height))
+ fig, axs1 = plt.subplots(
+ 1, 2, figsize=(2 * PLOT_SIZE.width, PLOT_SIZE.height)
+ )
sample_data_integrated = np.nanmean(_sample_data, axis=0)
im0 = axs1[0].imshow(sample_data_integrated, cmap="gray")
plt.colorbar(im0, ax=axs1[0])
- axs1[0].set_title(f"Sample data: {_sample_run_number} | detector delay: {detector_delay_us:.2f} us")
+ axs1[0].set_title(
+ f"Sample data: {_sample_run_number} | detector delay: {detector_delay_us:.2f} us"
+ )
sample_integrated1 = np.nansum(_sample_data, axis=1)
sample_integrated = np.nansum(sample_integrated1, axis=1)
@@ -387,7 +443,9 @@ def normalization_with_list_of_runs(
axs1[1].set_ylabel("mean of full image")
plt.tight_layout
- fig, axs2 = plt.subplots(1, 2, figsize=(2 * PLOT_SIZE.width, PLOT_SIZE.height))
+ fig, axs2 = plt.subplots(
+ 1, 2, figsize=(2 * PLOT_SIZE.width, PLOT_SIZE.height)
+ )
ob_data_integrated = np.nanmean(ob_data_combined, axis=0)
im1 = axs2[0].imshow(ob_data_integrated, cmap="gray")
plt.colorbar(im1, ax=axs2[0])
@@ -400,8 +458,12 @@ def normalization_with_list_of_runs(
axs2[1].set_ylabel("mean of full image")
plt.tight_layout()
- fig, axs3 = plt.subplots(1, 2, figsize=(2 * PLOT_SIZE.width, PLOT_SIZE.height))
- normalized_data_integrated = np.nanmean(normalized_data[_sample_run_number], axis=0)
+ fig, axs3 = plt.subplots(
+ 1, 2, figsize=(2 * PLOT_SIZE.width, PLOT_SIZE.height)
+ )
+ normalized_data_integrated = np.nanmean(
+ normalized_data[_sample_run_number], axis=0
+ )
im2 = axs3[0].imshow(normalized_data_integrated, cmap="gray")
plt.colorbar(im2, ax=axs3[0])
axs3[0].set_title(f"Normalized data {_sample_run_number}")
@@ -413,7 +475,9 @@ def normalization_with_list_of_runs(
axs3[1].set_ylabel("mean of full image")
plt.tight_layout()
- fig, axs4 = plt.subplots(1, 2, figsize=(2 * PLOT_SIZE.width, PLOT_SIZE.height))
+ fig, axs4 = plt.subplots(
+ 1, 2, figsize=(2 * PLOT_SIZE.width, PLOT_SIZE.height)
+ )
axs4[0].plot(lambda_array, profile, "*")
axs4[0].set_xlabel("Lambda (A)")
axs4[0].set_ylabel("mean of full image")
@@ -426,24 +490,36 @@ def normalization_with_list_of_runs(
plt.show()
- if export_corrected_integrated_normalized_data or export_corrected_stack_of_normalized_data:
+ if (
+ export_corrected_integrated_normalized_data
+ or export_corrected_stack_of_normalized_data
+ ):
# make up new output folder name
list_ob_runs = list(ob_master_dict.keys())
- str_ob_runs = "_".join([str(_ob_run_number) for _ob_run_number in list_ob_runs])
+ str_ob_runs = "_".join(
+ [str(_ob_run_number) for _ob_run_number in list_ob_runs]
+ )
full_output_folder = os.path.join(
- output_folder, f"normalized_sample_{_sample_run_number}_obs_{str_ob_runs}"
+ output_folder,
+ f"normalized_sample_{_sample_run_number}_obs_{str_ob_runs}",
) # issue for WEI here !
full_output_folder = os.path.abspath(full_output_folder)
os.makedirs(full_output_folder, exist_ok=True)
if export_corrected_integrated_normalized_data:
# making up the integrated sample data
- sample_data_integrated = np.nanmean(normalized_data[_sample_run_number], axis=0)
+ sample_data_integrated = np.nanmean(
+ normalized_data[_sample_run_number], axis=0
+ )
full_file_name = os.path.join(full_output_folder, "integrated.tif")
- logging.info(f"\t -> Exporting integrated normalized data to {full_file_name} ...")
+ logging.info(
+ f"\t -> Exporting integrated normalized data to {full_file_name} ..."
+ )
make_tiff(data=sample_data_integrated, filename=full_file_name)
- logging.info(f"\t -> Exporting integrated normalized data to {full_file_name} is done!")
+ logging.info(
+ f"\t -> Exporting integrated normalized data to {full_file_name} is done!"
+ )
if export_corrected_stack_of_normalized_data:
output_stack_folder = os.path.join(full_output_folder, "stack")
@@ -451,12 +527,20 @@ def normalization_with_list_of_runs(
os.makedirs(output_stack_folder, exist_ok=True)
for _index, _data in enumerate(normalized_data[_sample_run_number]):
- _output_file = os.path.join(output_stack_folder, f"image{_index:04d}.tif")
+ _output_file = os.path.join(
+ output_stack_folder, f"image{_index:04d}.tif"
+ )
make_tiff(data=_data, filename=_output_file)
- logging.info(f"\t -> Exporting normalized data to {output_stack_folder} is done!")
+ logging.info(
+ f"\t -> Exporting normalized data to {output_stack_folder} is done!"
+ )
print(f"Exported normalized tif images are in: {output_stack_folder}!")
- spectra_file = sample_master_dict[_sample_run_number][MasterDictKeys.spectra_file_name]
- logging.info(f"Exported time spectra file {spectra_file} to {output_stack_folder}!")
+ spectra_file = sample_master_dict[_sample_run_number][
+ MasterDictKeys.spectra_file_name
+ ]
+ logging.info(
+ f"Exported time spectra file {spectra_file} to {output_stack_folder}!"
+ )
shutil.copy(spectra_file, output_stack_folder)
# create x-axis file
@@ -475,7 +559,9 @@ def get_detector_offset_from_nexus(nexus_path: str) -> float:
"""get the detector offset from the nexus file"""
with h5py.File(nexus_path, "r") as hdf5_data:
try:
- detector_offset_micros = hdf5_data["entry"]["DASlogs"]["BL10:Det:TH:DSPT1:TIDelay"]["value"][0]
+ detector_offset_micros = hdf5_data["entry"]["DASlogs"][
+ "BL10:Det:TH:DSPT1:TIDelay"
+ ]["value"][0]
except KeyError:
detector_offset_micros = None
return detector_offset_micros
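get_detector_offset_from_nexus() is a single h5py lookup of one DASlogs process variable with a KeyError fallback when the PV is absent. The same access pattern written as a generic helper; the PV path in the comment is the one used above, and any other log entry can be read the same way:

import h5py


def read_daslogs_value(nexus_path: str, pv_name: str, index: int = 0):
    """Return DASlogs/<pv_name>/value[index] from a NeXus file, or None if missing."""
    with h5py.File(nexus_path, "r") as hdf5_data:
        try:
            return hdf5_data["entry"]["DASlogs"][pv_name]["value"][index]
        except KeyError:
            return None


# same lookup the module performs, routed through the generic helper:
# detector_offset_micros = read_daslogs_value(nexus_path, "BL10:Det:TH:DSPT1:TIDelay")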
@@ -504,7 +590,9 @@ def export_sample_images(
make_tiff(data=_data, filename=_output_file)
logging.info(f"\t -> Exporting sample data to {output_stack_folder} is done!")
shutil.copy(spectra_file_name, os.path.join(output_stack_folder))
- logging.info(f"\t -> Exporting spectra file {spectra_file_name} to {output_stack_folder} is done!")
+ logging.info(
+ f"\t -> Exporting spectra file {spectra_file_name} to {output_stack_folder} is done!"
+ )
if export_corrected_integrated_sample_data:
# making up the integrated sample data
@@ -512,7 +600,9 @@ def export_sample_images(
full_file_name = os.path.join(sample_output_folder, "integrated.tif")
logging.info(f"\t -> Exporting integrated sample data to {full_file_name} ...")
make_tiff(data=sample_data_integrated, filename=full_file_name)
- logging.info(f"\t -> Exporting integrated sample data to {full_file_name} is done!")
+ logging.info(
+ f"\t -> Exporting integrated sample data to {full_file_name} is done!"
+ )
display(HTML(f"Created folder {output_stack_folder} for sample outputs!"))
@@ -529,10 +619,13 @@ def export_ob_images(
logging.info(f"> Exporting combined ob images to {output_folder} ...")
logging.info(f"\t{ob_run_numbers = }")
list_ob_runs_number_only = [
- str(isolate_run_number_from_full_path(_ob_run_number)) for _ob_run_number in ob_run_numbers
+ str(isolate_run_number_from_full_path(_ob_run_number))
+ for _ob_run_number in ob_run_numbers
]
if len(list_ob_runs_number_only) == 1:
- ob_output_folder = os.path.join(output_folder, f"ob_{list_ob_runs_number_only[0]}")
+ ob_output_folder = os.path.join(
+ output_folder, f"ob_{list_ob_runs_number_only[0]}"
+ )
else:
str_list_ob_runs = "_".join(list_ob_runs_number_only)
ob_output_folder = os.path.join(output_folder, f"ob_{str_list_ob_runs}")
@@ -561,12 +654,16 @@ def export_ob_images(
logging.info(f"\t -> Exporting ob data to {output_stack_folder} is done!")
# copy spectra file to the output folder
shutil.copy(spectra_file_name, os.path.join(output_stack_folder))
- logging.info(f"\t -> Exported spectra file {spectra_file_name} to {output_stack_folder}!")
+ logging.info(
+ f"\t -> Exported spectra file {spectra_file_name} to {output_stack_folder}!"
+ )
display(HTML(f"Created folder {output_stack_folder} for OB outputs!"))
-def normalization(sample_folder=None, ob_folder=None, output_folder="./", verbose=False):
+def normalization(
+ sample_folder=None, ob_folder=None, output_folder="./", verbose=False
+):
pass
@@ -646,7 +743,9 @@ def update_dict_with_shutter_counts(master_dict: dict) -> tuple[dict, bool]:
if _value == "0":
break
list_shutter_counts.append(float(_value))
- master_dict[run_number][MasterDictKeys.shutter_counts] = list_shutter_counts
+ master_dict[run_number][MasterDictKeys.shutter_counts] = (
+ list_shutter_counts
+ )
return master_dict, status_all_shutter_counts_found
@@ -677,7 +776,9 @@ def update_dict_with_proton_charge(master_dict: dict) -> tuple[dict, bool]:
_nexus_path = master_dict[_run_number][MasterDictKeys.nexus_path]
try:
with h5py.File(_nexus_path, "r") as hdf5_data:
- proton_charge = hdf5_data["entry"][MasterDictKeys.proton_charge][0] / 1e12
+ proton_charge = (
+ hdf5_data["entry"][MasterDictKeys.proton_charge][0] / 1e12
+ )
except KeyError:
proton_charge = None
status_all_proton_charge_found = False
@@ -688,7 +789,9 @@ def update_dict_with_proton_charge(master_dict: dict) -> tuple[dict, bool]:
def update_dict_with_list_of_images(master_dict: dict) -> dict:
"""update the master dict with list of images"""
for _run_number in master_dict.keys():
- list_tif = retrieve_list_of_tif(master_dict[_run_number][MasterDictKeys.data_path])
+ list_tif = retrieve_list_of_tif(
+ master_dict[_run_number][MasterDictKeys.data_path]
+ )
master_dict[_run_number][MasterDictKeys.list_tif] = list_tif
@@ -699,7 +802,9 @@ def get_list_run_number(data_folder: str) -> list:
return list_run_number
-def update_dict_with_nexus_full_path(nexus_root_path: str, instrument: str, master_dict: dict) -> dict:
+def update_dict_with_nexus_full_path(
+ nexus_root_path: str, instrument: str, master_dict: dict
+) -> dict:
"""create dict of nexus path for each run number"""
for run_number in master_dict.keys():
master_dict[run_number][MasterDictKeys.nexus_path] = os.path.join(
@@ -717,7 +822,9 @@ def update_with_nexus_metadata(master_dict: dict) -> dict:
def update_dict_with_data_full_path(data_root_path: str, master_dict: dict) -> dict:
"""create dict of data path for each run number"""
for run_number in master_dict.keys():
- master_dict[run_number][MasterDictKeys.data_path] = os.path.join(data_root_path, f"Run_{run_number}")
+ master_dict[run_number][MasterDictKeys.data_path] = os.path.join(
+ data_root_path, f"Run_{run_number}"
+ )
def create_master_dict(
@@ -775,7 +882,9 @@ def create_master_dict(
return master_dict, status_metadata
-def produce_list_shutter_for_each_image(list_time_spectra: list = None, list_shutter_counts: list = None) -> list:
+def produce_list_shutter_for_each_image(
+ list_time_spectra: list = None, list_shutter_counts: list = None
+) -> list:
"""produce list of shutter counts for each image"""
delat_time_spectra = list_time_spectra[1] - list_time_spectra[0]
@@ -785,18 +894,26 @@ def produce_list_shutter_for_each_image(list_time_spectra: list = None, list_shu
logging.info(f"\t{list_index_jump = }")
logging.info(f"\t{list_shutter_counts = }")
- list_shutter_values_for_each_image = np.zeros(len(list_time_spectra), dtype=np.float32)
+ list_shutter_values_for_each_image = np.zeros(
+ len(list_time_spectra), dtype=np.float32
+ )
if len(list_shutter_counts) == 1: # resonance mode
list_shutter_values_for_each_image.fill(list_shutter_counts[0])
return list_shutter_values_for_each_image
- list_shutter_values_for_each_image[0 : list_index_jump[0] + 1].fill(list_shutter_counts[0])
+ list_shutter_values_for_each_image[0 : list_index_jump[0] + 1].fill(
+ list_shutter_counts[0]
+ )
for _index in range(1, len(list_index_jump)):
_start = list_index_jump[_index - 1]
_end = list_index_jump[_index]
- list_shutter_values_for_each_image[_start + 1 : _end + 1].fill(list_shutter_counts[_index])
+ list_shutter_values_for_each_image[_start + 1 : _end + 1].fill(
+ list_shutter_counts[_index]
+ )
- list_shutter_values_for_each_image[list_index_jump[-1] + 1 :] = list_shutter_counts[-1]
+ list_shutter_values_for_each_image[list_index_jump[-1] + 1 :] = list_shutter_counts[
+ -1
+ ]
return list_shutter_values_for_each_image
@@ -817,7 +934,9 @@ def combine_ob_images(
for _ob_run_number in ob_master_dict.keys():
logging.info(f"Combining ob# {_ob_run_number} ...")
- ob_data = np.array(ob_master_dict[_ob_run_number][MasterDictKeys.data], dtype=np.float32)
+ ob_data = np.array(
+ ob_master_dict[_ob_run_number][MasterDictKeys.data], dtype=np.float32
+ )
# get statistics of ob data
data_shape = ob_data.shape
@@ -826,7 +945,9 @@ def combine_ob_images(
number_of_zeros = np.sum(ob_data == 0)
logging.info(f"\t ob data shape: {data_shape}")
logging.info(f"\t Number of zeros in ob data: {number_of_zeros}")
- logging.info(f"\t Percentage of zeros in ob data: {number_of_zeros / (data_shape[0] * nbr_pixels) * 100:.2f}%")
+ logging.info(
+ f"\t Percentage of zeros in ob data: {number_of_zeros / (data_shape[0] * nbr_pixels) * 100:.2f}%"
+ )
logging.info(f"\t Mean of ob data: {np.mean(ob_data)}")
logging.info(f"\t maximum of ob data: {np.max(ob_data)}")
logging.info(f"\t minimum of ob data: {np.min(ob_data)}")
@@ -842,14 +963,20 @@ def combine_ob_images(
logging.info("\t -> Normalized by shutter counts")
list_shutter_values_for_each_image = produce_list_shutter_for_each_image(
- list_time_spectra=ob_master_dict[_ob_run_number][MasterDictKeys.list_spectra],
- list_shutter_counts=ob_master_dict[_ob_run_number][MasterDictKeys.shutter_counts],
+ list_time_spectra=ob_master_dict[_ob_run_number][
+ MasterDictKeys.list_spectra
+ ],
+ list_shutter_counts=ob_master_dict[_ob_run_number][
+ MasterDictKeys.shutter_counts
+ ],
)
logging.info(f"{list_shutter_values_for_each_image.shape = }")
temp_ob_data = np.empty_like(ob_data, dtype=np.float32)
for _index in range(len(list_shutter_values_for_each_image)):
- temp_ob_data[_index] = ob_data[_index] / list_shutter_values_for_each_image[_index]
+ temp_ob_data[_index] = (
+ ob_data[_index] / list_shutter_values_for_each_image[_index]
+ )
logging.info(f"{temp_ob_data.shape = }")
ob_data = temp_ob_data.copy()
@@ -897,9 +1024,15 @@ def combine_ob_images(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
- parser.add_argument("--sample", type=str, nargs=1, help="Full path to sample run number")
- parser.add_argument("--ob", type=str, nargs=1, help="Full path to the ob run number")
- parser.add_argument("--output", type=str, nargs=1, help="Path to the output folder", default="./")
+ parser.add_argument(
+ "--sample", type=str, nargs=1, help="Full path to sample run number"
+ )
+ parser.add_argument(
+ "--ob", type=str, nargs=1, help="Full path to the ob run number"
+ )
+ parser.add_argument(
+ "--output", type=str, nargs=1, help="Path to the output folder", default="./"
+ )
args = parser.parse_args()
logging.info(f"{args = }")
@@ -948,7 +1081,9 @@ def combine_ob_images(
# normalization(sample_folder=sample_folder, ob_folder=ob_folder, output_folder=output_folder)
- print(f"Normalization is done! Check the log file {log_file_name} for more details!")
+ print(
+ f"Normalization is done! Check the log file {log_file_name} for more details!"
+ )
print(f"Exported data to {output_folder}")
# sample = /SNS/VENUS/IPTS-34808/shared/autoreduce/mcp/November17_Sample6_UA_H_Batteries_1_5_Angs_min_30Hz_5C
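Not part of the patch: a minimal standalone sketch of the per-image shutter-count expansion that produce_list_shutter_for_each_image (reformatted above) performs, assuming evenly spaced TOF bins and treating any gap wider than twice the nominal bin width as the start of the next shutter gate; function and variable names here are illustrative only.

import numpy as np

def expand_shutter_counts(time_spectra, shutter_counts):
    # Assign each image the count of the shutter gate it was recorded under.
    time_spectra = np.asarray(time_spectra, dtype=np.float64)
    delta = time_spectra[1] - time_spectra[0]        # nominal TOF bin width
    gaps = np.diff(time_spectra)
    jump_indices = np.where(gaps > 2 * delta)[0]     # assumed gate-change criterion
    per_image = np.zeros(len(time_spectra), dtype=np.float32)
    if len(shutter_counts) == 1:                     # resonance mode: a single gate
        per_image.fill(shutter_counts[0])
        return per_image
    start = 0
    for gate, end in enumerate(jump_indices):
        per_image[start : end + 1] = shutter_counts[gate]
        start = end + 1
    per_image[start:] = shutter_counts[-1]           # tail after the last jump
    return per_image

print(expand_shutter_counts([0.0, 1.0, 2.0, 10.0, 11.0, 20.0, 21.0], [100.0, 50.0, 25.0]))
# expected: [100. 100. 100.  50.  50.  25.  25.]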
diff --git a/notebooks/__code/normalization_resonance/normalization_for_timepix1_timepix3.py b/notebooks/__code/normalization_resonance/normalization_for_timepix1_timepix3.py
index 0e59f51b..0d80e774 100644
--- a/notebooks/__code/normalization_resonance/normalization_for_timepix1_timepix3.py
+++ b/notebooks/__code/normalization_resonance/normalization_for_timepix1_timepix3.py
@@ -14,7 +14,6 @@
from IPython.display import HTML, display
from PIL import Image
from skimage.io import imread
-from scipy.ndimage import median_filter
# from enum import Enum
# from scipy.constants import h, c, electron_volt, m_n
@@ -82,7 +81,9 @@ def _worker(fl):
return (imread(fl).astype(LOAD_DTYPE)).swapaxes(0, 1)
-def load_data_using_multithreading(list_tif: list = None, combine_tof: bool = False) -> np.ndarray:
+def load_data_using_multithreading(
+ list_tif: list = None, combine_tof: bool = False
+) -> np.ndarray:
"""load data using multithreading"""
with mp.Pool(processes=40) as pool:
data = pool.map(_worker, list_tif)
@@ -101,7 +102,9 @@ def retrieve_list_of_tif(folder: str) -> list:
def create_x_axis_file(
- lambda_array: np.ndarray = None, energy_array: np.ndarray = None, output_folder: str = "./"
+ lambda_array: np.ndarray = None,
+ energy_array: np.ndarray = None,
+ output_folder: str = "./",
) -> str:
"""create x axis file with lambda, energy and tof arrays"""
x_axis_data = {
@@ -139,6 +142,7 @@ def correct_chips_alignment(data: np.ndarray, config: dict) -> np.ndarray:
# return data_corrected
return data
+
def normalization_with_list_of_full_path(
sample_dict: dict = None,
ob_dict: dict = None,
@@ -208,10 +212,16 @@ def normalization_with_list_of_full_path(
export_corrected_stack_of_sample_data = export_mode.get("sample_stack", False)
export_corrected_stack_of_ob_data = export_mode.get("ob_stack", False)
- export_corrected_stack_of_normalized_data = export_mode.get("normalized_stack", False)
- export_corrected_integrated_sample_data = export_mode.get("sample_integrated", False)
+ export_corrected_stack_of_normalized_data = export_mode.get(
+ "normalized_stack", False
+ )
+ export_corrected_integrated_sample_data = export_mode.get(
+ "sample_integrated", False
+ )
export_corrected_integrated_ob_data = export_mode.get("ob_integrated", False)
- export_corrected_integrated_normalized_data = export_mode.get("normalized_integrated", False)
+ export_corrected_integrated_normalized_data = export_mode.get(
+ "normalized_integrated", False
+ )
export_x_axis = export_mode.get("x_axis", True)
logging.info(f"{export_corrected_stack_of_sample_data = }")
@@ -238,32 +248,40 @@ def normalization_with_list_of_full_path(
logging.info(f"loading ob# {_ob_run_number} ... ")
if verbose:
display(HTML(f"Loading ob# {_ob_run_number} ..."))
- ob_master_dict[_ob_run_number][MasterDictKeys.data] = load_data_using_multithreading(
- ob_master_dict[_ob_run_number][MasterDictKeys.list_tif], combine_tof=False
+ ob_master_dict[_ob_run_number][MasterDictKeys.data] = (
+ load_data_using_multithreading(
+ ob_master_dict[_ob_run_number][MasterDictKeys.list_tif],
+ combine_tof=False,
+ )
)
logging.info(f"ob# {_ob_run_number} loaded!")
logging.info(f"{ob_master_dict[_ob_run_number][MasterDictKeys.data].shape = }")
if verbose:
display(HTML(f"ob# {_ob_run_number} loaded!"))
- display(HTML(f"{ob_master_dict[_ob_run_number][MasterDictKeys.data].shape = }"))
+ display(
+ HTML(f"{ob_master_dict[_ob_run_number][MasterDictKeys.data].shape = }")
+ )
if proton_charge_flag:
normalized_by_proton_charge = (
- sample_status_metadata.all_proton_charge_found and ob_status_metadata.all_proton_charge_found
+ sample_status_metadata.all_proton_charge_found
+ and ob_status_metadata.all_proton_charge_found
)
else:
normalized_by_proton_charge = False
if monitor_counts_flag:
normalized_by_monitor_counts = (
- sample_status_metadata.all_monitor_counts_found and ob_status_metadata.all_monitor_counts_found
+ sample_status_metadata.all_monitor_counts_found
+ and ob_status_metadata.all_monitor_counts_found
)
else:
normalized_by_monitor_counts = False
if shutter_counts_flag:
normalized_by_shutter_counts = (
- sample_status_metadata.all_shutter_counts_found and ob_status_metadata.all_shutter_counts_found
+ sample_status_metadata.all_shutter_counts_found
+ and ob_status_metadata.all_shutter_counts_found
)
else:
normalized_by_shutter_counts = False
@@ -280,9 +298,15 @@ def normalization_with_list_of_full_path(
max_iterations=max_iterations,
)
logging.info(f"{ob_data_combined.shape = }")
- logging.info(f"number of NaN in ob_data_combined data: {np.sum(np.isnan(ob_data_combined))}")
- logging.info(f"number of inf in ob_data_combined data: {np.sum(np.isinf(ob_data_combined))}")
- logging.info(f"number of zeros in ob_data_combined data: {np.sum(ob_data_combined == 0)} ")
+ logging.info(
+ f"number of NaN in ob_data_combined data: {np.sum(np.isnan(ob_data_combined))}"
+ )
+ logging.info(
+ f"number of inf in ob_data_combined data: {np.sum(np.isinf(ob_data_combined))}"
+ )
+ logging.info(
+ f"number of zeros in ob_data_combined data: {np.sum(ob_data_combined == 0)} "
+ )
if verbose:
display(HTML(f"{ob_data_combined.shape = }"))
@@ -291,7 +315,9 @@ def normalization_with_list_of_full_path(
logging.info("Correcting chips alignment ...")
if verbose:
display(HTML("Correcting chips alignment ..."))
- ob_data_combined = correct_chips_alignment(ob_data_combined, correct_chips_alignment_config)
+ ob_data_combined = correct_chips_alignment(
+ ob_data_combined, correct_chips_alignment_config
+ )
logging.info("Chips alignment corrected!")
if verbose:
display(HTML("Chips alignment corrected!"))
@@ -304,7 +330,9 @@ def normalization_with_list_of_full_path(
export_corrected_stack_of_ob_data,
export_corrected_integrated_ob_data,
ob_data_combined,
- spectra_file_name=ob_master_dict[_ob_run_number][MasterDictKeys.spectra_file_name],
+ spectra_file_name=ob_master_dict[_ob_run_number][
+ MasterDictKeys.spectra_file_name
+ ],
)
# load dc images
@@ -312,39 +340,56 @@ def normalization_with_list_of_full_path(
logging.info(f"loading dc# {_dc_run_number} ... ")
if verbose:
display(HTML(f"Loading dc# {_dc_run_number} ..."))
- dc_master_dict[_dc_run_number][MasterDictKeys.data] = load_data_using_multithreading(
- dc_master_dict[_dc_run_number][MasterDictKeys.list_tif], combine_tof=False
+ dc_master_dict[_dc_run_number][MasterDictKeys.data] = (
+ load_data_using_multithreading(
+ dc_master_dict[_dc_run_number][MasterDictKeys.list_tif],
+ combine_tof=False,
+ )
)
logging.info(f"dc# {_dc_run_number} loaded!")
logging.info(f"{dc_master_dict[_dc_run_number][MasterDictKeys.data].shape = }")
if verbose:
display(HTML(f"dc# {_dc_run_number} loaded!"))
- display(HTML(f"{dc_master_dict[_dc_run_number][MasterDictKeys.data].shape = }"))
+ display(
+ HTML(f"{dc_master_dict[_dc_run_number][MasterDictKeys.data].shape = }")
+ )
# combine all ob images
dc_data_combined = combine_dc_images(dc_master_dict)
-
+
# load sample images
for _sample_run_number in sample_master_dict.keys():
logging.info(f"loading sample# {_sample_run_number} ... ")
if verbose:
display(HTML(f"Loading sample# {_sample_run_number} ..."))
- sample_master_dict[_sample_run_number][MasterDictKeys.data] = load_data_using_multithreading(
- sample_master_dict[_sample_run_number][MasterDictKeys.list_tif], combine_tof=False
+ sample_master_dict[_sample_run_number][MasterDictKeys.data] = (
+ load_data_using_multithreading(
+ sample_master_dict[_sample_run_number][MasterDictKeys.list_tif],
+ combine_tof=False,
+ )
)
logging.info(f"sample# {_sample_run_number} loaded!")
- logging.info(f"{sample_master_dict[_sample_run_number][MasterDictKeys.data].shape = }")
+ logging.info(
+ f"{sample_master_dict[_sample_run_number][MasterDictKeys.data].shape = }"
+ )
if verbose:
display(HTML(f"sample# {_sample_run_number} loaded!"))
- display(HTML(f"{sample_master_dict[_sample_run_number][MasterDictKeys.data].shape = }"))
+ display(
+ HTML(
+ f"{sample_master_dict[_sample_run_number][MasterDictKeys.data].shape = }"
+ )
+ )
if correct_chips_alignment_flag:
logging.info("Correcting chips alignment ...")
if verbose:
display(HTML("Correcting chips alignment ..."))
for _sample_run_number in sample_master_dict.keys():
- sample_master_dict[_sample_run_number][MasterDictKeys.data] = correct_chips_alignment(
- sample_master_dict[_sample_run_number][MasterDictKeys.data], correct_chips_alignment_config
+ sample_master_dict[_sample_run_number][MasterDictKeys.data] = (
+ correct_chips_alignment(
+ sample_master_dict[_sample_run_number][MasterDictKeys.data],
+ correct_chips_alignment_config,
+ )
)
logging.info("Chips alignment corrected!")
if verbose:
@@ -369,7 +414,9 @@ def normalization_with_list_of_full_path(
logging.info(f"\t sample data shape: {data_shape}")
logging.info(f"\t data type of _sample_data: {_sample_data.dtype}")
logging.info(f"\t Number of zeros in sample data: {number_of_zeros}")
- logging.info(f"\t Number of nan in sample data: {np.sum(np.isnan(_sample_data))}")
+ logging.info(
+ f"\t Number of nan in sample data: {np.sum(np.isnan(_sample_data))}"
+ )
logging.info(
f"\t Percentage of zeros in sample data: {number_of_zeros / (data_shape[0] * nbr_pixels) * 100:.2f}%"
)
@@ -380,7 +427,9 @@ def normalization_with_list_of_full_path(
if normalized_by_proton_charge:
logging.info("\t -> Normalized by proton charge")
- proton_charge = sample_master_dict[_sample_run_number][MasterDictKeys.proton_charge]
+ proton_charge = sample_master_dict[_sample_run_number][
+ MasterDictKeys.proton_charge
+ ]
logging.info(f"\t\t proton charge: {proton_charge} C")
logging.info(f"\t\t{type(proton_charge) = }")
logging.info(f"\t\tbefore division: {_sample_data.dtype = }")
@@ -389,7 +438,9 @@ def normalization_with_list_of_full_path(
if normalized_by_monitor_counts:
logging.info("\t -> Normalized by monitor counts")
- monitor_counts = sample_master_dict[_sample_run_number][MasterDictKeys.monitor_counts]
+ monitor_counts = sample_master_dict[_sample_run_number][
+ MasterDictKeys.monitor_counts
+ ]
logging.info(f"\t\t monitor counts: {monitor_counts}")
logging.info(f"\t\t{type(monitor_counts) = }")
_sample_data = _sample_data / monitor_counts
@@ -397,12 +448,18 @@ def normalization_with_list_of_full_path(
if normalized_by_shutter_counts:
list_shutter_values_for_each_image = produce_list_shutter_for_each_image(
- list_time_spectra=ob_master_dict[_ob_run_number][MasterDictKeys.list_spectra],
- list_shutter_counts=sample_master_dict[_sample_run_number][MasterDictKeys.shutter_counts],
+ list_time_spectra=ob_master_dict[_ob_run_number][
+ MasterDictKeys.list_spectra
+ ],
+ list_shutter_counts=sample_master_dict[_sample_run_number][
+ MasterDictKeys.shutter_counts
+ ],
)
sample_data = []
- for _sample, _shutter_value in zip(_sample_data, list_shutter_values_for_each_image, strict=False):
+ for _sample, _shutter_value in zip(
+ _sample_data, list_shutter_values_for_each_image, strict=False
+ ):
sample_data.append(_sample / _shutter_value)
_sample_data = np.array(sample_data)
@@ -412,52 +469,74 @@ def normalization_with_list_of_full_path(
logging.info(f"{ob_data_combined.dtype = }")
# export sample data after correction if requested
- if export_corrected_stack_of_sample_data or export_corrected_integrated_sample_data:
+ if (
+ export_corrected_stack_of_sample_data
+ or export_corrected_integrated_sample_data
+ ):
export_sample_images(
output_folder,
export_corrected_stack_of_sample_data,
export_corrected_integrated_sample_data,
_sample_run_number,
_sample_data,
- spectra_file_name=sample_master_dict[_sample_run_number][MasterDictKeys.spectra_file_name],
+ spectra_file_name=sample_master_dict[_sample_run_number][
+ MasterDictKeys.spectra_file_name
+ ],
)
if dc_data_combined is not None:
- logging.info(f"normalization with DC subtraction")
- _normalized_data = np.divide(np.subtract(_sample_data, dc_data_combined), np.subtract(ob_data_combined, dc_data_combined),
- out=np.zeros_like(_sample_data),
- where=(ob_data_combined - dc_data_combined)!=0)
+ logging.info("normalization with DC subtraction")
+ _normalized_data = np.divide(
+ np.subtract(_sample_data, dc_data_combined),
+ np.subtract(ob_data_combined, dc_data_combined),
+ out=np.zeros_like(_sample_data),
+ where=(ob_data_combined - dc_data_combined) != 0,
+ )
else:
- logging.info(f"normalization without DC subtraction")
- _normalized_data = np.divide(_sample_data, ob_data_combined,
- out=np.zeros_like(_sample_data),
- where=ob_data_combined!=0)
-
+ logging.info("normalization without DC subtraction")
+ _normalized_data = np.divide(
+ _sample_data,
+ ob_data_combined,
+ out=np.zeros_like(_sample_data),
+ where=ob_data_combined != 0,
+ )
+
_normalized_data[ob_data_combined == 0] = 0
normalized_data[_sample_run_number] = _normalized_data
# normalized_data[_sample_run_number] = np.array(np.divide(_sample_data, ob_data_combined))
logging.info(f"{normalized_data[_sample_run_number].shape = }")
logging.info(f"{normalized_data[_sample_run_number].dtype = }")
- logging.info(f"number of NaN in normalized data: {np.sum(np.isnan(normalized_data[_sample_run_number]))}")
- logging.info(f"number of inf in normalized data: {np.sum(np.isinf(normalized_data[_sample_run_number]))}")
+ logging.info(
+ f"number of NaN in normalized data: {np.sum(np.isnan(normalized_data[_sample_run_number]))}"
+ )
+ logging.info(
+ f"number of inf in normalized data: {np.sum(np.isinf(normalized_data[_sample_run_number]))}"
+ )
- detector_delay_us = sample_master_dict[_sample_run_number][MasterDictKeys.detector_delay_us]
- time_spectra = sample_master_dict[_sample_run_number][MasterDictKeys.list_spectra]
+ detector_delay_us = sample_master_dict[_sample_run_number][
+ MasterDictKeys.detector_delay_us
+ ]
+ time_spectra = sample_master_dict[_sample_run_number][
+ MasterDictKeys.list_spectra
+ ]
if time_spectra is None:
- logging.info("Time spectra is None, cannot convert to lambda or energy arrays")
+ logging.info(
+ "Time spectra is None, cannot convert to lambda or energy arrays"
+ )
lambda_array = None
energy_array = None
-
- else:
- logging.info(f"We have a time_spectra!")
+ else:
+ logging.info("We have a time_spectra!")
logging.info(f"time spectra shape: {time_spectra.shape}")
-
+
if detector_delay_us is None:
detector_delay_us = 0.0
- logging.info(f"detector delay is None, setting it to {detector_delay_us} us")
+ logging.info(
+ f"detector delay is None, setting it to {detector_delay_us} us"
+ )
logging.info(f"we have a detector delay of {detector_delay_us} us")
@@ -487,26 +566,29 @@ def normalization_with_list_of_full_path(
logging.info(f"Preview: {preview = }")
if preview:
-
# display preview of normalized data
- fig, axs1 = plt.subplots(1, 2, figsize=(2 * PLOT_SIZE.width, PLOT_SIZE.height))
+ fig, axs1 = plt.subplots(
+ 1, 2, figsize=(2 * PLOT_SIZE.width, PLOT_SIZE.height)
+ )
sample_data_integrated = np.nanmean(_sample_data, axis=0)
im0 = axs1[0].imshow(sample_data_integrated, cmap="gray")
plt.colorbar(im0, ax=axs1[0])
display(HTML(f"Preview of run {_sample_run_number}
"))
display(HTML(f"detector delay: {detector_delay_us:.2f} us"))
-
- axs1[0].set_title(f"Integrated Sample data")
+
+ axs1[0].set_title("Integrated Sample data")
sample_integrated1 = np.nansum(_sample_data, axis=1)
sample_integrated = np.nansum(sample_integrated1, axis=1)
- axs1[1].plot(sample_integrated, 'o')
+ axs1[1].plot(sample_integrated, "o")
axs1[1].set_xlabel("File image index")
axs1[1].set_ylabel("mean of full image")
plt.tight_layout()
- fig, axs2 = plt.subplots(1, 2, figsize=(2 * PLOT_SIZE.width, PLOT_SIZE.height))
+ fig, axs2 = plt.subplots(
+ 1, 2, figsize=(2 * PLOT_SIZE.width, PLOT_SIZE.height)
+ )
ob_data_integrated = np.nanmean(ob_data_combined, axis=0)
im1 = axs2[0].imshow(ob_data_integrated, cmap="gray")
plt.colorbar(im1, ax=axs2[0])
@@ -514,13 +596,15 @@ def normalization_with_list_of_full_path(
ob_integrated1 = np.nansum(ob_data_combined, axis=1)
ob_integrated = np.nansum(ob_integrated1, axis=1)
- axs2[1].plot(ob_integrated, 'o')
+ axs2[1].plot(ob_integrated, "o")
axs2[1].set_xlabel("File image index")
axs2[1].set_ylabel("mean of full image")
plt.tight_layout()
if dc_data_combined is not None:
- fig, axs_dc = plt.subplots(1, 2, figsize=(2 * PLOT_SIZE.width, PLOT_SIZE.height))
+ fig, axs_dc = plt.subplots(
+ 1, 2, figsize=(2 * PLOT_SIZE.width, PLOT_SIZE.height)
+ )
dc_data_integrated = np.nanmean(dc_data_combined, axis=0)
im_dc = axs_dc[0].imshow(dc_data_integrated, cmap="gray")
plt.colorbar(im_dc, ax=axs_dc[0])
@@ -528,26 +612,32 @@ def normalization_with_list_of_full_path(
dc_integrated1 = np.nansum(dc_data_combined, axis=1)
dc_integrated = np.nansum(dc_integrated1, axis=1)
- axs_dc[1].plot(dc_integrated, 'o')
+ axs_dc[1].plot(dc_integrated, "o")
axs_dc[1].set_xlabel("File image index")
axs_dc[1].set_ylabel("mean of full image")
plt.tight_layout()
- fig, axs3 = plt.subplots(1, 2, figsize=(2 * PLOT_SIZE.width, PLOT_SIZE.height))
- normalized_data_integrated = np.nanmean(normalized_data[_sample_run_number], axis=0)
+ fig, axs3 = plt.subplots(
+ 1, 2, figsize=(2 * PLOT_SIZE.width, PLOT_SIZE.height)
+ )
+ normalized_data_integrated = np.nanmean(
+ normalized_data[_sample_run_number], axis=0
+ )
im2 = axs3[0].imshow(normalized_data_integrated, cmap="gray")
plt.colorbar(im2, ax=axs3[0])
- axs3[0].set_title(f"Integrated Normalized data")
+ axs3[0].set_title("Integrated Normalized data")
profile_step1 = np.nanmean(normalized_data[_sample_run_number], axis=1)
profile = np.nanmean(profile_step1, axis=1)
- axs3[1].plot(profile, 'o')
+ axs3[1].plot(profile, "o")
axs3[1].set_xlabel("File image index")
axs3[1].set_ylabel("mean of full image")
plt.tight_layout()
if lambda_array is not None:
- fig, axs4 = plt.subplots(1, 2, figsize=(2 * PLOT_SIZE.width, PLOT_SIZE.height))
+ fig, axs4 = plt.subplots(
+ 1, 2, figsize=(2 * PLOT_SIZE.width, PLOT_SIZE.height)
+ )
logging.info(f"{np.shape(profile) = }")
axs4[0].plot(lambda_array, profile, "*")
@@ -562,24 +652,36 @@ def normalization_with_list_of_full_path(
plt.show()
- if export_corrected_integrated_normalized_data or export_corrected_stack_of_normalized_data:
+ if (
+ export_corrected_integrated_normalized_data
+ or export_corrected_stack_of_normalized_data
+ ):
# make up new output folder name
list_ob_runs = list(ob_master_dict.keys())
- str_ob_runs = "_".join([str(_ob_run_number) for _ob_run_number in list_ob_runs])
+ str_ob_runs = "_".join(
+ [str(_ob_run_number) for _ob_run_number in list_ob_runs]
+ )
full_output_folder = os.path.join(
- output_folder, f"normalized_sample_{_sample_run_number}_obs_{str_ob_runs}"
+ output_folder,
+ f"normalized_sample_{_sample_run_number}_obs_{str_ob_runs}",
) # issue for WEI here !
full_output_folder = os.path.abspath(full_output_folder)
os.makedirs(full_output_folder, exist_ok=True)
if export_corrected_integrated_normalized_data:
# making up the integrated sample data
- sample_data_integrated = np.nanmean(normalized_data[_sample_run_number], axis=0)
+ sample_data_integrated = np.nanmean(
+ normalized_data[_sample_run_number], axis=0
+ )
full_file_name = os.path.join(full_output_folder, "integrated.tif")
- logging.info(f"\t -> Exporting integrated normalized data to {full_file_name} ...")
+ logging.info(
+ f"\t -> Exporting integrated normalized data to {full_file_name} ..."
+ )
make_tiff(data=sample_data_integrated, filename=full_file_name)
- logging.info(f"\t -> Exporting integrated normalized data to {full_file_name} is done!")
+ logging.info(
+ f"\t -> Exporting integrated normalized data to {full_file_name} is done!"
+ )
if export_corrected_stack_of_normalized_data:
output_stack_folder = os.path.join(full_output_folder, "stack")
@@ -587,14 +689,22 @@ def normalization_with_list_of_full_path(
os.makedirs(output_stack_folder, exist_ok=True)
for _index, _data in enumerate(normalized_data[_sample_run_number]):
- _output_file = os.path.join(output_stack_folder, f"image{_index:04d}.tif")
+ _output_file = os.path.join(
+ output_stack_folder, f"image{_index:04d}.tif"
+ )
make_tiff(data=_data, filename=_output_file)
- logging.info(f"\t -> Exporting normalized data to {output_stack_folder} is done!")
+ logging.info(
+ f"\t -> Exporting normalized data to {output_stack_folder} is done!"
+ )
print(f"Exported normalized tif images are in: {output_stack_folder}!")
- spectra_file = sample_master_dict[_sample_run_number][MasterDictKeys.spectra_file_name]
+ spectra_file = sample_master_dict[_sample_run_number][
+ MasterDictKeys.spectra_file_name
+ ]
if spectra_file and Path(spectra_file).exists():
- logging.info(f"Exported time spectra file {spectra_file} to {output_stack_folder}!")
+ logging.info(
+ f"Exported time spectra file {spectra_file} to {output_stack_folder}!"
+ )
shutil.copy(spectra_file, output_stack_folder)
# create x-axis file
@@ -613,7 +723,9 @@ def get_detector_offset_from_nexus(nexus_path: str) -> float:
"""get the detector offset from the nexus file"""
with h5py.File(nexus_path, "r") as hdf5_data:
try:
- detector_offset_micros = hdf5_data["entry"]["DASlogs"]["BL10:Det:TH:DSPT1:TIDelay"]["value"][0]
+ detector_offset_micros = hdf5_data["entry"]["DASlogs"][
+ "BL10:Det:TH:DSPT1:TIDelay"
+ ]["value"][0]
except KeyError:
detector_offset_micros = None
return detector_offset_micros
@@ -642,7 +754,9 @@ def export_sample_images(
make_tiff(data=_data, filename=_output_file)
logging.info(f"\t -> Exporting sample data to {output_stack_folder} is done!")
shutil.copy(spectra_file_name, os.path.join(output_stack_folder))
- logging.info(f"\t -> Exporting spectra file {spectra_file_name} to {output_stack_folder} is done!")
+ logging.info(
+ f"\t -> Exporting spectra file {spectra_file_name} to {output_stack_folder} is done!"
+ )
if export_corrected_integrated_sample_data:
# making up the integrated sample data
@@ -650,7 +764,9 @@ def export_sample_images(
full_file_name = os.path.join(sample_output_folder, "integrated.tif")
logging.info(f"\t -> Exporting integrated sample data to {full_file_name} ...")
make_tiff(data=sample_data_integrated, filename=full_file_name)
- logging.info(f"\t -> Exporting integrated sample data to {full_file_name} is done!")
+ logging.info(
+ f"\t -> Exporting integrated sample data to {full_file_name} is done!"
+ )
display(HTML(f"Created folder {output_stack_folder} for sample outputs!"))
@@ -667,10 +783,13 @@ def export_ob_images(
logging.info(f"> Exporting combined ob images to {output_folder} ...")
logging.info(f"\t{ob_run_numbers = }")
list_ob_runs_number_only = [
- str(isolate_run_number_from_full_path(_ob_run_number)) for _ob_run_number in ob_run_numbers
+ str(isolate_run_number_from_full_path(_ob_run_number))
+ for _ob_run_number in ob_run_numbers
]
if len(list_ob_runs_number_only) == 1:
- ob_output_folder = os.path.join(output_folder, f"ob_{list_ob_runs_number_only[0]}")
+ ob_output_folder = os.path.join(
+ output_folder, f"ob_{list_ob_runs_number_only[0]}"
+ )
else:
str_list_ob_runs = "_".join(list_ob_runs_number_only)
ob_output_folder = os.path.join(output_folder, f"ob_{str_list_ob_runs}")
@@ -699,7 +818,9 @@ def export_ob_images(
logging.info(f"\t -> Exporting ob data to {output_stack_folder} is done!")
# copy spectra file to the output folder
shutil.copy(spectra_file_name, os.path.join(output_stack_folder))
- logging.info(f"\t -> Exported spectra file {spectra_file_name} to {output_stack_folder}!")
+ logging.info(
+ f"\t -> Exported spectra file {spectra_file_name} to {output_stack_folder}!"
+ )
display(HTML(f"Created folder {output_stack_folder} for OB outputs!"))
@@ -790,7 +911,9 @@ def update_dict_with_shutter_counts(master_dict: dict) -> tuple[dict, bool]:
if _value == "0":
break
list_shutter_counts.append(float(_value))
- master_dict[run_number][MasterDictKeys.shutter_counts] = list_shutter_counts
+ master_dict[run_number][MasterDictKeys.shutter_counts] = (
+ list_shutter_counts
+ )
return master_dict, status_all_shutter_counts_found
@@ -827,11 +950,15 @@ def update_dict_with_proton_charge(master_dict: dict) -> tuple[dict, bool]:
try:
with h5py.File(_nexus_path, "r") as hdf5_data:
- proton_charge = hdf5_data["entry"][MasterDictKeys.proton_charge][0] / 1e12
+ proton_charge = (
+ hdf5_data["entry"][MasterDictKeys.proton_charge][0] / 1e12
+ )
except KeyError:
proton_charge = None
status_all_proton_charge_found = False
- master_dict[_run_number][MasterDictKeys.proton_charge] = np.float32(proton_charge)
+ master_dict[_run_number][MasterDictKeys.proton_charge] = np.float32(
+ proton_charge
+ )
return status_all_proton_charge_found
@@ -852,14 +979,18 @@ def update_dict_with_monitor_counts(master_dict: dict) -> bool:
except KeyError:
monitor_counts = None
status_all_monitor_counts_found = False
- master_dict[_run_number][MasterDictKeys.monitor_counts] = np.float32(monitor_counts)
+ master_dict[_run_number][MasterDictKeys.monitor_counts] = np.float32(
+ monitor_counts
+ )
return status_all_monitor_counts_found
def update_dict_with_list_of_images(master_dict: dict) -> dict:
"""update the master dict with list of images"""
for _run_number in master_dict.keys():
- list_tif = retrieve_list_of_tif(master_dict[_run_number][MasterDictKeys.data_path])
+ list_tif = retrieve_list_of_tif(
+ master_dict[_run_number][MasterDictKeys.data_path]
+ )
logging.info(f"Retrieved {len(list_tif)} tif files for run {_run_number}!")
master_dict[_run_number][MasterDictKeys.list_tif] = list_tif
@@ -871,7 +1002,9 @@ def get_list_run_number(data_folder: str) -> list:
return list_run_number
-def update_dict_with_nexus_full_path(nexus_root_path: str, instrument: str, master_dict: dict) -> dict:
+def update_dict_with_nexus_full_path(
+ nexus_root_path: str, instrument: str, master_dict: dict
+) -> dict:
"""create dict of nexus path for each run number"""
for run_number in master_dict.keys():
master_dict[run_number][MasterDictKeys.nexus_path] = os.path.join(
@@ -892,7 +1025,9 @@ def update_with_nexus_metadata(master_dict: dict) -> dict:
def update_dict_with_data_full_path(data_root_path: str, master_dict: dict) -> dict:
"""create dict of data path for each run number"""
for run_number in master_dict.keys():
- master_dict[run_number][MasterDictKeys.data_path] = os.path.join(data_root_path, f"Run_{run_number}")
+ master_dict[run_number][MasterDictKeys.data_path] = os.path.join(
+ data_root_path, f"Run_{run_number}"
+ )
def create_master_dict(
@@ -942,7 +1077,9 @@ def create_master_dict(
return master_dict, status_metadata
-def produce_list_shutter_for_each_image(list_time_spectra: list = None, list_shutter_counts: list = None) -> list:
+def produce_list_shutter_for_each_image(
+ list_time_spectra: list = None, list_shutter_counts: list = None
+) -> list:
"""produce list of shutter counts for each image"""
delat_time_spectra = list_time_spectra[1] - list_time_spectra[0]
@@ -952,31 +1089,41 @@ def produce_list_shutter_for_each_image(list_time_spectra: list = None, list_shu
logging.info(f"\t{list_index_jump = }")
logging.info(f"\t{list_shutter_counts = }")
- list_shutter_values_for_each_image = np.zeros(len(list_time_spectra), dtype=np.float32)
+ list_shutter_values_for_each_image = np.zeros(
+ len(list_time_spectra), dtype=np.float32
+ )
if len(list_shutter_counts) == 1: # resonance mode
list_shutter_values_for_each_image.fill(list_shutter_counts[0])
return list_shutter_values_for_each_image
- list_shutter_values_for_each_image[0 : list_index_jump[0] + 1].fill(list_shutter_counts[0])
+ list_shutter_values_for_each_image[0 : list_index_jump[0] + 1].fill(
+ list_shutter_counts[0]
+ )
for _index in range(1, len(list_index_jump)):
_start = list_index_jump[_index - 1]
_end = list_index_jump[_index]
- list_shutter_values_for_each_image[_start + 1 : _end + 1].fill(list_shutter_counts[_index])
+ list_shutter_values_for_each_image[_start + 1 : _end + 1].fill(
+ list_shutter_counts[_index]
+ )
- list_shutter_values_for_each_image[list_index_jump[-1] + 1 :] = list_shutter_counts[-1]
+ list_shutter_values_for_each_image[list_index_jump[-1] + 1 :] = list_shutter_counts[
+ -1
+ ]
return list_shutter_values_for_each_image
-def replace_zero_with_local_median(data: np.ndarray,
- kernel_size: Tuple[int, int, int] = (3, 3, 3),
- max_iterations: int = 10) -> np.ndarray:
+def replace_zero_with_local_median(
+ data: np.ndarray,
+ kernel_size: Tuple[int, int, int] = (3, 3, 3),
+ max_iterations: int = 10,
+) -> np.ndarray:
"""
Replace 0 values in a 3D array using local median filtering.
This function ONLY processes small neighborhoods around 0 pixels,
avoiding expensive computation on the entire dataset.
-
+
Parameters:
-----------
data : np.ndarray
@@ -987,7 +1134,7 @@ def replace_zero_with_local_median(data: np.ndarray,
max_iterations : int
Maximum number of iterations to replace 0 values
Default is 10
-
+
Returns:
--------
np.ndarray
@@ -1006,7 +1153,7 @@ def replace_zero_with_local_median(data: np.ndarray,
# Calculate padding for kernel
pad_h, pad_w, pad_d = [k // 2 for k in kernel_size]
-
+
for iteration in range(max_iterations):
# Find current 0 locations
zero_coords = np.argwhere(result == 0)
@@ -1016,13 +1163,15 @@ def replace_zero_with_local_median(data: np.ndarray,
logging.info(f"All 0 values replaced after {iteration} iterations")
break
- logging.info(f"Iteration {iteration + 1}: {current_zero_count} 0 values remaining")
+ logging.info(
+ f"Iteration {iteration + 1}: {current_zero_count} 0 values remaining"
+ )
# Process each 0 pixel individually
replaced_count = 0
for coord in zero_coords:
y, x, z = coord
-
+
# Define the local neighborhood bounds
y_min = max(0, y - pad_h)
y_max = min(result.shape[0], y + pad_h + 1)
@@ -1030,13 +1179,13 @@ def replace_zero_with_local_median(data: np.ndarray,
x_max = min(result.shape[1], x + pad_w + 1)
z_min = max(0, z - pad_d)
z_max = min(result.shape[2], z + pad_d + 1)
-
+
# Extract the local neighborhood
neighborhood = result[y_min:y_max, x_min:x_max, z_min:z_max]
-
+
# Get non-NaN values in the neighborhood
valid_values = neighborhood[~np.isnan(neighborhood)]
-
+
# If we have valid values, compute median and replace
if len(valid_values) > 0:
median_value = np.median(valid_values)
@@ -1048,30 +1197,34 @@ def replace_zero_with_local_median(data: np.ndarray,
# If no progress was made, break
if replaced_count == 0:
remaining_zero_count = np.sum(result == 0)
- logging.info(f"No progress made. {remaining_zero_count} zero values could not be replaced")
+ logging.info(
+ f"No progress made. {remaining_zero_count} zero values could not be replaced"
+ )
logging.info("(These may be in regions with no valid neighbors)")
break
final_zero_count = np.sum(result == 0)
logging.info(f"Final zero count: {final_zero_count}")
- logging.info(f"Successfully replaced {initial_zero_count - final_zero_count} zero values")
+ logging.info(
+ f"Successfully replaced {initial_zero_count - final_zero_count} zero values"
+ )
return result
def combine_dc_images(dc_master_dict: dict) -> np.ndarray:
"""combine all dc images
-
+
Parameters:
-----------
dc_master_dict : dict
master dict of dc run numbers
-
+
Returns:
--------
np.ndarray
combined dc data
-
+
"""
logging.info("Combining all dark current images")
full_dc_data = []
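As a point of comparison only: a one-pass variant of the zero replacement above using scipy.ndimage.median_filter. It is not the author's method; it filters the whole volume (which replace_zero_with_local_median deliberately avoids when only a few pixels are zero) and makes a single pass, so zero clusters wider than the kernel may remain.

import numpy as np
from scipy.ndimage import median_filter

def fill_zeros_with_local_median(data, kernel_size=(3, 3, 3)):
    # Compute the local median everywhere, then substitute it only where data == 0.
    local_median = median_filter(data, size=kernel_size, mode="nearest")
    return np.where(data == 0, local_median, data)

stack = np.ones((4, 4, 4), dtype=np.float32)
stack[1, 2, 2] = 0.0
print(fill_zeros_with_local_median(stack)[1, 2, 2])  # 1.0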
@@ -1082,7 +1235,9 @@ def combine_dc_images(dc_master_dict: dict) -> np.ndarray:
for _dc_run_number in dc_master_dict.keys():
logging.info(f"Combining dc# {_dc_run_number} ...")
- dc_data = np.array(dc_master_dict[_dc_run_number][MasterDictKeys.data], dtype=np.float32)
+ dc_data = np.array(
+ dc_master_dict[_dc_run_number][MasterDictKeys.data], dtype=np.float32
+ )
full_dc_data.append(dc_data)
logging.info(f"{np.shape(full_dc_data) = }")
@@ -1101,11 +1256,11 @@ def combine_ob_images(
use_shutter_counts: bool = False,
replace_ob_zeros_by_nan: bool = False,
replace_ob_zeros_by_local_median: bool = False,
- kernel_size_for_local_median: Tuple[int, int, int] = (3, 3, 3),
+ kernel_size_for_local_median: Tuple[int, int, int] = (3, 3, 3),
max_iterations: int = 10,
) -> np.ndarray:
"""combine all ob images and correct by proton charge and shutter counts
-
+
Parameters:
-----------
ob_master_dict : dict
@@ -1124,12 +1279,12 @@ def combine_ob_images(
kernel size for local median filtering
max_iterations : int
maximum number of iterations for local median filtering
-
+
Returns:
--------
np.ndarray
combined ob data
-
+
"""
logging.info("Combining all open beam images")
@@ -1137,15 +1292,21 @@ def combine_ob_images(
logging.info(f"\tcorrecting by monitor counts: {use_monitor_counts}")
logging.info(f"\tshutter counts: {use_shutter_counts}")
logging.info(f"\treplace ob zeros by nan: {replace_ob_zeros_by_nan}")
- logging.info(f"\treplace ob zeros by local median: {replace_ob_zeros_by_local_median}")
- logging.info(f"\tkernel size for local median: y:{kernel_size_for_local_median[0]}, "
- f"x:{kernel_size_for_local_median[1]}, "
- f"tof:{kernel_size_for_local_median[2]}")
+ logging.info(
+ f"\treplace ob zeros by local median: {replace_ob_zeros_by_local_median}"
+ )
+ logging.info(
+ f"\tkernel size for local median: y:{kernel_size_for_local_median[0]}, "
+ f"x:{kernel_size_for_local_median[1]}, "
+ f"tof:{kernel_size_for_local_median[2]}"
+ )
full_ob_data_corrected = []
for _ob_run_number in ob_master_dict.keys():
logging.info(f"Combining ob# {_ob_run_number} ...")
- ob_data = np.array(ob_master_dict[_ob_run_number][MasterDictKeys.data], dtype=np.float32)
+ ob_data = np.array(
+ ob_master_dict[_ob_run_number][MasterDictKeys.data], dtype=np.float32
+ )
# get statistics of ob data
data_shape = ob_data.shape
@@ -1154,7 +1315,9 @@ def combine_ob_images(
number_of_zeros = np.sum(ob_data == 0)
logging.info(f"\t ob data shape: {data_shape}")
logging.info(f"\t Number of zeros in ob data: {number_of_zeros}")
- logging.info(f"\t Percentage of zeros in ob data: {number_of_zeros / (data_shape[0] * nbr_pixels) * 100:.2f}%")
+ logging.info(
+ f"\t Percentage of zeros in ob data: {number_of_zeros / (data_shape[0] * nbr_pixels) * 100:.2f}%"
+ )
logging.info(f"\t Mean of ob data: {np.mean(ob_data)}")
logging.info(f"\t maximum of ob data: {np.max(ob_data)}")
logging.info(f"\t minimum of ob data: {np.min(ob_data)}")
@@ -1172,7 +1335,9 @@ def combine_ob_images(
if use_monitor_counts:
logging.info("\t -> Normalized by monitor counts")
- monitor_counts = ob_master_dict[_ob_run_number][MasterDictKeys.monitor_counts]
+ monitor_counts = ob_master_dict[_ob_run_number][
+ MasterDictKeys.monitor_counts
+ ]
logging.info(f"\t\t monitor counts: {monitor_counts}")
logging.info(f"\t\t{type(monitor_counts) = }")
ob_data = ob_data / monitor_counts
@@ -1182,14 +1347,20 @@ def combine_ob_images(
logging.info("\t -> Normalized by shutter counts")
list_shutter_values_for_each_image = produce_list_shutter_for_each_image(
- list_time_spectra=ob_master_dict[_ob_run_number][MasterDictKeys.list_spectra],
- list_shutter_counts=ob_master_dict[_ob_run_number][MasterDictKeys.shutter_counts],
+ list_time_spectra=ob_master_dict[_ob_run_number][
+ MasterDictKeys.list_spectra
+ ],
+ list_shutter_counts=ob_master_dict[_ob_run_number][
+ MasterDictKeys.shutter_counts
+ ],
)
logging.info(f"{list_shutter_values_for_each_image.shape = }")
temp_ob_data = np.empty_like(ob_data, dtype=np.float32)
for _index in range(len(list_shutter_values_for_each_image)):
- temp_ob_data[_index] = ob_data[_index] / list_shutter_values_for_each_image[_index]
+ temp_ob_data[_index] = (
+ ob_data[_index] / list_shutter_values_for_each_image[_index]
+ )
logging.info(f"{temp_ob_data.shape = }")
ob_data = temp_ob_data.copy()
@@ -1197,9 +1368,11 @@ def combine_ob_images(
# logging.info(f"{ob_data_combined.shape = }")
if replace_ob_zeros_by_local_median:
- ob_data = replace_zero_with_local_median(ob_data,
- kernel_size=kernel_size_for_local_median,
- max_iterations=max_iterations)
+ ob_data = replace_zero_with_local_median(
+ ob_data,
+ kernel_size=kernel_size_for_local_median,
+ max_iterations=max_iterations,
+ )
full_ob_data_corrected.append(ob_data)
logging.info(f"{np.shape(full_ob_data_corrected) = }")
@@ -1224,9 +1397,15 @@ def combine_ob_images(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
- parser.add_argument("--sample", type=str, nargs=1, help="Full path to sample run number")
- parser.add_argument("--ob", type=str, nargs=1, help="Full path to the ob run number")
- parser.add_argument("--output", type=str, nargs=1, help="Path to the output folder", default="./")
+ parser.add_argument(
+ "--sample", type=str, nargs=1, help="Full path to sample run number"
+ )
+ parser.add_argument(
+ "--ob", type=str, nargs=1, help="Full path to the ob run number"
+ )
+ parser.add_argument(
+ "--output", type=str, nargs=1, help="Path to the output folder", default="./"
+ )
args = parser.parse_args()
logging.info(f"{args = }")
@@ -1275,7 +1454,9 @@ def combine_ob_images(
# normalization(sample_folder=sample_folder, ob_folder=ob_folder, output_folder=output_folder)
- print(f"Normalization is done! Check the log file {log_file_name} for more details!")
+ print(
+ f"Normalization is done! Check the log file {log_file_name} for more details!"
+ )
print(f"Exported data to {output_folder}")
# sample = /SNS/VENUS/IPTS-34808/shared/autoreduce/mcp/November17_Sample6_UA_H_Batteries_1_5_Angs_min_30Hz_5C
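A hedged sketch of the proton-charge correction implied by update_dict_with_proton_charge above: the dataset name "entry/proton_charge" stands in for the MasterDictKeys constant used in the diff and is an assumption here; the 1e12 picocoulomb-to-coulomb factor is taken from the hunk.

from typing import Optional

import h5py
import numpy as np

def read_proton_charge(nexus_path: str) -> Optional[float]:
    # "entry/proton_charge" assumed to match MasterDictKeys.proton_charge; pC -> C.
    try:
        with h5py.File(nexus_path, "r") as hdf5_data:
            return float(hdf5_data["entry"]["proton_charge"][0]) / 1e12
    except (KeyError, OSError):
        return None

def scale_by_proton_charge(stack: np.ndarray, proton_charge: float) -> np.ndarray:
    # Same elementwise division applied to the sample and open-beam stacks in the diff.
    return stack.astype(np.float32) / np.float32(proton_charge)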
diff --git a/notebooks/__code/normalization_resonance/normalization_resonance.py b/notebooks/__code/normalization_resonance/normalization_resonance.py
index 75ad3d2c..0f3c428f 100644
--- a/notebooks/__code/normalization_resonance/normalization_resonance.py
+++ b/notebooks/__code/normalization_resonance/normalization_resonance.py
@@ -1,42 +1,29 @@
-import glob
import logging
import logging as notebook_logging
import os
-from pathlib import Path
-import numpy as np
import ipywidgets as widgets
import matplotlib.pyplot as plt
from IPython.display import HTML, display
from ipywidgets import interactive
-from PIL import Image
-from pleiades.processing.normalization import normalization as normalization_with_pleaides
+from pleiades.processing.normalization import (
+ normalization as normalization_with_pleaides,
+)
from pleiades.processing import Roi as PleiadesRoi
from pleiades.processing import Facility
-from __code._utilities.list import extract_list_of_runs_from_string
-from __code._utilities.nexus import extract_file_path_from_nexus
# from __code.ipywe.myfileselector import MyFileSelectorPanel
from __code.normalization_tof.normalization_tof import NormalizationTof
-from __code.ipywe.fileselector import FileSelectorPanel as MyFileSelectorPanel
-from __code.normalization_tof import DetectorType, autoreduce_dir, distance_source_detector_m, raw_dir
-from __code.normalization_tof.config import DEBUG_DATA, timepix1_config, timepix3_config
-from __code.normalization_tof.normalization_for_timepix1_timepix3 import (
- load_data_using_multithreading,
- # normalization,
- normalization_with_list_of_full_path,
- retrieve_list_of_tif,
-)
+from __code.normalization_tof import DetectorType
class NormalizationResonance(NormalizationTof):
-
sample_folder = None
sample_run_numbers = None
sample_run_numbers_selected = None
-
+
ob_folder = None
ob_run_numbers = None
ob_run_numbers_selected = None
@@ -46,7 +33,7 @@ class NormalizationResonance(NormalizationTof):
dc_run_numbers_selected = None
output_folder = None
-
+
# {'full_path_data': {'data': None, 'nexus': None}}
dict_sample = {}
dict_ob = {}
@@ -75,11 +62,13 @@ def initialize(self):
def retrieve_nexus_file_path(self):
"""
Retrieve the NeXus file paths for sample, OB and DC.
-
+
This function assumes that the NeXus files are named in a specific format"""
all_nexus_files_found = True
- notebook_logging.info("Retrieving NeXus file paths for sample, OB and DC runs...")
+ notebook_logging.info(
+ "Retrieving NeXus file paths for sample, OB and DC runs..."
+ )
notebook_logging.info("\tworking with sample runs:")
for full_path in self.dict_sample.keys():
@@ -143,7 +132,6 @@ def retrieve_nexus_file_path(self):
return all_nexus_files_found
def settings(self):
-
self.select_roi_widget = widgets.Checkbox(
description="Select ROI on the detector",
value=True,
@@ -169,20 +157,22 @@ def settings(self):
self.select_roi_widget,
self.combine_mode_widget,
],
- layout=widgets.Layout(padding="10px",
- border="solid 1px",
- width="620px"),
+ layout=widgets.Layout(padding="10px", border="solid 1px", width="620px"),
)
display(verti_layout)
def normalization(self):
- display(HTML("Running normalization ..."))
+ display(
+ HTML(
+ "Running normalization ..."
+ )
+ )
sample_folders = list(self.dict_sample.keys())
ob_folders = list(self.dict_ob.keys())
facility = Facility.ornl
nexus_full_path = os.path.join(self.nexus_folder)
-
+
if self.roi:
left = self.roi.left
top = self.roi.top
@@ -190,23 +180,29 @@ def normalization(self):
height = self.roi.height
roi = PleiadesRoi(x1=left, y1=top, width=width, height=height)
- logging.info(f"normalization:")
+ logging.info("normalization:")
logging.info(f"\tsample_folders: {sample_folders}")
logging.info(f"\tob_folders: {ob_folders}")
logging.info(f"\roi: {roi}")
logging.info(f"\tnexus_path: {nexus_full_path}")
- self.transmission = normalization_with_pleaides(list_sample_folders=sample_folders,
- list_obs_folders=ob_folders,
- nexus_path=nexus_full_path,
- facility=facility,
- combine_mode=self.combine_mode_widget.value,
- roi=roi,
- pc_uncertainty=0.005,
- output_folder=self.output_folder,)
-
+ self.transmission = normalization_with_pleaides(
+ list_sample_folders=sample_folders,
+ list_obs_folders=ob_folders,
+ nexus_path=nexus_full_path,
+ facility=facility,
+ combine_mode=self.combine_mode_widget.value,
+ roi=roi,
+ pc_uncertainty=0.005,
+ output_folder=self.output_folder,
+ )
+
logging.info("Done with normalization.")
- display(HTML("Done with normalization."))
+ display(
+ HTML(
+ "Done with normalization."
+ )
+ )
self.display_normalization_results()
@@ -218,10 +214,9 @@ def display_normalization_results(self):
sample_folders = list(self.dict_sample.keys())
default_xmin = 0
- default_xmax = len(transmission[0].energy)-1
+ default_xmax = len(transmission[0].energy) - 1
def plot_transmission(_index, xrange):
-
_transmission = transmission[_index]
xmin, xmax = xrange
@@ -237,25 +232,33 @@ def plot_transmission(_index, xrange):
y_axis = _transmission.transmission[::-1]
y_error = _transmission.uncertainty[::-1]
- ax.errorbar(x_axis[xmin:xmax], y_axis[xmin:xmax], yerr=y_error[xmin:xmax], fmt="-o", markersize=2, label="Transmission")
+ ax.errorbar(
+ x_axis[xmin:xmax],
+ y_axis[xmin:xmax],
+ yerr=y_error[xmin:xmax],
+ fmt="-o",
+ markersize=2,
+ label="Transmission",
+ )
ax.set_xlabel("Energy (eV)")
ax.set_ylabel("Transmission")
ax.set_title("Normalized Transmission Spectrum")
plt.show()
- display(HTML(f"Metadata:"))
+ display(HTML("Metadata:"))
- n_dead_pixels = _transmission.metadata['n_dead_pixels']
- n_valid_pixels = _transmission.metadata['n_valid_pixels']
- sample_proton_charge = _transmission.metadata['sample_proton_charge']
- ob_proton_charge = _transmission.metadata['ob_proton_charge']
- pc_uncertainty_sample = _transmission.metadata['pc_uncertainty_sample']
- pc_uncertainty_ob = _transmission.metadata['pc_uncertainty_ob']
- method_used = _transmission.metadata['method']
- sample_folder = _transmission.metadata['sample_folder']
- ob_folders = _transmission.metadata['ob_folders']
+ n_dead_pixels = _transmission.metadata["n_dead_pixels"]
+ n_valid_pixels = _transmission.metadata["n_valid_pixels"]
+ sample_proton_charge = _transmission.metadata["sample_proton_charge"]
+ ob_proton_charge = _transmission.metadata["ob_proton_charge"]
+ pc_uncertainty_sample = _transmission.metadata["pc_uncertainty_sample"]
+ pc_uncertainty_ob = _transmission.metadata["pc_uncertainty_ob"]
+ method_used = _transmission.metadata["method"]
+ sample_folder = _transmission.metadata["sample_folder"]
+ ob_folders = _transmission.metadata["ob_folders"]
- display(HTML(f"""
+ display(
+ HTML(f"""
| Parameter | Value |
| Number of Dead Pixels | {n_dead_pixels} |
| Number of Valid Pixels | {n_valid_pixels} |
@@ -266,10 +269,17 @@ def plot_transmission(_index, xrange):
| OB Folders | {ob_folders} |
| Output Folder | {self.output_folder} |
| ROI Selected | {self.roi} |
-
"""))
+
""")
+ )
- file_created = os.path.join(self.output_folder, os.path.basename(sample_folder), "_transmission.txt")
- display(HTML(f"Transmission file created: {file_created}"))
+ file_created = os.path.join(
+ self.output_folder, os.path.basename(sample_folder), "_transmission.txt"
+ )
+ display(
+ HTML(
+ f"Transmission file created: {file_created}"
+ )
+ )
display_plot_transmisison = interactive(
plot_transmission,
@@ -280,16 +290,16 @@ def plot_transmission(_index, xrange):
step=1,
description="File index:",
continuous_update=False,
- disabled=True if len(transmission) == 1 else False,
+ disabled=True if len(transmission) == 1 else False,
+ layout=widgets.Layout(width="600px"),
+ ),
+ xrange=widgets.IntRangeSlider(
+ value=[default_xmin, default_xmax],
+ min=default_xmin,
+ max=default_xmax,
+ description="E range:",
+ continuous_update=True,
layout=widgets.Layout(width="600px"),
),
- xrange=widgets.IntRangeSlider(value=[default_xmin, default_xmax],
- min=default_xmin,
- max=default_xmax,
- description="E range:",
- continuous_update=True,
- layout=widgets.Layout(width="600px")),
-
)
display(display_plot_transmisison)
-
diff --git a/notebooks/__code/normalization_resonance/units.py b/notebooks/__code/normalization_resonance/units.py
index 8acb982f..43c1bc96 100644
--- a/notebooks/__code/normalization_resonance/units.py
+++ b/notebooks/__code/normalization_resonance/units.py
@@ -135,7 +135,9 @@ def convert_to_cross_section(from_unit, to_unit):
return conversion_factors[from_unit] / conversion_factors[to_unit]
-def convert_from_wavelength_to_energy_ev(wavelength, unit_from=DistanceUnitOptions.angstrom):
+def convert_from_wavelength_to_energy_ev(
+ wavelength, unit_from=DistanceUnitOptions.angstrom
+):
"""Convert wavelength to energy based on the given units.
Args:
@@ -177,15 +179,21 @@ def convert_array_from_time_to_lambda(
np.ndarray: Array of wavelength values.
"""
time_array_s = time_array * convert_time_units(time_unit, TimeUnitOptions.s)
- detector_offset_s = detector_offset * convert_time_units(detector_offset_unit, TimeUnitOptions.s)
+ detector_offset_s = detector_offset * convert_time_units(
+ detector_offset_unit, TimeUnitOptions.s
+ )
distance_source_detector_m = distance_source_detector * convert_distance_units(
distance_source_detector_unit, DistanceUnitOptions.m
)
h_over_mn = h / m_n
- lambda_m = h_over_mn * (time_array_s + detector_offset_s) / distance_source_detector_m
+ lambda_m = (
+ h_over_mn * (time_array_s + detector_offset_s) / distance_source_detector_m
+ )
- lambda_converted = lambda_m * convert_distance_units(DistanceUnitOptions.m, lambda_unit)
+ lambda_converted = lambda_m * convert_distance_units(
+ DistanceUnitOptions.m, lambda_unit
+ )
return lambda_converted
@@ -225,13 +233,22 @@ def convert_array_from_time_to_energy(
detector_units_factor = convert_time_units(detector_offset_unit, TimeUnitOptions.s)
detector_offset = detector_units_factor * detector_offset
- distance_source_detector_factor = convert_distance_units(distance_source_detector_unit, DistanceUnitOptions.m)
- distance_source_detector_m = distance_source_detector * distance_source_detector_factor
+ distance_source_detector_factor = convert_distance_units(
+ distance_source_detector_unit, DistanceUnitOptions.m
+ )
+ distance_source_detector_m = (
+ distance_source_detector * distance_source_detector_factor
+ )
# Calculate the energy in eV using the formula E_ev = 1/2 m_n (L/t_tof)^2 / electron_volt
full_time_array_s = time_array_s + detector_offset
- energy_array_ev = 0.5 * m_n * (distance_source_detector_m / full_time_array_s) ** 2 / electron_volt
+ energy_array_ev = (
+ 0.5
+ * m_n
+ * (distance_source_detector_m / full_time_array_s) ** 2
+ / electron_volt
+ )
energy_array_factor = convert_to_energy(EnergyUnitOptions.eV, energy_unit)
energy_array = energy_array_ev * energy_array_factor
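A compact, self-contained version of the two conversions that units.py reformats above, assuming inputs are already in SI units (the module itself converts units first via convert_time_units and convert_distance_units); the 25 m flight path in the example is illustrative only.

import numpy as np
from scipy.constants import h, m_n, electron_volt

def tof_to_wavelength_angstrom(tof_s, detector_offset_s, distance_m):
    # lambda = (h / m_n) * (t_tof + offset) / L, then meters -> angstroms
    lambda_m = (h / m_n) * (np.asarray(tof_s) + detector_offset_s) / distance_m
    return lambda_m * 1e10

def tof_to_energy_ev(tof_s, detector_offset_s, distance_m):
    # E_ev = 0.5 * m_n * (L / (t_tof + offset))**2 / electron_volt
    t = np.asarray(tof_s) + detector_offset_s
    return 0.5 * m_n * (distance_m / t) ** 2 / electron_volt

print(tof_to_wavelength_angstrom(10e-3, 0.0, 25.0))  # ~1.58 angstrom
print(tof_to_energy_ev(10e-3, 0.0, 25.0))            # ~0.033 eV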
diff --git a/notebooks/__code/normalization_script.py b/notebooks/__code/normalization_script.py
index 69f0b77e..89a9847a 100755
--- a/notebooks/__code/normalization_script.py
+++ b/notebooks/__code/normalization_script.py
@@ -5,9 +5,15 @@
parser = argparse.ArgumentParser(description="Neutron Imaging Normalization")
parser.add_argument("-o", "--output", help="output folder", type=str)
-parser.add_argument("-sf", "--sample_files", help="comma separated list of samples", type=str)
-parser.add_argument("-of", "--ob_files", help="comma separated list of open beams", type=str)
-parser.add_argument("-dc", "--dc_files", help="comma separated list of dark current", type=str)
+parser.add_argument(
+ "-sf", "--sample_files", help="comma separated list of samples", type=str
+)
+parser.add_argument(
+ "-of", "--ob_files", help="comma separated list of open beams", type=str
+)
+parser.add_argument(
+ "-dc", "--dc_files", help="comma separated list of dark current", type=str
+)
parser.add_argument("-rois", help="colon string of each roi: x0,y0,x1,y1")
diff --git a/notebooks/__code/normalization_tof/__init__.py b/notebooks/__code/normalization_tof/__init__.py
index 7f54b5b4..e5acfdce 100644
--- a/notebooks/__code/normalization_tof/__init__.py
+++ b/notebooks/__code/normalization_tof/__init__.py
@@ -17,8 +17,8 @@ class DataType:
ob = "ob"
dc = "dc"
-class Roi:
+class Roi:
def __init__(self, left: int = 0, top: int = 0, width: int = 1, height: int = 1):
self.left: int = left
self.top: int = top
diff --git a/notebooks/__code/normalization_tof/config.py b/notebooks/__code/normalization_tof/config.py
index c92ea4d0..90a35d9f 100644
--- a/notebooks/__code/normalization_tof/config.py
+++ b/notebooks/__code/normalization_tof/config.py
@@ -13,6 +13,7 @@ class DEBUG_DATA:
container_roi = [150, 150, 40, 40]
detector_type = DetectorType.tpx1 # timepix1 or timepix3
+
## timepix3
# class DEBUG_DATA:
# ipts = "IPTS-35167"
diff --git a/notebooks/__code/normalization_tof/normalization_for_timepix1_timepix3.py b/notebooks/__code/normalization_tof/normalization_for_timepix1_timepix3.py
index cfeb3206..cb48e44e 100644
--- a/notebooks/__code/normalization_tof/normalization_for_timepix1_timepix3.py
+++ b/notebooks/__code/normalization_tof/normalization_for_timepix1_timepix3.py
@@ -1,22 +1,9 @@
-import argparse
-import glob
import logging
-import multiprocessing as mp
import os
-from random import sample
-import shutil
-from pathlib import Path
from typing import Tuple
-from annotated_types import Not
-import h5py
-import matplotlib.pyplot as plt
import numpy as np
-import pandas as pd
from IPython.display import HTML, display
-from PIL import Image
-from skimage.io import imread
-from scipy.ndimage import median_filter
from __code.normalization_tof.utilities import *
@@ -26,11 +13,12 @@
MARKERSIZE = 2
+
class NormalizedData:
- data= {}
- lambda_array= None
- tof_array= None
- energy_array= None
+ data = {}
+ lambda_array = None
+ tof_array = None
+ energy_array = None
from __code.normalization_tof.units import (
@@ -47,15 +35,18 @@ class NormalizedData:
PROTON_CHARGE_TOLERANCE = 0.1
+
def initialize_logging():
"""initialize logging"""
file_name, ext = os.path.splitext(os.path.basename(__file__))
user_name = os.getlogin() # add user name to the log file name
log_file_name = os.path.join(LOG_PATH, f"{user_name}_{file_name}.log")
- logging.basicConfig(filename=log_file_name,
- filemode='w',
- format='[%(levelname)s] - %(asctime)s - %(message)s',
- level=logging.INFO)
+ logging.basicConfig(
+ filename=log_file_name,
+ filemode="w",
+ format="[%(levelname)s] - %(asctime)s - %(message)s",
+ level=logging.INFO,
+ )
logging.info(f"*** Starting a new script {file_name} ***")
@@ -82,11 +73,12 @@ def normalization_with_list_of_full_path(
correct_chips_alignment_flag: bool = True,
correct_chips_alignment_config: dict = None,
export_mode: dict = None,
- roi = None,
- container_roi = None,
- container_roi_file = None) -> NormalizedData:
+ roi=None,
+ container_roi=None,
+ container_roi_file=None,
+) -> NormalizedData:
"""normalize the sample data with ob data using proton charge and shutter counts
-
+
Args:
sample_dict (dict): dictionary with sample run numbers and their data
{base_name_run1: {'full_path': full_path, 'nexus': nexus_path},
@@ -119,7 +111,7 @@ def normalization_with_list_of_full_path(
correct_chips_alignment_config (dict): configuration for chips alignment correction
export_mode (dict): dictionary with export options
roi (Roi): region of interest for full spectrum normalization
- container_roi (Roi): region of interest for container only normalization
+ container_roi (Roi): region of interest for container only normalization
container_roi_file (str): file path to container ROI file (scitiff format) (will take precedence over container_roi if both are provided)
Returns:
@@ -144,33 +136,39 @@ def normalization_with_list_of_full_path(
export_corrected_stack_of_sample_data = export_mode.get("sample_stack", False)
export_corrected_stack_of_ob_data = export_mode.get("ob_stack", False)
- export_corrected_stack_of_normalized_data = export_mode.get("normalized_stack", False)
+ export_corrected_stack_of_normalized_data = export_mode.get(
+ "normalized_stack", False
+ )
# export_corrected_stack_of_combined_normalized_data = export_mode.get("combined_normalized_stack", False)
- export_corrected_integrated_sample_data = export_mode.get("sample_integrated", False)
+ export_corrected_integrated_sample_data = export_mode.get(
+ "sample_integrated", False
+ )
export_corrected_integrated_ob_data = export_mode.get("ob_integrated", False)
- export_corrected_integrated_normalized_data = export_mode.get("normalized_integrated", False)
+ export_corrected_integrated_normalized_data = export_mode.get(
+ "normalized_integrated", False
+ )
# export_corrected_integrated_combined_normalized_data = export_mode.get("combined_normalized_integrated", False)
export_x_axis = export_mode.get("x_axis", True)
logging.info("Input parameters:")
-
+
logging.info(f"\t{sample_dict = }")
logging.info(f"\t{combine_samples =}")
logging.info(f"\t{ob_dict = }")
logging.info(f"\t{dc_dict = }")
logging.info(f"{spectra_array = }")
-
- logging.info(f"")
- logging.info(f"- export mode:")
+
+ logging.info("")
+ logging.info("- export mode:")
logging.info(f"\t{export_corrected_stack_of_sample_data = }")
logging.info(f"\t{export_corrected_stack_of_ob_data = }")
logging.info(f"\t{export_corrected_stack_of_normalized_data = }")
logging.info(f"\t{export_corrected_integrated_sample_data = }")
logging.info(f"\t{export_corrected_integrated_ob_data = }")
logging.info(f"\t{export_corrected_integrated_normalized_data = }")
-
- logging.info(f"")
+
+ logging.info("")
logging.info(f"{roi =}")
logging.info(f"{export_x_axis = }")
logging.info(f"{proton_charge_flag = }")
@@ -180,34 +178,35 @@ def normalization_with_list_of_full_path(
logging.info(f"{max_iterations = }")
logging.info(f"{correct_chips_alignment_flag = }")
logging.info(f"{distance_source_detector_m = }")
- logging.info(f"")
-
+ logging.info("")
+
sample_master_dict, sample_status_metadata = create_master_dict(
- data_dictionary=sample_dict,
- data_type=DataType.sample,
+ data_dictionary=sample_dict,
+ data_type=DataType.sample,
instrument=instrument,
spectra_array=spectra_array,
)
ob_master_dict, ob_status_metadata = create_master_dict(
- data_dictionary=ob_dict,
- data_type=DataType.ob,
+ data_dictionary=ob_dict,
+ data_type=DataType.ob,
instrument=instrument,
spectra_array=spectra_array,
)
dc_master_dict, dc_status_metadata = create_master_dict(
- data_dictionary=dc_dict,
- data_type=DataType.dc,
+ data_dictionary=dc_dict,
+ data_type=DataType.dc,
instrument=instrument,
spectra_array=spectra_array,
)
# load ob images ===============================
load_images(master_dict=ob_master_dict, data_type=DataType.ob, verbose=verbose)
-
+
if proton_charge_flag:
normalized_by_proton_charge = (
- sample_status_metadata.all_proton_charge_found and ob_status_metadata.all_proton_charge_found
+ sample_status_metadata.all_proton_charge_found
+ and ob_status_metadata.all_proton_charge_found
)
else:
normalized_by_proton_charge = False
@@ -215,28 +214,36 @@ def normalization_with_list_of_full_path(
# combine all ob images
ob_data_combined, ob_sum_proton_charge = combine_images(
- data_type=DataType.ob,
- master_dict=ob_master_dict,
- use_proton_charge=normalized_by_proton_charge,
- replace_zeros_by_nan=replace_ob_zeros_by_nan_flag,
- replace_zeros_by_local_median=replace_ob_zeros_by_local_median_flag,
- kernel_size_for_local_median=kernel_size_for_local_median,
- max_iterations=max_iterations,
- )
+ data_type=DataType.ob,
+ master_dict=ob_master_dict,
+ use_proton_charge=normalized_by_proton_charge,
+ replace_zeros_by_nan=replace_ob_zeros_by_nan_flag,
+ replace_zeros_by_local_median=replace_ob_zeros_by_local_median_flag,
+ kernel_size_for_local_median=kernel_size_for_local_median,
+ max_iterations=max_iterations,
+ )
logging.info(f"{ob_data_combined.shape = }")
logging.info(f"{ob_sum_proton_charge = }")
- logging.info(f"number of NaN in ob_data_combined data: {np.sum(np.isnan(ob_data_combined))}")
- logging.info(f"number of inf in ob_data_combined data: {np.sum(np.isinf(ob_data_combined))}")
- logging.info(f"number of zeros in ob_data_combined data: {np.sum(ob_data_combined == 0)} ")
+ logging.info(
+ f"number of NaN in ob_data_combined data: {np.sum(np.isnan(ob_data_combined))}"
+ )
+ logging.info(
+ f"number of inf in ob_data_combined data: {np.sum(np.isinf(ob_data_combined))}"
+ )
+ logging.info(
+ f"number of zeros in ob_data_combined data: {np.sum(ob_data_combined == 0)} "
+ )
if correct_chips_alignment_flag:
- correct_chips_alignment(ob_data_combined,
- correct_chips_alignment_config,
- verbose=verbose)
+ correct_chips_alignment(
+ ob_data_combined, correct_chips_alignment_config, verbose=verbose
+ )
- ob_data_combined_for_spectrum = calculate_ob_data_combined_used_by_spectrum_normalization(roi=roi,
- ob_data_combined=ob_data_combined,
- verbose=verbose)
+ ob_data_combined_for_spectrum = (
+ calculate_ob_data_combined_used_by_spectrum_normalization(
+ roi=roi, ob_data_combined=ob_data_combined, verbose=verbose
+ )
+ )
# export ob data if requested
first_ob_run_number = list(ob_master_dict.keys())[0]
@@ -247,150 +254,180 @@ def normalization_with_list_of_full_path(
export_corrected_stack_of_ob_data,
export_corrected_integrated_ob_data,
ob_data_combined,
- spectra_file_name=ob_master_dict[first_ob_run_number][MasterDictKeys.spectra_file_name],
+ spectra_file_name=ob_master_dict[first_ob_run_number][
+ MasterDictKeys.spectra_file_name
+ ],
spectra_array=spectra_array,
)
# load dc images ================================
- dc_master_dict = load_images(master_dict=dc_master_dict, data_type=DataType.dc, verbose=verbose)
+ dc_master_dict = load_images(
+ master_dict=dc_master_dict, data_type=DataType.dc, verbose=verbose
+ )
# combine all dc images
dc_data_combined = combine_dc_images(dc_master_dict)
-
+
if dc_data_combined is not None:
-
if correct_chips_alignment_flag:
- dc_data_combined = correct_chips_alignment(dc_data_combined,
- correct_chips_alignment_config,
- verbose=verbose)
+ dc_data_combined = correct_chips_alignment(
+ dc_data_combined, correct_chips_alignment_config, verbose=verbose
+ )
if (dc_data_combined is not None) and (roi is not None):
- dc_data_combined_for_spectrum = [np.sum(np.sum(_data, axis=0), axis=0) for _data in dc_data_combined]
+ dc_data_combined_for_spectrum = [
+ np.sum(np.sum(_data, axis=0), axis=0) for _data in dc_data_combined
+ ]
logging.info(f"\t{np.shape(dc_data_combined) = }")
logging.info(f"\t{np.shape(dc_data_combined_for_spectrum) = }")
-
+
if correct_chips_alignment_flag:
- dc_data_combined_for_spectrum = correct_chips_alignment(dc_data_combined_for_spectrum,
- correct_chips_alignment_config,
- verbose=verbose)
+ dc_data_combined_for_spectrum = correct_chips_alignment(
+ dc_data_combined_for_spectrum,
+ correct_chips_alignment_config,
+ verbose=verbose,
+ )
else:
- logging.info(f"\tno roi provided! Skipping the normalization of spectrum.")
+ logging.info("\tno roi provided! Skipping the normalization of spectrum.")
dc_data_combined_for_spectrum = None
# load sample images ===============================
- load_images(master_dict=sample_master_dict, data_type=DataType.sample, verbose=verbose)
+ load_images(
+ master_dict=sample_master_dict, data_type=DataType.sample, verbose=verbose
+ )
if correct_chips_alignment_flag:
- correct_all_samples_chips_alignment(sample_master_dict,
- correct_chips_alignment_config,
- verbose=verbose)
+ correct_all_samples_chips_alignment(
+ sample_master_dict, correct_chips_alignment_config, verbose=verbose
+ )
normalized_data = {}
integrated_normalized_data = {}
spectrum_normalized_data = {}
if combine_samples:
-
- # combine all sample images and then perform normalization
+ # combine all sample images and then perform normalization
sample_data_combined, sample_sum_proton_charge = combine_images(
- data_type=DataType.sample,
- master_dict=sample_master_dict,
- use_proton_charge=normalized_by_proton_charge,
- # use_monitor_counts=normalized_by_monitor_counts,
- replace_zeros_by_nan=False,
- replace_zeros_by_local_median=replace_ob_zeros_by_local_median_flag,
- kernel_size_for_local_median=kernel_size_for_local_median,
- max_iterations=max_iterations,
- )
+ data_type=DataType.sample,
+ master_dict=sample_master_dict,
+ use_proton_charge=normalized_by_proton_charge,
+ # use_monitor_counts=normalized_by_monitor_counts,
+ replace_zeros_by_nan=False,
+ replace_zeros_by_local_median=replace_ob_zeros_by_local_median_flag,
+ kernel_size_for_local_median=kernel_size_for_local_median,
+ max_iterations=max_iterations,
+ )
logging.info("**********************************")
list_run_number = list(sample_master_dict.keys())
- str_list_run_number = '_'.join([str(r) for r in list_run_number])
+ str_list_run_number = "_".join([str(r) for r in list_run_number])
logging.info(f"normalization of combined sample runs {list_run_number}")
if verbose:
display(HTML(f"Normalization of combined sample runs {list_run_number}"))
-
+
# get statistics of sample data
- logging_statistics_of_data(data=sample_data_combined, data_type=DataType.sample_combined)
-
+ logging_statistics_of_data(
+ data=sample_data_combined, data_type=DataType.sample_combined
+ )
+
if correct_chips_alignment_flag:
- sample_data_combined = correct_chips_alignment(sample_data_combined,
- correct_chips_alignment_config,
- verbose=verbose)
-
+ sample_data_combined = correct_chips_alignment(
+ sample_data_combined, correct_chips_alignment_config, verbose=verbose
+ )
+
if normalized_by_proton_charge:
- logging.info(f"Normalizing by proton charge")
+ logging.info("Normalizing by proton charge")
logging.info(f"\t{sample_sum_proton_charge = }")
if verbose:
- display(HTML(f"Normalizing by proton charge"))
- sample_data_combined /= sample_sum_proton_charge
+ display(HTML("Normalizing by proton charge"))
+ sample_data_combined /= sample_sum_proton_charge
if (container_roi is not None) or (container_roi_file is not None):
- logging.info(f"Applying container normalization:")
- logging.info(f"\t {container_roi = }")
- logging.info(f"\t {container_roi_file = }")
- if verbose:
- display(HTML(f"Applying container normalization:"))
-
- sample_data_combined, container_roi_file = normalize_by_container_roi(
- sample_data=sample_data_combined,
- container_roi=container_roi,
- container_roi_file=container_roi_file,
- output_folder=output_folder,
- sample_run_number=str_list_run_number,
- )
- if verbose and (container_roi_file is not None):
- display(HTML(f"Container roi file created: {container_roi_file}."))
+ logging.info("Applying container normalization:")
+ logging.info(f"\t {container_roi = }")
+ logging.info(f"\t {container_roi_file = }")
+ if verbose:
+ display(HTML("Applying container normalization:"))
+
+ sample_data_combined, container_roi_file = normalize_by_container_roi(
+ sample_data=sample_data_combined,
+ container_roi=container_roi,
+ container_roi_file=container_roi_file,
+ output_folder=output_folder,
+ sample_run_number=str_list_run_number,
+ )
+ if verbose and (container_roi_file is not None):
+ display(HTML(f"Container roi file created: {container_roi_file}."))
# export sample data after correction if requested
- if export_corrected_stack_of_sample_data or export_corrected_integrated_sample_data:
+ if (
+ export_corrected_stack_of_sample_data
+ or export_corrected_integrated_sample_data
+ ):
export_sample_images(
output_folder,
export_corrected_stack_of_sample_data,
export_corrected_integrated_sample_data,
str_list_run_number,
sample_data_combined,
- spectra_file_name=sample_master_dict[list_run_number[0]][MasterDictKeys.spectra_file_name],
+ spectra_file_name=sample_master_dict[list_run_number[0]][
+ MasterDictKeys.spectra_file_name
+ ],
spectra_array=spectra_array,
)
- _normalized_dict = perform_normalization(sample_data_combined, ob_data_combined, dc_data_combined)
- _normalized_data = _normalized_dict['normalized_data']
- _integrated_normalized_data = _normalized_dict['integrated_normalized_data']
+ _normalized_dict = perform_normalization(
+ sample_data_combined, ob_data_combined, dc_data_combined
+ )
+ _normalized_data = _normalized_dict["normalized_data"]
+ _integrated_normalized_data = _normalized_dict["integrated_normalized_data"]
integrated_normalized_data[str_list_run_number] = _integrated_normalized_data
normalized_data[str_list_run_number] = _normalized_data
- _spectrum_normalized_data = perform_spectrum_normalization(roi=roi,
- sample_data=sample_data_combined,
- ob_data_combined_for_spectrum=ob_data_combined_for_spectrum,
- dc_data_combined=dc_data_combined,
- dc_data_combined_for_spectrum=dc_data_combined_for_spectrum)
+ _spectrum_normalized_data = perform_spectrum_normalization(
+ roi=roi,
+ sample_data=sample_data_combined,
+ ob_data_combined_for_spectrum=ob_data_combined_for_spectrum,
+ dc_data_combined=dc_data_combined,
+ dc_data_combined_for_spectrum=dc_data_combined_for_spectrum,
+ )
spectrum_normalized_data[str_list_run_number] = _spectrum_normalized_data
# normalized_data[_sample_run_number] = np.array(np.divide(_sample_data, ob_data_combined))
logging.info(f"{normalized_data[str_list_run_number].shape = }")
logging.info(f"{normalized_data[str_list_run_number].dtype = }")
- logging.info(f"number of NaN in normalized data: {np.sum(np.isnan(normalized_data[str_list_run_number]))}")
- logging.info(f"number of inf in normalized data: {np.sum(np.isinf(normalized_data[str_list_run_number]))}")
+ logging.info(
+ f"number of NaN in normalized data: {np.sum(np.isnan(normalized_data[str_list_run_number]))}"
+ )
+ logging.info(
+ f"number of inf in normalized data: {np.sum(np.isinf(normalized_data[str_list_run_number]))}"
+ )
- detector_delay_us = sample_master_dict[list_run_number[0]][MasterDictKeys.detector_delay_us]
- time_spectra = sample_master_dict[list_run_number[0]][MasterDictKeys.list_spectra]
+ detector_delay_us = sample_master_dict[list_run_number[0]][
+ MasterDictKeys.detector_delay_us
+ ]
+ time_spectra = sample_master_dict[list_run_number[0]][
+ MasterDictKeys.list_spectra
+ ]
dict_to_return.tof_array = time_spectra
if time_spectra is None:
- logging.info("Time spectra is None, cannot convert to lambda or energy arrays")
+ logging.info(
+ "Time spectra is None, cannot convert to lambda or energy arrays"
+ )
lambda_array = None
energy_array = None
-
- else:
- logging.info(f"We have a time_spectra!")
+ else:
+ logging.info("We have a time_spectra!")
logging.info(f"time spectra shape: {time_spectra.shape}")
-
+
if detector_delay_us is None:
detector_delay_us = 0.0
- logging.info(f"detector delay is None, setting it to {detector_delay_us} us")
+ logging.info(
+ f"detector delay is None, setting it to {detector_delay_us} us"
+ )
logging.info(f"we have a detector delay of {detector_delay_us} us")
@@ -423,42 +460,46 @@ def normalization_with_list_of_full_path(
logging.info(f"Preview: {preview = }")
if preview:
- preview_normalized_data(sample_data_combined,
- ob_data_combined,
- dc_data_combined,
- normalized_data,
- lambda_array,
- energy_array,
- detector_delay_us,
- str_list_run_number,
- combine_samples,
- _spectrum_normalized_data,
- roi,
- )
-
- if export_corrected_integrated_normalized_data or export_corrected_stack_of_normalized_data:
-
- export_normalized_data(ob_master_dict=ob_master_dict,
- sample_master_dict=sample_master_dict,
+ preview_normalized_data(
+ sample_data_combined,
+ ob_data_combined,
+ dc_data_combined,
+ normalized_data,
+ lambda_array,
+ energy_array,
+ detector_delay_us,
+ str_list_run_number,
+ combine_samples,
+ _spectrum_normalized_data,
+ roi,
+ )
+
+ if (
+ export_corrected_integrated_normalized_data
+ or export_corrected_stack_of_normalized_data
+ ):
+ export_normalized_data(
+ ob_master_dict=ob_master_dict,
+ sample_master_dict=sample_master_dict,
_sample_run_number=str_list_run_number,
- normalized_data=normalized_data,
+ normalized_data=normalized_data,
integrated_normalized_data=integrated_normalized_data,
_spectrum_normalized_data=_spectrum_normalized_data,
- lambda_array=lambda_array,
- energy_array=energy_array,
- output_folder=output_folder,
+ lambda_array=lambda_array,
+ energy_array=energy_array,
+ output_folder=output_folder,
export_corrected_stack_of_normalized_data=export_corrected_stack_of_normalized_data,
export_corrected_integrated_normalized_data=export_corrected_integrated_normalized_data,
roi=roi,
spectra_array=spectra_array,
- spectra_file=sample_master_dict[list_run_number[0]][MasterDictKeys.spectra_file_name])
+ spectra_file=sample_master_dict[list_run_number[0]][
+ MasterDictKeys.spectra_file_name
+ ],
+ )
else:
-
-
# normalize the sample data
for _sample_run_number in sample_master_dict.keys():
-
logging.info("**********************************")
logging.info(f"normalization of run {_sample_run_number}")
if verbose:
@@ -468,26 +509,26 @@ def normalization_with_list_of_full_path(
# get statistics of sample data
logging_statistics_of_data(data=_sample_data, data_type=DataType.sample)
-
+
if correct_chips_alignment_flag:
- _sample_data = correct_chips_alignment(_sample_data,
- correct_chips_alignment_config,
- verbose=verbose)
-
+ _sample_data = correct_chips_alignment(
+ _sample_data, correct_chips_alignment_config, verbose=verbose
+ )
+
if normalized_by_proton_charge:
if verbose:
- display(HTML(f"Normalizing by proton charge"))
- _sample_data = normalize_by_proton_charge(sample_master_dict,
- _sample_run_number,
- _sample_data)
+ display(HTML("Normalizing by proton charge"))
+ _sample_data = normalize_by_proton_charge(
+ sample_master_dict, _sample_run_number, _sample_data
+ )
if (container_roi is not None) or (container_roi_file is not None):
- logging.info(f"Applying container normalization:")
+ logging.info("Applying container normalization:")
logging.info(f"\t {container_roi = }")
logging.info(f"\t {container_roi_file = }")
if verbose:
- display(HTML(f"Applying container normalization:"))
-
+ display(HTML("Applying container normalization:"))
+
_sample_data, container_roi_file = normalize_by_container_roi(
sample_data=_sample_data,
container_roi=container_roi,
@@ -504,54 +545,74 @@ def normalization_with_list_of_full_path(
logging.info(f"{ob_data_combined.dtype = }")
# export sample data after correction if requested
- if export_corrected_stack_of_sample_data or export_corrected_integrated_sample_data:
+ if (
+ export_corrected_stack_of_sample_data
+ or export_corrected_integrated_sample_data
+ ):
export_sample_images(
output_folder,
export_corrected_stack_of_sample_data,
export_corrected_integrated_sample_data,
_sample_run_number,
_sample_data,
- spectra_file_name=sample_master_dict[_sample_run_number][MasterDictKeys.spectra_file_name],
+ spectra_file_name=sample_master_dict[_sample_run_number][
+ MasterDictKeys.spectra_file_name
+ ],
spectra_array=spectra_array,
)
- _normalized_dict = perform_normalization(_sample_data, ob_data_combined, dc_data_combined)
- _normalized_data = _normalized_dict['normalized_data']
- _integrated_normalized_data = _normalized_dict['integrated_normalized_data']
+ _normalized_dict = perform_normalization(
+ _sample_data, ob_data_combined, dc_data_combined
+ )
+ _normalized_data = _normalized_dict["normalized_data"]
+ _integrated_normalized_data = _normalized_dict["integrated_normalized_data"]
integrated_normalized_data[_sample_run_number] = _integrated_normalized_data
normalized_data[_sample_run_number] = _normalized_data
- _spectrum_normalized_data = perform_spectrum_normalization(roi=roi,
- sample_data=_sample_data,
- ob_data_combined_for_spectrum=ob_data_combined_for_spectrum,
- dc_data_combined=dc_data_combined,
- dc_data_combined_for_spectrum=dc_data_combined_for_spectrum)
+ _spectrum_normalized_data = perform_spectrum_normalization(
+ roi=roi,
+ sample_data=_sample_data,
+ ob_data_combined_for_spectrum=ob_data_combined_for_spectrum,
+ dc_data_combined=dc_data_combined,
+ dc_data_combined_for_spectrum=dc_data_combined_for_spectrum,
+ )
spectrum_normalized_data[_sample_run_number] = _spectrum_normalized_data
# normalized_data[_sample_run_number] = np.array(np.divide(_sample_data, ob_data_combined))
logging.info(f"{normalized_data[_sample_run_number].shape = }")
logging.info(f"{normalized_data[_sample_run_number].dtype = }")
- logging.info(f"number of NaN in normalized data: {np.sum(np.isnan(normalized_data[_sample_run_number]))}")
- logging.info(f"number of inf in normalized data: {np.sum(np.isinf(normalized_data[_sample_run_number]))}")
+ logging.info(
+ f"number of NaN in normalized data: {np.sum(np.isnan(normalized_data[_sample_run_number]))}"
+ )
+ logging.info(
+ f"number of inf in normalized data: {np.sum(np.isinf(normalized_data[_sample_run_number]))}"
+ )
- detector_delay_us = sample_master_dict[_sample_run_number][MasterDictKeys.detector_delay_us]
- time_spectra = sample_master_dict[_sample_run_number][MasterDictKeys.list_spectra]
+ detector_delay_us = sample_master_dict[_sample_run_number][
+ MasterDictKeys.detector_delay_us
+ ]
+ time_spectra = sample_master_dict[_sample_run_number][
+ MasterDictKeys.list_spectra
+ ]
dict_to_return.tof_array = time_spectra
if time_spectra is None:
- logging.info("Time spectra is None, cannot convert to lambda or energy arrays")
+ logging.info(
+ "Time spectra is None, cannot convert to lambda or energy arrays"
+ )
lambda_array = None
energy_array = None
-
- else:
- logging.info(f"We have a time_spectra!")
+ else:
+ logging.info("We have a time_spectra!")
logging.info(f"time spectra shape: {time_spectra.shape}")
-
+
if detector_delay_us is None:
detector_delay_us = 0.0
- logging.info(f"detector delay is None, setting it to {detector_delay_us} us")
+ logging.info(
+ f"detector delay is None, setting it to {detector_delay_us} us"
+ )
logging.info(f"we have a detector delay of {detector_delay_us} us")
@@ -584,36 +645,43 @@ def normalization_with_list_of_full_path(
logging.info(f"Preview: {preview = }")
if preview:
- preview_normalized_data(_sample_data,
- ob_data_combined,
- dc_data_combined,
- normalized_data,
- lambda_array,
- energy_array,
- detector_delay_us,
- _sample_run_number,
- combine_samples,
- _spectrum_normalized_data,
- roi,
- )
-
- if export_corrected_integrated_normalized_data or export_corrected_stack_of_normalized_data:
-
- export_normalized_data(ob_master_dict=ob_master_dict,
- sample_master_dict=sample_master_dict,
+ preview_normalized_data(
+ _sample_data,
+ ob_data_combined,
+ dc_data_combined,
+ normalized_data,
+ lambda_array,
+ energy_array,
+ detector_delay_us,
+ _sample_run_number,
+ combine_samples,
+ _spectrum_normalized_data,
+ roi,
+ )
+
+ if (
+ export_corrected_integrated_normalized_data
+ or export_corrected_stack_of_normalized_data
+ ):
+ export_normalized_data(
+ ob_master_dict=ob_master_dict,
+ sample_master_dict=sample_master_dict,
_sample_run_number=_sample_run_number,
- normalized_data=normalized_data,
+ normalized_data=normalized_data,
integrated_normalized_data=integrated_normalized_data,
_spectrum_normalized_data=_spectrum_normalized_data,
- lambda_array=lambda_array,
- energy_array=energy_array,
- output_folder=output_folder,
+ lambda_array=lambda_array,
+ energy_array=energy_array,
+ output_folder=output_folder,
export_corrected_stack_of_normalized_data=export_corrected_stack_of_normalized_data,
export_corrected_integrated_normalized_data=export_corrected_integrated_normalized_data,
roi=roi,
spectra_array=spectra_array,
- spectra_file=sample_master_dict[_sample_run_number][MasterDictKeys.spectra_file_name])
-
+ spectra_file=sample_master_dict[_sample_run_number][
+ MasterDictKeys.spectra_file_name
+ ],
+ )
+
# if combine_samples:
# # combine all normalized data
@@ -627,7 +695,7 @@ def normalization_with_list_of_full_path(
# # if preview, display the combined normalized data
# if preview:
-
+
# fig, axs3 = plt.subplots(1, 2, figsize=(2 * PLOT_SIZE.width, PLOT_SIZE.height))
# normalized_data_integrated = np.nanmean(combined_normalized_data, axis=0)
# im2 = axs3[0].imshow(normalized_data_integrated, cmap="gray")
@@ -641,7 +709,7 @@ def normalization_with_list_of_full_path(
# else:
# profile_step1 = np.nanmean(combined_normalized_data, axis=1)
# profile = np.nanmean(profile_step1, axis=1)
-
+
# axs3[1].plot(profile, 'o', markersize=MARKERSIZE, label=_label)
# axs3[1].set_xlabel("File image index")
# axs3[1].set_ylabel("Transmission (a.u.)")
@@ -673,15 +741,15 @@ def normalization_with_list_of_full_path(
# fig, axs5 = plt.subplots(1, 2, figsize=(2 * PLOT_SIZE.width, PLOT_SIZE.height))
# logging.info(f"{np.shape(profile) = }")
- # axs5[0].plot(lambda_array, combined_spectrum_normalized_data, "r*",
- # markersize=MARKERSIZE,
+ # axs5[0].plot(lambda_array, combined_spectrum_normalized_data, "r*",
+ # markersize=MARKERSIZE,
# label="spectrum normalization of ROI")
# axs5[0].set_xlabel("Lambda (A)")
# axs5[0].set_ylabel("mean of full image")
# axs5[0].legend()
# axs5[1].plot(energy_array, combined_spectrum_normalized_data, "r*",
- # markersize=MARKERSIZE,
+ # markersize=MARKERSIZE,
# label="spectrum normalization of ROI")
# axs5[1].set_xlabel("Energy (eV)")
# axs5[1].set_ylabel("Transmission (a.u.)")
@@ -700,9 +768,9 @@ def normalization_with_list_of_full_path(
# export_corrected_stack_of_combined_normalized_data=export_corrected_stack_of_combined_normalized_data,
# lambda_array=lambda_array,
# energy_array=energy_array,
- # output_folder=output_folder,
+ # output_folder=output_folder,
# spectra_array=spectra_array)
-
+
# else:
# dict_to_return.data = normalized_data
@@ -713,4 +781,3 @@ def normalization_with_list_of_full_path(
display(HTML("Normalization and export is done!"))
return dict_to_return
-
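Downstream code then reads the NormalizedData object returned above; a sketch, assuming the attributes declared at the top of this file are filled in by the elided return-assembly code:

    result = normalization_with_list_of_full_path(...)  # arguments as documented above
    # result.data         -> {run_key: normalized image stack}
    # result.tof_array    -> TOF axis (s)
    # result.lambda_array -> wavelength axis (Angstrom)
    # result.energy_array -> energy axis (eV)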
diff --git a/notebooks/__code/normalization_tof/normalization_tof.py b/notebooks/__code/normalization_tof/normalization_tof.py
index cfedd34f..838d3418 100644
--- a/notebooks/__code/normalization_tof/normalization_tof.py
+++ b/notebooks/__code/normalization_tof/normalization_tof.py
@@ -1,10 +1,8 @@
import glob
import logging
import logging as notebook_logging
-from multiprocessing.util import debug
import os
from pathlib import Path
-from arrow import get
import numpy as np
import pandas as pd
@@ -20,12 +18,16 @@
from __code._utilities.list import extract_list_of_runs_from_string
from __code._utilities.nexus import extract_file_path_from_nexus
from __code.normalization_tof import DataType
-from __code._utilities.time import get_current_time_in_special_file_name_format
-from __code._utilities.json import save_json, load_json
+from __code._utilities.json import load_json
# from __code.ipywe.myfileselector import MyFileSelectorPanel
from __code.ipywe.fileselector import FileSelectorPanel as MyFileSelectorPanel
-from __code.normalization_tof import DetectorType, autoreduce_dir, distance_source_detector_m, raw_dir
+from __code.normalization_tof import (
+ DetectorType,
+ autoreduce_dir,
+ distance_source_detector_m,
+ raw_dir,
+)
from __code.normalization_tof.config import DEBUG_DATA, timepix1_config, timepix3_config
from __code.normalization_tof.normalization_for_timepix1_timepix3 import (
load_data_using_multithreading,
@@ -39,9 +41,9 @@ class NormalizationTof:
sample_folder = None
sample_run_numbers = None
sample_run_numbers_selected = None
-
+
# if the spectra file is missing, the program will create the spectra array on the fly
- spectra_array = None
+ spectra_array = None
spectra_file_found = True
list_spectra_file_found = []
@@ -58,7 +60,7 @@ class NormalizationTof:
dc_run_numbers_selected = None
output_folder = None
-
+
# {'full_path_data': {'data': None, 'nexus': None}}
dict_sample = {}
dict_ob = {}
@@ -72,15 +74,15 @@ class NormalizationTof:
dict_dc_data = None
roi = None # full spectrum ROI
-
+
# container
- container_roi = None # container only ROI
+ container_roi = None # container only ROI
default_roi = Roi(left=50, top=50, width=200, height=200)
default_container_roi = Roi(left=150, top=150, width=40, height=40)
we_need_to_automatically_save_the_container_roi = False
rect_container = None
container_roi_file = None
-
+
def initialize(self):
LOG_PATH = "/SNS/VENUS/shared/log/"
file_name, ext = os.path.splitext(os.path.basename(__file__))
@@ -98,21 +100,26 @@ def initialize(self):
# logging.info(f"Creating instance of {cls.__name__}")
def __init__(self, working_dir=None, debug=False):
-
self.initialize()
if debug:
self.working_dir = DEBUG_DATA.working_dir
self.shared_dir = self.working_dir + "/shared"
self.output_dir = DEBUG_DATA.output_folder
- self.default_roi = Roi(left=DEBUG_DATA.roi[0], top=DEBUG_DATA.roi[1],
- width=DEBUG_DATA.roi[2], height=DEBUG_DATA.roi[3])
- self.default_container_roi = Roi(left=DEBUG_DATA.container_roi[0],
- top=DEBUG_DATA.container_roi[1],
- width=DEBUG_DATA.container_roi[2],
- height=DEBUG_DATA.container_roi[3])
+ self.default_roi = Roi(
+ left=DEBUG_DATA.roi[0],
+ top=DEBUG_DATA.roi[1],
+ width=DEBUG_DATA.roi[2],
+ height=DEBUG_DATA.roi[3],
+ )
+ self.default_container_roi = Roi(
+ left=DEBUG_DATA.container_roi[0],
+ top=DEBUG_DATA.container_roi[1],
+ width=DEBUG_DATA.container_roi[2],
+ height=DEBUG_DATA.container_roi[3],
+ )
self.detector_type = DEBUG_DATA.detector_type
-
+
else:
self.working_dir = working_dir
self.shared_dir = os.path.join(self.working_dir, "shared")
@@ -137,7 +144,9 @@ def __init__(self, working_dir=None, debug=False):
notebook_logging.info(f"nexus folder: {self.nexus_folder}")
notebook_logging.info(f"Shared dir: {self.shared_dir}")
- display(HTML("Select detector type"))
+ display(
+ HTML("Select detector type")
+ )
self.detector_type_widget = widgets.Dropdown(
options=[DetectorType.tpx1_legacy, DetectorType.tpx1, DetectorType.tpx3],
value=self.detector_type,
@@ -145,7 +154,7 @@ def __init__(self, working_dir=None, debug=False):
disabled=False,
)
display(self.detector_type_widget)
-
+
def reset_sample_dicts(self):
self.dict_sample = {}
self.dict_short_name_full_path["sample"] = {}
@@ -168,7 +177,9 @@ def reset_dc_dicts(self):
def setup_default_paths(self):
notebook_logging.info("Setting up default paths...")
self.detector_type = self.detector_type_widget.value
- self.raw_dir = Path(raw_dir[self.instrument][self.detector_type][0]) / str(self.ipts)
+ self.raw_dir = Path(raw_dir[self.instrument][self.detector_type][0]) / str(
+ self.ipts
+ )
self.autoreduce_dir = (
Path(autoreduce_dir[self.instrument][self.detector_type][0])
/ str(self.ipts)
@@ -200,7 +211,9 @@ def select_sample_run_numbers(self):
)
self.sample_run_numbers_widget = widgets.Textarea(
- value=str_sample_run_numbers, placeholder="", layout=widgets.Layout(width="400px")
+ value=str_sample_run_numbers,
+ placeholder="",
+ layout=widgets.Layout(width="400px"),
)
vertical_layout = widgets.VBox(
[
@@ -230,9 +243,13 @@ def retrieve_file_path_from_nexus(self, run_number):
Retrieve the full path to the NeXus file for the given run number.
This function should be implemented to read the NeXus file and extract the path.
"""
- notebook_logging.info(f"Retrieving file path from NeXus for run number: {run_number}")
+ notebook_logging.info(
+ f"Retrieving file path from NeXus for run number: {run_number}"
+ )
# Placeholder implementation, replace with actual logic to read NeXus file
- nexus_file_path = Path(self.nexus_folder) / f"{self.instrument.upper()}_{run_number}.nxs.h5"
+ nexus_file_path = (
+ Path(self.nexus_folder) / f"{self.instrument.upper()}_{run_number}.nxs.h5"
+ )
notebook_logging.info(f"\tNeXus file path: {nexus_file_path}")
if nexus_file_path.exists():
return extract_file_path_from_nexus(nexus_file_path)
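So, for a hypothetical run 8022 on VENUS (the instrument whose shared log path appears earlier in this file), the lookup above resolves to something like:

    from pathlib import Path

    nexus_folder = "/path/to/IPTS-XXXX/nexus"  # placeholder
    run_number = 8022                          # hypothetical run
    nexus_file_path = Path(nexus_folder) / f"VENUS_{run_number}.nxs.h5"
    # -> /path/to/IPTS-XXXX/nexus/VENUS_8022.nxs.h5, which extract_file_path_from_nexus()
    #    then opens to read the raw-data location stored inside the NeXus file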
@@ -243,7 +260,9 @@ def extract_full_path(self, run_number=None):
"""
Extract the full path to the run number based on the detector type.
"""
- notebook_logging.info(f"Extracting full path for run number: {run_number} with detector type: {self.detector_type}")
+ notebook_logging.info(
+ f"Extracting full path for run number: {run_number} with detector type: {self.detector_type}"
+ )
if run_number is None:
raise ValueError("Run number must be provided")
@@ -261,7 +280,7 @@ def extract_full_path(self, run_number=None):
file_path = Path(self.raw_dir) / file_path
if file_path is None:
raise ValueError(f"No full path file found for run number {run_number}")
-
+
return file_path
else:
@@ -283,19 +302,21 @@ def display_infos(self, input_full_path=None, spectra_file_found=True):
spectra_cell_color = "green" if spectra_file_found else "red"
# present result in a table
- display(HTML(f"""
+ display(
+ HTML(f"""
Information for run: {os.path.basename(input_full_path)}
| Nbr TIFF | Images height | Images width | Data Type | Spectra File Found |
| {nbr_tiff} | {shape[0]} | {shape[1]} | {dtype} | {spectra_file_found} |
- """))
+ """)
+ )
def _is_spectra_file_found_and_list(self, full_path):
list_files = glob.glob(os.path.join(full_path, "*_Spectra.txt"))
if len(list_files) == 0:
return False, None
-
+
return os.path.exists(list_files[0]), list_files[0]
def check_sample(self):
@@ -308,7 +329,9 @@ def check_sample(self):
display(HTML("Sample run numbers selected:"))
if self.sample_run_numbers_widget.value.strip() != "":
- list_of_runs = extract_list_of_runs_from_string(self.sample_run_numbers_widget.value)
+ list_of_runs = extract_list_of_runs_from_string(
+ self.sample_run_numbers_widget.value
+ )
notebook_logging.info(f"\t{list_of_runs = }")
list_of_sample_full_path = []
@@ -317,46 +340,79 @@ def check_sample(self):
_full_path = self.extract_full_path(run_number=_run)
list_of_sample_full_path.append(_full_path)
except TypeError as e:
- notebook_logging.error(f"Error extracting full path for run number {_run}: {e}")
- display(HTML(f"Error extracting full path for run number {_run}: File not found!"))
+ notebook_logging.error(
+ f"Error extracting full path for run number {_run}: {e}"
+ )
+ display(
+ HTML(
+ f"Error extracting full path for run number {_run}: File not found!"
+ )
+ )
continue
logging.info(f"\t{list_of_sample_full_path = }")
for _file_full_path in list_of_sample_full_path:
-
if os.path.exists(_file_full_path):
- notebook_logging.info(f"\tSample run number {_file_full_path} - FOUND")
- is_valid_run, report_dict = self.check_folder_is_valid(_file_full_path)
+ notebook_logging.info(
+ f"\tSample run number {_file_full_path} - FOUND"
+ )
+ is_valid_run, report_dict = self.check_folder_is_valid(
+ _file_full_path
+ )
if is_valid_run:
nbr_tiff = report_dict["nbr_tiff"]
self.check_nbr_tiff[DataType.sample].append(nbr_tiff)
- notebook_logging.info(f"\tSample run number {_file_full_path} - FOUND with {nbr_tiff} tif* files")
- display(HTML(f"{_file_full_path} - OK"))
+ notebook_logging.info(
+ f"\tSample run number {_file_full_path} - FOUND with {nbr_tiff} tif* files"
+ )
+ display(
+ HTML(
+ f"{_file_full_path} - OK"
+ )
+ )
self.dict_sample[_file_full_path] = {}
- self.dict_short_name_full_path["sample"][os.path.basename(_file_full_path)] = _file_full_path
-
- _is_spectra_file_found, spectra_file_name = self._is_spectra_file_found_and_list(_file_full_path)
+ self.dict_short_name_full_path["sample"][
+ os.path.basename(_file_full_path)
+ ] = _file_full_path
+
+ _is_spectra_file_found, spectra_file_name = (
+ self._is_spectra_file_found_and_list(_file_full_path)
+ )
if not _is_spectra_file_found:
self.spectra_file_found = False
else:
self.list_spectra_file_found.append(spectra_file_name)
-
- self.display_infos(input_full_path=_file_full_path,
- spectra_file_found=self.spectra_file_found)
+
+ self.display_infos(
+ input_full_path=_file_full_path,
+ spectra_file_found=self.spectra_file_found,
+ )
else:
- display(HTML(f"{_file_full_path} - EMPTY!"))
+ display(
+ HTML(
+ f"{_file_full_path} - EMPTY!"
+ )
+ )
else:
- notebook_logging.info(f"\tSample run number {_file_full_path} - NOT FOUND")
- display(HTML(f"{_file_full_path} - NOT FOUND!"))
+ notebook_logging.info(
+ f"\tSample run number {_file_full_path} - NOT FOUND"
+ )
+ display(
+ HTML(
+ f"{_file_full_path} - NOT FOUND!"
+ )
+ )
else:
- notebook_logging.info(f"Sample run numbers selected: {self.sample_run_numbers_selected}")
+ notebook_logging.info(
+ f"Sample run numbers selected: {self.sample_run_numbers_selected}"
+ )
if self.sample_run_numbers_selected is None:
- display(HTML(f"No sample runs selected!"))
+ display(HTML("No sample runs selected!"))
return
-
+
for _run in self.sample_run_numbers_selected:
_run = os.path.abspath(_run)
if os.path.exists(_run):
@@ -367,35 +423,54 @@ def check_sample(self):
nbr_tiff = report_dict["nbr_tiff"]
self.check_nbr_tiff[DataType.sample].append(nbr_tiff)
display(HTML(f"{_run} - OK"))
- notebook_logging.info(f"\tfolder seems to be a valid folder containing {nbr_tiff} tif* files")
+ notebook_logging.info(
+ f"\tfolder seems to be a valid folder containing {nbr_tiff} tif* files"
+ )
self.dict_sample[_run] = {}
- self.dict_short_name_full_path["sample"][os.path.basename(_run)] = _run
-
- _is_spectra_file_found, spectra_file_name = self._is_spectra_file_found_and_list(_run)
+ self.dict_short_name_full_path["sample"][
+ os.path.basename(_run)
+ ] = _run
+
+ _is_spectra_file_found, spectra_file_name = (
+ self._is_spectra_file_found_and_list(_run)
+ )
if not _is_spectra_file_found:
self.spectra_file_found = False
else:
self.list_spectra_file_found.append(spectra_file_name)
-
- self.display_infos(input_full_path=_run,
- spectra_file_found=self.spectra_file_found)
+
+ self.display_infos(
+ input_full_path=_run,
+ spectra_file_found=self.spectra_file_found,
+ )
else:
display(HTML(f"{_run} - EMPTY!"))
else:
- display(HTML(f"{_run} - NOT FOUND! - ERROR!"))
+ display(
+ HTML(
+ f"{_run} - NOT FOUND! - ERROR!"
+ )
+ )
notebook_logging.info(f"\tSample run number {_run} - NOT FOUND!")
-
+
self.sample_run_numbers_selected = None
if len(set(self.check_nbr_tiff[DataType.sample])) > 1:
- display(HTML(f"Warning: Different number of TIFF files found in selected sample runs: {self.check_nbr_tiff[DataType.sample]}"))
- notebook_logging.info(f"WARNING:Different number of TIFF files found in selected sample runs: {self.check_nbr_tiff[DataType.sample]}")
+ display(
+ HTML(
+ f"Warning: Different number of TIFF files found in selected sample runs: {self.check_nbr_tiff[DataType.sample]}"
+ )
+ )
+ notebook_logging.info(
+                f"WARNING: Different number of TIFF files found in selected sample runs: {self.check_nbr_tiff[DataType.sample]}"
+ )
def select_ob_folder(self):
- self.select_folder(instruction="Browse ob top folder", next_function=self.ob_folder_selected)
+ self.select_folder(
+ instruction="Browse ob top folder", next_function=self.ob_folder_selected
+ )
def select_ob_run_numbers(self):
-
if self.debug:
ob_runs = DEBUG_DATA.ob_runs_selected
ob_run_numbers_list = []
@@ -409,10 +484,14 @@ def select_ob_run_numbers(self):
else:
str_ob_run_numbers = ""
- ob_label = widgets.HTML(value="List of ob run numbers (ex: 8705, 8707)")
+ ob_label = widgets.HTML(
+ value="List of ob run numbers (ex: 8705, 8707)"
+ )
self.ob_run_numbers_widget = widgets.Textarea(
- value=str_ob_run_numbers, placeholder="", layout=widgets.Layout(width="400px")
+ value=str_ob_run_numbers,
+ placeholder="",
+ layout=widgets.Layout(width="400px"),
)
vertical_layout = widgets.VBox(
[
@@ -441,7 +520,9 @@ def check_ob(self):
self.reset_ob_dicts()
if self.ob_run_numbers_widget.value.strip() != "":
- list_of_runs = extract_list_of_runs_from_string(self.ob_run_numbers_widget.value)
+ list_of_runs = extract_list_of_runs_from_string(
+ self.ob_run_numbers_widget.value
+ )
notebook_logging.info(f"\t{list_of_runs = }")
list_of_ob_full_path = []
@@ -450,43 +531,75 @@ def check_ob(self):
_full_path = self.extract_full_path(run_number=_run)
list_of_ob_full_path.append(_full_path)
except TypeError as e:
- notebook_logging.error(f"Error extracting full path for run number {_run}: {e}")
- display(HTML(f"Error extracting full path for run number {_run}: File not found!"))
+ notebook_logging.error(
+ f"Error extracting full path for run number {_run}: {e}"
+ )
+ display(
+ HTML(
+ f"Error extracting full path for run number {_run}: File not found!"
+ )
+ )
continue
for _file_full_path in list_of_ob_full_path:
if os.path.exists(_file_full_path):
notebook_logging.info(f"\tOB run number {_file_full_path} - FOUND")
- is_valid_run, report_dict = self.check_folder_is_valid(_file_full_path)
+ is_valid_run, report_dict = self.check_folder_is_valid(
+ _file_full_path
+ )
if is_valid_run:
nbr_tiff = report_dict["nbr_tiff"]
self.check_nbr_tiff[DataType.ob].append(nbr_tiff)
- notebook_logging.info(f"\tOB run number {_file_full_path} - FOUND with {nbr_tiff} tif* files")
- display(HTML(f"{_file_full_path} - OK"))
+ notebook_logging.info(
+ f"\tOB run number {_file_full_path} - FOUND with {nbr_tiff} tif* files"
+ )
+ display(
+ HTML(
+ f"{_file_full_path} - OK"
+ )
+ )
self.dict_ob[_file_full_path] = {}
- self.dict_short_name_full_path["ob"][os.path.basename(_file_full_path)] = _file_full_path
-
- _is_spectra_file_found, spectra_file_name = self._is_spectra_file_found_and_list(_file_full_path)
+ self.dict_short_name_full_path["ob"][
+ os.path.basename(_file_full_path)
+ ] = _file_full_path
+
+ _is_spectra_file_found, spectra_file_name = (
+ self._is_spectra_file_found_and_list(_file_full_path)
+ )
if not _is_spectra_file_found:
self.spectra_file_found = False
else:
self.list_spectra_file_found.append(spectra_file_name)
-
- self.display_infos(input_full_path=_file_full_path,
- spectra_file_found=self.spectra_file_found)
-
+
+ self.display_infos(
+ input_full_path=_file_full_path,
+ spectra_file_found=self.spectra_file_found,
+ )
+
else:
- display(HTML(f"{_file_full_path} - EMPTY!"))
+ display(
+ HTML(
+ f"{_file_full_path} - EMPTY!"
+ )
+ )
else:
- notebook_logging.info(f"\tOB run number {_file_full_path} - NOT FOUND")
- display(HTML(f"{_file_full_path} - NOT FOUND!"))
+ notebook_logging.info(
+ f"\tOB run number {_file_full_path} - NOT FOUND"
+ )
+ display(
+ HTML(
+ f"{_file_full_path} - NOT FOUND!"
+ )
+ )
else:
- notebook_logging.info(f"OB run numbers selected: {self.ob_run_numbers_selected}")
+ notebook_logging.info(
+ f"OB run numbers selected: {self.ob_run_numbers_selected}"
+ )
if self.ob_run_numbers_selected is None:
- display(HTML(f"No OB runs selected!"))
+ display(HTML("No OB runs selected!"))
return
-
+
for _run in self.ob_run_numbers_selected:
_run = os.path.abspath(_run)
if os.path.exists(_run):
@@ -497,40 +610,66 @@ def check_ob(self):
nbr_tiff = report_dict["nbr_tiff"]
self.check_nbr_tiff[DataType.ob].append(nbr_tiff)
display(HTML(f"{_run} - OK"))
- notebook_logging.info(f"\tfolder seems to be a valid folder containing {nbr_tiff} tif* files")
- self.dict_short_name_full_path["ob"][os.path.basename(_run)] = _run
+ notebook_logging.info(
+ f"\tfolder seems to be a valid folder containing {nbr_tiff} tif* files"
+ )
+ self.dict_short_name_full_path["ob"][os.path.basename(_run)] = (
+ _run
+ )
self.dict_ob[_run] = {}
- _is_spectra_file_found, spectra_file_name = self._is_spectra_file_found_and_list(_run)
+ _is_spectra_file_found, spectra_file_name = (
+ self._is_spectra_file_found_and_list(_run)
+ )
if not _is_spectra_file_found:
self.spectra_file_found = False
else:
self.list_spectra_file_found.append(spectra_file_name)
-
- self.display_infos(input_full_path=_run,
- spectra_file_found=self.spectra_file_found)
-
+
+ self.display_infos(
+ input_full_path=_run,
+ spectra_file_found=self.spectra_file_found,
+ )
+
else:
display(HTML(f"{_run} - EMPTY!"))
else:
- display(HTML(f"{_run} - NOT FOUND! - ERROR!"))
+ display(
+ HTML(
+ f"{_run} - NOT FOUND! - ERROR!"
+ )
+ )
notebook_logging.info(f"\tOB run number {_run} - NOT FOUND!")
self.ob_run_numbers_selected = None
if len(set(self.check_nbr_tiff[DataType.ob])) > 1:
- display(HTML(f"Warning: Different number of TIFF files found in selected OB runs: {self.check_nbr_tiff[DataType.ob]}"))
- notebook_logging.info(f"WARNING: Different number of TIFF files found in selected OB runs: {self.check_nbr_tiff[DataType.ob]}")
+ display(
+ HTML(
+ f"Warning: Different number of TIFF files found in selected OB runs: {self.check_nbr_tiff[DataType.ob]}"
+ )
+ )
+ notebook_logging.info(
+ f"WARNING: Different number of TIFF files found in selected OB runs: {self.check_nbr_tiff[DataType.ob]}"
+ )
else:
- if self.check_nbr_tiff[DataType.ob][0] != self.check_nbr_tiff[DataType.sample][0]:
- display(HTML(f"Not valid OB runs found (different number of OB and sample TIFF files)!"))
+ if (
+ self.check_nbr_tiff[DataType.ob][0]
+ != self.check_nbr_tiff[DataType.sample][0]
+ ):
+ display(
+ HTML(
+                        "No valid OB runs found (different number of OB and sample TIFF files)!"
+ )
+ )
notebook_logging.info("WARNING: Not valid OB runs found!")
     def select_dc_folder(self):
- self.select_folder(instruction="Browse dc top folder", next_function=self.dc_folder_selected)
+ self.select_folder(
+ instruction="Browse dc top folder", next_function=self.dc_folder_selected
+ )
def select_dc_run_numbers(self):
-
if self.debug:
dc_runs = DEBUG_DATA.dc_runs_selected
if dc_runs:
@@ -545,10 +684,14 @@ def select_dc_run_numbers(self):
else:
str_dc_run_numbers = ""
- dc_label = widgets.HTML(value="List of dc run numbers (ex: 8705, 8707)")
+ dc_label = widgets.HTML(
+ value="List of dc run numbers (ex: 8705, 8707)"
+ )
self.dc_run_numbers_widget = widgets.Textarea(
- value=str_dc_run_numbers, placeholder="", layout=widgets.Layout(width="400px")
+ value=str_dc_run_numbers,
+ placeholder="",
+ layout=widgets.Layout(width="400px"),
)
vertical_layout = widgets.VBox(
[
@@ -577,7 +720,9 @@ def check_dc(self):
self.reset_dc_dicts()
if self.dc_run_numbers_widget.value.strip() != "":
- list_of_runs = extract_list_of_runs_from_string(self.dc_run_numbers_widget.value)
+ list_of_runs = extract_list_of_runs_from_string(
+ self.dc_run_numbers_widget.value
+ )
notebook_logging.info(f"\t{list_of_runs = }")
list_of_dc_full_path = []
@@ -588,25 +733,47 @@ def check_dc(self):
for _file_full_path in list_of_dc_full_path:
if os.path.exists(_file_full_path):
notebook_logging.info(f"\tDC run number {_file_full_path} - FOUND")
- is_valid_run, report_dict = self.check_folder_is_valid(_file_full_path)
+ is_valid_run, report_dict = self.check_folder_is_valid(
+ _file_full_path
+ )
if is_valid_run:
nbr_tiff = report_dict["nbr_tiff"]
self.check_nbr_tiff[DataType.dc].append(nbr_tiff)
- notebook_logging.info(f"\tDC run number {_file_full_path} - FOUND with {nbr_tiff} tif* files")
- display(HTML(f"{_file_full_path} - OK"))
+ notebook_logging.info(
+ f"\tDC run number {_file_full_path} - FOUND with {nbr_tiff} tif* files"
+ )
+ display(
+ HTML(
+ f"{_file_full_path} - OK"
+ )
+ )
self.dict_dc[_file_full_path] = {}
- self.dict_short_name_full_path["dc"][os.path.basename(_file_full_path)] = _file_full_path
+ self.dict_short_name_full_path["dc"][
+ os.path.basename(_file_full_path)
+ ] = _file_full_path
self.display_infos(input_full_path=_file_full_path)
else:
- display(HTML(f"{_file_full_path} - EMPTY!"))
+ display(
+ HTML(
+ f"{_file_full_path} - EMPTY!"
+ )
+ )
else:
- notebook_logging.info(f"\tDC run number {_file_full_path} - NOT FOUND")
- display(HTML(f"{_file_full_path} - NOT FOUND!"))
+ notebook_logging.info(
+ f"\tDC run number {_file_full_path} - NOT FOUND"
+ )
+ display(
+ HTML(
+ f"{_file_full_path} - NOT FOUND!"
+ )
+ )
else:
- notebook_logging.info(f"DC run numbers selected: {self.dc_run_numbers_selected}")
+ notebook_logging.info(
+ f"DC run numbers selected: {self.dc_run_numbers_selected}"
+ )
if self.dc_run_numbers_selected is None:
- display(HTML(f"No DC runs selected!"))
+ display(HTML("No DC runs selected!"))
return
for _run in self.dc_run_numbers_selected:
@@ -619,24 +786,45 @@ def check_dc(self):
nbr_tiff = report_dict["nbr_tiff"]
self.check_nbr_tiff[DataType.dc].append(nbr_tiff)
display(HTML(f"{_run} - OK"))
- notebook_logging.info(f"\tfolder seems to be a valid folder containing {nbr_tiff} tif* files")
- self.dict_short_name_full_path["dc"][os.path.basename(_run)] = _run
+ notebook_logging.info(
+ f"\tfolder seems to be a valid folder containing {nbr_tiff} tif* files"
+ )
+ self.dict_short_name_full_path["dc"][os.path.basename(_run)] = (
+ _run
+ )
self.dict_dc[_run] = {}
self.display_infos(input_full_path=_run)
else:
display(HTML(f"{_run} - EMPTY!"))
else:
- display(HTML(f"{_run} - NOT FOUND! - ERROR!"))
+ display(
+ HTML(
+ f"{_run} - NOT FOUND! - ERROR!"
+ )
+ )
notebook_logging.info(f"\tDC run number {_run} - NOT FOUND!")
self.dc_run_numbers_selected = None
if len(set(self.check_nbr_tiff[DataType.dc])) > 1:
- display(HTML(f"Warning: Different number of TIFF files found in selected DC runs: {self.check_nbr_tiff[DataType.dc]}"))
- notebook_logging.info(f"WARNING: Different number of TIFF files found in selected DC runs: {self.check_nbr_tiff[DataType.dc]}")
+ display(
+ HTML(
+ f"Warning: Different number of TIFF files found in selected DC runs: {self.check_nbr_tiff[DataType.dc]}"
+ )
+ )
+ notebook_logging.info(
+ f"WARNING: Different number of TIFF files found in selected DC runs: {self.check_nbr_tiff[DataType.dc]}"
+ )
else:
- if self.check_nbr_tiff[DataType.dc][0] != self.check_nbr_tiff[DataType.sample][0]:
- display(HTML(f"No valid DC runs found (different number of DC and sample TIFF files)"))
+ if (
+ self.check_nbr_tiff[DataType.dc][0]
+ != self.check_nbr_tiff[DataType.sample][0]
+ ):
+ display(
+ HTML(
+ "No valid DC runs found (different number of DC and sample TIFF files)"
+ )
+ )
notebook_logging.info("WARNING: No valid DC runs found!")
def _load_and_get_integrated_ob(self, full_path):
@@ -650,11 +838,17 @@ def _load_and_get_integrated_ob(self, full_path):
if self.dict_ob[full_path].get("data") is None:
notebook_logging.info("No data found for this OB run, loading it now...")
# load the data from the OB run
- notebook_logging.info(f"\tFull path to OB run: {os.path.basename(full_path)}")
+ notebook_logging.info(
+ f"\tFull path to OB run: {os.path.basename(full_path)}"
+ )
list_tiff = retrieve_list_of_tif(full_path)
notebook_logging.info(f"\tNumber of TIFF files found: {len(list_tiff)}")
if len(list_tiff) == 0:
- display(HTML(f"No TIFF files found in {full_path}!"))
+ display(
+ HTML(
+ f"No TIFF files found in {full_path}!"
+ )
+ )
notebook_logging.error(f"No TIFF files found in {full_path}!")
return None
data = load_data_using_multithreading(list_tiff, combine_tof=True)
@@ -691,7 +885,9 @@ def preview_ob_runs(self):
im = ax.imshow(integrated_ob, cmap="viridis", aspect="auto")
ax.set_aspect("equal")
ax.set_title(f"Integrated OB run: {list_ob_short_runs[0]}")
- fig.colorbar(im, ax=ax, orientation="vertical", label="Intensity", shrink=0.8)
+ fig.colorbar(
+ im, ax=ax, orientation="vertical", label="Intensity", shrink=0.8
+ )
plt.show()
else:
@@ -730,30 +926,46 @@ def display_ob_run(short_name):
def checking_spectra_files(self):
if not self.spectra_file_found:
-
if len(self.list_spectra_file_found) > 0:
- display(HTML("Some spectra files were NOT found but on the good side, some spectra files were found. The first one of those spectra file will be used for all!"))
- notebook_logging.info("Some spectra files were NOT found but on the good side, some spectra files were found. The first one of those spectra file will be used for all!")
+ display(
+ HTML(
+                        "Some spectra files were NOT found; on the bright side, some were found. The first of those spectra files will be used for all runs!"
+ )
+ )
+ notebook_logging.info(
+                    "Some spectra files were NOT found; on the bright side, some were found. The first of those spectra files will be used for all runs!"
+ )
spectra_file_to_use = self.list_spectra_file_found[0]
pd_spectra = pd.read_csv(spectra_file_to_use, sep=",", header=0)
self.spectra_array = np.array(pd_spectra["shutter_time"].values)
- notebook_logging.info(f"Using spectra file: {spectra_file_to_use} with {len(self.spectra_array)} TOF channels.")
+ notebook_logging.info(
+ f"Using spectra file: {spectra_file_to_use} with {len(self.spectra_array)} TOF channels."
+ )
else:
- display(HTML("Error: No spectra files were found in the selected runs! We need to create the spectra file."))
- notebook_logging.error("Error: No spectra files were found in the selected runs! Manually creating the spectra file needed!")
-
+ display(
+ HTML(
+ "Error: No spectra files were found in the selected runs! We need to create the spectra file."
+ )
+ )
+ notebook_logging.error(
+                "Error: No spectra files were found in the selected runs! The spectra file must be created manually!"
+ )
+
self.manually_create_spectra_array()
-
+
else:
- display(HTML("All selected runs have the spectra file."))
+ display(
+ HTML(
+ "All selected runs have the spectra file."
+ )
+ )
notebook_logging.info("All selected runs have the spectra file.")
def manually_create_spectra_array(self):
-
- label = widgets.Label(f"Enter the TOF bins size (in nS)")
+        label = widgets.Label("Enter the TOF bin size (in nS)")
self.tof_bin_size_widget = widgets.IntText(
value=700,
min=100,
@@ -774,37 +986,56 @@ def manually_create_spectra_array(self):
def create_spectra_arrays_clicked(self, b):
tof_bin_size = self.tof_bin_size_widget.value
- notebook_logging.info(f"Creating spectra arrays with TOF bin size: {tof_bin_size} nS")
+ notebook_logging.info(
+ f"Creating spectra arrays with TOF bin size: {tof_bin_size} nS"
+ )
# get the number of TOF channels from the first sample run
first_sample_run = list(self.dict_sample.keys())[0]
list_tiff = retrieve_list_of_tif(first_sample_run)
- if len(list_tiff) == 0:
- display(HTML(f"No TIFF files found in {first_sample_run}!"))
+ if len(list_tiff) == 0:
+ display(
+ HTML(
+ f"No TIFF files found in {first_sample_run}!"
+ )
+ )
notebook_logging.error(f"No TIFF files found in {first_sample_run}!")
return
nbr_files = len(list_tiff)
tof_bin_size_in_s = tof_bin_size * 1e-9 # convert nS to seconds
-
- spectra_array = np.arange(0.0001e-7, nbr_files * tof_bin_size_in_s, tof_bin_size_in_s) # do not start at 0 to avoid log binning issues in iBeatles
+
+ spectra_array = np.arange(
+ 0.0001e-7, nbr_files * tof_bin_size_in_s, tof_bin_size_in_s
+ ) # do not start at 0 to avoid log binning issues in iBeatles
self.spectra_array = spectra_array
- display(HTML(f"Created spectra arrays with TOF bin size: {tof_bin_size} nS!"))
- notebook_logging.info(f"Created spectra arrays with TOF bin size: {tof_bin_size} nS and {len(spectra_array) = } ... Done!")
+ display(
+ HTML(
+ f"Created spectra arrays with TOF bin size: {tof_bin_size} nS!"
+ )
+ )
+ notebook_logging.info(
+ f"Created spectra arrays with TOF bin size: {tof_bin_size} nS and {len(spectra_array) = } ... Done!"
+ )
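As a worked example of the array built in create_spectra_arrays_clicked above, assuming the default 700 ns bin width and, say, three TIFF images in the first sample run:

    import numpy as np

    tof_bin_size_in_s = 700 * 1e-9  # 7e-7 s per TOF channel
    nbr_files = 3                   # hypothetical image count
    spectra_array = np.arange(0.0001e-7, nbr_files * tof_bin_size_in_s, tof_bin_size_in_s)
    # -> roughly [1.0e-11, 7.0e-07, 1.4e-06] s: one value per image, starting just
    #    above zero so that iBeatles' log binning never sees t = 0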
def select_output_folder(self):
-
if (self.spectra_file_found is False) and (self.spectra_array is None):
- display(HTML("You need to create the spectra arrays before selecting the output folder!"))
- notebook_logging.error("You need to create the spectra arrays before selecting the output folder!")
+ display(
+ HTML(
+ "You need to create the spectra arrays before selecting the output folder!"
+ )
+ )
+ notebook_logging.error(
+ "You need to create the spectra arrays before selecting the output folder!"
+ )
return
if self.debug:
self.output_folder_selected(DEBUG_DATA.output_folder)
else:
self.select_folder(
- instruction="Select output folder",
- start_dir=self.output_dir,
+ instruction="Select output folder",
+ start_dir=self.output_dir,
next_function=self.output_folder_selected,
newdir_toolbar_button=True,
)
@@ -812,11 +1043,13 @@ def select_output_folder(self):
def retrieve_nexus_file_path(self):
"""
Retrieve the NeXus file paths for sample, OB and DC.
-
+
This function assumes that the NeXus files are named in a specific format"""
all_nexus_files_found = True
- notebook_logging.info("Retrieving NeXus file paths for sample, OB and DC runs...")
+ notebook_logging.info(
+ "Retrieving NeXus file paths for sample, OB and DC runs..."
+ )
notebook_logging.info("\tworking with sample runs:")
for full_path in self.dict_sample.keys():
@@ -880,8 +1113,8 @@ def retrieve_nexus_file_path(self):
return all_nexus_files_found
def _on_remove_container_flag_change(self, change):
- if change['new']:
- # enable the widgets
+ if change["new"]:
+ # enable the widgets
disable_widgets = False
else:
# disable the widgets
@@ -889,10 +1122,13 @@ def _on_remove_container_flag_change(self, change):
self.remove_container_options_flag.disabled = disable_widgets
def settings(self):
-
# check here that the user selected a folder for output
if self.output_folder is None:
- display(HTML("You forgot to select an output folder!"))
+ display(
+ HTML(
+ "You forgot to select an output folder!"
+ )
+ )
return
all_nexus_found = self.retrieve_nexus_file_path()
@@ -901,43 +1137,70 @@ def settings(self):
tpx3_disabled_flag = True if self.detector_type == DetectorType.tpx3 else False
# regular normalization
- display(HTML("Normalization pixel by pixel"))
- display(widgets.Checkbox(description="Normalization of images pixel by pixel",
- value=True,
- disabled=True,
- layout=widgets.Layout(width="600px")))
+ display(
+ HTML(
+ "Normalization pixel by pixel"
+ )
+ )
+ display(
+ widgets.Checkbox(
+ description="Normalization of images pixel by pixel",
+ value=True,
+ disabled=True,
+ layout=widgets.Layout(width="600px"),
+ )
+ )
display(HTML("
"))
# normalization of full spectrum of ROI
- display(HTML("Normalization of full spectrum of ROI"))
- display(HTML("If checked, normalization will be done as follows. After selecting a region of interest (ROI), for each image, the total counts of that region of the sample will be divided by the total" \
- " counts of the same region of the OB. This will produce a profile of this normalization value for each image."))
- self.full_spectrum_roi_flag = widgets.Checkbox(description="Work on full spectrum of ROI", value=False)
+ display(
+ HTML(
+ "Normalization of full spectrum of ROI"
+ )
+ )
+ display(
+ HTML(
+ "If checked, normalization will be done as follows. After selecting a region of interest (ROI), for each image, the total counts of that region of the sample will be divided by the total"
+ " counts of the same region of the OB. This will produce a profile of this normalization value for each image."
+ )
+ )
+ self.full_spectrum_roi_flag = widgets.Checkbox(
+ description="Work on full spectrum of ROI", value=False
+ )
display(self.full_spectrum_roi_flag)
display(HTML("
"))
# how to combine sample runs if more than 1 sample provided
self.combine_sample_runs_flag = widgets.Checkbox(
- description="Combine sample runs (all sample will produce one normalization output)",
- value=False,
+ description="Combine sample runs (all sample will produce one normalization output)",
+ value=False,
disabled=False,
layout=widgets.Layout(width="600px"),
)
if len(self.dict_sample) > 1:
- display(HTML("How to treat the sample runs"))
+ display(
+ HTML(
+ "How to treat the sample runs"
+ )
+ )
display(self.combine_sample_runs_flag)
display(HTML("
"))
# remove container option
- display(HTML("Remove container"))
- self.remove_container_flag = widgets.Checkbox(description="Do you want to remove container signal?",
- value=False,
- layout=widgets.Layout(width="600px"))
- self.remove_container_flag.observe(self._on_remove_container_flag_change, names='value')
+ display(
+ HTML("Remove container")
+ )
+ self.remove_container_flag = widgets.Checkbox(
+ description="Do you want to remove container signal?",
+ value=False,
+ layout=widgets.Layout(width="600px"),
+ )
+ self.remove_container_flag.observe(
+ self._on_remove_container_flag_change, names="value"
+ )
display(self.remove_container_flag)
- white_space = widgets.Label("\t\t",
- layout=widgets.Layout(width="150px"))
+ white_space = widgets.Label("\t\t", layout=widgets.Layout(width="150px"))
self.remove_container_options_flag = widgets.RadioButtons(
options=[
"Select a ROI of the sample containing only the container signal",
@@ -947,44 +1210,47 @@ def settings(self):
disabled=True,
layout=widgets.Layout(width="500px"),
)
-
- hori_layout = widgets.HBox([white_space,
- self.remove_container_options_flag],
- layout=widgets.Layout(align_items="center",
- width="100%"))
+
+ hori_layout = widgets.HBox(
+ [white_space, self.remove_container_options_flag],
+ layout=widgets.Layout(align_items="center", width="100%"),
+ )
display(hori_layout)
display(HTML("
"))
# normalization options
- display(HTML("What to take into account for the normalization"))
+ display(
+ HTML(
+ "What to take into account for the normalization"
+ )
+ )
if all_nexus_found:
_value = True
- _disabled=False
+ _disabled = False
else:
_value = False
_disabled = True
- self.proton_charge_flag = widgets.Checkbox(description="Proton charge",
- value=_value,
- disabled=_disabled)
-
- self.monitor_counts_flag = widgets.Checkbox(description="Monitor counts",
- value=False,
- disabled=_disabled)
-
-
+ self.proton_charge_flag = widgets.Checkbox(
+ description="Proton charge", value=_value, disabled=_disabled
+ )
+
+ self.monitor_counts_flag = widgets.Checkbox(
+ description="Monitor counts", value=False, disabled=_disabled
+ )
+
## FIXME
shutter_counts_value = False
# shutter_counts_value = not tpx3_disabled_flag
-
+
self.shutter_counts_flag = widgets.Checkbox(
- description="Shutter counts", value=shutter_counts_value, disabled=tpx3_disabled_flag
+ description="Shutter counts",
+ value=shutter_counts_value,
+ disabled=tpx3_disabled_flag,
)
self.correct_chips_alignment_flag = widgets.Checkbox(
- description="Correct chips alignment",
- disabled=False,
- value=True
+ description="Correct chips alignment", disabled=False, value=True
)
vertical_layout = widgets.VBox(
@@ -999,41 +1265,74 @@ def settings(self):
display(HTML("
"))
- display(HTML("How to handle OB zeros - May take much more time!"))
-
- display(widgets.Checkbox(description="Ignore zeros in OB during normalization",
- value=True,
- disabled=True,
- layout=widgets.Layout(width="600px")))
-
- self.replace_ob_zeros_by_local_median_flag = widgets.Checkbox(description="Replace zeros by local median",
- value=False,
- layout=widgets.Layout(width="500px"))
- self.replace_ob_zeros_by_local_median_flag.observe(self._on_replace_ob_zeros_by_local_median_flag_change,
- names='value')
-
+ display(
+ HTML(
+ "How to handle OB zeros - May take much more time!"
+ )
+ )
+
+ display(
+ widgets.Checkbox(
+ description="Ignore zeros in OB during normalization",
+ value=True,
+ disabled=True,
+ layout=widgets.Layout(width="600px"),
+ )
+ )
+
+ self.replace_ob_zeros_by_local_median_flag = widgets.Checkbox(
+ description="Replace zeros by local median",
+ value=False,
+ layout=widgets.Layout(width="500px"),
+ )
+ self.replace_ob_zeros_by_local_median_flag.observe(
+ self._on_replace_ob_zeros_by_local_median_flag_change, names="value"
+ )
+
display(self.replace_ob_zeros_by_local_median_flag)
- kernel_size_label = widgets.Label(value="Kernel size for local median (odd number):",
- layout=widgets.Layout(width="300"))
- self.kernel_size_for_local_median_y = widgets.BoundedIntText(description="y axis:",
- value=3, min=1, max=99, step=2, layout=widgets.Layout(width="150px")
+ kernel_size_label = widgets.Label(
+ value="Kernel size for local median (odd number):",
+ layout=widgets.Layout(width="300"),
)
- self.kernel_size_for_local_median_x = widgets.BoundedIntText(description="x axis:",
- value=3, min=1, max=99, step=2, layout=widgets.Layout(width="150px")
+ self.kernel_size_for_local_median_y = widgets.BoundedIntText(
+ description="y axis:",
+ value=3,
+ min=1,
+ max=99,
+ step=2,
+ layout=widgets.Layout(width="150px"),
)
- self.kernel_size_for_local_median_tof = widgets.BoundedIntText(description="tof axis:",
- value=1, min=1, max=99, step=2, layout=widgets.Layout(width="150px")
+ self.kernel_size_for_local_median_x = widgets.BoundedIntText(
+ description="x axis:",
+ value=3,
+ min=1,
+ max=99,
+ step=2,
+ layout=widgets.Layout(width="150px"),
+ )
+ self.kernel_size_for_local_median_tof = widgets.BoundedIntText(
+ description="tof axis:",
+ value=1,
+ min=1,
+ max=99,
+ step=2,
+ layout=widgets.Layout(width="150px"),
+ )
+ hori_layout = widgets.HBox(
+ [
+ kernel_size_label,
+ self.kernel_size_for_local_median_y,
+ self.kernel_size_for_local_median_x,
+ self.kernel_size_for_local_median_tof,
+ ],
+ hori_layout=widgets.Layout(align_items="center", width="100%"),
)
- hori_layout = widgets.HBox([kernel_size_label,
- self.kernel_size_for_local_median_y,
- self.kernel_size_for_local_median_x,
- self.kernel_size_for_local_median_tof],
- hori_layout=widgets.Layout(align_items="center",
- width="100%"))
display(hori_layout)
- _label = widgets.Label(value="Maximum number of iterations:", layout=widgets.Layout(width="300px"))
+ _label = widgets.Label(
+ value="Maximum number of iterations:", layout=widgets.Layout(width="300px")
+ )
self.maximum_iterations_ui = widgets.BoundedIntText(
value=2,
min=1,
@@ -1041,23 +1340,32 @@ def settings(self):
step=1,
layout=widgets.Layout(width="50px"),
)
- hori_layout = widgets.HBox([_label, self.maximum_iterations_ui],
- hori_layout=widgets.Layout(align_items="center",
- width="100%"))
+ hori_layout = widgets.HBox(
+ [_label, self.maximum_iterations_ui],
+ hori_layout=widgets.Layout(align_items="center", width="100%"),
+ )
display(hori_layout)
display(HTML("
"))
- label = widgets.Label(value="Distance source detector (m)", layout=widgets.Layout(width="200px"))
+ label = widgets.Label(
+ value="Distance source detector (m)", layout=widgets.Layout(width="200px")
+ )
self.distance_source_detector = widgets.FloatText(
- value=distance_source_detector_m[self.instrument], disabled=False, layout=widgets.Layout(width="50px")
+ value=distance_source_detector_m[self.instrument],
+ disabled=False,
+ layout=widgets.Layout(width="50px"),
)
hori_layout = widgets.HBox([label, self.distance_source_detector])
display(hori_layout)
if self.instrument == "SNAP":
- label = widgets.Label(value="Detector offset (us)", layout=widgets.Layout(width="200px"))
- self.detector_offset_us = widgets.FloatText(value=0.0, disabled=False, layout=widgets.Layout(width="50px"))
+ label = widgets.Label(
+ value="Detector offset (us)", layout=widgets.Layout(width="200px")
+ )
+ self.detector_offset_us = widgets.FloatText(
+ value=0.0, disabled=False, layout=widgets.Layout(width="50px")
+ )
hori_layout = widgets.HBox([label, self.detector_offset_us])
display(hori_layout)
@@ -1075,8 +1383,8 @@ def select_container_from_file(self):
self.roi_container_file = MyFileSelectorPanel(
instruction="Select ROI container file",
start_dir=self.output_dir,
- filters={"ROI container files": ["*_container_roi.tiff"]}, # scitiff file
- type='file',
+ filters={"ROI container files": ["*_container_roi.tiff"]}, # scitiff file
+ type="file",
multiple=False,
next=self.load_container_roi_from_file,
)
@@ -1085,24 +1393,33 @@ def select_container_from_file(self):
def load_container_roi_from_file(self, file_path):
self.container_roi_file = file_path
notebook_logging.info(f"Loading container ROI from file: {file_path} ...")
- display(HTML(f"Will use the container ROI file: {file_path}!"))
-
+ display(
+ HTML(
+ f"Will use the container ROI file: {file_path}!"
+ )
+ )
+
master_dict = load_json(file_path)
self.container_integrated_image = master_dict["integrated_image"]
- self.container_roi = Roi(left=master_dict["container_roi"]["left"],
- top=master_dict["container_roi"]["top"],
- width=master_dict["container_roi"]["width"],
- height=master_dict["container_roi"]["height"])
-
- def select_container(self):
+ self.container_roi = Roi(
+ left=master_dict["container_roi"]["left"],
+ top=master_dict["container_roi"]["top"],
+ width=master_dict["container_roi"]["width"],
+ height=master_dict["container_roi"]["height"],
+ )
- # load first sample and display integrated image to select ROI
+ def select_container(self):
+ # load first sample and display integrated image to select ROI
if not self.dict_sample:
display(HTML("No sample runs selected!"))
return
- display(HTML("Select ROI of ONLY the container!"))
- logging.info(f"Selecting ROI of ONLY the container ...")
+ display(
+ HTML(
+ "Select ROI of ONLY the container!"
+ )
+ )
+ logging.info("Selecting ROI of ONLY the container ...")
if self.integrated_data is None:
self.integrated_data = self.get_integrated_data(self.dict_sample)
@@ -1117,61 +1434,89 @@ def select_container(self):
vmin = 0
vmax = int(np.max(integrated_data))
self.vrange_container = [vmin, vmax]
-
+
def container_roi_selection(vrange, left_right, top_bottom):
-
fig, ax = plt.subplots(figsize=(10, 10))
- im = ax.imshow(integrated_data, cmap="viridis", aspect="auto", vmin=vrange[0], vmax=vrange[1])
- cbar = plt.colorbar(im, ax=ax, orientation="vertical", label="Intensity", shrink=0.5)
+ im = ax.imshow(
+ integrated_data,
+ cmap="viridis",
+ aspect="auto",
+ vmin=vrange[0],
+ vmax=vrange[1],
+ )
+ cbar = plt.colorbar(
+ im, ax=ax, orientation="vertical", label="Intensity", shrink=0.5
+ )
logging.info("Updating rectangle ...")
if self.rect_container:
self.rect_container.remove()
-
- self.rect_container = patches.Rectangle((left_right[0], top_bottom[0]), left_right[1]-left_right[0], top_bottom[1]-top_bottom[0], linewidth=1, edgecolor='r', facecolor='none')
+
+ self.rect_container = patches.Rectangle(
+ (left_right[0], top_bottom[0]),
+ left_right[1] - left_right[0],
+ top_bottom[1] - top_bottom[0],
+ linewidth=1,
+ edgecolor="r",
+ facecolor="none",
+ )
ax.add_patch(self.rect_container)
- ax.set_title(f"Select ROI containing only the container")
-
- self.container_roi = Roi(left=left_right[0], top=top_bottom[0], width=left_right[1]-left_right[0], height=top_bottom[1]-top_bottom[0])
+ ax.set_title("Select ROI containing only the container")
+
+ self.container_roi = Roi(
+ left=left_right[0],
+ top=top_bottom[0],
+ width=left_right[1] - left_right[0],
+ height=top_bottom[1] - top_bottom[0],
+ )
widgets_width = "800px"
self.interactive_plot = interactive(
container_roi_selection,
- vrange = widgets.IntRangeSlider(min=0,
- max=int(np.max(integrated_data)),
- step=1,
- value=[0, int(np.max(integrated_data))],
- description="vrange",
- layout=widgets.Layout(width=widgets_width)),
- left_right = widgets.IntRangeSlider(min=0,
- max=integrated_data.shape[1]-1,
- step=1,
- value=[default_left, default_left+default_width],
- description="left_right",
- layout=widgets.Layout(width=widgets_width)),
- top_bottom = widgets.IntRangeSlider(min=0,
- max=integrated_data.shape[0]-1,
- step=1,
- value=[default_top, default_top+default_height],
- description="top_bottom",
- layout=widgets.Layout(width=widgets_width)),
+ vrange=widgets.IntRangeSlider(
+ min=0,
+ max=int(np.max(integrated_data)),
+ step=1,
+ value=[0, int(np.max(integrated_data))],
+ description="vrange",
+ layout=widgets.Layout(width=widgets_width),
+ ),
+ left_right=widgets.IntRangeSlider(
+ min=0,
+ max=integrated_data.shape[1] - 1,
+ step=1,
+ value=[default_left, default_left + default_width],
+ description="left_right",
+ layout=widgets.Layout(width=widgets_width),
+ ),
+ top_bottom=widgets.IntRangeSlider(
+ min=0,
+ max=integrated_data.shape[0] - 1,
+ step=1,
+ value=[default_top, default_top + default_height],
+ description="top_bottom",
+ layout=widgets.Layout(width=widgets_width),
+ ),
)
display(self.interactive_plot)
def select_roi(self):
-
- logging.info(f"Selecting ROI for full spectrum normalization...")
+ logging.info("Selecting ROI for full spectrum normalization...")
# load first sample and display integrated image to select ROI
if not self.dict_sample:
display(HTML("No sample runs selected!"))
return
- display(HTML("Select ROI for full spectrum normalization!"))
+ display(
+ HTML(
+ "Select ROI for full spectrum normalization!"
+ )
+ )
if self.integrated_data is None:
self.integrated_data = self.get_integrated_data(self.dict_sample)
-
+
integrated_data = self.integrated_data
default_left = self.default_roi.left
@@ -1179,112 +1524,143 @@ def select_roi(self):
default_width = self.default_roi.width
default_height = self.default_roi.height
- self.roi = Roi(left=default_left, top=default_top,
- width=default_width, height=default_height)
+ self.roi = Roi(
+ left=default_left,
+ top=default_top,
+ width=default_width,
+ height=default_height,
+ )
-
def roi_selection(vrange, left_right, top_bottom):
-
left, right = left_right
width = right - left
-
+
top, bottom = top_bottom
height = bottom - top
-
+
vmin, vmax = vrange
-
+
fig, ax = plt.subplots(figsize=(10, 10))
- ax.imshow(integrated_data, cmap="viridis", aspect="auto", vmin=vmin, vmax=vmax)
- rect = patches.Rectangle((left, top), width, height, linewidth=1, edgecolor='r', facecolor='none')
+ ax.imshow(
+ integrated_data, cmap="viridis", aspect="auto", vmin=vmin, vmax=vmax
+ )
+ rect = patches.Rectangle(
+ (left, top), width, height, linewidth=1, edgecolor="r", facecolor="none"
+ )
ax.add_patch(rect)
- ax.set_title(f"Select ROI for full spectrum normalization")
+ ax.set_title("Select ROI for full spectrum normalization")
plt.show()
- logging.info(f"Selected ROI - left: {left}, top: {top}, width: {width}, height: {height}")
+ logging.info(
+ f"Selected ROI - left: {left}, top: {top}, width: {width}, height: {height}"
+ )
self.roi = Roi(left=left, top=top, width=width, height=height)
widgets_width = "800px"
interactive_plot = interactive(
roi_selection,
- vrange = widgets.IntRangeSlider(min=0,
- max=int(np.max(integrated_data)),
- step=1,
- value=[0, int(np.max(integrated_data))],
- description="vrange",
- layout=widgets.Layout(width=widgets_width)),
- left_right=widgets.IntRangeSlider(min=0,
- max=integrated_data.shape[1]-1,
- step=1,
- value=[default_left, default_left+default_width],
- description="left_right",
- layout=widgets.Layout(width=widgets_width)),
- top_bottom=widgets.IntRangeSlider(min=0,
- max=integrated_data.shape[0]-1,
- step=1,
- value=[default_top, default_top+default_height],
- description="top_bottom",
- layout=widgets.Layout(width=widgets_width)),
- )
-
+ vrange=widgets.IntRangeSlider(
+ min=0,
+ max=int(np.max(integrated_data)),
+ step=1,
+ value=[0, int(np.max(integrated_data))],
+ description="vrange",
+ layout=widgets.Layout(width=widgets_width),
+ ),
+ left_right=widgets.IntRangeSlider(
+ min=0,
+ max=integrated_data.shape[1] - 1,
+ step=1,
+ value=[default_left, default_left + default_width],
+ description="left_right",
+ layout=widgets.Layout(width=widgets_width),
+ ),
+ top_bottom=widgets.IntRangeSlider(
+ min=0,
+ max=integrated_data.shape[0] - 1,
+ step=1,
+ value=[default_top, default_top + default_height],
+ description="top_bottom",
+ layout=widgets.Layout(width=widgets_width),
+ ),
+ )
+
display(interactive_plot)
def post_settings(self):
-
at_least_one_option = False
if self.full_spectrum_roi_flag.value:
self.select_roi()
at_least_one_option = True
if self.remove_container_flag.value:
-
if self.full_spectrum_roi_flag.value:
- display(HTML("
")) # to improve readability
+ display(HTML("
")) # to improve readability
- if self.remove_container_options_flag.value == "Use previously saved ROI containing only the container signal":
+ if (
+ self.remove_container_options_flag.value
+ == "Use previously saved ROI containing only the container signal"
+ ):
self.select_container_from_file()
self.container_roi_from_file = True
-
+
else:
self.select_container()
self.container_roi_from_file = False
self.we_need_to_automatically_save_the_container_roi = True
-
+
at_least_one_option = True
if not at_least_one_option:
self.roi = None
self.container_roi = None
- display(HTML("Info: You are good to go, nothing to do here!"))
+ display(
+ HTML(
+ "Info: You are good to go, nothing to do here!"
+ )
+ )
def preview_roi_selection_container_imported(self):
- # preview of the roi selected
+ # preview of the roi selected
if self.container_roi is not None:
-
if self.container_roi_from_file:
-
- display(HTML("Preview of the loaded ROI from file ..."))
+ display(
+ HTML(
+ "Preview of the loaded ROI from file ..."
+ )
+ )
integrated_data = self.container_integrated_image
fig, ax = plt.subplots(figsize=(5, 5))
im = ax.imshow(integrated_data, cmap="viridis", aspect="auto")
- cbar = plt.colorbar(im, ax=ax, orientation="vertical", label="Intensity", shrink=0.5)
+ cbar = plt.colorbar(
+ im, ax=ax, orientation="vertical", label="Intensity", shrink=0.5
+ )
- rect = patches.Rectangle((self.container_roi.left, self.container_roi.top),
- self.container_roi.width,
- self.container_roi.height,
- linewidth=1, edgecolor='r', facecolor='none')
+ rect = patches.Rectangle(
+ (self.container_roi.left, self.container_roi.top),
+ self.container_roi.width,
+ self.container_roi.height,
+ linewidth=1,
+ edgecolor="r",
+ facecolor="none",
+ )
ax.add_patch(rect)
- ax.set_title(f"Loaded ROI containing only the container from file")
+ ax.set_title("Loaded ROI containing only the container from file")
plt.show()
-
+
else:
- display(HTML("ROI container selected within that notebook (no need to preview again)!"))
-
+ display(
+ HTML(
+ "ROI container selected within that notebook (no need to preview again)!"
+ )
+ )
+
else:
display(HTML("No container ROI selected!"))
def _on_replace_ob_zeros_by_local_median_flag_change(self, change):
- if change['new']:
+ if change["new"]:
self.kernel_size_for_local_median_y.disabled = False
self.kernel_size_for_local_median_x.disabled = False
self.kernel_size_for_local_median_tof.disabled = False
@@ -1296,19 +1672,20 @@ def _on_replace_ob_zeros_by_local_median_flag_change(self, change):
self.maximum_iterations_ui.disabled = True
def what_to_export(self):
-
if self.combine_sample_runs_flag.value:
combined_flag = True
else:
combined_flag = False
-
+
display(HTML("Stack of images"))
self.export_corrected_stack_of_sample_data = widgets.Checkbox(
- description="Export corrected stack of sample data", layout=widgets.Layout(width="100%"), value=False
+ description="Export corrected stack of sample data",
+ layout=widgets.Layout(width="100%"),
+ value=False,
)
self.export_corrected_stack_of_ob_data = widgets.Checkbox(
- description="Export corrected stack of ob data",
- layout=widgets.Layout(width="100%"),
+ description="Export corrected stack of ob data",
+ layout=widgets.Layout(width="100%"),
value=False,
disabled=True,
)
@@ -1327,37 +1704,39 @@ def what_to_export(self):
# disabled=True,
# )
- list_widget_to_display = [
- self.export_corrected_stack_of_sample_data,
- # self.export_corrected_stack_of_ob_data,
- self.export_corrected_stack_of_normalized_data,
+ list_widget_to_display = [
+ self.export_corrected_stack_of_sample_data,
+ # self.export_corrected_stack_of_ob_data,
+ self.export_corrected_stack_of_normalized_data,
]
# if self.combine_sample_runs_flag.value:
# list_widget_to_display.append(self.export_corrected_stack_of_combined_normalized_data)
- label = widgets.Label(value="Note: Any of the stacks exported will also contain the original spectra file")
+ label = widgets.Label(
+ value="Note: Any of the stacks exported will also contain the original spectra file"
+ )
list_widget_to_display.append(label)
- vertical_layout = widgets.VBox(
- list_widget_to_display
- )
+ vertical_layout = widgets.VBox(list_widget_to_display)
display(vertical_layout)
- display(HTML("Integrated images"))
+ display(
+ HTML("Integrated images")
+ )
self.export_corrected_integrated_sample_data = widgets.Checkbox(
- description="Export corrected integrated sample data",
- layout=widgets.Layout(width="100%"),
- value=False
+ description="Export corrected integrated sample data",
+ layout=widgets.Layout(width="100%"),
+ value=False,
)
self.export_corrected_integrated_ob_data = widgets.Checkbox(
- description="Export corrected integrated ob data",
- layout=widgets.Layout(width="100%"),
+ description="Export corrected integrated ob data",
+ layout=widgets.Layout(width="100%"),
value=False,
disabled=True,
)
self.export_corrected_integrated_normalized_data = widgets.Checkbox(
- description="Export integrated normalized data (integrated sample divide by integrated ob)",
- layout=widgets.Layout(width="100%"),
- value=False
+ description="Export integrated normalized data (integrated sample divide by integrated ob)",
+ layout=widgets.Layout(width="100%"),
+ value=False,
)
self.export_corrected_integrated_combined_normalized_data = widgets.Checkbox(
@@ -1367,13 +1746,15 @@ def what_to_export(self):
disabled=False,
)
- list_widget_to_display = [
- self.export_corrected_integrated_sample_data,
- # self.export_corrected_integrated_ob_data,
- self.export_corrected_integrated_normalized_data,
+ list_widget_to_display = [
+ self.export_corrected_integrated_sample_data,
+ # self.export_corrected_integrated_ob_data,
+ self.export_corrected_integrated_normalized_data,
]
if self.combine_sample_runs_flag.value:
- list_widget_to_display.append(self.export_corrected_integrated_combined_normalized_data)
+ list_widget_to_display.append(
+ self.export_corrected_integrated_combined_normalized_data
+ )
vertical_layout = widgets.VBox(
list_widget_to_display,
@@ -1381,23 +1762,27 @@ def what_to_export(self):
display(vertical_layout)
- if not (self.spectra_array is None) or (self.we_need_to_automatically_save_the_container_roi):
+ if self.spectra_array is not None or (
+ self.we_need_to_automatically_save_the_container_roi
+ ):
display(HTML("Others"))
-
+
if self.spectra_array is None:
self.export_spectra_file = widgets.Checkbox(
- description="Export spectra file used for normalization",
- layout=widgets.Layout(width="100%"),
- value=True)
+ description="Export spectra file used for normalization",
+ layout=widgets.Layout(width="100%"),
+ value=True,
+ )
display(self.export_spectra_file)
-
+
if self.we_need_to_automatically_save_the_container_roi:
self.export_container_roi = widgets.Checkbox(
- description="Export container ROI file used for normalization",
- layout=widgets.Layout(width="100%"),
- value=True)
+ description="Export container ROI file used for normalization",
+ layout=widgets.Layout(width="100%"),
+ value=True,
+ )
display(self.export_container_roi)
-
+
def check_folder_is_valid(self, full_path):
list_tiff = glob.glob(os.path.join(full_path, "*.tif*"))
if list_tiff:
@@ -1407,37 +1792,60 @@ def check_folder_is_valid(self, full_path):
def sample_folder_selected(self, folder_selected):
self.sample_folder = folder_selected
- display(HTML(f"Sample folder selected: {folder_selected}"))
+ display(
+ HTML(
+ f"Sample folder selected: {folder_selected}"
+ )
+ )
def ob_folder_selected(self, folder_selected):
self.ob_folder = folder_selected
- display(HTML(f"Open beam folder selected: {folder_selected}"))
+ display(
+ HTML(
+ f"Open beam folder selected: {folder_selected}"
+ )
+ )
def dc_folder_selected(self, folder_selected):
self.dc_folder = folder_selected
- display(HTML(f"Dark current folder selected: {folder_selected}"))
+ display(
+ HTML(
+ f"Dark current folder selected: {folder_selected}"
+ )
+ )
def save_ob_run_numbers_selected(self, folder_selected):
self.ob_run_numbers_selected = folder_selected
-
+
def save_dc_run_numbers_selected(self, folder_selected):
self.dc_run_numbers_selected = folder_selected
-
+
def output_folder_selected(self, folder_selected):
self.output_folder = folder_selected
display(HTML("Output folder selected:"))
if os.path.exists(folder_selected):
- display(HTML(f"{folder_selected} - FOUND!"))
+ display(
+ HTML(f"{folder_selected} - FOUND!")
+ )
notebook_logging.info(f"Output folder selected: {folder_selected} - FOUND")
else:
- display(HTML(f"{folder_selected} - DOES NOT EXIST and will be CREATED!"))
- notebook_logging.info(f"Output folder selected: {folder_selected} - NOT FOUND and will be CREATED!")
-
- def select_folder(self, instruction="Select a folder",
- next_function=None,
- start_dir=None,
- multiple=False,
- newdir_toolbar_button=False):
+ display(
+ HTML(
+ f"{folder_selected} - DOES NOT EXIST and will be CREATED!"
+ )
+ )
+ notebook_logging.info(
+ f"Output folder selected: {folder_selected} - NOT FOUND and will be CREATED!"
+ )
+
+ def select_folder(
+ self,
+ instruction="Select a folder",
+ next_function=None,
+ start_dir=None,
+ multiple=False,
+ newdir_toolbar_button=False,
+ ):
# go straight to autoreduce/mcp folder
if start_dir is None:
start_dir = self.autoreduce_dir
@@ -1525,9 +1933,11 @@ def run_normalization_with_list_of_runs(self, preview=False):
# shutter_counts_flag=self.shutter_counts_flag.value,
# replace_ob_zeros_by_nan_flag=self.replace_ob_zeros_by_nan_flag.value,
replace_ob_zeros_by_local_median_flag=self.replace_ob_zeros_by_local_median_flag.value,
- kernel_size_for_local_median=(self.kernel_size_for_local_median_y.value,
- self.kernel_size_for_local_median_x.value,
- self.kernel_size_for_local_median_tof.value),
+ kernel_size_for_local_median=(
+ self.kernel_size_for_local_median_y.value,
+ self.kernel_size_for_local_median_x.value,
+ self.kernel_size_for_local_median_tof.value,
+ ),
max_iterations=self.maximum_iterations_ui.value,
correct_chips_alignment_flag=self.correct_chips_alignment_flag.value,
correct_chips_alignment_config=correct_chips_alignment_config,
@@ -1540,12 +1950,12 @@ def run_normalization_with_list_of_runs(self, preview=False):
combine_samples=self.combine_sample_runs_flag.value,
roi=self.roi,
container_roi=self.container_roi,
- container_roi_file=self.container_roi_file
+ container_roi_file=self.container_roi_file,
)
-
+
display(HTML("Normalization completed"))
# display(HTML("Log file: /SNS/VENUS/shared/logs/normalization_for_timepix.log"))
-
+
def profile_of_roi(self):
normalized_data = self.normalized_dict.data
@@ -1554,13 +1964,14 @@ def profile_of_roi(self):
tof_array = self.normalized_dict.tof_array
def plot_normalized_profile_of_roi(index, left=0, top=0, width=50, height=50):
-
_normalized_data = normalized_data[index]
_integrated = np.nanmean(_normalized_data, axis=0)
- _profile = np.nanmean(_normalized_data[:, top : top + height, left : left + width], axis=0)
- fig, axs = plt.subplots(ncols=2, nrows=2,figsize=(10, 6))
- im = axs[0,0].imshow(_integrated, cmap="viridis")
- axs[0,0].add_patch(
+ _profile = np.nanmean(
+ _normalized_data[:, top : top + height, left : left + width], axis=(1, 2)
+ )
+ fig, axs = plt.subplots(ncols=2, nrows=2, figsize=(10, 6))
+ im = axs[0, 0].imshow(_integrated, cmap="viridis")
+ axs[0, 0].add_patch(
patches.Rectangle(
(left, top),
width,
@@ -1571,21 +1982,56 @@ def plot_normalized_profile_of_roi(index, left=0, top=0, width=50, height=50):
)
)
- axs[0,0].set_title(f"Integrated normalized data - {index}")
- fig.colorbar(im, ax=axs[0,0], orientation="vertical", label="Intensity")
- axs[1,0].plot(lambda_array, _profile)
- axs[1,0].set_title(f"Profile of ROI - {index}")
- axs[1,0].set_xlabel("lambda_array")
- axs[1,0].set_ylabel("Intensity (a.u.)")
-
- _plot_normalized = interactive(widgets.Dropdown(options=list(normalized_data.keys()),
- description="Sample run:",
- layout=widgets.Layout(width="300px")),
- left=widgets.BoundedIntText(value=0, min=0, max=512, step=1, description="left:", layout=widgets.Layout(width="200px")),
- top=widgets.BoundedIntText(value=0, min=0, max=512, step=1, description="top:", layout=widgets.Layout(width="200px")),
- width=widgets.BoundedIntText(value=50, min=1, max=512, step=1, description="width:", layout=widgets.Layout(width="200px")),
- height=widgets.BoundedIntText(value=50, min=1, max=512, step=1, description="height:", layout=widgets.Layout(width="200px")),
- function=widgets.Dropdown(options=["mean", "median"], description="Function:", layout=widgets.Layout(width="200px")),
+ axs[0, 0].set_title(f"Integrated normalized data - {index}")
+ fig.colorbar(im, ax=axs[0, 0], orientation="vertical", label="Intensity")
+ axs[1, 0].plot(lambda_array, _profile)
+ axs[1, 0].set_title(f"Profile of ROI - {index}")
+ axs[1, 0].set_xlabel("lambda_array")
+ axs[1, 0].set_ylabel("Intensity (a.u.)")
+
+ _plot_normalized = interactive(
+ plot_normalized_profile_of_roi,
+ index=widgets.Dropdown(
+ options=list(normalized_data.keys()),
+ description="Sample run:",
+ layout=widgets.Layout(width="300px"),
+ ),
+ left=widgets.BoundedIntText(
+ value=0,
+ min=0,
+ max=512,
+ step=1,
+ description="left:",
+ layout=widgets.Layout(width="200px"),
+ ),
+ top=widgets.BoundedIntText(
+ value=0,
+ min=0,
+ max=512,
+ step=1,
+ description="top:",
+ layout=widgets.Layout(width="200px"),
+ ),
+ width=widgets.BoundedIntText(
+ value=50,
+ min=1,
+ max=512,
+ step=1,
+ description="width:",
+ layout=widgets.Layout(width="200px"),
+ ),
+ height=widgets.BoundedIntText(
+ value=50,
+ min=1,
+ max=512,
+ step=1,
+ description="height:",
+ layout=widgets.Layout(width="200px"),
+ ),
+ function=widgets.Dropdown(
+ options=["mean", "median"],
+ description="Function:",
+ layout=widgets.Layout(width="200px"),
+ ),
)
display(_plot_normalized)
@@ -1593,9 +2039,13 @@ def plot_normalized_profile_of_roi(index, left=0, top=0, width=50, height=50):
def legend(cls) -> None:
display(HTML("
"))
display(HTML("Legend
"))
- display(HTML(""
- "- Mandatory steps must be performed to ensure proper data preparation and reconstruction.
"
- "- Optional but recommended steps are not mandatory but should be performed to ensure proper data preparation and reconstruction.
"
- "- Optional steps are not mandatory but highly recommended to improve the quality of your reconstruction.
"
- "
"))
+ display(
+ HTML(
+ ""
+ "- Mandatory steps must be performed to ensure proper data preparation and reconstruction.
"
+ "- Optional but recommended steps are not mandatory but should be performed to ensure proper data preparation and reconstruction.
"
+ "- Optional steps are not mandatory but highly recommended to improve the quality of your reconstruction.
"
+ "
"
+ )
+ )
display(HTML("
"))
diff --git a/notebooks/__code/normalization_tof/units.py b/notebooks/__code/normalization_tof/units.py
index 8acb982f..43c1bc96 100644
--- a/notebooks/__code/normalization_tof/units.py
+++ b/notebooks/__code/normalization_tof/units.py
@@ -135,7 +135,9 @@ def convert_to_cross_section(from_unit, to_unit):
return conversion_factors[from_unit] / conversion_factors[to_unit]
-def convert_from_wavelength_to_energy_ev(wavelength, unit_from=DistanceUnitOptions.angstrom):
+def convert_from_wavelength_to_energy_ev(
+ wavelength, unit_from=DistanceUnitOptions.angstrom
+):
"""Convert wavelength to energy based on the given units.
Args:
@@ -177,15 +179,21 @@ def convert_array_from_time_to_lambda(
np.ndarray: Array of wavelength values.
"""
time_array_s = time_array * convert_time_units(time_unit, TimeUnitOptions.s)
- detector_offset_s = detector_offset * convert_time_units(detector_offset_unit, TimeUnitOptions.s)
+ detector_offset_s = detector_offset * convert_time_units(
+ detector_offset_unit, TimeUnitOptions.s
+ )
distance_source_detector_m = distance_source_detector * convert_distance_units(
distance_source_detector_unit, DistanceUnitOptions.m
)
h_over_mn = h / m_n
- lambda_m = h_over_mn * (time_array_s + detector_offset_s) / distance_source_detector_m
+ lambda_m = (
+ h_over_mn * (time_array_s + detector_offset_s) / distance_source_detector_m
+ )
- lambda_converted = lambda_m * convert_distance_units(DistanceUnitOptions.m, lambda_unit)
+ lambda_converted = lambda_m * convert_distance_units(
+ DistanceUnitOptions.m, lambda_unit
+ )
return lambda_converted
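The reformatted expression above is the de Broglie relation lambda = (h / m_n) * (t_tof + offset) / L. A self-contained numeric sketch for sanity checking it; the flight path, offset and TOF values are made up, and the constants are assumed to come from scipy.constants:

import numpy as np
from scipy.constants import h, m_n  # Planck constant, neutron mass

tof_s = np.array([5e-3, 10e-3, 20e-3])  # hypothetical times of flight, in seconds
detector_offset_s = 0.0                 # hypothetical detector offset
distance_m = 25.0                       # hypothetical source-detector distance, in meters

lambda_m = (h / m_n) * (tof_s + detector_offset_s) / distance_m
lambda_angstrom = lambda_m * 1e10       # 1 m = 1e10 Angstrom
# 10 ms over 25 m comes out to roughly 1.6 Angstrom, a typical thermal neutron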
@@ -225,13 +233,22 @@ def convert_array_from_time_to_energy(
detector_units_factor = convert_time_units(detector_offset_unit, TimeUnitOptions.s)
detector_offset = detector_units_factor * detector_offset
- distance_source_detector_factor = convert_distance_units(distance_source_detector_unit, DistanceUnitOptions.m)
- distance_source_detector_m = distance_source_detector * distance_source_detector_factor
+ distance_source_detector_factor = convert_distance_units(
+ distance_source_detector_unit, DistanceUnitOptions.m
+ )
+ distance_source_detector_m = (
+ distance_source_detector * distance_source_detector_factor
+ )
# Calculate the energy in eV using the formula E_ev = 1/2 m_n (L/t_tof)^2 / electron_volt
full_time_array_s = time_array_s + detector_offset
- energy_array_ev = 0.5 * m_n * (distance_source_detector_m / full_time_array_s) ** 2 / electron_volt
+ energy_array_ev = (
+ 0.5
+ * m_n
+ * (distance_source_detector_m / full_time_array_s) ** 2
+ / electron_volt
+ )
energy_array_factor = convert_to_energy(EnergyUnitOptions.eV, energy_unit)
energy_array = energy_array_ev * energy_array_factor
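Likewise, the energy branch implements E = 1/2 * m_n * (L / t_tof)^2 / e, matching the comment above. A short sketch with the same made-up numbers, again assuming scipy.constants for the physical constants:

import numpy as np
from scipy.constants import m_n, electron_volt  # neutron mass, 1 eV in joules

tof_s = np.array([5e-3, 10e-3, 20e-3])  # hypothetical times of flight, in seconds
distance_m = 25.0                       # hypothetical source-detector distance, in meters

energy_ev = 0.5 * m_n * (distance_m / tof_s) ** 2 / electron_volt
energy_mev = energy_ev * 1e3            # meV is the natural unit at these speeds
# 10 ms over 25 m gives about 33 meV, i.e. a thermal neutron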
diff --git a/notebooks/__code/normalization_tof/utilities.py b/notebooks/__code/normalization_tof/utilities.py
index 6f0e4a9d..3ac868cb 100644
--- a/notebooks/__code/normalization_tof/utilities.py
+++ b/notebooks/__code/normalization_tof/utilities.py
@@ -1,22 +1,18 @@
-import argparse
import glob
import logging
import multiprocessing as mp
import os
import shutil
from pathlib import Path
-from sqlite3 import Time
from typing import Tuple
import h5py
-from matplotlib import container
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from IPython.display import HTML, display
from PIL import Image
from skimage.io import imread
-from scipy.ndimage import median_filter
from timepix_geometry_correction.correct import TimepixGeometryCorrection
@@ -25,20 +21,14 @@
MARKERSIZE = 2
+
class NormalizedData:
- data= {}
- lambda_array= None
- tof_array= None
- energy_array= None
-
-
-from __code.normalization_tof.units import (
- DistanceUnitOptions,
- EnergyUnitOptions,
- TimeUnitOptions,
- convert_array_from_time_to_energy,
- convert_array_from_time_to_lambda,
-)
+ data = {}
+ lambda_array = None
+ tof_array = None
+ energy_array = None
+
+
# from __code.normalization_tof.normalization_for_timepix import create_master_dict
LOAD_DTYPE = np.uint16
@@ -53,6 +43,7 @@ class PLOT_SIZE:
SPECTRA_FILE_PREFIX = "Spectra.txt"
+
class DataType:
sample = "sample"
ob = "ob"
@@ -85,12 +76,14 @@ class StatusMetadata:
def _worker(fl):
-# return (imread(fl).astype(LOAD_DTYPE)).swapaxes(0, 1)
+ # return (imread(fl).astype(LOAD_DTYPE)).swapaxes(0, 1)
return (imread(fl).astype(np.float32)).swapaxes(0, 1)
- #return (imread(fl).astype(np.float32))
+ # return (imread(fl).astype(np.float32))
-def load_data_using_multithreading(list_tif: list = None, combine_tof: bool = False) -> np.ndarray:
+def load_data_using_multithreading(
+ list_tif: list = None, combine_tof: bool = False
+) -> np.ndarray:
"""load data using multithreading"""
with mp.Pool(processes=40) as pool:
data = pool.map(_worker, list_tif)
@@ -109,7 +102,9 @@ def retrieve_list_of_tif(folder: str) -> list:
def create_x_axis_file(
- lambda_array: np.ndarray = None, energy_array: np.ndarray = None, output_folder: str = "./"
+ lambda_array: np.ndarray = None,
+ energy_array: np.ndarray = None,
+ output_folder: str = "./",
) -> str:
"""create x axis file with lambda, energy and tof arrays"""
x_axis_data = {
@@ -125,7 +120,6 @@ def create_x_axis_file(
def load_images(master_dict=None, data_type=DataType.sample, verbose=False):
-
logging.info(f"Loading {data_type} data ...")
for _run_number in master_dict.keys():
logging.info(f"\tloading {data_type}# {_run_number} ... ")
@@ -141,21 +135,25 @@ def load_images(master_dict=None, data_type=DataType.sample, verbose=False):
display(HTML(f"{master_dict[_run_number][MasterDictKeys.data].shape = }"))
-def calculate_ob_data_combined_used_by_spectrum_normalization(roi=None, ob_data_combined=None, verbose=False):
-
- logging.info(f"Calculating the ob_data_combined for spectrum normalization")
+def calculate_ob_data_combined_used_by_spectrum_normalization(
+ roi=None, ob_data_combined=None, verbose=False
+):
+ logging.info("Calculating the ob_data_combined for spectrum normalization")
if roi is not None:
logging.info(f"\t{roi =}")
x0 = roi.left
y0 = roi.top
width = roi.width
height = roi.height
- ob_data_combined_for_spectrum = [np.sum(np.sum(_data[y0:y0 + height, x0:x0 + width], axis=0), axis=0) for _data in ob_data_combined]
+ ob_data_combined_for_spectrum = [
+ np.sum(np.sum(_data[y0 : y0 + height, x0 : x0 + width], axis=0), axis=0)
+ for _data in ob_data_combined
+ ]
logging.info(f"\t{np.shape(ob_data_combined_for_spectrum) = }")
logging.info(f"\t{np.shape(ob_data_combined) = }")
else:
- logging.info(f"\tno roi provided! Skipping the normalization of spectrum.")
+ logging.info("\tno roi provided! Skipping the normalization of spectrum.")
ob_data_combined_for_spectrum = None
if verbose:
@@ -164,7 +162,9 @@ def calculate_ob_data_combined_used_by_spectrum_normalization(roi=None, ob_data
return ob_data_combined_for_spectrum
-def correct_chips_alignment(data_combined=None, correct_chips_alignment_config=None, verbose=False):
+def correct_chips_alignment(
+ data_combined=None, correct_chips_alignment_config=None, verbose=False
+):
"""
correct the chips position (fill the gaps between the chips) using the dedicated library
timepix_geometry_correction (https://github.com/ornlneutronimaging/timepix_geometry_correction)
@@ -183,32 +183,37 @@ def correct_chips_alignment(data_combined=None, correct_chips_alignment_config=N
data_combined_corrected = np.zeros_like(data_combined)
for _index, _data in enumerate(data_combined):
- o_corrector = TimepixGeometryCorrection(raw_images=_data,
- config=correct_chips_alignment_config)
+ o_corrector = TimepixGeometryCorrection(
+ raw_images=_data, config=correct_chips_alignment_config
+ )
data_corrected = o_corrector.correct()
-
+
# remove useless dimension
data = np.array([np.squeeze(_data) for _data in data_corrected])
data_combined_corrected_squeezed = np.squeeze(data)
-
+
data_combined_corrected[_index] = data_combined_corrected_squeezed
logging.info(f"\t{data_combined_corrected.shape = }")
-
+
logging.info("Chips alignment corrected!")
-
+
if verbose:
display(HTML("Chips alignment corrected!"))
-
+
return data_combined_corrected
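Using only the calls that already appear in this diff (TimepixGeometryCorrection(raw_images=..., config=...) and .correct()), a hedged sketch of the per-stack correction the loop above applies. The stack shape is an assumption, and config stands for the correct_chips_alignment_config object the notebook builds elsewhere:

import numpy as np
from timepix_geometry_correction.correct import TimepixGeometryCorrection

def correct_single_stack(raw_stack: np.ndarray, config) -> np.ndarray:
    """Fill the inter-chip gaps of one (n_tof, y, x) stack, mirroring the loop above."""
    corrector = TimepixGeometryCorrection(raw_images=raw_stack, config=config)
    corrected = corrector.correct()
    # squeeze away any singleton dimensions, exactly as the surrounding code does
    return np.squeeze(np.array([np.squeeze(img) for img in corrected]))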
-def correct_all_samples_chips_alignment(sample_master_dict=None, correct_chips_alignment_config=None, verbose=False):
+def correct_all_samples_chips_alignment(
+ sample_master_dict=None, correct_chips_alignment_config=None, verbose=False
+):
for _sample_run_number in sample_master_dict.keys():
- sample_master_dict[_sample_run_number][MasterDictKeys.data] = correct_chips_alignment(
- sample_master_dict[_sample_run_number][MasterDictKeys.data],
- correct_chips_alignment_config,
- verbose=verbose
+ sample_master_dict[_sample_run_number][MasterDictKeys.data] = (
+ correct_chips_alignment(
+ sample_master_dict[_sample_run_number][MasterDictKeys.data],
+ correct_chips_alignment_config,
+ verbose=verbose,
+ )
)
@@ -234,15 +239,19 @@ def normalize_by_monitor_counts(master_dict=None, run_number=None, data=None):
return data
-
-def preview_normalized_data(_sample_data, ob_data_combined, dc_data_combined,
- normalized_data,
- lambda_array, energy_array,
- detector_delay_us, _sample_run_number,
- combine_samples=False,
- _spectrum_normalized_data=None,
- roi=None):
-
+def preview_normalized_data(
+ _sample_data,
+ ob_data_combined,
+ dc_data_combined,
+ normalized_data,
+ lambda_array,
+ energy_array,
+ detector_delay_us,
+ _sample_run_number,
+ combine_samples=False,
+ _spectrum_normalized_data=None,
+ roi=None,
+):
"""preview normalized data"""
# display preview of normalized data
@@ -253,12 +262,12 @@ def preview_normalized_data(_sample_data, ob_data_combined, dc_data_combined,
display(HTML(f"Preview of run {_sample_run_number}
"))
display(HTML(f"detector delay: {detector_delay_us:.2f} us"))
-
- axs1[0].set_title(f"Integrated Sample data")
+
+ axs1[0].set_title("Integrated Sample data")
sample_integrated1 = np.nansum(_sample_data, axis=1)
sample_integrated = np.nansum(sample_integrated1, axis=1)
- axs1[1].plot(sample_integrated, 'o')
+ axs1[1].plot(sample_integrated, "o")
axs1[1].set_xlabel("File image index")
axs1[1].set_ylabel("Transmission (a.u.)")
plt.tight_layout()
@@ -271,13 +280,15 @@ def preview_normalized_data(_sample_data, ob_data_combined, dc_data_combined,
ob_integrated1 = np.nansum(ob_data_combined, axis=1)
ob_integrated = np.nansum(ob_integrated1, axis=1)
- axs2[1].plot(ob_integrated, 'o')
+ axs2[1].plot(ob_integrated, "o")
axs2[1].set_xlabel("File image index")
axs2[1].set_ylabel("Transmission (a.u.)")
plt.tight_layout()
if dc_data_combined is not None:
- fig, axs_dc = plt.subplots(1, 2, figsize=(2 * PLOT_SIZE.width, PLOT_SIZE.height))
+ fig, axs_dc = plt.subplots(
+ 1, 2, figsize=(2 * PLOT_SIZE.width, PLOT_SIZE.height)
+ )
dc_data_integrated = np.nanmean(dc_data_combined, axis=0)
im_dc = axs_dc[0].imshow(dc_data_integrated, cmap="gray")
plt.colorbar(im_dc, ax=axs_dc[0])
@@ -285,7 +296,7 @@ def preview_normalized_data(_sample_data, ob_data_combined, dc_data_combined,
dc_integrated1 = np.nansum(dc_data_combined, axis=1)
dc_integrated = np.nansum(dc_integrated1, axis=1)
- axs_dc[1].plot(dc_integrated, 'o')
+ axs_dc[1].plot(dc_integrated, "o")
axs_dc[1].set_xlabel("File image index")
axs_dc[1].set_ylabel("Transmission (a.u.)")
plt.tight_layout()
@@ -295,16 +306,21 @@ def preview_normalized_data(_sample_data, ob_data_combined, dc_data_combined,
normalized_data_integrated = np.nanmean(normalized_data[_sample_run_number], axis=0)
im2 = axs3[0].imshow(normalized_data_integrated, cmap="gray")
plt.colorbar(im2, ax=axs3[0])
- axs3[0].set_title(f"Integrated Normalized data")
+ axs3[0].set_title("Integrated Normalized data")
if roi is not None:
x0 = roi.left
y0 = roi.top
width = roi.width
height = roi.height
- axs3[0].add_patch(plt.Rectangle((x0, y0), width, height, fill=False, color="red", lw=2))
+ axs3[0].add_patch(
+ plt.Rectangle((x0, y0), width, height, fill=False, color="red", lw=2)
+ )
- profile_step1 = np.nanmean(normalized_data[_sample_run_number][:, y0:y0+height, x0:x0+width], axis=1)
+ profile_step1 = np.nanmean(
+ normalized_data[_sample_run_number][:, y0 : y0 + height, x0 : x0 + width],
+ axis=1,
+ )
profile = np.nanmean(profile_step1, axis=1)
_label = "pixel by pixel normalization profile of ROI"
@@ -313,7 +329,7 @@ def preview_normalized_data(_sample_data, ob_data_combined, dc_data_combined,
profile = np.nanmean(profile_step1, axis=1)
_label = "pixel by pixel normalization profile of full image"
- axs3[1].plot(profile, 'o', label=_label)
+ axs3[1].plot(profile, "o", label=_label)
axs3[1].set_xlabel("File image index")
axs3[1].set_ylabel("Transmission (a.u.)")
axs3[1].legend()
@@ -337,20 +353,29 @@ def preview_normalized_data(_sample_data, ob_data_combined, dc_data_combined,
plt.tight_layout()
if _spectrum_normalized_data is not None:
-
- fig, axs6 = plt.subplots(1, 2, figsize=(2 * PLOT_SIZE.width, PLOT_SIZE.height))
+ fig, axs6 = plt.subplots(
+ 1, 2, figsize=(2 * PLOT_SIZE.width, PLOT_SIZE.height)
+ )
logging.info(f"{np.shape(profile) = }")
- axs6[0].plot(lambda_array, _spectrum_normalized_data, "r*",
- markersize=MARKERSIZE,
- label="spectrum normalization of ROI")
+ axs6[0].plot(
+ lambda_array,
+ _spectrum_normalized_data,
+ "r*",
+ markersize=MARKERSIZE,
+ label="spectrum normalization of ROI",
+ )
axs6[0].set_xlabel("Lambda (A)")
axs6[0].set_ylabel("Transmission (a.u.)")
axs6[0].legend()
- axs6[1].plot(energy_array, _spectrum_normalized_data, "r*",
- markersize=MARKERSIZE,
- label="spectrum normalization of ROI")
+ axs6[1].plot(
+ energy_array,
+ _spectrum_normalized_data,
+ "r*",
+ markersize=MARKERSIZE,
+ label="spectrum normalization of ROI",
+ )
axs6[1].set_xlabel("Energy (eV)")
axs6[1].set_ylabel("Transmission (a.u.)")
axs6[1].set_xscale("log")
@@ -360,11 +385,14 @@ def preview_normalized_data(_sample_data, ob_data_combined, dc_data_combined,
plt.show()
+
def get_detector_offset_from_nexus(nexus_path: str) -> float:
"""get the detector offset from the nexus file"""
with h5py.File(nexus_path, "r") as hdf5_data:
try:
- detector_offset_micros = hdf5_data["entry"]["DASlogs"]["BL10:Det:TH:DSPT1:TIDelay"]["value"][0]
+ detector_offset_micros = hdf5_data["entry"]["DASlogs"][
+ "BL10:Det:TH:DSPT1:TIDelay"
+ ]["value"][0]
# detector_offset_micros = hdf5_data["entry"]["DASlogs"]["BL10:Det:DSP1:Trig2:Delay"]["value"][0]
except KeyError:
detector_offset_micros = None
@@ -380,6 +408,7 @@ def get_run_number_from_nexus(nexus_path: str) -> int:
run_number = None
return run_number
+
def export_sample_images(
output_folder,
export_corrected_stack_of_sample_data,
@@ -387,7 +416,7 @@ def export_sample_images(
_sample_run_number,
_sample_data,
spectra_file_name=None,
- spectra_array=None
+ spectra_array=None,
):
logging.info(f"> Exporting sample corrected images to {output_folder} ...")
@@ -410,30 +439,37 @@ def export_sample_images(
if spectra_array is not None:
# manually create the file for spectra
- spectra_file_name = os.path.join(output_stack_folder, f"manually_created_{SPECTRA_FILE_PREFIX}")
+ spectra_file_name = os.path.join(
+ output_stack_folder, f"manually_created_{SPECTRA_FILE_PREFIX}"
+ )
_full_counts_array = np.empty_like(spectra_array)
for _index, _data in enumerate(_sample_data):
_full_counts_array[_index] = np.nansum(_data)
- pd_spectra = pd.DataFrame({
- "shutter_time": spectra_array,
- "counts": _full_counts_array
- })
+ pd_spectra = pd.DataFrame(
+ {"shutter_time": spectra_array, "counts": _full_counts_array}
+ )
pd_spectra.to_csv(spectra_file_name, index=False, sep=",")
- logging.info(f"\t -> Exporting manually created spectra file to {spectra_file_name} is done!")
+ logging.info(
+ f"\t -> Exporting manually created spectra file to {spectra_file_name} is done!"
+ )
else:
shutil.copy(spectra_file_name, os.path.join(output_stack_folder))
- logging.info(f"\t -> Exporting spectra file {spectra_file_name} to {output_stack_folder} is done!")
-
+ logging.info(
+ f"\t -> Exporting spectra file {spectra_file_name} to {output_stack_folder} is done!"
+ )
+
display(HTML(f"Created folder {output_stack_folder} for sample outputs!"))
-
+
if export_corrected_integrated_sample_data:
# making up the integrated sample data
sample_data_integrated = np.nanmean(_sample_data, axis=0)
full_file_name = os.path.join(sample_output_folder, "integrated.tif")
logging.info(f"\t -> Exporting integrated sample data to {full_file_name} ...")
make_tiff(data=sample_data_integrated, filename=full_file_name)
- logging.info(f"\t -> Exporting integrated sample data to {full_file_name} is done!")
+ logging.info(
+ f"\t -> Exporting integrated sample data to {full_file_name} is done!"
+ )
def export_ob_images(
@@ -449,10 +485,13 @@ def export_ob_images(
logging.info(f"> Exporting combined ob images to {output_folder} ...")
logging.info(f"\t{ob_run_numbers = }")
list_ob_runs_number_only = [
- str(isolate_run_number_from_full_path(_ob_run_number)) for _ob_run_number in ob_run_numbers
+ str(isolate_run_number_from_full_path(_ob_run_number))
+ for _ob_run_number in ob_run_numbers
]
if len(list_ob_runs_number_only) == 1:
- ob_output_folder = os.path.join(output_folder, f"ob_{list_ob_runs_number_only[0]}")
+ ob_output_folder = os.path.join(
+ output_folder, f"ob_{list_ob_runs_number_only[0]}"
+ )
else:
str_list_ob_runs = "_".join(list_ob_runs_number_only)
ob_output_folder = os.path.join(output_folder, f"ob_{str_list_ob_runs}")
@@ -479,24 +518,29 @@ def export_ob_images(
_output_file = os.path.join(output_stack_folder, f"image{_index:04d}.tif")
make_tiff(data=_data, filename=_output_file)
logging.info(f"\t -> Exporting ob data to {output_stack_folder} is done!")
-
+
if spectra_array is not None:
# manually create the file for spectra
- spectra_file_name = os.path.join(output_stack_folder, f"manually_created_{SPECTRA_FILE_PREFIX}")
+ spectra_file_name = os.path.join(
+ output_stack_folder, f"manually_created_{SPECTRA_FILE_PREFIX}"
+ )
_full_counts_array = np.empty_like(spectra_array)
for _index, _data in enumerate(ob_data_combined):
_full_counts_array[_index] = np.nansum(_data)
- pd_spectra = pd.DataFrame({
- "shutter_time": spectra_array,
- "counts": _full_counts_array
- })
+ pd_spectra = pd.DataFrame(
+ {"shutter_time": spectra_array, "counts": _full_counts_array}
+ )
pd_spectra.to_csv(spectra_file_name, index=False, sep=",")
- logging.info(f"\t -> Exporting manually created spectra file to {spectra_file_name} is done!")
+ logging.info(
+ f"\t -> Exporting manually created spectra file to {spectra_file_name} is done!"
+ )
else:
# copy spectra file to the output folder
shutil.copy(spectra_file_name, os.path.join(output_stack_folder))
- logging.info(f"\t -> Exported spectra file {spectra_file_name} to {output_stack_folder}!")
+ logging.info(
+ f"\t -> Exported spectra file {spectra_file_name} to {output_stack_folder}!"
+ )
display(HTML(f"Created folder {output_stack_folder} for OB outputs!"))
@@ -588,34 +632,40 @@ def update_dict_with_shutter_counts(master_dict: dict) -> tuple[dict, bool]:
if _value == "0":
break
list_shutter_counts.append(float(_value))
- master_dict[run_number][MasterDictKeys.shutter_counts] = list_shutter_counts
-
+ master_dict[run_number][MasterDictKeys.shutter_counts] = (
+ list_shutter_counts
+ )
+
return master_dict, status_all_shutter_counts_found
-def update_dict_with_spectra_files(master_dict: dict, spectra_array: np.ndarray = None) -> tuple[dict, bool]:
+def update_dict_with_spectra_files(
+ master_dict: dict, spectra_array: np.ndarray = None
+) -> tuple[dict, bool]:
"""update the master dict with spectra values from spectra file"""
status_all_spectra_found = True
for _run_number in master_dict.keys():
-
if spectra_array is not None:
master_dict[_run_number][MasterDictKeys.list_spectra] = spectra_array
- master_dict[_run_number][MasterDictKeys.spectra_file_name] = "Provided array"
-
- else:
+ master_dict[_run_number][MasterDictKeys.spectra_file_name] = (
+ "Provided array"
+ )
+ else:
data_path = master_dict[_run_number][MasterDictKeys.data_path]
_list_files = glob.glob(os.path.join(data_path, f"*_{SPECTRA_FILE_PREFIX}"))
-
+
if len(_list_files) == 0:
logging.info(f"Spectra file not found for run {_run_number}!")
master_dict[_run_number][MasterDictKeys.list_spectra] = None
status_all_spectra_found = False
continue
-
+
else:
spectra_file = _list_files[0]
- master_dict[_run_number][MasterDictKeys.spectra_file_name] = spectra_file
+ master_dict[_run_number][MasterDictKeys.spectra_file_name] = (
+ spectra_file
+ )
pd_spectra = pd.read_csv(spectra_file, sep=",", header=0)
shutter_time = pd_spectra["shutter_time"].values
master_dict[_run_number][MasterDictKeys.list_spectra] = shutter_time
@@ -636,11 +686,15 @@ def update_dict_with_proton_charge(master_dict: dict) -> tuple[dict, bool]:
try:
with h5py.File(_nexus_path, "r") as hdf5_data:
- proton_charge = hdf5_data["entry"][MasterDictKeys.proton_charge][0] / 1e12
+ proton_charge = (
+ hdf5_data["entry"][MasterDictKeys.proton_charge][0] / 1e12
+ )
except KeyError:
proton_charge = None
status_all_proton_charge_found = False
- master_dict[_run_number][MasterDictKeys.proton_charge] = np.float32(proton_charge)
+ master_dict[_run_number][MasterDictKeys.proton_charge] = np.float32(
+ proton_charge
+ )
return status_all_proton_charge_found
@@ -661,14 +715,18 @@ def update_dict_with_monitor_counts(master_dict: dict) -> bool:
except KeyError:
monitor_counts = None
status_all_monitor_counts_found = False
- master_dict[_run_number][MasterDictKeys.monitor_counts] = np.float32(monitor_counts)
+ master_dict[_run_number][MasterDictKeys.monitor_counts] = np.float32(
+ monitor_counts
+ )
return status_all_monitor_counts_found
def update_dict_with_list_of_images(master_dict: dict) -> dict:
"""update the master dict with list of images"""
for _run_number in master_dict.keys():
- list_tif = retrieve_list_of_tif(master_dict[_run_number][MasterDictKeys.data_path])
+ list_tif = retrieve_list_of_tif(
+ master_dict[_run_number][MasterDictKeys.data_path]
+ )
logging.info(f"Retrieved {len(list_tif)} tif files for run {_run_number}!")
master_dict[_run_number][MasterDictKeys.list_tif] = list_tif
@@ -680,7 +738,9 @@ def get_list_run_number(data_folder: str) -> list:
return list_run_number
-def update_dict_with_nexus_full_path(nexus_root_path: str, instrument: str, master_dict: dict) -> dict:
+def update_dict_with_nexus_full_path(
+ nexus_root_path: str, instrument: str, master_dict: dict
+) -> dict:
"""create dict of nexus path for each run number"""
for run_number in master_dict.keys():
master_dict[run_number][MasterDictKeys.nexus_path] = os.path.join(
@@ -704,7 +764,9 @@ def update_with_nexus_metadata(master_dict: dict) -> dict:
def update_dict_with_data_full_path(data_root_path: str, master_dict: dict) -> dict:
"""create dict of data path for each run number"""
for run_number in master_dict.keys():
- master_dict[run_number][MasterDictKeys.data_path] = os.path.join(data_root_path, f"Run_{run_number}")
+ master_dict[run_number][MasterDictKeys.data_path] = os.path.join(
+ data_root_path, f"Run_{run_number}"
+ )
def create_master_dict(
@@ -736,7 +798,9 @@ def create_master_dict(
# if all_shutter_counts_found:
logging.info("updating with spectra values!")
- master_dict, all_spectra_found = update_dict_with_spectra_files(master_dict, spectra_array=spectra_array)
+ master_dict, all_spectra_found = update_dict_with_spectra_files(
+ master_dict, spectra_array=spectra_array
+ )
if not all_spectra_found:
status_metadata.all_spectra_found = False
logging.info(f"{master_dict = }")
@@ -759,7 +823,9 @@ def create_master_dict(
return master_dict, status_metadata
-def produce_list_shutter_for_each_image(list_time_spectra: list = None, list_shutter_counts: list = None) -> list:
+def produce_list_shutter_for_each_image(
+ list_time_spectra: list = None, list_shutter_counts: list = None
+) -> list:
"""produce list of shutter counts for each image"""
delat_time_spectra = list_time_spectra[1] - list_time_spectra[0]
@@ -769,31 +835,41 @@ def produce_list_shutter_for_each_image(list_time_spectra: list = None, list_shu
logging.info(f"\t{list_index_jump = }")
logging.info(f"\t{list_shutter_counts = }")
- list_shutter_values_for_each_image = np.zeros(len(list_time_spectra), dtype=np.float32)
+ list_shutter_values_for_each_image = np.zeros(
+ len(list_time_spectra), dtype=np.float32
+ )
if len(list_shutter_counts) == 1: # resonance mode
list_shutter_values_for_each_image.fill(list_shutter_counts[0])
return list_shutter_values_for_each_image
- list_shutter_values_for_each_image[0 : list_index_jump[0] + 1].fill(list_shutter_counts[0])
+ list_shutter_values_for_each_image[0 : list_index_jump[0] + 1].fill(
+ list_shutter_counts[0]
+ )
for _index in range(1, len(list_index_jump)):
_start = list_index_jump[_index - 1]
_end = list_index_jump[_index]
- list_shutter_values_for_each_image[_start + 1 : _end + 1].fill(list_shutter_counts[_index])
+ list_shutter_values_for_each_image[_start + 1 : _end + 1].fill(
+ list_shutter_counts[_index]
+ )
- list_shutter_values_for_each_image[list_index_jump[-1] + 1 :] = list_shutter_counts[-1]
+ list_shutter_values_for_each_image[list_index_jump[-1] + 1 :] = list_shutter_counts[
+ -1
+ ]
return list_shutter_values_for_each_image
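A simplified stand-in for the expansion step above: given the indices where the time spectra jump (i.e. where a new shutter window starts) and the per-window shutter counts, it fills one value per image. The real helper derives list_index_jump from the spectra spacing; here it is passed in directly, and all numbers are made up:

import numpy as np

def expand_shutter_counts(n_images, list_index_jump, list_shutter_counts):
    """Fill a per-image array with the shutter counts of the window each image
    belongs to; windows end at the indices listed in list_index_jump."""
    values = np.zeros(n_images, dtype=np.float32)
    if len(list_shutter_counts) == 1:  # resonance mode: a single window
        values.fill(list_shutter_counts[0])
        return values
    values[: list_index_jump[0] + 1] = list_shutter_counts[0]
    for i in range(1, len(list_index_jump)):
        values[list_index_jump[i - 1] + 1 : list_index_jump[i] + 1] = list_shutter_counts[i]
    values[list_index_jump[-1] + 1 :] = list_shutter_counts[-1]
    return values

# toy example: 10 images, shutter windows ending at indices 3 and 6
print(expand_shutter_counts(10, [3, 6], [100.0, 200.0, 300.0]))
# -> [100. 100. 100. 100. 200. 200. 200. 300. 300. 300.]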
-def replace_zero_with_local_median(data: np.ndarray,
- kernel_size: Tuple[int, int, int] = (3, 3, 3),
- max_iterations: int = 10) -> np.ndarray:
+def replace_zero_with_local_median(
+ data: np.ndarray,
+ kernel_size: Tuple[int, int, int] = (3, 3, 3),
+ max_iterations: int = 10,
+) -> np.ndarray:
"""
Replace 0 values in a 3D array using local median filtering.
This function ONLY processes small neighborhoods around 0 pixels,
avoiding expensive computation on the entire dataset.
-
+
Parameters:
-----------
data : np.ndarray
@@ -804,7 +880,7 @@ def replace_zero_with_local_median(data: np.ndarray,
max_iterations : int
Maximum number of iterations to replace 0 values
Default is 10
-
+
Returns:
--------
np.ndarray
@@ -823,7 +899,7 @@ def replace_zero_with_local_median(data: np.ndarray,
# Calculate padding for kernel
pad_h, pad_w, pad_d = [k // 2 for k in kernel_size]
-
+
for iteration in range(max_iterations):
# Find current 0 locations
zero_coords = np.argwhere(result == 0)
@@ -833,13 +909,15 @@ def replace_zero_with_local_median(data: np.ndarray,
logging.info(f"All 0 values replaced after {iteration} iterations")
break
- logging.info(f"Iteration {iteration + 1}: {current_zero_count} 0 values remaining")
+ logging.info(
+ f"Iteration {iteration + 1}: {current_zero_count} 0 values remaining"
+ )
# Process each 0 pixel individually
replaced_count = 0
for coord in zero_coords:
y, x, z = coord
-
+
# Define the local neighborhood bounds
y_min = max(0, y - pad_h)
y_max = min(result.shape[0], y + pad_h + 1)
@@ -847,13 +925,13 @@ def replace_zero_with_local_median(data: np.ndarray,
x_max = min(result.shape[1], x + pad_w + 1)
z_min = max(0, z - pad_d)
z_max = min(result.shape[2], z + pad_d + 1)
-
+
# Extract the local neighborhood
neighborhood = result[y_min:y_max, x_min:x_max, z_min:z_max]
-
+
# Get non-NaN values in the neighborhood
valid_values = neighborhood[~np.isnan(neighborhood)]
-
+
# If we have valid values, compute median and replace
if len(valid_values) > 0:
median_value = np.median(valid_values)
@@ -865,30 +943,34 @@ def replace_zero_with_local_median(data: np.ndarray,
# If no progress was made, break
if replaced_count == 0:
remaining_zero_count = np.sum(result == 0)
- logging.info(f"No progress made. {remaining_zero_count} zero values could not be replaced")
+ logging.info(
+ f"No progress made. {remaining_zero_count} zero values could not be replaced"
+ )
logging.info("(These may be in regions with no valid neighbors)")
break
final_zero_count = np.sum(result == 0)
logging.info(f"Final zero count: {final_zero_count}")
- logging.info(f"Successfully replaced {initial_zero_count - final_zero_count} zero values")
+ logging.info(
+ f"Successfully replaced {initial_zero_count - final_zero_count} zero values"
+ )
return result
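
A quick usage sketch for replace_zero_with_local_median on synthetic data, assuming the function above is in scope; the stack shape, seed and dead-pixel positions are made up for illustration.

import numpy as np

rng = np.random.default_rng(42)
stack = rng.uniform(0.5, 1.5, size=(5, 32, 32)).astype(np.float32)

# knock out a few pixels to simulate dead detector cells
stack[2, 10, 10] = 0
stack[2, 10, 11] = 0
stack[4, 0, 0] = 0

repaired = replace_zero_with_local_median(stack, kernel_size=(3, 3, 3), max_iterations=5)
print("zeros before:", int(np.sum(stack == 0)), "| zeros after:", int(np.sum(repaired == 0)))
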
def combine_dc_images(dc_master_dict: dict) -> np.ndarray:
"""combine all dc images
-
+
Parameters:
-----------
dc_master_dict : dict
master dict of dc run numbers
-
+
Returns:
--------
np.ndarray
combined dc data
-
+
"""
logging.info("Combining all dark current images")
full_dc_data = []
@@ -899,7 +981,9 @@ def combine_dc_images(dc_master_dict: dict) -> np.ndarray:
for _dc_run_number in dc_master_dict.keys():
logging.info(f"Combining dc# {_dc_run_number} ...")
- dc_data = np.array(dc_master_dict[_dc_run_number][MasterDictKeys.data], dtype=np.float32)
+ dc_data = np.array(
+ dc_master_dict[_dc_run_number][MasterDictKeys.data], dtype=np.float32
+ )
full_dc_data.append(dc_data)
logging.info(f"{np.shape(full_dc_data) = }")
@@ -918,11 +1002,11 @@ def combine_ob_images(
use_shutter_counts: bool = False,
replace_ob_zeros_by_nan: bool = False,
replace_ob_zeros_by_local_median: bool = False,
- kernel_size_for_local_median: Tuple[int, int, int] = (3, 3, 3),
+ kernel_size_for_local_median: Tuple[int, int, int] = (3, 3, 3),
max_iterations: int = 10,
) -> Tuple[np.ndarray, float]:
"""combine all ob images and correct by proton charge and shutter counts
-
+
Parameters:
-----------
ob_master_dict : dict
@@ -941,14 +1025,14 @@ def combine_ob_images(
kernel size for local median filtering
max_iterations : int
maximum number of iterations for local median filtering
-
+
Returns:
--------
np.ndarray
combined ob data
float
total proton charge used for correction
-
+
"""
logging.info("Combining all open beam images")
@@ -956,10 +1040,14 @@ def combine_ob_images(
# logging.info(f"\tcorrecting by monitor counts: {use_monitor_counts}")
logging.info(f"\tshutter counts: {use_shutter_counts}")
logging.info(f"\treplace ob zeros by nan: {replace_ob_zeros_by_nan}")
- logging.info(f"\treplace ob zeros by local median: {replace_ob_zeros_by_local_median}")
- logging.info(f"\tkernel size for local median: y:{kernel_size_for_local_median[0]}, "
- f"x:{kernel_size_for_local_median[1]}, "
- f"tof:{kernel_size_for_local_median[2]}")
+ logging.info(
+ f"\treplace ob zeros by local median: {replace_ob_zeros_by_local_median}"
+ )
+ logging.info(
+ f"\tkernel size for local median: y:{kernel_size_for_local_median[0]}, "
+ f"x:{kernel_size_for_local_median[1]}, "
+ f"tof:{kernel_size_for_local_median[2]}"
+ )
full_ob_data_corrected = []
if use_proton_charge:
@@ -978,7 +1066,9 @@ def combine_ob_images(
for _ob_run_number in ob_master_dict.keys():
logging.info(f"Combining ob# {_ob_run_number} ...")
- ob_data = np.array(ob_master_dict[_ob_run_number][MasterDictKeys.data], dtype=np.float32)
+ ob_data = np.array(
+ ob_master_dict[_ob_run_number][MasterDictKeys.data], dtype=np.float32
+ )
# get statistics of ob data
data_shape = ob_data.shape
@@ -987,7 +1077,9 @@ def combine_ob_images(
number_of_zeros = np.sum(ob_data == 0)
logging.info(f"\t ob data shape: {data_shape}")
logging.info(f"\t Number of zeros in ob data: {number_of_zeros}")
- logging.info(f"\t Percentage of zeros in ob data: {number_of_zeros / (data_shape[0] * nbr_pixels) * 100:.2f}%")
+ logging.info(
+ f"\t Percentage of zeros in ob data: {number_of_zeros / (data_shape[0] * nbr_pixels) * 100:.2f}%"
+ )
logging.info(f"\t Mean of ob data: {np.mean(ob_data)}")
logging.info(f"\t maximum of ob data: {np.max(ob_data)}")
logging.info(f"\t minimum of ob data: {np.min(ob_data)}")
@@ -999,7 +1091,7 @@ def combine_ob_images(
logging.info(f"\t\t proton charge: {proton_charge} C")
logging.info(f"\t\t{type(proton_charge) = }")
logging.info(f"\t\tbefore division: {proton_charge.dtype = }")
- ob_data *= (proton_charge / sum_proton_charge) # weighted sum
+ ob_data *= proton_charge / sum_proton_charge # weighted sum
logging.info(f"\t\tafter division: {ob_data.dtype = }")
logging.info(f"{ob_data.shape = }")
@@ -1015,14 +1107,20 @@ def combine_ob_images(
logging.info("\t -> Normalized by shutter counts")
list_shutter_values_for_each_image = produce_list_shutter_for_each_image(
- list_time_spectra=ob_master_dict[_ob_run_number][MasterDictKeys.list_spectra],
- list_shutter_counts=ob_master_dict[_ob_run_number][MasterDictKeys.shutter_counts],
+ list_time_spectra=ob_master_dict[_ob_run_number][
+ MasterDictKeys.list_spectra
+ ],
+ list_shutter_counts=ob_master_dict[_ob_run_number][
+ MasterDictKeys.shutter_counts
+ ],
)
logging.info(f"{list_shutter_values_for_each_image.shape = }")
temp_ob_data = np.empty_like(ob_data, dtype=np.float32)
for _index in range(len(list_shutter_values_for_each_image)):
- temp_ob_data[_index] = ob_data[_index] / list_shutter_values_for_each_image[_index]
+ temp_ob_data[_index] = (
+ ob_data[_index] / list_shutter_values_for_each_image[_index]
+ )
logging.info(f"{temp_ob_data.shape = }")
ob_data = temp_ob_data.copy()
@@ -1030,9 +1128,11 @@ def combine_ob_images(
# logging.info(f"{ob_data_combined.shape = }")
if replace_ob_zeros_by_local_median:
- ob_data = replace_zero_with_local_median(ob_data,
- kernel_size=kernel_size_for_local_median,
- max_iterations=max_iterations)
+ ob_data = replace_zero_with_local_median(
+ ob_data,
+ kernel_size=kernel_size_for_local_median,
+ max_iterations=max_iterations,
+ )
full_ob_data_corrected.append(ob_data)
logging.info(f"{np.shape(full_ob_data_corrected) = }")
@@ -1043,7 +1143,7 @@ def combine_ob_images(
ob_data_combined = np.array(full_ob_data_corrected).sum(axis=0)
else:
ob_data_combined = np.array(full_ob_data_corrected).mean(axis=0)
-
+
logging.info(f"\tafter: {ob_data_combined.shape = }")
# remove zeros
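
The proton-charge correction above scales each run by proton_charge / sum_proton_charge before summing, so the combined stack is a charge-weighted mean of the runs. A tiny numeric check of that weighting; the run values and charges are invented.

import numpy as np

runs = [np.full((2, 4, 4), v, dtype=np.float32) for v in (10.0, 20.0, 40.0)]
charges = np.array([1.0, 2.0, 1.0])               # proton charge per run (illustrative)

total_charge = charges.sum()
weighted = [data * (q / total_charge) for data, q in zip(runs, charges)]
combined = np.sum(weighted, axis=0)               # weights sum to 1 -> charge-weighted mean

print(combined[0, 0, 0])                          # 10*0.25 + 20*0.5 + 40*0.25 = 22.5
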
@@ -1061,11 +1161,11 @@ def combine_images(
# use_shutter_counts: bool = False,
replace_zeros_by_nan: bool = False,
replace_zeros_by_local_median: bool = False,
- kernel_size_for_local_median: Tuple[int, int, int] = (3, 3, 3),
+ kernel_size_for_local_median: Tuple[int, int, int] = (3, 3, 3),
max_iterations: int = 10,
) -> Tuple[np.ndarray, float]:
"""combine all images and correct by proton charge and shutter counts
-
+
Parameters:
-----------
master_dict : dict
@@ -1084,14 +1184,14 @@ def combine_images(
kernel size for local median filtering
max_iterations : int
maximum number of iterations for local median filtering
-
+
Returns:
--------
np.ndarray
combined data
float
total proton charge used for correction
-
+
"""
logging.info(f"Combining all {data_type} images")
@@ -1100,9 +1200,11 @@ def combine_images(
# logging.info(f"\tshutter counts: {use_shutter_counts}")
logging.info(f"\treplace zeros by nan: {replace_zeros_by_nan}")
logging.info(f"\treplace zeros by local median: {replace_zeros_by_local_median}")
- logging.info(f"\tkernel size for local median: y:{kernel_size_for_local_median[0]}, "
- f"x:{kernel_size_for_local_median[1]}, "
- f"tof:{kernel_size_for_local_median[2]}")
+ logging.info(
+ f"\tkernel size for local median: y:{kernel_size_for_local_median[0]}, "
+ f"x:{kernel_size_for_local_median[1]}, "
+ f"tof:{kernel_size_for_local_median[2]}"
+ )
full_data_corrected = []
if use_proton_charge:
@@ -1112,10 +1214,14 @@ def combine_images(
for _run_number in master_dict.keys():
proton_charge = master_dict[_run_number][MasterDictKeys.proton_charge]
list_proton_charges.append(proton_charge)
- logging.info(f"\t {data_type}# {_run_number}: proton charge = {proton_charge} C")
+ logging.info(
+ f"\t {data_type}# {_run_number}: proton charge = {proton_charge} C"
+ )
sum_proton_charge = np.sum(list_proton_charges)
- logging.info(f"\t Total proton charge of all {data_type} runs: {sum_proton_charge} C")
+ logging.info(
+ f"\t Total proton charge of all {data_type} runs: {sum_proton_charge} C"
+ )
else:
sum_proton_charge = 1.0 # dummy value to avoid division by zero
@@ -1130,7 +1236,9 @@ def combine_images(
number_of_zeros = np.sum(data == 0)
logging.info(f"\t {data_type} data shape: {data_shape}")
logging.info(f"\t Number of zeros in {data_type} data: {number_of_zeros}")
- logging.info(f"\t Percentage of zeros in {data_type} data: {number_of_zeros / (data_shape[0] * nbr_pixels) * 100:.2f}%")
+ logging.info(
+ f"\t Percentage of zeros in {data_type} data: {number_of_zeros / (data_shape[0] * nbr_pixels) * 100:.2f}%"
+ )
logging.info(f"\t Mean of {data_type} data: {np.mean(data)}")
logging.info(f"\t maximum of {data_type} data: {np.max(data)}")
logging.info(f"\t minimum of {data_type} data: {np.min(data)}")
@@ -1142,14 +1250,16 @@ def combine_images(
logging.info(f"\t\t proton charge: {proton_charge} C")
logging.info(f"\t\t{type(proton_charge) = }")
logging.info(f"\t\tbefore division: {proton_charge.dtype = }")
- data *= (proton_charge / sum_proton_charge) # weighted sum
+ data *= proton_charge / sum_proton_charge # weighted sum
logging.info(f"\t\tafter division: {data.dtype = }")
logging.info(f"{data.shape = }")
if replace_zeros_by_local_median:
- data = replace_zero_with_local_median(data,
- kernel_size=kernel_size_for_local_median,
- max_iterations=max_iterations)
+ data = replace_zero_with_local_median(
+ data,
+ kernel_size=kernel_size_for_local_median,
+ max_iterations=max_iterations,
+ )
full_data_corrected.append(data)
logging.info(f"{np.shape(full_data_corrected) = }")
@@ -1160,7 +1270,7 @@ def combine_images(
data_combined = np.array(full_data_corrected).sum(axis=0)
else:
data_combined = np.array(full_data_corrected).mean(axis=0)
-
+
logging.info(f"\tafter: {data_combined.shape = }")
# remove zeros
@@ -1178,13 +1288,13 @@ def combine_images(
# ):
# """
# Normalize sample data by shutter counts for each image.
-
+
# This function normalizes sample data by dividing each image by its corresponding
# shutter count value. The shutter count values are determined by mapping the time
# spectra from the open beam data to the shutter counts recorded for the sample.
# Images with zero shutter counts are replaced with NaN values to avoid division
# by zero errors.
-
+
# Parameters
# ----------
# sample_master_dict : dict, optional
@@ -1199,13 +1309,13 @@ def combine_images(
# Expected to have structure: {run_number: {MasterDictKeys.list_spectra: list, ...}}
# first_ob_run_number : str or int, optional
# The run number key to access the time spectra from the first open beam run
-
+
# Returns
# -------
# numpy.ndarray
# Normalized sample data array with same shape as input _sample_data.
# Images corresponding to zero shutter counts are set to NaN.
-
+
# Notes
# -----
# The normalization process involves:
@@ -1214,10 +1324,10 @@ def combine_images(
# 3. Mapping shutter count values to each image based on time spectra
# 4. Dividing each sample image by its corresponding shutter count
# 5. Setting images with zero shutter counts to NaN
-
+
# This function is typically used in neutron imaging data processing where
# shutter counts represent the exposure time or beam intensity for each image.
-
+
# Examples
# --------
# >>> normalized_data = normalization_by_shutter_counts(
@@ -1228,7 +1338,7 @@ def combine_images(
# ... first_ob_run_number="Run_12340"
# ... )
# """
-
+
# list_shutter_values_for_each_image = produce_list_shutter_for_each_image(
# list_time_spectra=ob_master_dict[first_ob_run_number][MasterDictKeys.list_spectra],
# list_shutter_counts=sample_master_dict[_sample_run_number][MasterDictKeys.shutter_counts],
@@ -1245,40 +1355,63 @@ def combine_images(
# return _sample_data
-def perform_normalization(_sample_data=None, ob_data_combined=None, dc_data_combined=None):
-
+def perform_normalization(
+ _sample_data=None, ob_data_combined=None, dc_data_combined=None
+):
# working on each image (TOF) independently
if dc_data_combined is not None:
- logging.info(f"normalization with DC subtraction")
- _normalized_data = np.divide(np.subtract(_sample_data, dc_data_combined), np.subtract(ob_data_combined, dc_data_combined),
- out=np.zeros_like(_sample_data),
- where=(ob_data_combined - dc_data_combined)!=0)
+ logging.info("normalization with DC subtraction")
+ _normalized_data = np.divide(
+ np.subtract(_sample_data, dc_data_combined),
+ np.subtract(ob_data_combined, dc_data_combined),
+ out=np.zeros_like(_sample_data),
+ where=(ob_data_combined - dc_data_combined) != 0,
+ )
else:
- logging.info(f"normalization without DC subtraction")
- _normalized_data = np.divide(_sample_data, ob_data_combined,
- out=np.zeros_like(_sample_data),
- where=ob_data_combined!=0)
+ logging.info("normalization without DC subtraction")
+ _normalized_data = np.divide(
+ _sample_data,
+ ob_data_combined,
+ out=np.zeros_like(_sample_data),
+ where=ob_data_combined != 0,
+ )
_normalized_data[ob_data_combined == 0] = 0
-
+
# Integration of sample, dc and ob and then division
if dc_data_combined is not None:
- logging.info(f"normalization with DC subtraction - integrated")
- _integrated_normalized_data = np.divide(np.subtract(np.sum(_sample_data, axis=0), np.sum(dc_data_combined, axis=0)),
- np.subtract(np.sum(ob_data_combined, axis=0), np.sum(dc_data_combined, axis=0)),
- out=np.zeros_like(np.sum(_sample_data, axis=0)),
- where=(np.sum(ob_data_combined, axis=0) - np.sum(dc_data_combined, axis=0))!=0)
+ logging.info("normalization with DC subtraction - integrated")
+ _integrated_normalized_data = np.divide(
+ np.subtract(np.sum(_sample_data, axis=0), np.sum(dc_data_combined, axis=0)),
+ np.subtract(
+ np.sum(ob_data_combined, axis=0), np.sum(dc_data_combined, axis=0)
+ ),
+ out=np.zeros_like(np.sum(_sample_data, axis=0)),
+ where=(np.sum(ob_data_combined, axis=0) - np.sum(dc_data_combined, axis=0))
+ != 0,
+ )
else:
- logging.info(f"normalization without DC subtraction - integrated")
- _integrated_normalized_data = np.divide(np.sum(_sample_data, axis=0), np.sum(ob_data_combined, axis=0),
- out=np.zeros_like(np.sum(_sample_data, axis=0)),
- where=np.sum(ob_data_combined, axis=0)!=0)
+ logging.info("normalization without DC subtraction - integrated")
+ _integrated_normalized_data = np.divide(
+ np.sum(_sample_data, axis=0),
+ np.sum(ob_data_combined, axis=0),
+ out=np.zeros_like(np.sum(_sample_data, axis=0)),
+ where=np.sum(ob_data_combined, axis=0) != 0,
+ )
- return {'normalized_data': _normalized_data,
- 'integrated_normalized_data': _integrated_normalized_data}
+ return {
+ "normalized_data": _normalized_data,
+ "integrated_normalized_data": _integrated_normalized_data,
+ }
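
perform_normalization relies on np.divide with out= and where= so that pixels where the open beam equals the dark current stay at zero instead of becoming inf or NaN. A toy example of the (sample - dc) / (ob - dc) step with that guard; all pixel values are invented.

import numpy as np

sample = np.array([[4.0, 6.0], [8.0, 2.0]], dtype=np.float32)
ob     = np.array([[8.0, 6.0], [2.0, 2.0]], dtype=np.float32)
dc     = np.array([[2.0, 2.0], [2.0, 2.0]], dtype=np.float32)

denominator = ob - dc
normalized = np.divide(
    sample - dc,
    denominator,
    out=np.zeros_like(sample),    # guarded pixels keep this value (0)
    where=denominator != 0,
)
print(normalized)                 # [[0.333..., 1.0], [0.0, 0.0]] -- bottom row is guarded
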
-def perform_spectrum_normalization(roi=None, sample_data=None, ob_data_combined_for_spectrum=None, dc_data_combined=None, dc_data_combined_for_spectrum=None):
+def perform_spectrum_normalization(
+ roi=None,
+ sample_data=None,
+ ob_data_combined_for_spectrum=None,
+ dc_data_combined=None,
+ dc_data_combined_for_spectrum=None,
+):
_spectrum_normalized_data = None
if roi is not None:
x0 = roi.left
@@ -1286,36 +1419,50 @@ def perform_spectrum_normalization(roi=None, sample_data=None, ob_data_combined_
width = roi.width
height = roi.height
- _sample_data_combined_for_spectrum = [np.sum(np.sum(_data[y0: y0+height, x0: x0+width], axis=0), axis=0) for _data in sample_data]
+ _sample_data_combined_for_spectrum = [
+ np.sum(np.sum(_data[y0 : y0 + height, x0 : x0 + width], axis=0), axis=0)
+ for _data in sample_data
+ ]
if dc_data_combined is not None:
- _spectrum_normalized_data = np.divide(np.subtract(_sample_data_combined_for_spectrum, dc_data_combined_for_spectrum),
- np.subtract(ob_data_combined_for_spectrum, dc_data_combined_for_spectrum),
- out=np.zeros_like(_sample_data_combined_for_spectrum),
- where=(ob_data_combined_for_spectrum - dc_data_combined_for_spectrum)!=0)
+ _spectrum_normalized_data = np.divide(
+ np.subtract(
+ _sample_data_combined_for_spectrum, dc_data_combined_for_spectrum
+ ),
+ np.subtract(
+ ob_data_combined_for_spectrum, dc_data_combined_for_spectrum
+ ),
+ out=np.zeros_like(_sample_data_combined_for_spectrum),
+ where=(ob_data_combined_for_spectrum - dc_data_combined_for_spectrum)
+ != 0,
+ )
else:
- _spectrum_normalized_data = np.divide(_sample_data_combined_for_spectrum, ob_data_combined_for_spectrum,
- out=np.zeros_like(_sample_data_combined_for_spectrum),
- where=ob_data_combined_for_spectrum!=0)
+ _spectrum_normalized_data = np.divide(
+ _sample_data_combined_for_spectrum,
+ ob_data_combined_for_spectrum,
+ out=np.zeros_like(_sample_data_combined_for_spectrum),
+ where=ob_data_combined_for_spectrum != 0,
+ )
logging.info(f"{np.shape(_spectrum_normalized_data) = }")
return _spectrum_normalized_data
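
perform_spectrum_normalization reduces each TOF image to a single ROI sum before dividing by the open-beam spectrum. A minimal sketch of that reduction; the SimpleNamespace ROI only mimics the left/top/width/height attributes the function reads, and every value is illustrative.

import numpy as np
from types import SimpleNamespace

roi = SimpleNamespace(left=1, top=1, width=2, height=2)
sample_stack = np.arange(2 * 4 * 4, dtype=np.float32).reshape(2, 4, 4)
ob_spectrum = np.array([10.0, 20.0], dtype=np.float32)     # one value per TOF bin

x0, y0, w, h = roi.left, roi.top, roi.width, roi.height
sample_spectrum = np.array([img[y0 : y0 + h, x0 : x0 + w].sum() for img in sample_stack])

normalized_spectrum = np.divide(
    sample_spectrum,
    ob_spectrum,
    out=np.zeros_like(sample_spectrum),
    where=ob_spectrum != 0,
)
print(normalized_spectrum)                                  # [3.0, 4.7]
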
-def export_normalized_data(ob_master_dict=None,
- sample_master_dict=None,
- _sample_run_number=None,
- normalized_data=None,
- integrated_normalized_data=None,
- _spectrum_normalized_data=None,
- lambda_array=None,
- energy_array=None,
- output_folder="./",
- export_corrected_stack_of_normalized_data=False,
- export_corrected_integrated_normalized_data=False,
- roi=None,
- spectra_array=None,
- spectra_file=None,):
-
+def export_normalized_data(
+ ob_master_dict=None,
+ sample_master_dict=None,
+ _sample_run_number=None,
+ normalized_data=None,
+ integrated_normalized_data=None,
+ _spectrum_normalized_data=None,
+ lambda_array=None,
+ energy_array=None,
+ output_folder="./",
+ export_corrected_stack_of_normalized_data=False,
+ export_corrected_integrated_normalized_data=False,
+ roi=None,
+ spectra_array=None,
+ spectra_file=None,
+):
logging.info("Exporting normalized data ...")
list_ob_runs = list(ob_master_dict.keys())
@@ -1327,38 +1474,52 @@ def export_normalized_data(ob_master_dict=None,
os.makedirs(full_output_folder, exist_ok=True)
if roi is not None:
- logging.info(f"\t -> exporting the spectrum normalization")
+ logging.info("\t -> exporting the spectrum normalization")
logging.info(f"{roi =}")
x0 = roi.left
y0 = roi.top
width = roi.width
height = roi.height
- full_file_name = os.path.join(full_output_folder, "spectrum_normalization_profile.txt")
- pd_dataframe = pd.DataFrame({
- "file_index": np.arange(len(lambda_array)),
- "lambda (Angstroms)": lambda_array,
- "energy (eV)": energy_array,
- "spectrum normalization": _spectrum_normalized_data
- })
- pd_dataframe.attrs['roi [left, top, width, height]'] = f"{x0}, {y0}, {width}, {height}"
-
- with open(full_file_name, 'w') as f:
+ full_file_name = os.path.join(
+ full_output_folder, "spectrum_normalization_profile.txt"
+ )
+ pd_dataframe = pd.DataFrame(
+ {
+ "file_index": np.arange(len(lambda_array)),
+ "lambda (Angstroms)": lambda_array,
+ "energy (eV)": energy_array,
+ "spectrum normalization": _spectrum_normalized_data,
+ }
+ )
+ pd_dataframe.attrs["roi [left, top, width, height]"] = (
+ f"{x0}, {y0}, {width}, {height}"
+ )
+
+ with open(full_file_name, "w") as f:
# Write metadata as comments
for key, value in pd_dataframe.attrs.items():
f.write(f"# {key}: {value}\n")
-
+
# Write the DataFrame
- pd_dataframe.to_csv(f, index=False)
-
+ pd_dataframe.to_csv(f, index=False)
+
pd_dataframe.to_csv(full_file_name, index=False, sep=",")
- logging.info(f"\t -> Exporting the spectrum normalization profile to {full_file_name}")
+ logging.info(
+ f"\t -> Exporting the spectrum normalization profile to {full_file_name}"
+ )
if export_corrected_integrated_normalized_data:
# making up the integrated sample data
full_file_name = os.path.join(full_output_folder, "normalized_integrated.tif")
- logging.info(f"\t -> Exporting integrated normalized data to {full_file_name} ...")
- make_tiff(data=integrated_normalized_data[_sample_run_number], filename=full_file_name)
- logging.info(f"\t -> Exporting integrated normalized data to {full_file_name} is done!")
+ logging.info(
+ f"\t -> Exporting integrated normalized data to {full_file_name} ..."
+ )
+ make_tiff(
+ data=integrated_normalized_data[_sample_run_number], filename=full_file_name
+ )
+ logging.info(
+ f"\t -> Exporting integrated normalized data to {full_file_name} is done!"
+ )
if export_corrected_stack_of_normalized_data:
output_stack_folder = os.path.join(full_output_folder, "stack")
@@ -1368,15 +1529,18 @@ def export_normalized_data(ob_master_dict=None,
for _index, _data in enumerate(normalized_data[_sample_run_number]):
_output_file = os.path.join(output_stack_folder, f"image{_index:04d}.tif")
make_tiff(data=_data, filename=_output_file)
- logging.info(f"\t -> Exporting normalized data to {output_stack_folder} is done!")
+ logging.info(
+ f"\t -> Exporting normalized data to {output_stack_folder} is done!"
+ )
print(f"Exported normalized tif images are in: {output_stack_folder}!")
-
- # spectra_file = sample_master_dict[_sample_run_number][MasterDictKeys.spectra_file_name]
- export_spectra_file(spectra_array=spectra_array,
- spectra_file=spectra_file,
- output_stack_folder=output_stack_folder,
- normalized_data=normalized_data[_sample_run_number])
+ # spectra_file = sample_master_dict[_sample_run_number][MasterDictKeys.spectra_file_name]
+ export_spectra_file(
+ spectra_array=spectra_array,
+ spectra_file=spectra_file,
+ output_stack_folder=output_stack_folder,
+ normalized_data=normalized_data[_sample_run_number],
+ )
# create x-axis file
create_x_axis_file(
@@ -1386,49 +1550,58 @@ def export_normalized_data(ob_master_dict=None,
)
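
The spectrum-profile export in export_normalized_data writes "# key: value" metadata lines followed by the CSV body. A small round-trip sketch of that layout (column names and the ROI string are illustrative); readers can skip the metadata with pandas' comment option.

import io
import numpy as np
import pandas as pd

df = pd.DataFrame(
    {
        "file_index": np.arange(3),
        "lambda (Angstroms)": [1.0, 2.0, 3.0],
        "spectrum normalization": [0.9, 0.95, 1.0],
    }
)
df.attrs["roi [left, top, width, height]"] = "10, 20, 50, 40"

buffer = io.StringIO()
for key, value in df.attrs.items():
    buffer.write(f"# {key}: {value}\n")          # metadata lines start with '#'
df.to_csv(buffer, index=False)                   # CSV body follows the metadata

round_trip = pd.read_csv(io.StringIO(buffer.getvalue()), comment="#")
print(round_trip.shape)                          # (3, 3)
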
-def manually_create_and_export_spectra_file(spectra_array=None, output_folder=None, normalized_data=None):
- # manually create the file for spectra
- spectra_file_name = os.path.join(output_folder, f"manually_created_{SPECTRA_FILE_PREFIX}")
- _full_counts_array = np.empty_like(spectra_array)
- for _index, _data in enumerate(normalized_data):
- _full_counts_array[_index] = np.nansum(_data)
- pd_spectra = pd.DataFrame({
- "shutter_time": spectra_array,
- "counts": _full_counts_array
- })
- pd_spectra.to_csv(spectra_file_name, index=False, sep=",")
- logging.info(f"\t -> Exporting manually created spectra file to {spectra_file_name} is done!")
-
-
-def export_spectra_file(spectra_array=None,
- spectra_file=None,
- output_stack_folder=None,
- normalized_data=None):
-
+def manually_create_and_export_spectra_file(
+ spectra_array=None, output_folder=None, normalized_data=None
+):
+ # manually create the file for spectra
+ spectra_file_name = os.path.join(
+ output_folder, f"manually_created_{SPECTRA_FILE_PREFIX}"
+ )
+ _full_counts_array = np.empty_like(spectra_array)
+ for _index, _data in enumerate(normalized_data):
+ _full_counts_array[_index] = np.nansum(_data)
+ pd_spectra = pd.DataFrame(
+ {"shutter_time": spectra_array, "counts": _full_counts_array}
+ )
+ pd_spectra.to_csv(spectra_file_name, index=False, sep=",")
+ logging.info(
+ f"\t -> Exporting manually created spectra file to {spectra_file_name} is done!"
+ )
+
+
+def export_spectra_file(
+ spectra_array=None,
+ spectra_file=None,
+ output_stack_folder=None,
+ normalized_data=None,
+):
if spectra_array is not None:
- manually_create_and_export_spectra_file(spectra_array=spectra_array,
- output_folder=output_stack_folder,
- normalized_data=normalized_data)
+ manually_create_and_export_spectra_file(
+ spectra_array=spectra_array,
+ output_folder=output_stack_folder,
+ normalized_data=normalized_data,
+ )
else:
-
if spectra_file and Path(spectra_file).exists():
- logging.info(f"Exported time spectra file {spectra_file} to {output_stack_folder}!")
+ logging.info(
+ f"Exported time spectra file {spectra_file} to {output_stack_folder}!"
+ )
shutil.copy(spectra_file, output_stack_folder)
-def export_corrected_normalized_data(sample_master_dict=None,
- ob_master_dict=None,
- combined_normalized_data=None,
- integrated_normalized_data=None,
- export_corrected_integrated_combined_normalized_data=False,
- export_corrected_stack_of_combined_normalized_data=False,
- lambda_array=None,
- energy_array=None,
- output_folder="./",
- spectra_array=None
+def export_corrected_normalized_data(
+ sample_master_dict=None,
+ ob_master_dict=None,
+ combined_normalized_data=None,
+ integrated_normalized_data=None,
+ export_corrected_integrated_combined_normalized_data=False,
+ export_corrected_stack_of_combined_normalized_data=False,
+ lambda_array=None,
+ energy_array=None,
+ output_folder="./",
+ spectra_array=None,
):
-
list_sample_runs = list(sample_master_dict.keys())
_sample_str = ""
for _run in list_sample_runs:
@@ -1449,9 +1622,13 @@ def export_corrected_normalized_data(sample_master_dict=None,
# making up the integrated sample data
data_integrated = np.nanmean(combined_normalized_data, axis=0)
full_file_name = os.path.join(full_output_folder, "integrated.tif")
- logging.info(f"\t -> Exporting integrated combined normalized data to {full_file_name} ...")
+ logging.info(
+ f"\t -> Exporting integrated combined normalized data to {full_file_name} ..."
+ )
make_tiff(data=data_integrated, filename=full_file_name)
- logging.info(f"\t -> Exporting integrated combined normalized data to {full_file_name} is done!")
+ logging.info(
+ f"\t -> Exporting integrated combined normalized data to {full_file_name} is done!"
+ )
if export_corrected_stack_of_combined_normalized_data:
output_stack_folder = os.path.join(full_output_folder, "stack")
@@ -1461,21 +1638,28 @@ def export_corrected_normalized_data(sample_master_dict=None,
for _index, _data in enumerate(combined_normalized_data):
_output_file = os.path.join(output_stack_folder, f"image{_index:04d}.tif")
make_tiff(data=_data, filename=_output_file)
- logging.info(f"\t -> Exporting combined normalized data to {output_stack_folder} is done!")
+ logging.info(
+ f"\t -> Exporting combined normalized data to {output_stack_folder} is done!"
+ )
print(f"Exported combined normalized tif images are in: {output_stack_folder}!")
-
- export_spectra_file(spectra_array=spectra_array,
- spectra_file=spectra_file,
- output_stack_folder=output_stack_folder,
- normalized_data=combined_normalized_data)
+ export_spectra_file(
+ spectra_array=spectra_array,
+ spectra_file=spectra_file,
+ output_stack_folder=output_stack_folder,
+ normalized_data=combined_normalized_data,
+ )
# copy one of the spectra file to the output folder, or the manually defined one
- spectra_file = sample_master_dict[list_sample_runs[0]][MasterDictKeys.spectra_file_name]
- export_spectra_file(spectra_array=spectra_array,
- spectra_file=spectra_file,
- output_stack_folder=output_stack_folder,
- normalized_data=combined_normalized_data)
+ spectra_file = sample_master_dict[list_sample_runs[0]][
+ MasterDictKeys.spectra_file_name
+ ]
+ export_spectra_file(
+ spectra_array=spectra_array,
+ spectra_file=spectra_file,
+ output_stack_folder=output_stack_folder,
+ normalized_data=combined_normalized_data,
+ )
# create x-axis file
create_x_axis_file(
@@ -1486,57 +1670,69 @@ def export_corrected_normalized_data(sample_master_dict=None,
def read_container_roi_file(container_roi_file=None) -> tuple[int, int, int, int]:
- master_dict = load_json(container_roi_file)
- list_container_values = master_dict['list_container_values']
- return list_container_values
-
-
-def save_container_roi_file(output_folder:str,
- sample_run_number: str,
- container_roi: Roi,
- list_container_values: list,
- integrated_image: np.ndarray):
+ master_dict = load_json(container_roi_file)
+ list_container_values = master_dict["list_container_values"]
+ return list_container_values
+
+
+def save_container_roi_file(
+ output_folder: str,
+ sample_run_number: str,
+ container_roi: Roi,
+ list_container_values: list,
+ integrated_image: np.ndarray,
+):
# container_roi_file = os.path.join(output_folder, f"container_roi_of_run_{sample_run_number}.tiff")
- container_roi_file = os.path.join(output_folder, f"container_roi_of_run_{sample_run_number}.json")
-
+ container_roi_file = os.path.join(
+ output_folder, f"container_roi_of_run_{sample_run_number}.json"
+ )
+
logging.info(f"Saving container roi file to {container_roi_file}")
# scitiff_dict = {'container_roi': container_roi,
# 'list_container_values': list_container_values,
# }
-
+
integrated_image = integrated_image.astype(float)
list_container_values = [float(_value) for _value in list_container_values]
- master_dict = {'integrated_image': integrated_image.tolist(),
- 'container_roi': {'left': float(container_roi.left),
- 'top': float(container_roi.top),
- 'width': float(container_roi.width),
- 'height': float(container_roi.height)},
- 'list_container_values': list_container_values}
-
+ master_dict = {
+ "integrated_image": integrated_image.tolist(),
+ "container_roi": {
+ "left": float(container_roi.left),
+ "top": float(container_roi.top),
+ "width": float(container_roi.width),
+ "height": float(container_roi.height),
+ },
+ "list_container_values": list_container_values,
+ }
+
save_json(container_roi_file, master_dict)
return container_roi_file
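
save_container_roi_file converts everything to plain Python types before save_json because numpy arrays and numpy scalars are not JSON serializable. A quick illustration of that conversion; the keys mirror the ones above and the values are invented.

import json
import numpy as np

image = np.arange(4, dtype=np.float32).reshape(2, 2)
values = [np.float32(1.5), np.float32(2.5)]

payload = {
    "integrated_image": image.tolist(),                    # nested lists of Python floats
    "list_container_values": [float(v) for v in values],   # numpy scalars -> float
}
print(json.dumps(payload))
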
-
-
-def normalize_by_container_roi(sample_data: np.ndarray,
- container_roi: Roi,
- container_roi_file: str,
- output_folder: str,
- sample_run_number: str) -> np.ndarray:
+
+
+def normalize_by_container_roi(
+ sample_data: np.ndarray,
+ container_roi: Roi,
+ container_roi_file: str,
+ output_folder: str,
+ sample_run_number: str,
+) -> np.ndarray:
"""normalize sample data subtracting by container roi"""
- logging.info(f"in normalize_by_container_roi:")
+ logging.info("in normalize_by_container_roi:")
if container_roi_file is not None:
logging.info(f"\t {container_roi_file = }")
- _container_value_array: float = read_container_roi_file(container_roi_file=container_roi_file)
+ _container_value_array: float = read_container_roi_file(
+ container_roi_file=container_roi_file
+ )
logging.info(f"\t{_container_value_array =}")
-
+
_normalized_sample = np.empty_like(sample_data)
for i, _sample in enumerate(sample_data):
_container_value = _container_value_array[i]
_log_sample = -np.log(_sample)
_log_container_value = -np.log(_container_value)
_log_normalized_sample = _log_sample - _log_container_value
- _normalized_sample[i] = np.exp(- _log_normalized_sample)
+ _normalized_sample[i] = np.exp(-_log_normalized_sample)
else:
logging.info(f"\t {container_roi = }")
@@ -1544,38 +1740,44 @@ def normalize_by_container_roi(sample_data: np.ndarray,
y0: int = container_roi.top
width: int = container_roi.width
height: int = container_roi.height
-
+
_normalized_sample = np.empty_like(sample_data)
list_container_values = []
for i, _sample in enumerate(sample_data):
- _container_value = np.mean(np.mean(_sample[y0:y0 + height, x0:x0 + width], axis=0), axis=0)
+ _container_value = np.mean(
+ np.mean(_sample[y0 : y0 + height, x0 : x0 + width], axis=0), axis=0
+ )
list_container_values.append(_container_value)
_log_sample = -np.log(_sample)
_log_container_value = -np.log(_container_value)
_log_normalized_sample = _log_sample - _log_container_value
- _normalized_sample[i] = np.exp(- _log_normalized_sample)
-
+ _normalized_sample[i] = np.exp(-_log_normalized_sample)
+
# save the container roi file
- container_roi_file = save_container_roi_file(output_folder=output_folder,
- sample_run_number=sample_run_number,
- container_roi=container_roi,
- list_container_values=list_container_values,
- integrated_image=np.sum(sample_data, axis=0))
-
+ container_roi_file = save_container_roi_file(
+ output_folder=output_folder,
+ sample_run_number=sample_run_number,
+ container_roi=container_roi,
+ list_container_values=list_container_values,
+ integrated_image=np.sum(sample_data, axis=0),
+ )
+
return _normalized_sample, container_roi_file
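
normalize_by_container_roi goes through attenuation space (-log), subtracts the container term, and exponentiates back; algebraically this is the same as dividing each image by its per-image container value. A tiny check of that equivalence with invented transmission values.

import numpy as np

sample_image = np.array([[0.8, 0.6], [0.4, 0.9]])
container_value = 0.8                                # mean transmission inside the container ROI

via_logs = np.exp(-(-np.log(sample_image) - (-np.log(container_value))))
via_division = sample_image / container_value

print(np.allclose(via_logs, via_division))           # True
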
def logging_statistics_of_data(data=None, data_type=DataType.sample):
- data_shape = data.shape
- nbr_pixels = data_shape[1] * data_shape[2]
- logging.info(f" **** Statistics of {data_type} data *****")
- number_of_zeros = np.sum(data == 0)
- logging.info(f"\t {data_type} data shape: {data_shape}")
- logging.info(f"\t data type of _sample_data: {data.dtype}")
- logging.info(f"\t Number of zeros in {data_type} data: {number_of_zeros}")
- logging.info(f"\t Number of nan in {data_type} data: {np.sum(np.isnan(data))}")
- logging.info(f"\t Percentage of zeros in {data_type} data: {number_of_zeros / (data_shape[0] * nbr_pixels) * 100:.2f}%")
- logging.info(f"\t Mean of {data_type} data: {np.mean(data)}")
- logging.info(f"\t maximum of {data_type} data: {np.max(data)}")
- logging.info(f"\t minimum of {data_type} data: {np.min(data)}")
- logging.info("**********************************")
\ No newline at end of file
+ data_shape = data.shape
+ nbr_pixels = data_shape[1] * data_shape[2]
+ logging.info(f" **** Statistics of {data_type} data *****")
+ number_of_zeros = np.sum(data == 0)
+ logging.info(f"\t {data_type} data shape: {data_shape}")
+ logging.info(f"\t data type of _sample_data: {data.dtype}")
+ logging.info(f"\t Number of zeros in {data_type} data: {number_of_zeros}")
+ logging.info(f"\t Number of nan in {data_type} data: {np.sum(np.isnan(data))}")
+ logging.info(
+ f"\t Percentage of zeros in {data_type} data: {number_of_zeros / (data_shape[0] * nbr_pixels) * 100:.2f}%"
+ )
+ logging.info(f"\t Mean of {data_type} data: {np.mean(data)}")
+ logging.info(f"\t maximum of {data_type} data: {np.max(data)}")
+ logging.info(f"\t minimum of {data_type} data: {np.min(data)}")
+ logging.info("**********************************")
diff --git a/notebooks/__code/oncat.py b/notebooks/__code/oncat.py
index ed55b60a..08976601 100755
--- a/notebooks/__code/oncat.py
+++ b/notebooks/__code/oncat.py
@@ -25,7 +25,10 @@ def __init__(self):
def authentication(self):
try:
- self.oncat.login(self.username, str(getpass.getpass(f"Enter Password for {self.username}:")))
+ self.oncat.login(
+ self.username,
+ str(getpass.getpass(f"Enter Password for {self.username}:")),
+ )
except:
self.oncat = None
@@ -36,7 +39,9 @@ class GetEverything:
def __init__(self, instrument="CG1D", facility="HFIR", run="", oncat=None):
run = self.__remove_leading_backslash(run)
- self.datafiles = oncat.Datafile.retrieve(run, facility=facility, instrument=instrument)
+ self.datafiles = oncat.Datafile.retrieve(
+ run, facility=facility, instrument=instrument
+ )
def __remove_leading_backslash(self, run):
return run[1:]
@@ -44,15 +49,25 @@ def __remove_leading_backslash(self, run):
class GetProjection:
def __init__(
- self, instrument="CG1D", facility="HFIR", list_files=[], oncat=None, projection=[], with_progressbar=False
+ self,
+ instrument="CG1D",
+ facility="HFIR",
+ list_files=[],
+ oncat=None,
+ projection=[],
+ with_progressbar=False,
):
projection.append("ingested")
if with_progressbar:
box1 = widgets.HBox(
[
- widgets.Label("Retrieving Metadata ...", layout=widgets.Layout(width="30%")),
- widgets.IntProgress(max=len(list_files), layout=widgets.Layout(width="70%")),
+ widgets.Label(
+ "Retrieving Metadata ...", layout=widgets.Layout(width="30%")
+ ),
+ widgets.IntProgress(
+ max=len(list_files), layout=widgets.Layout(width="70%")
+ ),
]
)
display(box1)
diff --git a/notebooks/__code/outliers_filtering/algorithm.py b/notebooks/__code/outliers_filtering/algorithm.py
index a8c47e50..03fa3cea 100755
--- a/notebooks/__code/outliers_filtering/algorithm.py
+++ b/notebooks/__code/outliers_filtering/algorithm.py
@@ -20,7 +20,9 @@ def __init__(self, parent=None, data=None):
self.parent = parent
self.data = copy.deepcopy(data)
self.processed_data = copy.deepcopy(data)
- self.total_number_of_pixels = self.parent.image_size[0] * self.parent.image_size[1]
+ self.total_number_of_pixels = (
+ self.parent.image_size[0] * self.parent.image_size[1]
+ )
if self.parent.ui.fix_dead_pixels_checkBox.isChecked():
self.is_dead_pixel_activated = True
@@ -49,7 +51,9 @@ def dead_pixels(self):
if mask:
nbr_pixels = len(mask[0])
self.dead_pixel_stats["number"] = nbr_pixels
- self.dead_pixel_stats["percentage"] = (nbr_pixels / self.total_number_of_pixels) * 100
+ self.dead_pixel_stats["percentage"] = (
+ nbr_pixels / self.total_number_of_pixels
+ ) * 100
self.data[mask] = self.median_data[mask]
def high_counts(self):
@@ -58,7 +62,9 @@ def high_counts(self):
if where_above_threshold:
nbr_pixels = len(where_above_threshold[0])
self.high_counts_stats["number"] = nbr_pixels
- self.high_counts_stats["percentage"] = (nbr_pixels / self.total_number_of_pixels) * 100
+ self.high_counts_stats["percentage"] = (
+ nbr_pixels / self.total_number_of_pixels
+ ) * 100
self.data[where_above_threshold] = self.median_data[where_above_threshold]
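
Both dead_pixels and high_counts follow the same repair pattern: build a mask of bad pixels and copy in the corresponding pixels of a median-filtered image. A standalone sketch of that pattern; the 3x3 kernel, the 10x-median threshold and the synthetic image are illustrative, not the tool's actual settings (scipy is assumed to be available).

import numpy as np
from scipy.ndimage import median_filter

rng = np.random.default_rng(3)
image = rng.normal(100.0, 5.0, size=(64, 64)).astype(np.float32)
image[10, 10] = 0.0        # dead pixel
image[20, 20] = 10000.0    # hot pixel

median_image = median_filter(image, size=3)
mask = (image == 0) | (image > 10 * np.median(image))
repaired = image.copy()
repaired[mask] = median_image[mask]               # only flagged pixels are touched

print("pixels repaired:", int(mask.sum()))        # 2
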
def get_dead_pixels_stats(self):
diff --git a/notebooks/__code/outliers_filtering/display.py b/notebooks/__code/outliers_filtering/display.py
index baeb291f..f8bc0b20 100755
--- a/notebooks/__code/outliers_filtering/display.py
+++ b/notebooks/__code/outliers_filtering/display.py
@@ -25,14 +25,20 @@ def raw_image(self, data):
self.parent.live_raw_image = _image
if not first_update:
- _histo_widget.setLevels(self.parent.raw_histogram_level[0], self.parent.raw_histogram_level[1])
+ _histo_widget.setLevels(
+ self.parent.raw_histogram_level[0], self.parent.raw_histogram_level[1]
+ )
# histogram
self.parent.ui.raw_histogram_plot.clear()
min = 0
max = np.max(_image)
- y, x = np.histogram(_image, bins=np.linspace(min, max + 1, self.parent.nbr_histo_bins))
- self.parent.ui.raw_histogram_plot.plot(x, y, stepMode=True, fillLevel=0, brush=(0, 0, 255, 150))
+ y, x = np.histogram(
+ _image, bins=np.linspace(min, max + 1, self.parent.nbr_histo_bins)
+ )
+ self.parent.ui.raw_histogram_plot.plot(
+ x, y, stepMode=True, fillLevel=0, brush=(0, 0, 255, 150)
+ )
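
One detail worth keeping in mind when reading the histogram calls: np.histogram returns one more bin edge than counts, which is what pyqtgraph's stepMode=True expects. A quick standalone check; the image and bin count are arbitrary.

import numpy as np

image = np.random.default_rng(0).integers(0, 100, size=(64, 64))
y, x = np.histogram(image, bins=np.linspace(0, image.max() + 1, 50))
print(len(x), len(y))      # 50 49 -> x (edges) is one longer than y (counts)
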
def filtered_image(self, data):
_view = self.parent.ui.filtered_image_view.getView()
@@ -54,14 +60,21 @@ def filtered_image(self, data):
self.parent.live_filtered_image = _image
if not first_update:
- _histo_widget.setLevels(self.parent.filtered_histogram_level[0], self.parent.filtered_histogram_level[1])
+ _histo_widget.setLevels(
+ self.parent.filtered_histogram_level[0],
+ self.parent.filtered_histogram_level[1],
+ )
# histogram
self.parent.ui.filtered_histogram_plot.clear()
min = 0
max = np.max(_image)
- y, x = np.histogram(_image, bins=np.linspace(min, max + 1, self.parent.nbr_histo_bins))
- self.parent.ui.filtered_histogram_plot.plot(x, y, stepMode=True, fillLevel=0, brush=(0, 0, 255, 150))
+ y, x = np.histogram(
+ _image, bins=np.linspace(min, max + 1, self.parent.nbr_histo_bins)
+ )
+ self.parent.ui.filtered_histogram_plot.plot(
+ x, y, stepMode=True, fillLevel=0, brush=(0, 0, 255, 150)
+ )
# re-attaching the x and y axis
self.parent.ui.raw_image_view.view.getViewBox().setYLink("filtered_image")
diff --git a/notebooks/__code/outliers_filtering/event_handler.py b/notebooks/__code/outliers_filtering/event_handler.py
index a09d8acf..a2553cb6 100755
--- a/notebooks/__code/outliers_filtering/event_handler.py
+++ b/notebooks/__code/outliers_filtering/event_handler.py
@@ -11,7 +11,9 @@ def __init__(self, parent=None):
self.parent = parent
def algorithm_changed(self):
- high_intensity_status = self.parent.ui.fix_high_intensity_counts_checkBox.isChecked()
+ high_intensity_status = (
+ self.parent.ui.fix_high_intensity_counts_checkBox.isChecked()
+ )
self.parent.ui.median_thresholding_frame.setEnabled(high_intensity_status)
self.reset_table_infos()
self.table_selection_changed()
@@ -28,13 +30,18 @@ def table_selection_changed(self):
row_selected = o_table.get_row_selected()
short_file_name = self.parent.list_short_file_name[row_selected]
if self.parent.data.get(short_file_name, None) is None:
- self.parent.data[short_file_name] = {"raw": self.load_raw_data(row=row_selected), "filtered": None}
+ self.parent.data[short_file_name] = {
+ "raw": self.load_raw_data(row=row_selected),
+ "filtered": None,
+ }
if self.parent.image_size is None:
[height, width] = np.shape(self.parent.data[short_file_name]["raw"])
self.parent.image_size = [height, width]
- filtered_data = self.calculate_filtered_data(raw_data=self.parent.data[short_file_name]["raw"])
+ filtered_data = self.calculate_filtered_data(
+ raw_data=self.parent.data[short_file_name]["raw"]
+ )
o_display = Display(parent=self.parent)
self.parent.data[short_file_name]["filtered"] = filtered_data
@@ -63,18 +70,34 @@ def calculate_filtered_data(self, raw_data=None):
high_counts_stats = o_algo.get_high_counts_stats()
o_table.insert_item(
- row=row_selected, column=1, editable=False, value=high_counts_stats["number"], format_str="{:d}"
+ row=row_selected,
+ column=1,
+ editable=False,
+ value=high_counts_stats["number"],
+ format_str="{:d}",
)
o_table.insert_item(
- row=row_selected, column=2, editable=False, value=high_counts_stats["percentage"], format_str="{:.2f}"
+ row=row_selected,
+ column=2,
+ editable=False,
+ value=high_counts_stats["percentage"],
+ format_str="{:.2f}",
)
dead_pixels_stats = o_algo.get_dead_pixels_stats()
o_table.insert_item(
- row=row_selected, column=3, editable=False, value=dead_pixels_stats["number"], format_str="{:d}"
+ row=row_selected,
+ column=3,
+ editable=False,
+ value=dead_pixels_stats["number"],
+ format_str="{:d}",
)
o_table.insert_item(
- row=row_selected, column=4, editable=False, value=dead_pixels_stats["percentage"], format_str="{:.2f}"
+ row=row_selected,
+ column=4,
+ editable=False,
+ value=dead_pixels_stats["percentage"],
+ format_str="{:.2f}",
)
return o_algo.get_processed_data()
diff --git a/notebooks/__code/outliers_filtering/export.py b/notebooks/__code/outliers_filtering/export.py
index 401147a3..468eee33 100755
--- a/notebooks/__code/outliers_filtering/export.py
+++ b/notebooks/__code/outliers_filtering/export.py
@@ -21,7 +21,9 @@ def export(self):
)
if _export_folder:
- export_folder_name = os.path.join(_export_folder, str(base_folder.name) + "_outliers_corrected")
+ export_folder_name = os.path.join(
+ _export_folder, str(base_folder.name) + "_outliers_corrected"
+ )
export_folder_name = make_or_increment_folder_name(export_folder_name)
list_file = self.parent.list_files
o_event = EventHandler(parent=self.parent)
@@ -31,7 +33,9 @@ def export(self):
self.parent.eventProgress.setVisible(True)
for _row, _file in enumerate(list_file):
o_norm = o_event.load_data_object(file_name=_file)
- o_algo = Algorithm(parent=self.parent, data=np.squeeze(o_norm.data["sample"]["data"]))
+ o_algo = Algorithm(
+ parent=self.parent, data=np.squeeze(o_norm.data["sample"]["data"])
+ )
o_algo.run()
data_corrected = o_algo.get_processed_data()
o_norm.data["sample"]["data"][0] = data_corrected
diff --git a/notebooks/__code/outliers_filtering/initialization.py b/notebooks/__code/outliers_filtering/initialization.py
index d56361da..f881e651 100755
--- a/notebooks/__code/outliers_filtering/initialization.py
+++ b/notebooks/__code/outliers_filtering/initialization.py
@@ -36,7 +36,9 @@ def pyqtgraph(self):
area.moveDock(d2, "above", d2h)
# raw image
- self.parent.ui.raw_image_view = pg.ImageView(view=pg.PlotItem(), name="raw_image")
+ self.parent.ui.raw_image_view = pg.ImageView(
+ view=pg.PlotItem(), name="raw_image"
+ )
self.parent.ui.raw_image_view.ui.roiBtn.hide()
self.parent.ui.raw_image_view.ui.menuBtn.hide()
self.parent.ui.raw_image_view.view.setAutoVisible(y=True)
@@ -58,13 +60,19 @@ def pyqtgraph(self):
d1h.addWidget(self.parent.ui.raw_histogram_plot)
# filtered image
- self.parent.ui.filtered_image_view = pg.ImageView(view=pg.PlotItem(), name="filtered_image")
+ self.parent.ui.filtered_image_view = pg.ImageView(
+ view=pg.PlotItem(), name="filtered_image"
+ )
self.parent.ui.filtered_image_view.ui.roiBtn.hide()
self.parent.ui.filtered_image_view.ui.menuBtn.hide()
self.parent.filtered_vLine = pg.InfiniteLine(angle=90, movable=False)
self.parent.filtered_hLine = pg.InfiniteLine(angle=0, movable=False)
- self.parent.ui.filtered_image_view.addItem(self.parent.filtered_vLine, ignoreBounds=True)
- self.parent.ui.filtered_image_view.addItem(self.parent.filtered_hLine, ignoreBounds=True)
+ self.parent.ui.filtered_image_view.addItem(
+ self.parent.filtered_vLine, ignoreBounds=True
+ )
+ self.parent.ui.filtered_image_view.addItem(
+ self.parent.filtered_hLine, ignoreBounds=True
+ )
self.parent.filtered_vLine.setPos([1000, 1000])
self.parent.filtered_hLine.setPos([1000, 1000])
self.parent.filtered_proxy = pg.SignalProxy(
diff --git a/notebooks/__code/outliers_filtering/main.py b/notebooks/__code/outliers_filtering/main.py
index 8f8c7ac0..dd00d645 100755
--- a/notebooks/__code/outliers_filtering/main.py
+++ b/notebooks/__code/outliers_filtering/main.py
@@ -13,7 +13,9 @@
class InterfaceHandler(FileFolderBrowser):
def __init__(self, working_dir=""):
- super(InterfaceHandler, self).__init__(working_dir=working_dir, next_function=self.display_status)
+ super(InterfaceHandler, self).__init__(
+ working_dir=working_dir, next_function=self.display_status
+ )
def get_list_of_files(self):
return self.list_images_ui.selected
@@ -23,7 +25,13 @@ def select_all_images(self):
def display_status(self, list_of_files):
nbr_images = str(len(list_of_files))
- display(HTML('You have selected ' + nbr_images + " images "))
+ display(
+ HTML(
+ 'You have selected '
+ + nbr_images
+ + " images "
+ )
+ )
class Interface(QMainWindow):
@@ -135,7 +143,9 @@ def file_index_changed(self):
def help_clicked(self):
import webbrowser
- webbrowser.open("https://neutronimaging.ornl.gov/tutorials/imaging-notebooks/outliers-filtering-tool/")
+ webbrowser.open(
+ "https://neutronimaging.ornl.gov/tutorials/imaging-notebooks/outliers-filtering-tool/"
+ )
def display_image(self, image):
self.ui.image_view.setImage(image)
diff --git a/notebooks/__code/overlay_images/event_handler.py b/notebooks/__code/overlay_images/event_handler.py
index e4a88216..2a7634bc 100755
--- a/notebooks/__code/overlay_images/event_handler.py
+++ b/notebooks/__code/overlay_images/event_handler.py
@@ -15,11 +15,13 @@ def __init__(self, parent=None):
def update_views(self, row_selected=0):
self.update_view(
- image_resolution="high_res", data=self.parent.o_norm_high_res.data["sample"]["data"][row_selected]
+ image_resolution="high_res",
+ data=self.parent.o_norm_high_res.data["sample"]["data"][row_selected],
)
self.update_view(
- image_resolution="low_res", data=self.parent.o_norm_low_res.data["sample"]["data"][row_selected]
+ image_resolution="low_res",
+ data=self.parent.o_norm_low_res.data["sample"]["data"][row_selected],
)
if self.parent.resize_and_overlay_images:
@@ -147,7 +149,9 @@ def update_profile_markers_and_target(self, with_profile=False):
line_view_binning = pg.GraphItem()
image_view.addItem(line_view_binning)
- line_view_binning.setData(pos=pos, adj=adj, pen=lines, symbol=None, pxMode=False)
+ line_view_binning.setData(
+ pos=pos, adj=adj, pen=lines, symbol=None, pxMode=False
+ )
self.parent.markers["overlay"]["1"]["target_ui"] = line_view_binning
else:
if self.parent.markers["overlay"]["1"]["target_ui"]:
@@ -158,7 +162,9 @@ def update_target(self, image_resolution="high_res", target_index="1"):
image_view = self.parent.image_view[image_resolution]
if self.parent.markers[image_resolution][target_index]["target_ui"] is not None:
- image_view.removeItem(self.parent.markers[image_resolution][target_index]["target_ui"])
+ image_view.removeItem(
+ self.parent.markers[image_resolution][target_index]["target_ui"]
+ )
width = self.parent.markers["width"]
height = self.parent.markers["height"]
@@ -193,12 +199,22 @@ def update_target(self, image_resolution="high_res", target_index="1"):
line_color = self.parent.markers["target"]["color"][target_index]
lines = np.array(
[line_color for _ in np.arange(len(pos))],
- dtype=[("red", np.ubyte), ("green", np.ubyte), ("blue", np.ubyte), ("alpha", np.ubyte), ("width", float)],
+ dtype=[
+ ("red", np.ubyte),
+ ("green", np.ubyte),
+ ("blue", np.ubyte),
+ ("alpha", np.ubyte),
+ ("width", float),
+ ],
)
line_view_binning = pg.GraphItem()
image_view.addItem(line_view_binning)
- line_view_binning.setData(pos=pos, adj=adj, pen=lines, symbol=None, pxMode=False)
- self.parent.markers[image_resolution][target_index]["target_ui"] = line_view_binning
+ line_view_binning.setData(
+ pos=pos, adj=adj, pen=lines, symbol=None, pxMode=False
+ )
+ self.parent.markers[image_resolution][target_index]["target_ui"] = (
+ line_view_binning
+ )
def get_marker_index_parameters(self, region_index="1"):
region = {
@@ -232,11 +248,15 @@ def overlay_stack_of_images_clicked(self):
scaling_factor = distance_h / distance_l
self.parent.ui.scaling_factor_lineEdit.setText(f"{scaling_factor:.2f}")
- [image_height, image_width] = np.shape(self.parent.o_norm_low_res.data["sample"]["data"][0])
+ [image_height, image_width] = np.shape(
+ self.parent.o_norm_low_res.data["sample"]["data"][0]
+ )
new_image_height = int(image_height * scaling_factor)
new_image_width = int(image_width * scaling_factor)
- self.parent.eventProgress.setMaximum(len(self.parent.o_norm_high_res.data["sample"]["data"]))
+ self.parent.eventProgress.setMaximum(
+ len(self.parent.o_norm_high_res.data["sample"]["data"])
+ )
self.parent.eventProgress.setValue(0)
self.parent.eventProgress.setVisible(True)
QtGui.QGuiApplication.processEvents()
@@ -265,8 +285,14 @@ def overlay_stack_of_images_clicked(self):
resize_hres_images = []
resize_lres_images = []
- for _row, _low_res_image in enumerate(self.parent.o_norm_low_res.data["sample"]["data"]):
- new_image = np.array(Image.fromarray(_low_res_image).resize((new_image_width, new_image_height)))
+ for _row, _low_res_image in enumerate(
+ self.parent.o_norm_low_res.data["sample"]["data"]
+ ):
+ new_image = np.array(
+ Image.fromarray(_low_res_image).resize(
+ (new_image_width, new_image_height)
+ )
+ )
resize_lres_images.append(copy.deepcopy(new_image))
high_res_image = self.get_full_high_res_image(
@@ -281,14 +307,18 @@ def overlay_stack_of_images_clicked(self):
resize_hres_images.append(high_res_image)
if _row == 0:
- self.parent.rescaled_low_res_height, self.parent.rescaled_low_res_width = np.shape(new_image)
+ (
+ self.parent.rescaled_low_res_height,
+ self.parent.rescaled_low_res_width,
+ ) = np.shape(new_image)
resize_and_overlay_modes.append("Auto")
o_table.set_item_with_str(row=_row, column=2, cell_str="Auto")
# add high resolution image
new_working_image = copy.deepcopy(new_image)
new_working_image[
- y_index_array_resized_array : y_index_array_resized_array + image_height,
+ y_index_array_resized_array : y_index_array_resized_array
+ + image_height,
x_index_array_resized_array : x_index_array_resized_array + image_width,
] = high_res_images[_row]
@@ -296,7 +326,10 @@ def overlay_stack_of_images_clicked(self):
self.parent.eventProgress.setValue(_row + 1)
QtGui.QGuiApplication.processEvents()
- self.parent.resize_hres_lres_images = {"lres": resize_lres_images, "hres": resize_hres_images}
+ self.parent.resize_hres_lres_images = {
+ "lres": resize_lres_images,
+ "hres": resize_hres_images,
+ }
self.parent.resize_and_overlay_images = resize_and_overlay_images
self.parent.resize_and_overlay_modes = resize_and_overlay_modes
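
The overlay step resizes each low-resolution frame by the scaling factor and pastes the high-resolution frame into it at the x/y offsets. A minimal sketch of that operation with PIL; the shapes, offsets and scaling factor are invented, and note that Image.resize takes (width, height).

import numpy as np
from PIL import Image

low_res = np.random.default_rng(1).random((50, 60)).astype(np.float32)
high_res = np.random.default_rng(2).random((80, 90)).astype(np.float32)

scaling_factor = 2.0
new_h = int(low_res.shape[0] * scaling_factor)
new_w = int(low_res.shape[1] * scaling_factor)
resized_low = np.array(Image.fromarray(low_res).resize((new_w, new_h)))  # (width, height)!

y_off, x_off = 10, 15
overlay = resized_low.copy()
overlay[y_off : y_off + high_res.shape[0], x_off : x_off + high_res.shape[1]] = high_res
print(overlay.shape)       # (100, 120)
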
@@ -321,19 +354,27 @@ def manual_overlay_of_selected_image_only(self):
self.parent.resize_and_overlay_modes[row_selected] = "Manual"
o_table.set_item_with_str(row=row_selected, column=2, cell_str="Manual")
- [image_height, image_width] = np.shape(self.parent.o_norm_low_res.data["sample"]["data"][0])
+ [image_height, image_width] = np.shape(
+ self.parent.o_norm_low_res.data["sample"]["data"][0]
+ )
new_image_height = int(image_height * scaling_factor)
new_image_width = int(image_width * scaling_factor)
x_index_array_resized_array = int(str(self.parent.ui.xoffset_lineEdit.text()))
y_index_array_resized_array = int(str(self.parent.ui.yoffset_lineEdit.text()))
resize_and_overlay_images = self.parent.resize_and_overlay_images
- _high_res_image = self.parent.o_norm_high_res.data["sample"]["data"][row_selected]
+ _high_res_image = self.parent.o_norm_high_res.data["sample"]["data"][
+ row_selected
+ ]
_low_res_image = self.parent.o_norm_low_res.data["sample"]["data"][row_selected]
- new_image = np.array(Image.fromarray(_low_res_image).resize((new_image_width, new_image_height)))
+ new_image = np.array(
+ Image.fromarray(_low_res_image).resize((new_image_width, new_image_height))
+ )
# self.parent.rescaled_low_res_height, self.parent.rescaled_low_res_width = np.shape(new_image)
- self.parent.resize_hres_lres_images["hres"][row_selected] = copy.deepcopy(new_image)
+ self.parent.resize_hres_lres_images["hres"][row_selected] = copy.deepcopy(
+ new_image
+ )
high_res_image = self.get_full_high_res_image(
_high_res_image,
image_height,
@@ -377,11 +418,15 @@ def get_full_high_res_image(
def manual_overlay_stack_of_images_clicked(self):
scaling_factor = float(str(self.parent.ui.scaling_factor_lineEdit.text()))
- [image_height, image_width] = np.shape(self.parent.o_norm_low_res.data["sample"]["data"][0])
+ [image_height, image_width] = np.shape(
+ self.parent.o_norm_low_res.data["sample"]["data"][0]
+ )
new_image_height = int(image_height * scaling_factor)
new_image_width = int(image_width * scaling_factor)
- self.parent.eventProgress.setMaximum(len(self.parent.o_norm_high_res.data["sample"]["data"]))
+ self.parent.eventProgress.setMaximum(
+ len(self.parent.o_norm_high_res.data["sample"]["data"])
+ )
self.parent.eventProgress.setValue(0)
self.parent.eventProgress.setVisible(True)
QtGui.QGuiApplication.processEvents()
@@ -397,12 +442,21 @@ def manual_overlay_stack_of_images_clicked(self):
resize_lres_images = []
high_res_images = self.parent.o_norm_high_res.data["sample"]["data"]
- for _row, _low_res_image in enumerate(self.parent.o_norm_low_res.data["sample"]["data"]):
- new_image = np.array(Image.fromarray(_low_res_image).resize((new_image_width, new_image_height)))
+ for _row, _low_res_image in enumerate(
+ self.parent.o_norm_low_res.data["sample"]["data"]
+ ):
+ new_image = np.array(
+ Image.fromarray(_low_res_image).resize(
+ (new_image_width, new_image_height)
+ )
+ )
resize_lres_images.append(copy.deepcopy(new_image))
if _row == 0:
- self.parent.rescaled_low_res_height, self.parent.rescaled_low_res_width = np.shape(new_image)
+ (
+ self.parent.rescaled_low_res_height,
+ self.parent.rescaled_low_res_width,
+ ) = np.shape(new_image)
resize_and_overlay_modes.append("Manual")
o_table.set_item_with_str(row=_row, column=2, cell_str="Manual")
@@ -420,7 +474,8 @@ def manual_overlay_stack_of_images_clicked(self):
# add high resolution image
new_working_image = copy.deepcopy(new_image)
new_working_image[
- y_index_array_resized_array : y_index_array_resized_array + image_height,
+ y_index_array_resized_array : y_index_array_resized_array
+ + image_height,
x_index_array_resized_array : x_index_array_resized_array + image_width,
] = high_res_images[_row]
resize_and_overlay_images.append(new_working_image)
@@ -430,7 +485,10 @@ def manual_overlay_stack_of_images_clicked(self):
self.parent.resize_and_overlay_images = resize_and_overlay_images
self.parent.resize_and_overlay_modes = resize_and_overlay_modes
- self.parent.resize_hres_lres_images = {"lres": resize_lres_images, "hres": resize_hres_images}
+ self.parent.resize_hres_lres_images = {
+ "lres": resize_lres_images,
+ "hres": resize_hres_images,
+ }
row_selected = o_table.get_row_selected()
@@ -458,15 +516,21 @@ def check_xoffset_manual_button_status(self):
status_minus_minus_button = False
elif xoffset_value < self.parent.DOUBLE_OFFSET:
status_minus_minus_button = False
- elif xoffset_value == (self.parent.rescaled_low_res_width - self.parent.high_res_image_width):
+ elif xoffset_value == (
+ self.parent.rescaled_low_res_width - self.parent.high_res_image_width
+ ):
status_plus_button = False
status_plus_plus_button = False
elif xoffset_value > (
- self.parent.rescaled_low_res_width - self.parent.high_res_image_width - self.parent.DOUBLE_OFFSET
+ self.parent.rescaled_low_res_width
+ - self.parent.high_res_image_width
+ - self.parent.DOUBLE_OFFSET
):
status_plus_plus_button = False
- self.parent.ui.xoffset_minus_minus_pushButton.setEnabled(status_minus_minus_button)
+ self.parent.ui.xoffset_minus_minus_pushButton.setEnabled(
+ status_minus_minus_button
+ )
self.parent.ui.xoffset_minus_pushButton.setEnabled(status_minus_button)
self.parent.ui.xoffset_plus_pushButton.setEnabled(status_plus_button)
self.parent.ui.xoffset_plus_plus_pushButton.setEnabled(status_plus_plus_button)
@@ -483,15 +547,21 @@ def check_yoffset_manual_button_status(self):
status_minus_minus_button = False
elif yoffset_value < self.parent.DOUBLE_OFFSET:
status_minus_minus_button = False
- elif yoffset_value == (self.parent.rescaled_low_res_height - self.parent.high_res_image_height):
+ elif yoffset_value == (
+ self.parent.rescaled_low_res_height - self.parent.high_res_image_height
+ ):
status_plus_button = False
status_plus_plus_button = False
elif yoffset_value > (
- self.parent.rescaled_low_res_height - self.parent.high_res_image_height - self.parent.DOUBLE_OFFSET
+ self.parent.rescaled_low_res_height
+ - self.parent.high_res_image_height
+ - self.parent.DOUBLE_OFFSET
):
status_plus_plus_button = False
- self.parent.ui.yoffset_minus_minus_pushButton.setEnabled(status_minus_minus_button)
+ self.parent.ui.yoffset_minus_minus_pushButton.setEnabled(
+ status_minus_minus_button
+ )
self.parent.ui.yoffset_minus_pushButton.setEnabled(status_minus_button)
self.parent.ui.yoffset_plus_pushButton.setEnabled(status_plus_button)
self.parent.ui.yoffset_plus_plus_pushButton.setEnabled(status_plus_plus_button)
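
Aside: the enable/disable rules applied to the four nudge buttons above reduce to simple bound checks on the current offset. The sketch below is illustrative only; the function name and the assumption 0 < double_step < max_offset are mine, and the first branch of the original elif chain is only partially visible in this hunk.

def offset_button_states(offset, rescaled_size, high_res_size, double_step):
    """Rough sketch of which nudge buttons stay enabled for a given offset."""
    max_offset = rescaled_size - high_res_size
    return {
        "minus_minus": offset >= double_step,            # can still step back by the double step
        "minus": offset > 0,                             # not already at the left/top edge
        "plus": offset < max_offset,                     # not already at the right/bottom edge
        "plus_plus": offset <= max_offset - double_step, # room left for a double step forward
    }
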
@@ -502,7 +572,9 @@ def update_profile_plots(self):
return
o_get = Get(parent=self.parent)
- overlay_1_dict = o_get.marker_location(image_resolution="overlay", target_index="1")
+ overlay_1_dict = o_get.marker_location(
+ image_resolution="overlay", target_index="1"
+ )
width = self.parent.markers["width"]
height = self.parent.markers["height"]
@@ -546,7 +618,9 @@ def update_profile_plots(self):
self.parent.horizontal_profile_plot.axes.plot(
x_axis, horizontal_profile_high_res, "-b", label="high resolution"
)
- self.parent.horizontal_profile_plot.axes.plot(x_axis, horizontal_profile_low_res, "--b", label="low resolution")
+ self.parent.horizontal_profile_plot.axes.plot(
+ x_axis, horizontal_profile_low_res, "--b", label="low resolution"
+ )
self.parent.horizontal_profile_plot.axes.legend()
self.parent.horizontal_profile_plot.draw()
@@ -559,8 +633,12 @@ def update_profile_plots(self):
self.parent.vertical_profile_plot.axes.clear()
self.parent.vertical_profile_plot.draw()
- self.parent.vertical_profile_plot.axes.plot(y_axis, vertical_profile_high_res, "-r", label="high resolution")
- self.parent.vertical_profile_plot.axes.plot(y_axis, vertical_profile_low_res, "--r", label="low resolution")
+ self.parent.vertical_profile_plot.axes.plot(
+ y_axis, vertical_profile_high_res, "-r", label="high resolution"
+ )
+ self.parent.vertical_profile_plot.axes.plot(
+ y_axis, vertical_profile_low_res, "--r", label="low resolution"
+ )
self.parent.vertical_profile_plot.axes.legend()
self.parent.vertical_profile_plot.draw()
@@ -578,7 +656,11 @@ def save_overlay_parameters(self):
sf = str(self.parent.ui.scaling_factor_lineEdit.text())
xoffset = str(self.parent.ui.xoffset_lineEdit.text())
yoffset = str(self.parent.ui.yoffset_lineEdit.text())
- self.parent.parameters_used_on_all_images = {"scaling_factor": sf, "xoffset": xoffset, "yoffset": yoffset}
+ self.parent.parameters_used_on_all_images = {
+ "scaling_factor": sf,
+ "xoffset": xoffset,
+ "yoffset": yoffset,
+ }
self.parent.ui.export_pushButton.setEnabled(True)
def check_export_button_status(self):
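
For context, the manual overlay performed above boils down to two steps: rescale the low-resolution frame by the user-supplied scaling factor, then paste the high-resolution frame into it at the chosen (xoffset, yoffset). A minimal standalone sketch, assuming 2D NumPy arrays and offsets that keep the high-resolution frame inside the rescaled one; the function name is illustrative and not part of the code base.

import numpy as np
from PIL import Image

def overlay_high_on_low(low_res, high_res, scaling_factor, xoffset, yoffset):
    """Rescale low_res, then paste high_res at (xoffset, yoffset) in the rescaled frame."""
    height, width = low_res.shape
    new_size = (int(width * scaling_factor), int(height * scaling_factor))  # PIL expects (width, height)
    # cast to float32 so PIL handles the grayscale array in 'F' mode
    rescaled = np.array(Image.fromarray(low_res.astype(np.float32)).resize(new_size))
    overlaid = rescaled.copy()
    hres_height, hres_width = high_res.shape
    overlaid[yoffset:yoffset + hres_height, xoffset:xoffset + hres_width] = high_res
    return overlaid
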
diff --git a/notebooks/__code/overlay_images/export.py b/notebooks/__code/overlay_images/export.py
index 572c14a5..e601b917 100755
--- a/notebooks/__code/overlay_images/export.py
+++ b/notebooks/__code/overlay_images/export.py
@@ -13,12 +13,18 @@ def __init__(self, parent=None):
def run(self):
working_dir = os.path.abspath(os.path.dirname(self.parent.working_dir))
- export_folder = QFileDialog.getExistingDirectory(self.parent, caption="Select folder", directory=working_dir)
+ export_folder = QFileDialog.getExistingDirectory(
+ self.parent, caption="Select folder", directory=working_dir
+ )
if export_folder:
# make own folder where the data will be exported
- short_high_res_input_folder = os.path.basename(self.parent.high_res_input_folder)
- short_low_res_input_folder = os.path.basename(self.parent.low_res_input_folder)
+ short_high_res_input_folder = os.path.basename(
+ self.parent.high_res_input_folder
+ )
+ short_low_res_input_folder = os.path.basename(
+ self.parent.low_res_input_folder
+ )
output_folder = f"{short_low_res_input_folder}_and_{short_high_res_input_folder}_overlaid"
full_output_folder = os.path.join(export_folder, output_folder)
make_or_reset_folder(full_output_folder)
diff --git a/notebooks/__code/overlay_images/initialization.py b/notebooks/__code/overlay_images/initialization.py
index 99173043..4ff6420b 100755
--- a/notebooks/__code/overlay_images/initialization.py
+++ b/notebooks/__code/overlay_images/initialization.py
@@ -20,9 +20,13 @@ def __init__(self, parent=None):
def dictionaries(self):
list_high_res_files = self.parent.o_norm_high_res.data["sample"]["file_name"]
- list_high_res_files_basename = [os.path.basename(_file) for _file in list_high_res_files]
+ list_high_res_files_basename = [
+ os.path.basename(_file) for _file in list_high_res_files
+ ]
list_low_res_files = self.parent.o_norm_low_res.data["sample"]["file_name"]
- list_low_res_files_basename = [os.path.basename(_file) for _file in list_low_res_files]
+ list_low_res_files_basename = [
+ os.path.basename(_file) for _file in list_low_res_files
+ ]
dict_offsets = OrderedDict()
for _index, _filename in enumerate(list_high_res_files_basename):
@@ -42,8 +46,12 @@ def widgets(self):
list_high_res_files = self.parent.o_norm_high_res.data["sample"]["file_name"]
list_low_res_files = self.parent.o_norm_low_res.data["sample"]["file_name"]
- list_high_res_files_basename = [os.path.basename(_file) for _file in list_high_res_files]
- list_low_res_files_basename = [os.path.basename(_file) for _file in list_low_res_files]
+ list_high_res_files_basename = [
+ os.path.basename(_file) for _file in list_high_res_files
+ ]
+ list_low_res_files_basename = [
+ os.path.basename(_file) for _file in list_low_res_files
+ ]
resize_and_overlay_modes = []
@@ -53,7 +61,9 @@ def widgets(self):
list_high_res_files_basename, list_low_res_files_basename, strict=False
):
o_table.insert_empty_row(row=_row)
- o_table.insert_item(row=_row, column=0, value=_high_res_file, editable=False)
+ o_table.insert_item(
+ row=_row, column=0, value=_high_res_file, editable=False
+ )
o_table.insert_item(row=_row, column=1, value=_low_res_file, editable=False)
o_table.insert_item(row=_row, column=2, value="None")
resize_and_overlay_modes.append("None")
@@ -116,7 +126,14 @@ def pyqtgraph(self):
image_layout.addWidget(self.parent.image_view["overlay"])
self.parent.ui.overlay_widget.setLayout(image_layout)
- def _design_marker(self, image_resolution=None, target_index=None, pen=None, image_view=None, method=None):
+ def _design_marker(
+ self,
+ image_resolution=None,
+ target_index=None,
+ pen=None,
+ image_view=None,
+ method=None,
+ ):
x = self.parent.markers[image_resolution][target_index]["x"]
y = self.parent.markers[image_resolution][target_index]["y"]
image_view = self.parent.image_view[image_resolution]
@@ -139,19 +156,31 @@ def markers(self):
blue_pen.setWidthF(0.05)
self.parent.markers["high_res"]["1"]["ui"] = self._design_marker(
- image_resolution="high_res", target_index="1", pen=red_pen, method=self.parent.markers_changed
+ image_resolution="high_res",
+ target_index="1",
+ pen=red_pen,
+ method=self.parent.markers_changed,
)
self.parent.markers["high_res"]["2"]["ui"] = self._design_marker(
- image_resolution="high_res", target_index="2", pen=blue_pen, method=self.parent.markers_changed
+ image_resolution="high_res",
+ target_index="2",
+ pen=blue_pen,
+ method=self.parent.markers_changed,
)
self.parent.markers["low_res"]["1"]["ui"] = self._design_marker(
- image_resolution="low_res", target_index="1", pen=red_pen, method=self.parent.markers_changed
+ image_resolution="low_res",
+ target_index="1",
+ pen=red_pen,
+ method=self.parent.markers_changed,
)
self.parent.markers["low_res"]["2"]["ui"] = self._design_marker(
- image_resolution="low_res", target_index="2", pen=blue_pen, method=self.parent.markers_changed
+ image_resolution="low_res",
+ target_index="2",
+ pen=blue_pen,
+ method=self.parent.markers_changed,
)
o_event = EventHandler(parent=self.parent)
diff --git a/notebooks/__code/overlay_images/interface_handler.py b/notebooks/__code/overlay_images/interface_handler.py
index 742b952f..9deccdf8 100755
--- a/notebooks/__code/overlay_images/interface_handler.py
+++ b/notebooks/__code/overlay_images/interface_handler.py
@@ -13,9 +13,15 @@
class InterfaceHandler:
def __init__(self, working_dir=None, o_norm_high_res=None, o_norm_low_res=None):
- assert len(o_norm_low_res.data["sample"]["file_name"]) == len(o_norm_high_res.data["sample"]["file_name"])
+ assert len(o_norm_low_res.data["sample"]["file_name"]) == len(
+ o_norm_high_res.data["sample"]["file_name"]
+ )
- o_interface = Interface(o_norm_high_res=o_norm_high_res, o_norm_low_res=o_norm_low_res, working_dir=working_dir)
+ o_interface = Interface(
+ o_norm_high_res=o_norm_high_res,
+ o_norm_low_res=o_norm_low_res,
+ working_dir=working_dir,
+ )
o_interface.show()
self.o_interface = o_interface
@@ -77,23 +83,34 @@ class Interface(QMainWindow):
    # if any of the current parameters differs from these, the EXPORT button becomes unavailable
parameters_used_on_all_images = {"scaling_factor": 0, "xoffset": 0, "yoffset": 0}
- def __init__(self, parent=None, o_norm_high_res=None, o_norm_low_res=None, working_dir=None):
+ def __init__(
+ self, parent=None, o_norm_high_res=None, o_norm_low_res=None, working_dir=None
+ ):
self.o_norm_high_res = o_norm_high_res
self.o_norm_low_res = o_norm_low_res
self.working_dir = working_dir if working_dir else "./"
- self.high_res_image_height, self.high_res_image_width = np.shape(o_norm_high_res.data["sample"]["data"][0])
- self.low_res_image_height, self.low_res_image_width = np.shape(o_norm_low_res.data["sample"]["data"][0])
+ self.high_res_image_height, self.high_res_image_width = np.shape(
+ o_norm_high_res.data["sample"]["data"][0]
+ )
+ self.low_res_image_height, self.low_res_image_width = np.shape(
+ o_norm_low_res.data["sample"]["data"][0]
+ )
self.rescaled_low_res_height, self.rescaled_low_res_width = None, None
self.list_of_high_res_filename = o_norm_high_res.data["sample"]["file_name"]
- self.high_res_input_folder = os.path.dirname(o_norm_high_res.data["sample"]["file_name"][0])
- self.low_res_input_folder = os.path.dirname(o_norm_low_res.data["sample"]["file_name"][0])
+ self.high_res_input_folder = os.path.dirname(
+ o_norm_high_res.data["sample"]["file_name"][0]
+ )
+ self.low_res_input_folder = os.path.dirname(
+ o_norm_low_res.data["sample"]["file_name"][0]
+ )
super(Interface, self).__init__(parent)
ui_full_path = os.path.join(
- os.path.dirname(os.path.dirname(os.path.dirname(__file__))), os.path.join("ui", "ui_overlay.ui")
+ os.path.dirname(os.path.dirname(os.path.dirname(__file__))),
+ os.path.join("ui", "ui_overlay.ui"),
)
self.ui = load_ui(ui_full_path, baseinstance=self)
self.setWindowTitle("Overlay images with scaling")
@@ -126,19 +143,27 @@ def list_files_table_selection_changed(self):
def markers_changed(self):
o_get = Get(parent=self)
- high_res_1_dict = o_get.marker_location(image_resolution="high_res", target_index="1")
+ high_res_1_dict = o_get.marker_location(
+ image_resolution="high_res", target_index="1"
+ )
self.markers["high_res"]["1"]["x"] = high_res_1_dict["x"]
self.markers["high_res"]["1"]["y"] = high_res_1_dict["y"]
- high_res_2_dict = o_get.marker_location(image_resolution="high_res", target_index="2")
+ high_res_2_dict = o_get.marker_location(
+ image_resolution="high_res", target_index="2"
+ )
self.markers["high_res"]["2"]["x"] = high_res_2_dict["x"]
self.markers["high_res"]["2"]["y"] = high_res_2_dict["y"]
- low_res_1_dict = o_get.marker_location(image_resolution="low_res", target_index="1")
+ low_res_1_dict = o_get.marker_location(
+ image_resolution="low_res", target_index="1"
+ )
self.markers["low_res"]["1"]["x"] = low_res_1_dict["x"]
self.markers["low_res"]["1"]["y"] = low_res_1_dict["y"]
- low_res_2_dict = o_get.marker_location(image_resolution="low_res", target_index="2")
+ low_res_2_dict = o_get.marker_location(
+ image_resolution="low_res", target_index="2"
+ )
self.markers["low_res"]["2"]["x"] = low_res_2_dict["x"]
self.markers["low_res"]["2"]["y"] = low_res_2_dict["y"]
@@ -268,7 +293,9 @@ def profile_tool_clicked(self):
def profile_region_moved(self):
o_get = Get(parent=self)
- overlay_1_dict = o_get.marker_location(image_resolution="overlay", target_index="1")
+ overlay_1_dict = o_get.marker_location(
+ image_resolution="overlay", target_index="1"
+ )
self.markers["overlay"]["1"]["x"] = overlay_1_dict["x"]
self.markers["overlay"]["1"]["y"] = overlay_1_dict["y"]
diff --git a/notebooks/__code/panoramic_stitching/automatically_stitch.py b/notebooks/__code/panoramic_stitching/automatically_stitch.py
index 50e6faed..ac9b7049 100755
--- a/notebooks/__code/panoramic_stitching/automatically_stitch.py
+++ b/notebooks/__code/panoramic_stitching/automatically_stitch.py
@@ -21,7 +21,9 @@ def run(self):
group_selected = o_get.get_combobox_folder_selected()
# first calculate the long and lift position versus pixel coefficient from the ref. group
- group_reference_offset_dictionary = self.parent.offset_dictionary[group_selected]
+ group_reference_offset_dictionary = self.parent.offset_dictionary[
+ group_selected
+ ]
group_reference_data_dictionary = self.parent.data_dictionary[group_selected]
list_files = group_reference_offset_dictionary.keys()
@@ -29,8 +31,12 @@ def run(self):
list_pixel_vs_motor_lift_axis_value = []
for _file_index, _file in enumerate(list_files):
- long_axis_value = group_reference_data_dictionary[_file].metadata["MotLongAxis.RBV"]
- lift_axis_value = group_reference_data_dictionary[_file].metadata["MotLiftTable.RBV"]
+ long_axis_value = group_reference_data_dictionary[_file].metadata[
+ "MotLongAxis.RBV"
+ ]
+ lift_axis_value = group_reference_data_dictionary[_file].metadata[
+ "MotLiftTable.RBV"
+ ]
if _file_index == 0:
long_axis_reference_value = long_axis_value
@@ -75,15 +81,29 @@ def run(self):
        # get xoffset and yoffset pixel/motor position of each image of reference group
for _file_index, _file in enumerate(list_files):
if _file_index == 0:
- long_axis_value_image_reference = data_dictionary[_file].metadata["MotLongAxis.RBV"]
- lift_axis_value_image_reference = data_dictionary[_file].metadata["MotLiftTable.RBV"]
+ long_axis_value_image_reference = data_dictionary[_file].metadata[
+ "MotLongAxis.RBV"
+ ]
+ lift_axis_value_image_reference = data_dictionary[_file].metadata[
+ "MotLiftTable.RBV"
+ ]
continue
- long_axis_value = data_dictionary[_file].metadata["MotLongAxis.RBV"] - long_axis_value_image_reference
- lift_axis_value = data_dictionary[_file].metadata["MotLiftTable.RBV"] - lift_axis_value_image_reference
-
- xoffset_of_this_file = int(long_axis_value * list_pixel_vs_motor_long_axis_value[_file_index])
- yoffset_of_this_file = int(lift_axis_value * list_pixel_vs_motor_lift_axis_value[_file_index])
+ long_axis_value = (
+ data_dictionary[_file].metadata["MotLongAxis.RBV"]
+ - long_axis_value_image_reference
+ )
+ lift_axis_value = (
+ data_dictionary[_file].metadata["MotLiftTable.RBV"]
+ - lift_axis_value_image_reference
+ )
+
+ xoffset_of_this_file = int(
+ long_axis_value * list_pixel_vs_motor_long_axis_value[_file_index]
+ )
+ yoffset_of_this_file = int(
+ lift_axis_value * list_pixel_vs_motor_lift_axis_value[_file_index]
+ )
group_offset_dictionary[_file]["xoffset"] = xoffset_of_this_file
group_offset_dictionary[_file]["yoffset"] = yoffset_of_this_file
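
The automatic stitching above converts motor readbacks (MotLongAxis.RBV, MotLiftTable.RBV) into pixel offsets by calibrating a pixel-per-motor-unit coefficient on a manually aligned reference group, then reusing it on the other groups. A simplified one-axis sketch of that idea follows; the function and argument names are assumptions, and the real code keeps one coefficient per file index.

import numpy as np

def motor_to_pixel_offsets(ref_motor, ref_pixel, new_motor):
    """Map motor positions of a new group to pixel offsets, using a reference group.

    ref_motor / ref_pixel: motor positions and manually found pixel offsets of the
    reference group (first entry = reference image). new_motor: motor positions of
    the group to stitch automatically. All values are along one axis.
    """
    ref_motor = np.asarray(ref_motor, dtype=float)
    ref_pixel = np.asarray(ref_pixel, dtype=float)
    new_motor = np.asarray(new_motor, dtype=float)
    # pixel-per-motor-unit coefficient of each non-reference image
    motor_delta = ref_motor[1:] - ref_motor[0]
    with np.errstate(divide="ignore", invalid="ignore"):
        coeff = np.where(motor_delta != 0, ref_pixel[1:] / motor_delta, 0.0)
    # the first image of the new group is the anchor, so its offset is 0
    new_delta = new_motor[1:] - new_motor[0]
    return [0] + [int(d * c) for d, c in zip(new_delta, coeff)]

For example, motor_to_pixel_offsets([0.0, 5.0, 10.0], [0, 250, 500], [0.0, 5.0, 10.0]) returns [0, 250, 500].
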
diff --git a/notebooks/__code/panoramic_stitching/config_buttons.py b/notebooks/__code/panoramic_stitching/config_buttons.py
index db128a6a..4344da71 100755
--- a/notebooks/__code/panoramic_stitching/config_buttons.py
+++ b/notebooks/__code/panoramic_stitching/config_buttons.py
@@ -1,13 +1,40 @@
button = {
- "left": {"pressed": "left_arrow_v2_pressed.png", "released": "left_arrow_v2_released.png"},
- "right": {"pressed": "right_arrow_v2_pressed.png", "released": "right_arrow_v2_released.png"},
- "left_left": {"pressed": "left_left_arrow_v2_pressed.png", "released": "left_left_arrow_v2_released.png"},
- "right_right": {"pressed": "right_right_arrow_v2_pressed.png", "released": "right_right_arrow_v2_released.png"},
- "up": {"pressed": "up_arrow_v2_pressed.png", "released": "up_arrow_v2_released.png"},
- "down": {"pressed": "down_arrow_v2_pressed.png", "released": "down_arrow_v2_released.png"},
- "up_up": {"pressed": "up_up_arrow_v2_pressed.png", "released": "up_up_arrow_v2_released.png"},
- "down_down": {"pressed": "down_down_arrow_v2_pressed.png", "released": "down_down_arrow_v2_released.png"},
- "remote_control": {"pressed": "remote_control_pressed.png", "released": "remote_control_released.png"},
+ "left": {
+ "pressed": "left_arrow_v2_pressed.png",
+ "released": "left_arrow_v2_released.png",
+ },
+ "right": {
+ "pressed": "right_arrow_v2_pressed.png",
+ "released": "right_arrow_v2_released.png",
+ },
+ "left_left": {
+ "pressed": "left_left_arrow_v2_pressed.png",
+ "released": "left_left_arrow_v2_released.png",
+ },
+ "right_right": {
+ "pressed": "right_right_arrow_v2_pressed.png",
+ "released": "right_right_arrow_v2_released.png",
+ },
+ "up": {
+ "pressed": "up_arrow_v2_pressed.png",
+ "released": "up_arrow_v2_released.png",
+ },
+ "down": {
+ "pressed": "down_arrow_v2_pressed.png",
+ "released": "down_arrow_v2_released.png",
+ },
+ "up_up": {
+ "pressed": "up_up_arrow_v2_pressed.png",
+ "released": "up_up_arrow_v2_released.png",
+ },
+ "down_down": {
+ "pressed": "down_down_arrow_v2_pressed.png",
+ "released": "down_down_arrow_v2_released.png",
+ },
+ "remote_control": {
+ "pressed": "remote_control_pressed.png",
+ "released": "remote_control_released.png",
+ },
"bring_to_focus": {
"pressed": "panoramic_bring_into_focus_pressed.png",
"released": "panoramic_bring_into_focus_released.png",
diff --git a/notebooks/__code/panoramic_stitching/data_initialization.py b/notebooks/__code/panoramic_stitching/data_initialization.py
index 213cebe1..322fb2ba 100755
--- a/notebooks/__code/panoramic_stitching/data_initialization.py
+++ b/notebooks/__code/panoramic_stitching/data_initialization.py
@@ -35,7 +35,9 @@ def offset_table(self):
xoffset = 0
yoffset = 0
else:
- if current_metadata.get(X_METADATA_NAME, 0) < previous_metadata.get(X_METADATA_NAME, 0):
+ if current_metadata.get(X_METADATA_NAME, 0) < previous_metadata.get(
+ X_METADATA_NAME, 0
+ ):
xoffset = 0
yoffset += image_height
else:
diff --git a/notebooks/__code/panoramic_stitching/event_handler.py b/notebooks/__code/panoramic_stitching/event_handler.py
index 3da6e17f..95b27253 100755
--- a/notebooks/__code/panoramic_stitching/event_handler.py
+++ b/notebooks/__code/panoramic_stitching/event_handler.py
@@ -6,7 +6,16 @@
import pyqtgraph as pg
from qtpy import QtCore, QtGui
from qtpy.QtGui import QIcon
-from qtpy.QtWidgets import QApplication, QCheckBox, QFileDialog, QHBoxLayout, QMenu, QSizePolicy, QSpacerItem, QWidget
+from qtpy.QtWidgets import (
+ QApplication,
+ QCheckBox,
+ QFileDialog,
+ QHBoxLayout,
+ QMenu,
+ QSizePolicy,
+ QSpacerItem,
+ QWidget,
+)
from __code._utilities.table_handler import TableHandler
from __code._utilities.widgets_handler import WidgetsHandler
@@ -15,7 +24,10 @@
from __code.panoramic_stitching.gui_handler import GuiHandler
from __code.panoramic_stitching.gui_initialization import GuiInitialization
from __code.panoramic_stitching.image_handler import ImageHandler
-from __code.panoramic_stitching.status_message_config import StatusMessageStatus, show_status_message
+from __code.panoramic_stitching.status_message_config import (
+ StatusMessageStatus,
+ show_status_message,
+)
from __code.panoramic_stitching.utilities import make_full_file_name_to_static_folder_of
@@ -78,11 +90,18 @@ def list_folder_combobox_value_changed(self, new_folder_selected=None):
else:
editable_flag = editable_columns_boolean[_column_index]
- o_table.insert_item(row=_row_index, column=_column_index, value=_text, editable=editable_flag)
+ o_table.insert_item(
+ row=_row_index,
+ column=_column_index,
+ value=_text,
+ editable=editable_flag,
+ )
# checkbox to turn on/off visibility of the row
hori_layout = QHBoxLayout()
- spacer_item_left = QSpacerItem(408, 20, QSizePolicy.Expanding, QSizePolicy.Expanding)
+ spacer_item_left = QSpacerItem(
+ 408, 20, QSizePolicy.Expanding, QSizePolicy.Expanding
+ )
hori_layout.addItem(spacer_item_left)
check_box = QCheckBox()
if offset_file_entry["visible"]:
@@ -92,10 +111,14 @@ def list_folder_combobox_value_changed(self, new_folder_selected=None):
check_box.setCheckState(_state)
check_box.stateChanged.connect(
- lambda state=0, row=_row_index: self.parent.visibility_checkbox_changed(state=state, row=row)
+ lambda state=0, row=_row_index: self.parent.visibility_checkbox_changed(
+ state=state, row=row
+ )
)
hori_layout.addWidget(check_box)
- spacer_item_right = QSpacerItem(408, 20, QSizePolicy.Expanding, QSizePolicy.Expanding)
+ spacer_item_right = QSpacerItem(
+ 408, 20, QSizePolicy.Expanding, QSizePolicy.Expanding
+ )
hori_layout.addItem(spacer_item_right)
cell_widget = QWidget()
cell_widget.setLayout(hori_layout)
@@ -121,7 +144,9 @@ def check_status_of_from_to_checkbox(self):
self.parent.ui.from_to_button.setEnabled(False)
self.parent.ui.from_to_error_label.setVisible(False)
if self.parent.remote_control_id:
- self.parent.remote_control_id.ui.move_active_image_pushButton.setEnabled(False)
+ self.parent.remote_control_id.ui.move_active_image_pushButton.setEnabled(
+ False
+ )
else:
if row_selected == 0:
state = False
@@ -144,7 +169,9 @@ def roi_box_changed(
self,
roi_id=None,
):
- region = roi_id.getArraySlice(self.parent.current_live_image, self.parent.ui.image_view.imageItem)
+ region = roi_id.getArraySlice(
+ self.parent.current_live_image, self.parent.ui.image_view.imageItem
+ )
x0 = region[0][0].start
y0 = region[0][1].start
@@ -183,12 +210,16 @@ def from_to_button_pushed(self):
o_table = TableHandler(table_ui=self.parent.ui.tableWidget)
row_selected = o_table.get_row_selected()
- current_xoffset_of_selected_row = int(o_table.get_item_str_from_cell(row=row_selected, column=1))
+ current_xoffset_of_selected_row = int(
+ o_table.get_item_str_from_cell(row=row_selected, column=1)
+ )
new_xoffset = int(current_xoffset_of_selected_row - delta_x)
self.parent.ui.tableWidget.item(row_selected, 1).setText(str(new_xoffset))
self.save_table_offset_of_this_cell(row=row_selected, column=1)
- current_yoffset_of_selected_row = int(o_table.get_item_str_from_cell(row=row_selected, column=2))
+ current_yoffset_of_selected_row = int(
+ o_table.get_item_str_from_cell(row=row_selected, column=2)
+ )
new_yoffset = current_yoffset_of_selected_row - delta_y
self.parent.ui.tableWidget.item(row_selected, 2).setText(str(new_yoffset))
self.save_table_offset_of_this_cell(row=row_selected, column=2)
@@ -283,7 +314,9 @@ def manual_offset_changed(self, direction="horizontal", nbr_pixel=1):
current_offset = o_table.get_item_str_from_cell(row=row_selected, column=column)
new_offset = int(current_offset) + nbr_pixel
- o_table.set_item_with_str(row=row_selected, column=column, cell_str=str(new_offset))
+ o_table.set_item_with_str(
+ row=row_selected, column=column, cell_str=str(new_offset)
+ )
self.parent.table_of_offset_cell_changed(row_selected, column)
@@ -422,7 +455,9 @@ def export_table(self, table_file_name=""):
self.parent.save_as_table_file_name = table_file_name
self.parent.ui.actionSave_Table.setEnabled(True)
- self.parent.ui.actionSave_Table.setText(f"Save ({os.path.basename(table_file_name)})")
+ self.parent.ui.actionSave_Table.setText(
+ f"Save ({os.path.basename(table_file_name)})"
+ )
def make_table_dict(self):
o_table = TableHandler(table_ui=self.parent.ui.tableWidget)
@@ -430,7 +465,10 @@ def make_table_dict(self):
nbr_column = 2
my_dictionary = {}
for _row in np.arange(nbr_row):
- local_list = [o_table.get_item_str_from_cell(_row, _column) for _column in (np.arange(nbr_column) + 1)]
+ local_list = [
+ o_table.get_item_str_from_cell(_row, _column)
+ for _column in (np.arange(nbr_column) + 1)
+ ]
my_dictionary[str(_row)] = local_list
return my_dictionary
@@ -440,10 +478,14 @@ def update_remote_ui(self):
@staticmethod
def button_pressed(ui=None, name="left"):
- full_file = make_full_file_name_to_static_folder_of(config.button[name]["pressed"])
+ full_file = make_full_file_name_to_static_folder_of(
+ config.button[name]["pressed"]
+ )
ui.setIcon(QIcon(full_file))
@staticmethod
def button_released(ui=None, name="left"):
- full_file = make_full_file_name_to_static_folder_of(config.button[name]["released"])
+ full_file = make_full_file_name_to_static_folder_of(
+ config.button[name]["released"]
+ )
ui.setIcon(QIcon(full_file))
diff --git a/notebooks/__code/panoramic_stitching/export.py b/notebooks/__code/panoramic_stitching/export.py
index c07f1139..f349f8b7 100755
--- a/notebooks/__code/panoramic_stitching/export.py
+++ b/notebooks/__code/panoramic_stitching/export.py
@@ -27,7 +27,8 @@ def select_output_folder(self):
output_folder = QFileDialog.getExistingDirectory(
self.parent,
directory=self.parent.working_dir,
- caption="Select where the folder containing the " "panoramic images will be created!",
+ caption="Select where the folder containing the "
+ "panoramic images will be created!",
options=QFileDialog.ShowDirsOnly,
)
if output_folder:
@@ -72,43 +73,58 @@ def create_panoramic_images(self, output_folder=None):
if _file_index == 0:
panoramic_image[
- yoffset + VERTICAL_MARGIN : yoffset + image_height + VERTICAL_MARGIN,
- xoffset + HORIZONTAL_MARGIN : xoffset + image_width + HORIZONTAL_MARGIN,
+ yoffset + VERTICAL_MARGIN : yoffset
+ + image_height
+ + VERTICAL_MARGIN,
+ xoffset + HORIZONTAL_MARGIN : xoffset
+ + image_width
+ + HORIZONTAL_MARGIN,
] = image
continue
temp_big_image = np.zeros((panoramic_height, panoramic_width))
temp_big_image[
- yoffset + VERTICAL_MARGIN : yoffset + image_height + VERTICAL_MARGIN,
- xoffset + HORIZONTAL_MARGIN : xoffset + image_width + HORIZONTAL_MARGIN,
+ yoffset + VERTICAL_MARGIN : yoffset
+ + image_height
+ + VERTICAL_MARGIN,
+ xoffset + HORIZONTAL_MARGIN : xoffset
+ + image_width
+ + HORIZONTAL_MARGIN,
] = image
# where_panoramic_image_has_value_only = np.where((panoramic_image != 0) & (temp_big_image == 0))
- where_temp_big_image_has_value_only = np.where((temp_big_image != 0) & (panoramic_image == 0))
- where_both_images_overlap = np.where((panoramic_image != 0) & (temp_big_image != 0))
+ where_temp_big_image_has_value_only = np.where(
+ (temp_big_image != 0) & (panoramic_image == 0)
+ )
+ where_both_images_overlap = np.where(
+ (panoramic_image != 0) & (temp_big_image != 0)
+ )
if stitching_algorithm == StitchingAlgorithmType.minimum:
- panoramic_image[where_temp_big_image_has_value_only] = temp_big_image[
- where_temp_big_image_has_value_only
- ]
+ panoramic_image[where_temp_big_image_has_value_only] = (
+ temp_big_image[where_temp_big_image_has_value_only]
+ )
panoramic_image[where_both_images_overlap] = np.minimum(
- panoramic_image[where_both_images_overlap], temp_big_image[where_both_images_overlap]
+ panoramic_image[where_both_images_overlap],
+ temp_big_image[where_both_images_overlap],
)
elif stitching_algorithm == StitchingAlgorithmType.maximum:
- panoramic_image[where_temp_big_image_has_value_only] = temp_big_image[
- where_temp_big_image_has_value_only
- ]
+ panoramic_image[where_temp_big_image_has_value_only] = (
+ temp_big_image[where_temp_big_image_has_value_only]
+ )
panoramic_image[where_both_images_overlap] = np.maximum(
- panoramic_image[where_both_images_overlap], temp_big_image[where_both_images_overlap]
+ panoramic_image[where_both_images_overlap],
+ temp_big_image[where_both_images_overlap],
)
elif stitching_algorithm == StitchingAlgorithmType.mean:
- panoramic_image[where_temp_big_image_has_value_only] = temp_big_image[
- where_temp_big_image_has_value_only
- ]
+ panoramic_image[where_temp_big_image_has_value_only] = (
+ temp_big_image[where_temp_big_image_has_value_only]
+ )
panoramic_image[where_both_images_overlap] = (
- panoramic_image[where_both_images_overlap] + temp_big_image[where_both_images_overlap]
+ panoramic_image[where_both_images_overlap]
+ + temp_big_image[where_both_images_overlap]
) / 2
panoramic_images_dict[_group_name] = panoramic_image
@@ -122,9 +138,14 @@ def create_panoramic_images(self, output_folder=None):
def export_images(self, output_folder=None):
stitching_algorithm = self.parent.stitching_algorithm
- new_folder_name = os.path.basename(self.parent.working_dir) + f"_panoramic_{stitching_algorithm}"
+ new_folder_name = (
+ os.path.basename(self.parent.working_dir)
+ + f"_panoramic_{stitching_algorithm}"
+ )
self.parent.ui.statusbar.setStyleSheet("color: blue")
- self.parent.ui.statusbar.showMessage(f"Exporting images in folder {new_folder_name}")
+ self.parent.ui.statusbar.showMessage(
+ f"Exporting images in folder {new_folder_name}"
+ )
QtGui.QGuiApplication.processEvents()
new_output_folder_name = os.path.join(output_folder, new_folder_name)
@@ -142,7 +163,9 @@ def export_images(self, output_folder=None):
o_norm.data["sample"]["filename"] = list_filename
o_norm.export(new_output_folder_name, data_type="sample")
self.parent.ui.statusbar.setStyleSheet("color: green")
- self.parent.ui.statusbar.showMessage(f"{new_output_folder_name} has been created!", 10000) # 10s
+ self.parent.ui.statusbar.showMessage(
+ f"{new_output_folder_name} has been created!", 10000
+ ) # 10s
class SelectStitchingAlgorithm(QDialog):
@@ -162,7 +185,9 @@ def __init__(self, top_parent=None, parent=None):
def display_plot(self):
self.top_parent.ui.statusbar.setStyleSheet("color: blue")
- self.top_parent.ui.statusbar.showMessage("Calculating previews of current working group ...")
+ self.top_parent.ui.statusbar.showMessage(
+ "Calculating previews of current working group ..."
+ )
QtGui.QGuiApplication.processEvents()
o_get = Get(parent=self.top_parent)
@@ -192,16 +217,28 @@ def display_plot(self):
if _file_index == 0:
minimum_panoramic_image[
- yoffset + VERTICAL_MARGIN : yoffset + image_height + VERTICAL_MARGIN,
- xoffset + HORIZONTAL_MARGIN : xoffset + image_width + HORIZONTAL_MARGIN,
+ yoffset + VERTICAL_MARGIN : yoffset
+ + image_height
+ + VERTICAL_MARGIN,
+ xoffset + HORIZONTAL_MARGIN : xoffset
+ + image_width
+ + HORIZONTAL_MARGIN,
] = image
maximum_panoramic_image[
- yoffset + VERTICAL_MARGIN : yoffset + image_height + VERTICAL_MARGIN,
- xoffset + HORIZONTAL_MARGIN : xoffset + image_width + HORIZONTAL_MARGIN,
+ yoffset + VERTICAL_MARGIN : yoffset
+ + image_height
+ + VERTICAL_MARGIN,
+ xoffset + HORIZONTAL_MARGIN : xoffset
+ + image_width
+ + HORIZONTAL_MARGIN,
] = image
mean_panoramic_image[
- yoffset + VERTICAL_MARGIN : yoffset + image_height + VERTICAL_MARGIN,
- xoffset + HORIZONTAL_MARGIN : xoffset + image_width + HORIZONTAL_MARGIN,
+ yoffset + VERTICAL_MARGIN : yoffset
+ + image_height
+ + VERTICAL_MARGIN,
+ xoffset + HORIZONTAL_MARGIN : xoffset
+ + image_width
+ + HORIZONTAL_MARGIN,
] = image
continue
@@ -212,29 +249,36 @@ def display_plot(self):
] = image
# where_panoramic_image_has_value_only = np.where((panoramic_image != 0) & (temp_big_image == 0))
- where_temp_big_image_has_value_only = np.where((temp_big_image != 0) & (minimum_panoramic_image == 0))
- where_both_images_overlap = np.where((minimum_panoramic_image != 0) & (temp_big_image != 0))
+ where_temp_big_image_has_value_only = np.where(
+ (temp_big_image != 0) & (minimum_panoramic_image == 0)
+ )
+ where_both_images_overlap = np.where(
+ (minimum_panoramic_image != 0) & (temp_big_image != 0)
+ )
# minimum algorithm
- minimum_panoramic_image[where_temp_big_image_has_value_only] = temp_big_image[
- where_temp_big_image_has_value_only
- ]
+ minimum_panoramic_image[where_temp_big_image_has_value_only] = (
+ temp_big_image[where_temp_big_image_has_value_only]
+ )
minimum_panoramic_image[where_both_images_overlap] = np.minimum(
- minimum_panoramic_image[where_both_images_overlap], temp_big_image[where_both_images_overlap]
+ minimum_panoramic_image[where_both_images_overlap],
+ temp_big_image[where_both_images_overlap],
)
# maximum algorithm
- maximum_panoramic_image[where_temp_big_image_has_value_only] = temp_big_image[
- where_temp_big_image_has_value_only
- ]
+ maximum_panoramic_image[where_temp_big_image_has_value_only] = (
+ temp_big_image[where_temp_big_image_has_value_only]
+ )
maximum_panoramic_image[where_both_images_overlap] = np.maximum(
- maximum_panoramic_image[where_both_images_overlap], temp_big_image[where_both_images_overlap]
+ maximum_panoramic_image[where_both_images_overlap],
+ temp_big_image[where_both_images_overlap],
)
# mean algorithm
mean_panoramic_image[where_temp_big_image_has_value_only] = temp_big_image[
where_temp_big_image_has_value_only
]
mean_panoramic_image[where_both_images_overlap] = (
- mean_panoramic_image[where_both_images_overlap] + temp_big_image[where_both_images_overlap]
+ mean_panoramic_image[where_both_images_overlap]
+ + temp_big_image[where_both_images_overlap]
) / 2
self.top_parent.eventProgress.setValue(_file_index + 1)
@@ -334,4 +378,6 @@ def _get_stitching_algorithm_selected(self):
elif self.ui.use_linear_integration_radioButton.isChecked():
return StitchingAlgorithmType.linear_integration
else:
- raise NotImplementedError("Stitching algorithm has not been implemented yet!")
+ raise NotImplementedError(
+ "Stitching algorithm has not been implemented yet!"
+ )
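
The minimum / maximum / mean previews computed above all follow the same pattern: pixels covered by only one image are copied as-is, and pixels where two images overlap are blended. A compact sketch of that blending, treating zero as "no data" exactly like the code above; the helper name is illustrative.

import numpy as np

def combine_on_canvas(canvas, tile, mode="mean"):
    """Merge tile into canvas (same shape, zeros mean 'no data') with the chosen overlap rule."""
    only_tile = (tile != 0) & (canvas == 0)
    overlap = (canvas != 0) & (tile != 0)
    result = canvas.copy()
    result[only_tile] = tile[only_tile]
    if mode == "minimum":
        result[overlap] = np.minimum(canvas[overlap], tile[overlap])
    elif mode == "maximum":
        result[overlap] = np.maximum(canvas[overlap], tile[overlap])
    else:  # pairwise mean, as in the loop above
        result[overlap] = (canvas[overlap] + tile[overlap]) / 2
    return result

Calling it once per placed tile mirrors what the loop above does for the selected stitching algorithm.
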
diff --git a/notebooks/__code/panoramic_stitching/gui_initialization.py b/notebooks/__code/panoramic_stitching/gui_initialization.py
index 0e0d1cd9..cfc5c76b 100755
--- a/notebooks/__code/panoramic_stitching/gui_initialization.py
+++ b/notebooks/__code/panoramic_stitching/gui_initialization.py
@@ -12,7 +12,10 @@
from __code.panoramic_stitching.config_buttons import button
from __code.panoramic_stitching.gui_handler import GuiHandler
from __code.panoramic_stitching.mplcanvas import MplCanvas
-from __code.panoramic_stitching.utilities import make_full_file_name_to_static_folder_of, set_widgets_size
+from __code.panoramic_stitching.utilities import (
+ make_full_file_name_to_static_folder_of,
+ set_widgets_size,
+)
class GuiInitialization:
@@ -80,7 +83,9 @@ def widgets(self):
self.parent.ui.from_to_error_label.setVisible(False)
# remote control
- remote_control = make_full_file_name_to_static_folder_of("remote_control_released.png")
+ remote_control = make_full_file_name_to_static_folder_of(
+ "remote_control_released.png"
+ )
self.parent.ui.remote_control_widget.setIcon(QIcon(remote_control))
set_widgets_size(
widgets=[self.parent.ui.remote_control_widget],
@@ -91,22 +96,38 @@ def widgets(self):
# move buttons
_file_path = os.path.dirname(__file__)
- up_up_arrow_file = make_full_file_name_to_static_folder_of(button["up_up"]["released"])
+ up_up_arrow_file = make_full_file_name_to_static_folder_of(
+ button["up_up"]["released"]
+ )
self.parent.ui.up_up_button.setIcon(QIcon(up_up_arrow_file))
- up_arrow_file = make_full_file_name_to_static_folder_of(button["up"]["released"])
+ up_arrow_file = make_full_file_name_to_static_folder_of(
+ button["up"]["released"]
+ )
self.parent.ui.up_button.setIcon(QIcon(up_arrow_file))
- left_left_arrow_file = make_full_file_name_to_static_folder_of(button["left_left"]["released"])
+ left_left_arrow_file = make_full_file_name_to_static_folder_of(
+ button["left_left"]["released"]
+ )
self.parent.ui.left_left_button.setIcon(QIcon(left_left_arrow_file))
- left_arrow_file = make_full_file_name_to_static_folder_of(button["left"]["released"])
+ left_arrow_file = make_full_file_name_to_static_folder_of(
+ button["left"]["released"]
+ )
self.parent.ui.left_button.setIcon(QIcon(left_arrow_file))
- right_arrow_file = make_full_file_name_to_static_folder_of(button["right"]["released"])
+ right_arrow_file = make_full_file_name_to_static_folder_of(
+ button["right"]["released"]
+ )
self.parent.ui.right_button.setIcon(QIcon(right_arrow_file))
- right_right_arrow_file = make_full_file_name_to_static_folder_of(button["right_right"]["released"])
+ right_right_arrow_file = make_full_file_name_to_static_folder_of(
+ button["right_right"]["released"]
+ )
self.parent.ui.right_right_button.setIcon(QIcon(right_right_arrow_file))
- down_arrow_file = make_full_file_name_to_static_folder_of(button["down"]["released"])
+ down_arrow_file = make_full_file_name_to_static_folder_of(
+ button["down"]["released"]
+ )
self.parent.ui.down_button.setIcon(QIcon(down_arrow_file))
- down_down_arrow_file = make_full_file_name_to_static_folder_of(button["down_down"]["released"])
+ down_down_arrow_file = make_full_file_name_to_static_folder_of(
+ button["down_down"]["released"]
+ )
self.parent.ui.down_down_button.setIcon(QIcon(down_down_arrow_file))
list_ui = [self.parent.ui.left_button, self.parent.ui.right_button]
@@ -137,14 +158,21 @@ def widgets(self):
height=self.button_size["double_vertical_arrow"]["height"],
)
- state_hori_matplotlib = self.parent.ui.enable_horizontal_profile_checkbox.isChecked()
+ state_hori_matplotlib = (
+ self.parent.ui.enable_horizontal_profile_checkbox.isChecked()
+ )
o_gui = GuiHandler(parent=self.parent)
o_gui.enabled_horizontal_profile_widgets(enabled=state_hori_matplotlib)
- state_verti_matplotlib = self.parent.ui.enable_vertical_profile_checkbox.isChecked()
+ state_verti_matplotlib = (
+ self.parent.ui.enable_vertical_profile_checkbox.isChecked()
+ )
o_gui.enabled_vertical_profile_widgets(enabled=state_verti_matplotlib)
- profile_sliders = [self.parent.ui.horizontal_profile_width_slider, self.parent.ui.vertical_profile_width_slider]
+ profile_sliders = [
+ self.parent.ui.horizontal_profile_width_slider,
+ self.parent.ui.vertical_profile_width_slider,
+ ]
for _slider in profile_sliders:
_slider.setMinimum(self.parent.width_profile["min"])
_slider.setMaximum(self.parent.width_profile["max"])
diff --git a/notebooks/__code/panoramic_stitching/image_handler.py b/notebooks/__code/panoramic_stitching/image_handler.py
index cbd1a08a..a3806603 100755
--- a/notebooks/__code/panoramic_stitching/image_handler.py
+++ b/notebooks/__code/panoramic_stitching/image_handler.py
@@ -25,7 +25,9 @@ def update_contour_plot(self):
o_table = TableHandler(table_ui=self.parent.ui.tableWidget)
row_selected = o_table.get_row_selected()
- name_of_file_selected = o_table.get_item_str_from_cell(row=row_selected, column=0)
+ name_of_file_selected = o_table.get_item_str_from_cell(
+ row=row_selected, column=0
+ )
o_get = Get(parent=self.parent)
folder_selected = o_get.get_combobox_folder_selected()
@@ -104,8 +106,12 @@ def update_current_panoramic_image(self):
yoffset = offset_dictionary[_file]["yoffset"]
panoramic_image[
- yoffset + VERTICAL_MARGIN : yoffset + image_height + VERTICAL_MARGIN,
- xoffset + HORIZONTAL_MARGIN : xoffset + image_width + HORIZONTAL_MARGIN,
+ yoffset + VERTICAL_MARGIN : yoffset
+ + image_height
+ + VERTICAL_MARGIN,
+ xoffset + HORIZONTAL_MARGIN : xoffset
+ + image_width
+ + HORIZONTAL_MARGIN,
] = _image
self.parent.panoramic_images[folder_selected] = panoramic_image
@@ -117,13 +123,19 @@ def update_current_panoramic_image(self):
_view_box.setState(_state)
if not first_update:
- _histo_widget.setLevels(self.parent.histogram_level[0], self.parent.histogram_level[1])
+ _histo_widget.setLevels(
+ self.parent.histogram_level[0], self.parent.histogram_level[1]
+ )
def get_max_offset(self, folder_selected=None):
offset_dictionary = self.parent.offset_dictionary[folder_selected]
- list_xoffset = [offset_dictionary[_key]["xoffset"] for _key in offset_dictionary.keys()]
- list_yoffset = [offset_dictionary[_key]["yoffset"] for _key in offset_dictionary.keys()]
+ list_xoffset = [
+ offset_dictionary[_key]["xoffset"] for _key in offset_dictionary.keys()
+ ]
+ list_yoffset = [
+ offset_dictionary[_key]["yoffset"] for _key in offset_dictionary.keys()
+ ]
return int(np.max(list_yoffset)), int(np.max(list_xoffset))
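
get_max_offset above feeds the size of the panoramic canvas: the canvas must be at least as tall and wide as the largest offset plus one image, plus the fixed margins. A small sketch, assuming margins are added on both sides of the canvas; that symmetry is my assumption, since this hunk only shows images being placed at offset + margin.

def panoramic_canvas_shape(offsets, image_height, image_width,
                           vertical_margin=0, horizontal_margin=0):
    """Smallest (height, width) canvas that holds every image placed at its offsets.

    offsets: iterable of dicts with 'xoffset' and 'yoffset' keys, as in offset_dictionary.
    """
    max_yoffset = max(entry["yoffset"] for entry in offsets)
    max_xoffset = max(entry["xoffset"] for entry in offsets)
    height = max_yoffset + image_height + 2 * vertical_margin
    width = max_xoffset + image_width + 2 * horizontal_margin
    return height, width
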
@@ -135,16 +147,24 @@ def update_from_to_roi(self, state=False):
from_roi = self.parent.from_roi
x = from_roi["x"]
y = from_roi["y"]
- self.parent.from_roi_id = pg.ROI([x, y], [ROI_WIDTH, ROI_HEIGHT], scaleSnap=True)
+ self.parent.from_roi_id = pg.ROI(
+ [x, y], [ROI_WIDTH, ROI_HEIGHT], scaleSnap=True
+ )
self.parent.ui.image_view.addItem(self.parent.from_roi_id)
- self.parent.from_roi_id.sigRegionChanged.connect(self.parent.from_roi_box_changed)
+ self.parent.from_roi_id.sigRegionChanged.connect(
+ self.parent.from_roi_box_changed
+ )
to_roi = self.parent.to_roi
x = to_roi["x"]
y = to_roi["y"]
- self.parent.to_roi_id = pg.ROI([x, y], [ROI_WIDTH, ROI_HEIGHT], scaleSnap=True)
+ self.parent.to_roi_id = pg.ROI(
+ [x, y], [ROI_WIDTH, ROI_HEIGHT], scaleSnap=True
+ )
self.parent.ui.image_view.addItem(self.parent.to_roi_id)
- self.parent.to_roi_id.sigRegionChanged.connect(self.parent.to_roi_box_changed)
+ self.parent.to_roi_id.sigRegionChanged.connect(
+ self.parent.to_roi_box_changed
+ )
self.update_from_label()
self.update_from_cross_line()
@@ -170,12 +190,18 @@ def update_validity_of_from_to_button(self):
o_table = TableHandler(table_ui=self.parent.ui.tableWidget)
row_selected = o_table.get_row_selected()
- name_of_file_selected = o_table.get_item_str_from_cell(row=row_selected, column=0)
+ name_of_file_selected = o_table.get_item_str_from_cell(
+ row=row_selected, column=0
+ )
offset_dictionary = self.parent.offset_dictionary[folder_selected]
- xoffset_of_selected_image = offset_dictionary[name_of_file_selected]["xoffset"] + HORIZONTAL_MARGIN
- yoffset_of_selected_image = offset_dictionary[name_of_file_selected]["yoffset"] + VERTICAL_MARGIN
+ xoffset_of_selected_image = (
+ offset_dictionary[name_of_file_selected]["xoffset"] + HORIZONTAL_MARGIN
+ )
+ yoffset_of_selected_image = (
+ offset_dictionary[name_of_file_selected]["yoffset"] + VERTICAL_MARGIN
+ )
if (
(x < xoffset_of_selected_image)
@@ -192,7 +218,9 @@ def update_validity_of_from_to_button(self):
self.parent.ui.from_to_error_label.setVisible(from_to_error_label)
if self.parent.remote_control_id:
- self.parent.remote_control_id.ui.move_active_image_pushButton.setEnabled(from_to_button_status)
+ self.parent.remote_control_id.ui.move_active_image_pushButton.setEnabled(
+ from_to_button_status
+ )
def update_from_to_line_label_changed(self):
from_to_roi = self.parent.from_to_roi
@@ -230,11 +258,19 @@ def update_cross_line(self, roi_cross_id=None, roi=None):
line_color = (255, 0, 0, 255, 1)
lines = np.array(
[line_color for _ in np.arange(len(pos))],
- dtype=[("red", np.ubyte), ("green", np.ubyte), ("blue", np.ubyte), ("alpha", np.ubyte), ("width", float)],
+ dtype=[
+ ("red", np.ubyte),
+ ("green", np.ubyte),
+ ("blue", np.ubyte),
+ ("alpha", np.ubyte),
+ ("width", float),
+ ],
)
line_view_binning = pg.GraphItem()
self.parent.ui.image_view.addItem(line_view_binning)
- line_view_binning.setData(pos=pos, adj=adj, pen=lines, symbol=None, pxMode=False)
+ line_view_binning.setData(
+ pos=pos, adj=adj, pen=lines, symbol=None, pxMode=False
+ )
return line_view_binning
@@ -242,13 +278,17 @@ def update_from_cross_line(self):
from_roi_cross_id = self.parent.from_roi_cross_id
from_roi = self.parent.from_roi
- self.parent.from_roi_cross_id = self.update_cross_line(roi_cross_id=from_roi_cross_id, roi=from_roi)
+ self.parent.from_roi_cross_id = self.update_cross_line(
+ roi_cross_id=from_roi_cross_id, roi=from_roi
+ )
def update_to_cross_line(self):
to_roi_cross_id = self.parent.to_roi_cross_id
to_roi = self.parent.to_roi
- self.parent.to_roi_cross_id = self.update_cross_line(roi_cross_id=to_roi_cross_id, roi=to_roi)
+ self.parent.to_roi_cross_id = self.update_cross_line(
+ roi_cross_id=to_roi_cross_id, roi=to_roi
+ )
def update_label(self, label_id=None, roi=None, text=""):
if label_id:
@@ -267,9 +307,13 @@ def update_label(self, label_id=None, roi=None, text=""):
def update_from_label(self):
label_id = self.parent.from_label_id
roi = self.parent.from_roi
- self.parent.from_label_id = self.update_label(label_id=label_id, roi=roi, text="from")
+ self.parent.from_label_id = self.update_label(
+ label_id=label_id, roi=roi, text="from"
+ )
def update_to_label(self):
label_id = self.parent.to_label_id
roi = self.parent.to_roi
- self.parent.to_label_id = self.update_label(label_id=label_id, roi=roi, text="to")
+ self.parent.to_label_id = self.update_label(
+ label_id=label_id, roi=roi, text="to"
+ )
diff --git a/notebooks/__code/panoramic_stitching/load_data.py b/notebooks/__code/panoramic_stitching/load_data.py
index f9b20dd2..8df0b563 100755
--- a/notebooks/__code/panoramic_stitching/load_data.py
+++ b/notebooks/__code/panoramic_stitching/load_data.py
@@ -30,7 +30,7 @@ def keep_only_metadata_defined_in_config(self, list_key=None):
metadata_to_keep[_name] = float(_value)
except KeyError:
continue
-
+
self.metadata = metadata_to_keep
@@ -72,14 +72,18 @@ def run(self):
# record size of images
if _folder_index == 0:
- self.parent.image_height, self.parent.image_width = np.shape(o_norm.data["sample"]["data"][0])
+ self.parent.image_height, self.parent.image_width = np.shape(
+ o_norm.data["sample"]["data"][0]
+ )
local_dict = OrderedDict()
for _index, _file in enumerate(list_files):
_metadatadata = MetadataData()
_metadatadata.data = o_norm.data["sample"]["data"][_index]
_metadatadata.metadata = o_norm.data["sample"]["metadata"][_index]
- _metadatadata.keep_only_metadata_defined_in_config(list_key=self.metadata_key_to_keep)
+ _metadatadata.keep_only_metadata_defined_in_config(
+ list_key=self.metadata_key_to_keep
+ )
local_dict[_file] = _metadatadata
diff --git a/notebooks/__code/panoramic_stitching/panoramic_stitching.py b/notebooks/__code/panoramic_stitching/panoramic_stitching.py
index 59ce9f42..20ecd15d 100755
--- a/notebooks/__code/panoramic_stitching/panoramic_stitching.py
+++ b/notebooks/__code/panoramic_stitching/panoramic_stitching.py
@@ -6,7 +6,9 @@
from __code import load_ui
from __code._utilities.error import NoFilesFound
-from __code._utilities.folder import get_list_of_folders_with_specified_file_type_and_same_number_of_files
+from __code._utilities.folder import (
+ get_list_of_folders_with_specified_file_type_and_same_number_of_files,
+)
from __code._utilities.string import format_html_message
from __code.ipywe import fileselector
from __code.panoramic_stitching.automatically_stitch import AutomaticallyStitch
@@ -14,7 +16,11 @@
from __code.panoramic_stitching.event_handler import EventHandler
from __code.panoramic_stitching.export import Export
from __code.panoramic_stitching.gui_initialization import GuiInitialization
-from __code.panoramic_stitching.image_handler import HORIZONTAL_MARGIN, VERTICAL_MARGIN, ImageHandler
+from __code.panoramic_stitching.image_handler import (
+ HORIZONTAL_MARGIN,
+ VERTICAL_MARGIN,
+ ImageHandler,
+)
from __code.panoramic_stitching.load_data import LoadData
from __code.panoramic_stitching.profile import Profile
from __code.panoramic_stitching.remote_control_handler import RemoteControlHandler
@@ -42,11 +48,16 @@ def select_input_folders(self):
def folder_selected(self, folder_selected):
final_list_folders, list_folders_rejected = (
get_list_of_folders_with_specified_file_type_and_same_number_of_files(
- list_of_folders_to_check=folder_selected, file_extension=self.file_extension
+ list_of_folders_to_check=folder_selected,
+ file_extension=self.file_extension,
)
)
- self.working_dir = os.path.dirname(final_list_folders[0]) if final_list_folders else self.working_dir
+ self.working_dir = (
+ os.path.dirname(final_list_folders[0])
+ if final_list_folders
+ else self.working_dir
+ )
print("there")
if not final_list_folders:
str_list_ext = ", ".join(self.file_extension)
@@ -61,18 +72,24 @@ def folder_selected(self, folder_selected):
final_list_folders.sort()
nbr_folder = len(final_list_folders)
- display(format_html_message(pre_message=f"Notebook is about to work with {nbr_folder} folders!", spacer=""))
+ display(
+ format_html_message(
+ pre_message=f"Notebook is about to work with {nbr_folder} folders!",
+ spacer="",
+ )
+ )
self.final_list_folders = final_list_folders
self.list_folders_rejected = list_folders_rejected
def start_panoramic_stitching(self):
-
final_list_folders = self.final_list_folders
list_folders_rejected = self.list_folders_rejected
# gui initialization
- o_interface = Interface(list_folders=final_list_folders, list_folders_rejected=list_folders_rejected)
+ o_interface = Interface(
+ list_folders=final_list_folders, list_folders_rejected=list_folders_rejected
+ )
o_interface.show()
try:
o_interface.load_data()
@@ -235,15 +252,21 @@ def initialization_after_loading_data(self):
# event handler
def remote_control_widget_pressed(self):
- EventHandler.button_pressed(ui=self.ui.remote_control_widget, name="remote_control")
+ EventHandler.button_pressed(
+ ui=self.ui.remote_control_widget, name="remote_control"
+ )
RemoteControlHandler(parent=self)
def remote_control_widget_released(self):
- EventHandler.button_released(ui=self.ui.remote_control_widget, name="remote_control")
+ EventHandler.button_released(
+ ui=self.ui.remote_control_widget, name="remote_control"
+ )
def list_folder_combobox_value_changed(self, new_folder_selected=None):
o_event = EventHandler(parent=self)
- o_event.list_folder_combobox_value_changed(new_folder_selected=new_folder_selected)
+ o_event.list_folder_combobox_value_changed(
+ new_folder_selected=new_folder_selected
+ )
def visibility_checkbox_changed(self, state=None, row=-1):
o_event = EventHandler(parent=self)
@@ -319,7 +342,9 @@ def enable_vertical_profile_checked(self, state):
def left_left_button_pressed(self):
EventHandler.button_pressed(ui=self.ui.left_left_button, name="left_left")
o_event = EventHandler(parent=self)
- o_event.manual_offset_changed(direction="horizontal", nbr_pixel=-DOUBLE_MANUAL_PIXEL_CHANGE)
+ o_event.manual_offset_changed(
+ direction="horizontal", nbr_pixel=-DOUBLE_MANUAL_PIXEL_CHANGE
+ )
self.horizontal_profile_changed()
def left_left_button_released(self):
@@ -328,7 +353,9 @@ def left_left_button_released(self):
def left_button_pressed(self):
EventHandler.button_pressed(ui=self.ui.left_button, name="left")
o_event = EventHandler(parent=self)
- o_event.manual_offset_changed(direction="horizontal", nbr_pixel=-SIMPLE_MANUAL_PIXEL_CHANGE)
+ o_event.manual_offset_changed(
+ direction="horizontal", nbr_pixel=-SIMPLE_MANUAL_PIXEL_CHANGE
+ )
self.horizontal_profile_changed()
def left_button_released(self):
@@ -337,7 +364,9 @@ def left_button_released(self):
def right_right_button_pressed(self):
EventHandler.button_pressed(ui=self.ui.right_right_button, name="right_right")
o_event = EventHandler(parent=self)
- o_event.manual_offset_changed(direction="horizontal", nbr_pixel=DOUBLE_MANUAL_PIXEL_CHANGE)
+ o_event.manual_offset_changed(
+ direction="horizontal", nbr_pixel=DOUBLE_MANUAL_PIXEL_CHANGE
+ )
self.horizontal_profile_changed()
def right_right_button_released(self):
@@ -346,7 +375,9 @@ def right_right_button_released(self):
def right_button_pressed(self):
EventHandler.button_pressed(ui=self.ui.right_button, name="right")
o_event = EventHandler(parent=self)
- o_event.manual_offset_changed(direction="horizontal", nbr_pixel=SIMPLE_MANUAL_PIXEL_CHANGE)
+ o_event.manual_offset_changed(
+ direction="horizontal", nbr_pixel=SIMPLE_MANUAL_PIXEL_CHANGE
+ )
self.horizontal_profile_changed()
def right_button_released(self):
@@ -355,7 +386,9 @@ def right_button_released(self):
def up_up_button_pressed(self):
EventHandler.button_pressed(ui=self.ui.up_up_button, name="up_up")
o_event = EventHandler(parent=self)
- o_event.manual_offset_changed(direction="vertical", nbr_pixel=-DOUBLE_MANUAL_PIXEL_CHANGE)
+ o_event.manual_offset_changed(
+ direction="vertical", nbr_pixel=-DOUBLE_MANUAL_PIXEL_CHANGE
+ )
self.vertical_profile_changed()
def up_up_button_released(self):
@@ -364,7 +397,9 @@ def up_up_button_released(self):
def up_button_pressed(self):
EventHandler.button_pressed(ui=self.ui.up_button, name="up")
o_event = EventHandler(parent=self)
- o_event.manual_offset_changed(direction="vertical", nbr_pixel=-SIMPLE_MANUAL_PIXEL_CHANGE)
+ o_event.manual_offset_changed(
+ direction="vertical", nbr_pixel=-SIMPLE_MANUAL_PIXEL_CHANGE
+ )
self.vertical_profile_changed()
def up_button_released(self):
@@ -373,7 +408,9 @@ def up_button_released(self):
def down_down_button_pressed(self):
EventHandler.button_pressed(ui=self.ui.down_down_button, name="down_down")
o_event = EventHandler(parent=self)
- o_event.manual_offset_changed(direction="vertical", nbr_pixel=DOUBLE_MANUAL_PIXEL_CHANGE)
+ o_event.manual_offset_changed(
+ direction="vertical", nbr_pixel=DOUBLE_MANUAL_PIXEL_CHANGE
+ )
self.vertical_profile_changed()
def down_down_button_released(self):
@@ -382,7 +419,9 @@ def down_down_button_released(self):
def down_button_pressed(self):
EventHandler.button_pressed(ui=self.ui.down_button, name="down")
o_event = EventHandler(parent=self)
- o_event.manual_offset_changed(direction="vertical", nbr_pixel=SIMPLE_MANUAL_PIXEL_CHANGE)
+ o_event.manual_offset_changed(
+ direction="vertical", nbr_pixel=SIMPLE_MANUAL_PIXEL_CHANGE
+ )
self.vertical_profile_changed()
def down_button_released(self):
diff --git a/notebooks/__code/panoramic_stitching/profile.py b/notebooks/__code/panoramic_stitching/profile.py
index ab3dc352..7d936282 100755
--- a/notebooks/__code/panoramic_stitching/profile.py
+++ b/notebooks/__code/panoramic_stitching/profile.py
@@ -15,7 +15,9 @@ def __init__(self, parent=None):
def horizontal_profile_changed(self):
if self.parent.ui.enable_horizontal_profile_checkbox.isChecked():
roi_id = self.parent.horizontal_profile["id"]
- horizontal_roi_dimensions = Profile.get_x_y_width_height_of_roi(roi_id=roi_id)
+ horizontal_roi_dimensions = Profile.get_x_y_width_height_of_roi(
+ roi_id=roi_id
+ )
self.plot_profile(
x=horizontal_roi_dimensions["x"],
y=horizontal_roi_dimensions["y"],
@@ -36,7 +38,9 @@ def vertical_profile_changed(self):
profile_type="vertical",
)
- def plot_profile(self, x=None, y=None, width=None, height=None, profile_type="horizontal"):
+ def plot_profile(
+ self, x=None, y=None, width=None, height=None, profile_type="horizontal"
+ ):
if profile_type == "horizontal":
plot_ui = self.parent.horizontal_profile_plot
dim_to_keep = 0
@@ -88,15 +92,23 @@ def plot_profile(self, x=None, y=None, width=None, height=None, profile_type="ho
# find part of profile that is inside image
x_left_for_profile = np.max([x, left_of_image]) - left_of_image
- x_right_for_profile = np.min([x + width, left_of_image + image_width]) - left_of_image
+ x_right_for_profile = (
+ np.min([x + width, left_of_image + image_width]) - left_of_image
+ )
y_top_for_profile = np.max([y, top_of_image]) - top_of_image
- y_bottom_for_profile = np.min([y + height, top_of_image + image_height]) - top_of_image
+ y_bottom_for_profile = (
+ np.min([y + height, top_of_image + image_height]) - top_of_image
+ )
if profile_type == "horizontal":
- x_axis_of_profile = np.arange(x_left_for_profile, x_right_for_profile) + left_of_image
+ x_axis_of_profile = (
+ np.arange(x_left_for_profile, x_right_for_profile) + left_of_image
+ )
else:
- x_axis_of_profile = np.arange(y_top_for_profile, y_bottom_for_profile) + top_of_image
+ x_axis_of_profile = (
+ np.arange(y_top_for_profile, y_bottom_for_profile) + top_of_image
+ )
y_axis_of_profile = data_dictionary[_file].data[
y_top_for_profile:y_bottom_for_profile,
diff --git a/notebooks/__code/panoramic_stitching/remote_control_handler.py b/notebooks/__code/panoramic_stitching/remote_control_handler.py
index f09d66f2..2e0b1966 100755
--- a/notebooks/__code/panoramic_stitching/remote_control_handler.py
+++ b/notebooks/__code/panoramic_stitching/remote_control_handler.py
@@ -8,7 +8,10 @@
from __code.panoramic_stitching.config_buttons import button
from __code.panoramic_stitching.event_handler import EventHandler
from __code.panoramic_stitching.image_handler import ImageHandler
-from __code.panoramic_stitching.utilities import make_full_file_name_to_static_folder_of, set_widget_size
+from __code.panoramic_stitching.utilities import (
+ make_full_file_name_to_static_folder_of,
+ set_widget_size,
+)
BORDER_RANGE = 50
@@ -41,7 +44,9 @@ def __init__(self, parent=None):
def initialize_widget(self):
_file_path = os.path.dirname(__file__)
- bring_to_focus_released = make_full_file_name_to_static_folder_of(button["bring_to_focus"]["released"])
+ bring_to_focus_released = make_full_file_name_to_static_folder_of(
+ button["bring_to_focus"]["released"]
+ )
self.ui.bring_to_focus.setIcon(QIcon(bring_to_focus_released))
set_widget_size(widget=self.ui.bring_to_focus, width=500, height=203)
self.check_previous_next_buttons_status()
@@ -89,7 +94,9 @@ def bring_to_focus_method(self):
horizontal_profile["y"] = y0 + BORDER_RANGE
self.parent.horizontal_profile = horizontal_profile
- is_horizontal_profile_enabled = self.parent.ui.enable_horizontal_profile_checkbox.isChecked()
+ is_horizontal_profile_enabled = (
+ self.parent.ui.enable_horizontal_profile_checkbox.isChecked()
+ )
o_event = EventHandler(parent=self.parent)
o_event.horizontal_profile(enabled=is_horizontal_profile_enabled)
@@ -106,7 +113,9 @@ def bring_to_focus_method(self):
vertical_profile["x"] = x0 + BORDER_RANGE
self.parent.vertical_profile = vertical_profile
- is_vertical_profile_enabled = self.parent.ui.enable_vertical_profile_checkbox.isChecked()
+ is_vertical_profile_enabled = (
+ self.parent.ui.enable_vertical_profile_checkbox.isChecked()
+ )
o_event = EventHandler(parent=self.parent)
o_event.vertical_profile(enabled=is_vertical_profile_enabled)
diff --git a/notebooks/__code/panoramic_stitching/status_message_config.py b/notebooks/__code/panoramic_stitching/status_message_config.py
index 50759a85..000ea767 100755
--- a/notebooks/__code/panoramic_stitching/status_message_config.py
+++ b/notebooks/__code/panoramic_stitching/status_message_config.py
@@ -9,7 +9,9 @@ class StatusMessageStatus:
warning = "QStatusBar{padding-left:8px;background:rgba(236,236,236,75);color:red;font-weight:normal;}"
-def show_status_message(parent=None, message="", status=StatusMessageStatus.ready, duration_s=None):
+def show_status_message(
+ parent=None, message="", status=StatusMessageStatus.ready, duration_s=None
+):
parent.ui.statusbar.setStyleSheet(status)
if duration_s:
parent.ui.statusbar.showMessage(message, duration_s * 1000)
diff --git a/notebooks/__code/panoramic_stitching_OLD/config.py b/notebooks/__code/panoramic_stitching_OLD/config.py
index 2397e065..a6f56d92 100755
--- a/notebooks/__code/panoramic_stitching_OLD/config.py
+++ b/notebooks/__code/panoramic_stitching_OLD/config.py
@@ -1,10 +1,34 @@
button = {
- "left": {"pressed": "left_arrow_v2_pressed.png", "released": "left_arrow_v2_released.png"},
- "right": {"pressed": "right_arrow_v2_pressed.png", "released": "right_arrow_v2_released.png"},
- "left_left": {"pressed": "left_left_arrow_v2_pressed.png", "released": "left_left_arrow_v2_released.png"},
- "right_right": {"pressed": "right_right_arrow_v2_pressed.png", "released": "right_right_arrow_v2_released.png"},
- "up": {"pressed": "up_arrow_v2_pressed.png", "released": "up_arrow_v2_released.png"},
- "down": {"pressed": "down_arrow_v2_pressed.png", "released": "down_arrow_v2_released.png"},
- "up_up": {"pressed": "up_up_arrow_v2_pressed.png", "released": "up_up_arrow_v2_released.png"},
- "down_down": {"pressed": "down_down_arrow_v2_pressed.png", "released": "down_down_arrow_v2_released.png"},
+ "left": {
+ "pressed": "left_arrow_v2_pressed.png",
+ "released": "left_arrow_v2_released.png",
+ },
+ "right": {
+ "pressed": "right_arrow_v2_pressed.png",
+ "released": "right_arrow_v2_released.png",
+ },
+ "left_left": {
+ "pressed": "left_left_arrow_v2_pressed.png",
+ "released": "left_left_arrow_v2_released.png",
+ },
+ "right_right": {
+ "pressed": "right_right_arrow_v2_pressed.png",
+ "released": "right_right_arrow_v2_released.png",
+ },
+ "up": {
+ "pressed": "up_arrow_v2_pressed.png",
+ "released": "up_arrow_v2_released.png",
+ },
+ "down": {
+ "pressed": "down_arrow_v2_pressed.png",
+ "released": "down_arrow_v2_released.png",
+ },
+ "up_up": {
+ "pressed": "up_up_arrow_v2_pressed.png",
+ "released": "up_up_arrow_v2_released.png",
+ },
+ "down_down": {
+ "pressed": "down_down_arrow_v2_pressed.png",
+ "released": "down_down_arrow_v2_released.png",
+ },
}
diff --git a/notebooks/__code/panoramic_stitching_OLD/gui_initialization.py b/notebooks/__code/panoramic_stitching_OLD/gui_initialization.py
index 81ee7ce1..e87de30d 100755
--- a/notebooks/__code/panoramic_stitching_OLD/gui_initialization.py
+++ b/notebooks/__code/panoramic_stitching_OLD/gui_initialization.py
@@ -94,16 +94,22 @@ def table(self):
# reference image
_combobox_ref = QtGui.QComboBox()
_combobox_ref.blockSignals(True)
- _combobox_ref.currentIndexChanged.connect(self.parent.table_widget_reference_image_changed)
+ _combobox_ref.currentIndexChanged.connect(
+ self.parent.table_widget_reference_image_changed
+ )
_combobox_ref.addItems(self.parent.list_reference["basename_files"])
- _combobox_ref.setCurrentIndex(_dict_of_this_row["reference_combobox_file_index"])
+ _combobox_ref.setCurrentIndex(
+ _dict_of_this_row["reference_combobox_file_index"]
+ )
_combobox_ref.blockSignals(False)
self.parent.ui.tableWidget.setCellWidget(_row, 0, _combobox_ref)
# target image
_combobox = QtGui.QComboBox()
_combobox.blockSignals(True)
- _combobox.currentIndexChanged.connect(self.parent.table_widget_target_image_changed)
+ _combobox.currentIndexChanged.connect(
+ self.parent.table_widget_target_image_changed
+ )
_combobox.addItems(self.parent.list_target["basename_files"])
_combobox.setCurrentIndex(_dict_of_this_row["target_combobox_file_index"])
_combobox.blockSignals(False)
@@ -126,38 +132,54 @@ def widgets(self):
config.button["up_up"]["released"]
)
self.parent.ui.up_up_button.setIcon(QtGui.QIcon(up_up_arrow_file))
- up_arrow_file = GuiInitialization.__make_full_file_name_to_static_folder_of(config.button["up"]["released"])
+ up_arrow_file = GuiInitialization.__make_full_file_name_to_static_folder_of(
+ config.button["up"]["released"]
+ )
self.parent.ui.up_button.setIcon(QtGui.QIcon(up_arrow_file))
- left_left_arrow_file = GuiInitialization.__make_full_file_name_to_static_folder_of(
- config.button["left_left"]["released"]
+ left_left_arrow_file = (
+ GuiInitialization.__make_full_file_name_to_static_folder_of(
+ config.button["left_left"]["released"]
+ )
)
self.parent.ui.left_left_button.setIcon(QtGui.QIcon(left_left_arrow_file))
- left_arrow_file = GuiInitialization.__make_full_file_name_to_static_folder_of(config.button["left"]["released"])
+ left_arrow_file = GuiInitialization.__make_full_file_name_to_static_folder_of(
+ config.button["left"]["released"]
+ )
self.parent.ui.left_button.setIcon(QtGui.QIcon(left_arrow_file))
right_arrow_file = GuiInitialization.__make_full_file_name_to_static_folder_of(
config.button["right"]["released"]
)
self.parent.ui.right_button.setIcon(QtGui.QIcon(right_arrow_file))
- right_right_arrow_file = GuiInitialization.__make_full_file_name_to_static_folder_of(
- config.button["right_right"]["released"]
+ right_right_arrow_file = (
+ GuiInitialization.__make_full_file_name_to_static_folder_of(
+ config.button["right_right"]["released"]
+ )
)
self.parent.ui.right_right_button.setIcon(QtGui.QIcon(right_right_arrow_file))
- down_arrow_file = GuiInitialization.__make_full_file_name_to_static_folder_of(config.button["down"]["released"])
+ down_arrow_file = GuiInitialization.__make_full_file_name_to_static_folder_of(
+ config.button["down"]["released"]
+ )
self.parent.ui.down_button.setIcon(QtGui.QIcon(down_arrow_file))
- down_down_arrow_file = GuiInitialization.__make_full_file_name_to_static_folder_of(
- config.button["down_down"]["released"]
+ down_down_arrow_file = (
+ GuiInitialization.__make_full_file_name_to_static_folder_of(
+ config.button["down_down"]["released"]
+ )
)
self.parent.ui.down_down_button.setIcon(QtGui.QIcon(down_down_arrow_file))
list_ui = [self.parent.ui.left_button, self.parent.ui.right_button]
GuiInitialization.__set_widgets_size(
- widgets=list_ui, width=BUTTON_SIZE["single_arrow"]["width"], height=BUTTON_SIZE["single_arrow"]["height"]
+ widgets=list_ui,
+ width=BUTTON_SIZE["single_arrow"]["width"],
+ height=BUTTON_SIZE["single_arrow"]["height"],
)
list_ui = [self.parent.ui.left_left_button, self.parent.ui.right_right_button]
GuiInitialization.__set_widgets_size(
- widgets=list_ui, width=BUTTON_SIZE["double_arrow"]["width"], height=BUTTON_SIZE["double_arrow"]["height"]
+ widgets=list_ui,
+ width=BUTTON_SIZE["double_arrow"]["width"],
+ height=BUTTON_SIZE["double_arrow"]["height"],
)
list_ui = [self.parent.ui.up_button, self.parent.ui.down_button]
@@ -210,10 +232,14 @@ def load_configuration(self):
"width": int(roi_row_reference["width"]),
"height": int(roi_row_reference["height"]),
}
- master_dict_row["reference_combobox_file_index"] = int(roi_row_reference["file_index"])
+ master_dict_row["reference_combobox_file_index"] = int(
+ roi_row_reference["file_index"]
+ )
roi_row_target = configuration_roi_row["target"]
- master_dict_row["target_combobox_file_index"] = int(roi_row_target["file_index"])
+ master_dict_row["target_combobox_file_index"] = int(
+ roi_row_target["file_index"]
+ )
master_dict_row["target_roi"] = {
"x0": int(roi_row_target["x0"]),
"y0": int(roi_row_target["y0"]),
diff --git a/notebooks/__code/panoramic_stitching_OLD/panoramic_stitching.py b/notebooks/__code/panoramic_stitching_OLD/panoramic_stitching.py
index 436417bb..5b94a206 100755
--- a/notebooks/__code/panoramic_stitching_OLD/panoramic_stitching.py
+++ b/notebooks/__code/panoramic_stitching_OLD/panoramic_stitching.py
@@ -61,7 +61,9 @@ def __init__(self, parent=None, o_norm=None, configuration=""):
self.o_norm = o_norm
self.list_files = self.o_norm.data["sample"]["file_name"]
- self.basename_list_files = [os.path.basename(_file) for _file in self.list_files]
+ self.basename_list_files = [
+ os.path.basename(_file) for _file in self.list_files
+ ]
self.list_data = self.o_norm.data["sample"]["data"]
@@ -121,7 +123,12 @@ def save_roi_changed(self, data_type="reference"):
height = np.max([y0, y1]) - y0
o_utilities.set_roi_to_master_dict(
- row=row_selected, data_type=data_type, x0=x0, y0=y0, width=width, height=height
+ row=row_selected,
+ data_type=data_type,
+ x0=x0,
+ y0=y0,
+ width=width,
+ height=height,
)
# we need to make sure the target roi has the proper size
@@ -141,8 +148,12 @@ def table_widget_selection_changed(self):
row_selected = o_utilities.get_reference_selected(key="index")
# +1 because the target file starts at the second file
- target_file_index_selected = o_utilities.get_target_index_selected_from_row(row=row_selected)
- reference_file_index_selected = o_utilities.get_reference_index_selected_from_row(row=row_selected)
+ target_file_index_selected = o_utilities.get_target_index_selected_from_row(
+ row=row_selected
+ )
+ reference_file_index_selected = (
+ o_utilities.get_reference_index_selected_from_row(row=row_selected)
+ )
reference_data = self.list_reference["data"][reference_file_index_selected]
target_data = self.list_target["data"][target_file_index_selected]
@@ -183,7 +194,9 @@ def display_data(self, data_type="reference", data=[]):
_view_box.setState(_state)
if not first_update:
- _histo_widget.setLevels(self.histogram_level[data_type][0], self.histogram_level[data_type][1])
+ _histo_widget.setLevels(
+ self.histogram_level[data_type][0], self.histogram_level[data_type][1]
+ )
def display_roi(self, data_type="reference"):
o_utilities = Utilities(parent=self)
@@ -231,7 +244,9 @@ def check_status_of_stitching_button(self):
continue
if _target_file in list_target_file:
- o_utilities.set_status_of_this_row_to_message(row=_row, message="Already used!")
+ o_utilities.set_status_of_this_row_to_message(
+ row=_row, message="Already used!"
+ )
list_target_file.add(_target_file)
if len(list_target_file) == len(self.list_target["files"]) - 1:
diff --git a/notebooks/__code/panoramic_stitching_OLD/stiching.py b/notebooks/__code/panoramic_stitching_OLD/stiching.py
index 325ad74b..55542ac0 100755
--- a/notebooks/__code/panoramic_stitching_OLD/stiching.py
+++ b/notebooks/__code/panoramic_stitching_OLD/stiching.py
@@ -16,16 +16,28 @@ def run_fft(self):
o_utilities = Utilities(parent=self.parent)
for _row in master_dict.keys():
- _data_reference = o_utilities.get_image_for_this_row(data_type="reference", row=_row)
- _data_target = o_utilities.get_image_for_this_row(data_type="target", row=_row)
+ _data_reference = o_utilities.get_image_for_this_row(
+ data_type="reference", row=_row
+ )
+ _data_target = o_utilities.get_image_for_this_row(
+ data_type="target", row=_row
+ )
reference_roi = master_dict[_row]["reference_roi"]
- [ref_x0, ref_y0, ref_width, ref_height] = Stitching.retrieve_roi_parameters(roi_dict=reference_roi)
+ [ref_x0, ref_y0, ref_width, ref_height] = Stitching.retrieve_roi_parameters(
+ roi_dict=reference_roi
+ )
target_roi = master_dict[_row]["target_roi"]
- [target_x0, target_y0, _, _] = Stitching.retrieve_roi_parameters(roi_dict=target_roi)
+ [target_x0, target_y0, _, _] = Stitching.retrieve_roi_parameters(
+ roi_dict=target_roi
+ )
- _data_reference_roi = _data_reference[ref_y0 : ref_y0 + ref_height, ref_x0 : ref_x0 + ref_width]
- _data_target_roi = _data_target[target_y0 : target_y0 + ref_height, target_x0 : target_x0 + ref_width]
+ _data_reference_roi = _data_reference[
+ ref_y0 : ref_y0 + ref_height, ref_x0 : ref_x0 + ref_width
+ ]
+ _data_target_roi = _data_target[
+ target_y0 : target_y0 + ref_height, target_x0 : target_x0 + ref_width
+ ]
f_reference = np.fft.fft2(_data_reference_roi)
f_target = np.fft.fft2(_data_target_roi)
@@ -37,7 +49,10 @@ def run_fft(self):
optimum_x0 = pos[1][0]
optimum_y0 = pos[0][0]
- displacement = {"x": target_x0 - ref_x0 + optimum_x0, "y": target_y0 - ref_y0 + optimum_y0}
+ displacement = {
+ "x": target_x0 - ref_x0 + optimum_x0,
+ "y": target_y0 - ref_y0 + optimum_y0,
+ }
master_dict[_row]["displacement"]["x"] = displacement["x"]
master_dict[_row]["displacement"]["y"] = displacement["y"]
@@ -56,14 +71,18 @@ def run(self):
_data_target = list_target_file["data"][_target_file_index]
reference_roi = master_dict[_row]["reference_roi"]
- [ref_x0, ref_y0, ref_width, ref_height] = Stitching.retrieve_roi_parameters(roi_dict=reference_roi)
+ [ref_x0, ref_y0, ref_width, ref_height] = Stitching.retrieve_roi_parameters(
+ roi_dict=reference_roi
+ )
target_roi = master_dict[_row]["target_roi"]
- [starting_target_x0, starting_target_y0, target_width, target_height] = Stitching.retrieve_roi_parameters(
- roi_dict=target_roi
+ [starting_target_x0, starting_target_y0, target_width, target_height] = (
+ Stitching.retrieve_roi_parameters(roi_dict=target_roi)
)
- _data_reference_of_roi = _data_reference[ref_y0 : ref_y0 + ref_height, ref_x0 : ref_x0 + ref_width]
+ _data_reference_of_roi = _data_reference[
+ ref_y0 : ref_y0 + ref_height, ref_x0 : ref_x0 + ref_width
+ ]
# where to start from
moving_target_x0 = starting_target_x0
@@ -82,11 +101,15 @@ def run(self):
print("Reference:")
print(f"x0:{ref_x0}, y0:{ref_y0}, width:{ref_width}, height:{ref_height}")
print("target:")
- print(f"x0:{starting_target_x0}, y0:{starting_target_y0}, width:{target_width}, height:{target_height}")
+ print(
+ f"x0:{starting_target_x0}, y0:{starting_target_y0}, width:{target_width}, height:{target_height}"
+ )
if DEBUG_JSON:
o_utilities = Utilities(parent=self.parent)
- _reference_file_index = o_utilities.get_reference_index_selected_from_row(row=_row)
+ _reference_file_index = (
+ o_utilities.get_reference_index_selected_from_row(row=_row)
+ )
roi_to_export[str(_row)] = {
"reference": {
@@ -108,13 +131,19 @@ def run(self):
counts_and_x0_position_dict = defaultdict(list)
counts_and_y0_position_dict = defaultdict(list)
- counts_3d = np.zeros((final_target_y0 - moving_target_y0 + 1, final_target_x0 - moving_target_x0 + 1))
+ counts_3d = np.zeros(
+ (
+ final_target_y0 - moving_target_y0 + 1,
+ final_target_x0 - moving_target_x0 + 1,
+ )
+ )
x = 0
y = 0
while moving_target_y0 <= final_target_y0:
_data_target_of_roi = _data_target[
- moving_target_y0 : moving_target_y0 + ref_height, moving_target_x0 : moving_target_x0 + ref_width
+ moving_target_y0 : moving_target_y0 + ref_height,
+ moving_target_x0 : moving_target_x0 + ref_width,
]
_diff_array = np.abs(_data_target_of_roi - _data_reference_of_roi)
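The `run` method reformatted in this hunk walks candidate target offsets and keeps the position whose window differs least from the reference ROI (the `_diff_array` line above). A compact sketch of that brute-force search, with an illustrative search window and helper name; the repository version records the per-candidate scores in a counts array instead of a single running minimum:

import numpy as np

def brute_force_offset(reference_roi, target_image, x_range, y_range):
    """Scan (x0, y0) candidates and return the target window position with the
    smallest mean absolute difference to the reference ROI (sketch only)."""
    roi_h, roi_w = reference_roi.shape
    best_score = np.inf
    best_xy = (x_range[0], y_range[0])
    for y0 in y_range:
        for x0 in x_range:
            candidate = target_image[y0:y0 + roi_h, x0:x0 + roi_w]
            score = np.mean(np.abs(candidate - reference_roi))
            if score < best_score:
                best_score, best_xy = score, (x0, y0)
    return best_xy

# Example: the reference patch was cut out of the target image at (x0, y0) = (40, 25)
rng = np.random.default_rng(1)
target = rng.random((100, 120))
reference = target[25:25 + 16, 40:40 + 16]
print(brute_force_offset(reference, target, range(30, 50), range(15, 35)))   # (40, 25)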
@@ -149,7 +178,8 @@ def run(self):
self.parent.debug_big_array_roi_ref = _data_reference_of_roi
self.parent.debug_big_array_roi_target = _data_target[
- optimum_y0 : optimum_y0 + ref_height, optimum_x0 : optimum_x0 + ref_width
+ optimum_y0 : optimum_y0 + ref_height,
+ optimum_x0 : optimum_x0 + ref_width,
]
if DEBUG_JSON:
diff --git a/notebooks/__code/panoramic_stitching_OLD/utilities.py b/notebooks/__code/panoramic_stitching_OLD/utilities.py
index d7e7db73..0ef5a96e 100755
--- a/notebooks/__code/panoramic_stitching_OLD/utilities.py
+++ b/notebooks/__code/panoramic_stitching_OLD/utilities.py
@@ -22,7 +22,15 @@ def get_roi_from_master_dict(self, full_file_name=""):
height = master_dict["height"]
return [x0, y0, width, height]
- def set_roi_to_master_dict(self, row=0, data_type="reference", x0=None, y0=None, width=np.nan, height=np.nan):
+ def set_roi_to_master_dict(
+ self,
+ row=0,
+ data_type="reference",
+ x0=None,
+ y0=None,
+ width=np.nan,
+ height=np.nan,
+ ):
roi_key = f"{data_type}_roi"
roi_dict = self.parent.master_dict[row][roi_key]
if x0:
@@ -54,7 +62,9 @@ def get_reference_selected(self, key="files"):
def get_image_for_this_row(self, data_type="reference", row=0):
if data_type == "reference":
- combobox_index_selected = self.get_reference_index_selected_from_row(row=row)
+ combobox_index_selected = self.get_reference_index_selected_from_row(
+ row=row
+ )
else:
combobox_index_selected = self.get_target_index_selected_from_row(row=row)
return self.parent.list_reference["data"][combobox_index_selected]
@@ -99,12 +109,16 @@ def reset_all_status(self):
@staticmethod
def button_pressed(ui=None, name="left"):
- full_file = Utilities.__make_full_file_name_to_static_folder_of(config.button[name]["pressed"])
+ full_file = Utilities.__make_full_file_name_to_static_folder_of(
+ config.button[name]["pressed"]
+ )
ui.setIcon(QtGui.QIcon(full_file))
@staticmethod
def button_released(ui=None, name="left"):
- full_file = Utilities.__make_full_file_name_to_static_folder_of(config.button[name]["released"])
+ full_file = Utilities.__make_full_file_name_to_static_folder_of(
+ config.button[name]["released"]
+ )
ui.setIcon(QtGui.QIcon(full_file))
@staticmethod
diff --git a/notebooks/__code/panoramic_stitching_for_tof/automatically_stitch.py b/notebooks/__code/panoramic_stitching_for_tof/automatically_stitch.py
index 50e6faed..ac9b7049 100755
--- a/notebooks/__code/panoramic_stitching_for_tof/automatically_stitch.py
+++ b/notebooks/__code/panoramic_stitching_for_tof/automatically_stitch.py
@@ -21,7 +21,9 @@ def run(self):
group_selected = o_get.get_combobox_folder_selected()
# first calculate the long and lift position versus pixel coefficient from the ref. group
- group_reference_offset_dictionary = self.parent.offset_dictionary[group_selected]
+ group_reference_offset_dictionary = self.parent.offset_dictionary[
+ group_selected
+ ]
group_reference_data_dictionary = self.parent.data_dictionary[group_selected]
list_files = group_reference_offset_dictionary.keys()
@@ -29,8 +31,12 @@ def run(self):
list_pixel_vs_motor_lift_axis_value = []
for _file_index, _file in enumerate(list_files):
- long_axis_value = group_reference_data_dictionary[_file].metadata["MotLongAxis.RBV"]
- lift_axis_value = group_reference_data_dictionary[_file].metadata["MotLiftTable.RBV"]
+ long_axis_value = group_reference_data_dictionary[_file].metadata[
+ "MotLongAxis.RBV"
+ ]
+ lift_axis_value = group_reference_data_dictionary[_file].metadata[
+ "MotLiftTable.RBV"
+ ]
if _file_index == 0:
long_axis_reference_value = long_axis_value
@@ -75,15 +81,29 @@ def run(self):
# get xoffset and yofffset pixel/motor position of each image of reference group
for _file_index, _file in enumerate(list_files):
if _file_index == 0:
- long_axis_value_image_reference = data_dictionary[_file].metadata["MotLongAxis.RBV"]
- lift_axis_value_image_reference = data_dictionary[_file].metadata["MotLiftTable.RBV"]
+ long_axis_value_image_reference = data_dictionary[_file].metadata[
+ "MotLongAxis.RBV"
+ ]
+ lift_axis_value_image_reference = data_dictionary[_file].metadata[
+ "MotLiftTable.RBV"
+ ]
continue
- long_axis_value = data_dictionary[_file].metadata["MotLongAxis.RBV"] - long_axis_value_image_reference
- lift_axis_value = data_dictionary[_file].metadata["MotLiftTable.RBV"] - lift_axis_value_image_reference
-
- xoffset_of_this_file = int(long_axis_value * list_pixel_vs_motor_long_axis_value[_file_index])
- yoffset_of_this_file = int(lift_axis_value * list_pixel_vs_motor_lift_axis_value[_file_index])
+ long_axis_value = (
+ data_dictionary[_file].metadata["MotLongAxis.RBV"]
+ - long_axis_value_image_reference
+ )
+ lift_axis_value = (
+ data_dictionary[_file].metadata["MotLiftTable.RBV"]
+ - lift_axis_value_image_reference
+ )
+
+ xoffset_of_this_file = int(
+ long_axis_value * list_pixel_vs_motor_long_axis_value[_file_index]
+ )
+ yoffset_of_this_file = int(
+ lift_axis_value * list_pixel_vs_motor_lift_axis_value[_file_index]
+ )
group_offset_dictionary[_file]["xoffset"] = xoffset_of_this_file
group_offset_dictionary[_file]["yoffset"] = yoffset_of_this_file
diff --git a/notebooks/__code/panoramic_stitching_for_tof/best_contrast_tab_handler.py b/notebooks/__code/panoramic_stitching_for_tof/best_contrast_tab_handler.py
index 8a5c23d8..345e201d 100755
--- a/notebooks/__code/panoramic_stitching_for_tof/best_contrast_tab_handler.py
+++ b/notebooks/__code/panoramic_stitching_for_tof/best_contrast_tab_handler.py
@@ -13,7 +13,9 @@ def __init__(self, parent=None):
self.parent = parent
def display_selected_folder(self):
- folder_name = os.path.basename(self.parent.ui.list_folders_combobox.currentText())
+ folder_name = os.path.basename(
+ self.parent.ui.list_folders_combobox.currentText()
+ )
if self.parent.ui.raw_image_radioButton.isChecked():
image = self.parent.integrated_images[folder_name].data
@@ -24,7 +26,9 @@ def display_selected_folder(self):
# histogram_level = self.parent.histogram_level_best_contrast
status_best_contrast_button = True
- self.parent.ui.best_contrast_bin_size_value.setEnabled(status_best_contrast_button)
+ self.parent.ui.best_contrast_bin_size_value.setEnabled(
+ status_best_contrast_button
+ )
self.parent.ui.bin_size_label.setEnabled(status_best_contrast_button)
# _view = self.parent.ui.image_view_best_contrast.getView()
@@ -98,7 +102,9 @@ def calculate_best_contrast(self):
list_mean_counts_of_bin = []
while left_bin_index < len(list_bin) - 1:
- data_bin = all_data_of_folder[list_bin[left_bin_index] : list_bin[left_bin_index + 1]]
+ data_bin = all_data_of_folder[
+ list_bin[left_bin_index] : list_bin[left_bin_index + 1]
+ ]
mean_data_bin = np.nanmean(data_bin)
list_mean_counts_of_bin.append(mean_data_bin)
left_bin_index += 1
@@ -107,7 +113,8 @@ def calculate_best_contrast(self):
for _bin_index_numerator in np.arange(len(list_bin) - 1):
for _bin_index_denominator in np.arange(len(list_bin) - 1):
bin_ratio = (
- list_mean_counts_of_bin[_bin_index_numerator] / list_mean_counts_of_bin[_bin_index_denominator]
+ list_mean_counts_of_bin[_bin_index_numerator]
+ / list_mean_counts_of_bin[_bin_index_denominator]
)
diff_with_1 = np.abs(1 - bin_ratio)
@@ -126,12 +133,16 @@ def calculate_best_contrast(self):
all_data_of_folder = all_data[folder_key]
image1 = all_data_of_folder[
- list_bin[best_bin_index["numerator"]] : list_bin[best_bin_index["numerator"] + 1]
+ list_bin[best_bin_index["numerator"]] : list_bin[
+ best_bin_index["numerator"] + 1
+ ]
]
image_numerator_mean = np.mean(image1, axis=0)
image2 = all_data_of_folder[
- list_bin[best_bin_index["denominator"]] : list_bin[best_bin_index["denominator"] + 1]
+ list_bin[best_bin_index["denominator"]] : list_bin[
+ best_bin_index["denominator"] + 1
+ ]
]
image_denominator_mean = np.mean(image2, axis=0)
@@ -151,5 +162,7 @@ def calculate_best_contrast(self):
self.parent.eventProgress.setVisible(False)
self.parent.ui.best_contrast_image_radioButton.setEnabled(True)
- self.parent.ui.statusbar.showMessage("Done calculating the best contrast images!", 10000)
+ self.parent.ui.statusbar.showMessage(
+ "Done calculating the best contrast images!", 10000
+ )
QApplication.restoreOverrideCursor()
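The best-contrast hunks above bin the TOF stack, compare the mean counts of every bin pair, and build the display image from the two selected bins. The exact selection rule is not fully visible in the diff; the sketch below assumes, from the variable names (`bin_ratio`, `diff_with_1`, `best_bin_index`), that the pair whose count ratio deviates most from 1 wins and that the displayed image is the ratio of the two bin-mean images:

import numpy as np

def best_contrast_pair(stack, bin_size):
    """Split a (n_images, h, w) stack into bins, then pick the bin pair whose
    mean counts differ the most (ratio farthest from 1). Sketch only; the
    selection criterion and the final image are assumptions."""
    n_images = stack.shape[0]
    edges = list(range(0, n_images, bin_size)) + [n_images]
    bin_means = [np.nanmean(stack[edges[i]:edges[i + 1]])
                 for i in range(len(edges) - 1)]

    best = {"numerator": 0, "denominator": 0, "diff_with_1": -1.0}
    for num in range(len(bin_means)):
        for den in range(len(bin_means)):
            diff_with_1 = abs(1.0 - bin_means[num] / bin_means[den])
            if diff_with_1 > best["diff_with_1"]:
                best = {"numerator": num, "denominator": den,
                        "diff_with_1": diff_with_1}

    image_num = stack[edges[best["numerator"]]:edges[best["numerator"] + 1]].mean(axis=0)
    image_den = stack[edges[best["denominator"]]:edges[best["denominator"] + 1]].mean(axis=0)
    return best, image_num / image_den

# Example with a fake 50-frame, 8x8 stack whose counts drift over time
rng = np.random.default_rng(2)
stack = rng.random((50, 8, 8)) + np.linspace(0.5, 2.5, 50)[:, None, None]
best, contrast_image = best_contrast_pair(stack, bin_size=10)
print(best["numerator"], best["denominator"], contrast_image.shape)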
diff --git a/notebooks/__code/panoramic_stitching_for_tof/coarse_tab_handler.py b/notebooks/__code/panoramic_stitching_for_tof/coarse_tab_handler.py
index 03a35db1..842f9868 100755
--- a/notebooks/__code/panoramic_stitching_for_tof/coarse_tab_handler.py
+++ b/notebooks/__code/panoramic_stitching_for_tof/coarse_tab_handler.py
@@ -25,7 +25,9 @@ def initialize_table(self):
o_table.insert_empty_column(_col)
_widget = QComboBox()
_widget.addItems(short_list_folders)
- _widget.currentIndexChanged.connect(self.parent.coarse_alignment_table_combobox_changed)
+ _widget.currentIndexChanged.connect(
+ self.parent.coarse_alignment_table_combobox_changed
+ )
o_table.insert_widget(row=_row, column=_col, widget=_widget)
column_width = [200 for _ in np.arange(nbr_column)]
@@ -63,8 +65,12 @@ def combobox_changed(self):
panoramic_height = height
# number of empty rows and columns before first file selected
- nbr_empty_rows = self.get_number_of_empty_rows_from_top(nbr_row=nbr_row, nbr_column=nbr_column)
- nbr_empty_columns = self.get_number_of_empty_columns_from_left(nbr_row=nbr_row, nbr_column=nbr_column)
+ nbr_empty_rows = self.get_number_of_empty_rows_from_top(
+ nbr_row=nbr_row, nbr_column=nbr_column
+ )
+ nbr_empty_columns = self.get_number_of_empty_columns_from_left(
+ nbr_row=nbr_row, nbr_column=nbr_column
+ )
panoramic_image = np.zeros((panoramic_height, panoramic_width))
for _row in np.arange(nbr_row):
diff --git a/notebooks/__code/panoramic_stitching_for_tof/data_initialization.py b/notebooks/__code/panoramic_stitching_for_tof/data_initialization.py
index 6fbcaf51..584624df 100755
--- a/notebooks/__code/panoramic_stitching_for_tof/data_initialization.py
+++ b/notebooks/__code/panoramic_stitching_for_tof/data_initialization.py
@@ -28,8 +28,12 @@ def offset_table(self):
o_coarse = CoarseTabHandler(parent=self.parent)
nbr_row = self.parent.ui.coarse_alignment_tableWidget.rowCount()
nbr_column = self.parent.ui.coarse_alignment_tableWidget.columnCount()
- nbr_empty_rows = o_coarse.get_number_of_empty_rows_from_top(nbr_row=nbr_row, nbr_column=nbr_column)
- nbr_empty_columns = o_coarse.get_number_of_empty_columns_from_left(nbr_row=nbr_row, nbr_column=nbr_column)
+ nbr_empty_rows = o_coarse.get_number_of_empty_rows_from_top(
+ nbr_row=nbr_row, nbr_column=nbr_column
+ )
+ nbr_empty_columns = o_coarse.get_number_of_empty_columns_from_left(
+ nbr_row=nbr_row, nbr_column=nbr_column
+ )
nbr_row = nbr_column = nbr_folders
for _row in np.arange(nbr_row):
@@ -39,7 +43,11 @@ def offset_table(self):
if not folder_name == "":
xoffset = (_column - nbr_empty_columns) * image_width
yoffset = (_row - nbr_empty_rows) * image_height
- _offset_dict = {"xoffset": xoffset, "yoffset": yoffset, "visible": True}
+ _offset_dict = {
+ "xoffset": xoffset,
+ "yoffset": yoffset,
+ "visible": True,
+ }
offset_dictionary[folder_name] = _offset_dict
self.parent.offset_dictionary = offset_dictionary
diff --git a/notebooks/__code/panoramic_stitching_for_tof/event_handler.py b/notebooks/__code/panoramic_stitching_for_tof/event_handler.py
index c3c71946..cef0a3bf 100755
--- a/notebooks/__code/panoramic_stitching_for_tof/event_handler.py
+++ b/notebooks/__code/panoramic_stitching_for_tof/event_handler.py
@@ -42,7 +42,9 @@ def check_validate_coarse_alignment_button(self):
if len(list_of_files) == 0:
error_message = "Select the position of the images!"
validate_button = False
- elif (len(list_of_files) == len(list_folders)) and (len(set(list_of_files)) == len(list_folders)):
+ elif (len(list_of_files) == len(list_folders)) and (
+ len(set(list_of_files)) == len(list_folders)
+ ):
validate_button = True
else:
validate_button = False
diff --git a/notebooks/__code/panoramic_stitching_for_tof/export.py b/notebooks/__code/panoramic_stitching_for_tof/export.py
index dc86ed55..08416da5 100755
--- a/notebooks/__code/panoramic_stitching_for_tof/export.py
+++ b/notebooks/__code/panoramic_stitching_for_tof/export.py
@@ -8,7 +8,10 @@
from qtpy.QtWidgets import QFileDialog
from __code.file_handler import copy_and_rename_files_to_folder, make_or_reset_folder
-from __code.panoramic_stitching_for_tof.image_handler import HORIZONTAL_MARGIN, VERTICAL_MARGIN
+from __code.panoramic_stitching_for_tof.image_handler import (
+ HORIZONTAL_MARGIN,
+ VERTICAL_MARGIN,
+)
FILE_PREFIX = "image_"
@@ -21,7 +24,8 @@ def run(self):
output_folder = QFileDialog.getExistingDirectory(
self.parent,
directory=self.parent.working_dir,
- caption="Select where the folder containing the " "panoramic images will be created!",
+ caption="Select where the folder containing the "
+ "panoramic images will be created!",
options=QFileDialog.ShowDirsOnly,
)
if output_folder:
@@ -63,26 +67,39 @@ def create_panoramic_images(self, output_folder=None):
if _folder_index == 0:
panoramic_image[
- yoffset + VERTICAL_MARGIN : yoffset + image_height + VERTICAL_MARGIN,
- xoffset + HORIZONTAL_MARGIN : xoffset + image_width + HORIZONTAL_MARGIN,
+ yoffset + VERTICAL_MARGIN : yoffset
+ + image_height
+ + VERTICAL_MARGIN,
+ xoffset + HORIZONTAL_MARGIN : xoffset
+ + image_width
+ + HORIZONTAL_MARGIN,
] = image
continue
temp_big_image = np.zeros((panoramic_height, panoramic_width))
temp_big_image[
- yoffset + VERTICAL_MARGIN : yoffset + image_height + VERTICAL_MARGIN,
- xoffset + HORIZONTAL_MARGIN : xoffset + image_width + HORIZONTAL_MARGIN,
+ yoffset + VERTICAL_MARGIN : yoffset
+ + image_height
+ + VERTICAL_MARGIN,
+ xoffset + HORIZONTAL_MARGIN : xoffset
+ + image_width
+ + HORIZONTAL_MARGIN,
] = image
- where_temp_big_image_has_value_only = np.where((temp_big_image != 0) & (panoramic_image == 0))
- where_both_images_overlap = np.where((panoramic_image != 0) & (temp_big_image != 0))
+ where_temp_big_image_has_value_only = np.where(
+ (temp_big_image != 0) & (panoramic_image == 0)
+ )
+ where_both_images_overlap = np.where(
+ (panoramic_image != 0) & (temp_big_image != 0)
+ )
panoramic_image[where_temp_big_image_has_value_only] = temp_big_image[
where_temp_big_image_has_value_only
]
panoramic_image[where_both_images_overlap] = (
- panoramic_image[where_both_images_overlap] + temp_big_image[where_both_images_overlap]
+ panoramic_image[where_both_images_overlap]
+ + temp_big_image[where_both_images_overlap]
) / 2
file_name = FILE_PREFIX + f"{_file_index:04d}.tiff"
@@ -96,7 +113,9 @@ def create_panoramic_images(self, output_folder=None):
def export_images(self, output_folder=None):
new_folder_name = os.path.basename(self.parent.working_dir) + "_panoramic"
- self.parent.ui.statusbar.showMessage(f"Exporting images in folder {new_folder_name}")
+ self.parent.ui.statusbar.showMessage(
+ f"Exporting images in folder {new_folder_name}"
+ )
QtGui.QGuiApplication.processEvents()
new_output_folder_name = os.path.join(output_folder, new_folder_name)
@@ -117,7 +136,9 @@ def export_images(self, output_folder=None):
self.copy_txt_files_to_output_folder(output_folder=new_output_folder_name)
- self.parent.ui.statusbar.showMessage(f"{new_output_folder_name} has been created!", 10000) # 10s
+ self.parent.ui.statusbar.showMessage(
+ f"{new_output_folder_name} has been created!", 10000
+ ) # 10s
QtGui.QGuiApplication.processEvents()
def copy_txt_files_to_output_folder(self, output_folder=None):
@@ -133,5 +154,7 @@ def copy_txt_files_to_output_folder(self, output_folder=None):
list_new_file_names.append(new_name)
copy_and_rename_files_to_folder(
- list_files=list_txt_files, new_list_files_names=list_new_file_names, output_folder=output_folder
+ list_files=list_txt_files,
+ new_list_files_names=list_new_file_names,
+ output_folder=output_folder,
)
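The `create_panoramic_images` hunks place each frame into a margin-padded canvas and, where two frames overlap, keep the average of both contributions. A condensed sketch of that compositing rule (margin and canvas sizes are illustrative):

import numpy as np

HORIZONTAL_MARGIN = VERTICAL_MARGIN = 10     # illustrative margin values

def composite(images_with_offsets, panoramic_height, panoramic_width):
    """Paste images at their (xoffset, yoffset); pixels covered by two images
    get the average of the two values (sketch of the diffed logic)."""
    panoramic = np.zeros((panoramic_height, panoramic_width))
    for image, xoffset, yoffset in images_with_offsets:
        h, w = image.shape
        temp = np.zeros_like(panoramic)
        temp[yoffset + VERTICAL_MARGIN: yoffset + h + VERTICAL_MARGIN,
             xoffset + HORIZONTAL_MARGIN: xoffset + w + HORIZONTAL_MARGIN] = image
        only_new = (temp != 0) & (panoramic == 0)
        overlap = (temp != 0) & (panoramic != 0)
        panoramic[only_new] = temp[only_new]
        panoramic[overlap] = (panoramic[overlap] + temp[overlap]) / 2
    return panoramic

# Two 50x50 tiles overlapping by 10 columns
tile = np.ones((50, 50))
result = composite([(tile, 0, 0), (tile * 3, 40, 0)], 80, 120)
print(result[15, 30], result[15, 55], result[15, 75])   # 1.0, 2.0 (averaged), 3.0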
diff --git a/notebooks/__code/panoramic_stitching_for_tof/fine_tab_handler.py b/notebooks/__code/panoramic_stitching_for_tof/fine_tab_handler.py
index ec1ac54b..d7e7a789 100755
--- a/notebooks/__code/panoramic_stitching_for_tof/fine_tab_handler.py
+++ b/notebooks/__code/panoramic_stitching_for_tof/fine_tab_handler.py
@@ -36,11 +36,18 @@ def initialize_table_of_offset(self):
else:
editable_flag = editable_columns_boolean[_column_index]
- o_table.insert_item(row=_row_index, column=_column_index, value=_text, editable=editable_flag)
+ o_table.insert_item(
+ row=_row_index,
+ column=_column_index,
+ value=_text,
+ editable=editable_flag,
+ )
# checkbox to turn on/off the visibility of the row
hori_layout = QHBoxLayout()
- spacer_item_left = QSpacerItem(408, 20, QSizePolicy.Expanding, QSizePolicy.Expanding)
+ spacer_item_left = QSpacerItem(
+ 408, 20, QSizePolicy.Expanding, QSizePolicy.Expanding
+ )
hori_layout.addItem(spacer_item_left)
check_box = QCheckBox()
if offset_entry["visible"]:
@@ -50,10 +57,15 @@ def initialize_table_of_offset(self):
check_box.setCheckState(_state)
check_box.stateChanged.connect(
- lambda state=0, row=_row_index: self.parent.visibility_checkbox_changed(state=state, row=row)
+ lambda state=0,
+ row=_row_index: self.parent.visibility_checkbox_changed(
+ state=state, row=row
+ )
)
hori_layout.addWidget(check_box)
- spacer_item_right = QSpacerItem(408, 20, QSizePolicy.Expanding, QSizePolicy.Expanding)
+ spacer_item_right = QSpacerItem(
+ 408, 20, QSizePolicy.Expanding, QSizePolicy.Expanding
+ )
hori_layout.addItem(spacer_item_right)
cell_widget = QWidget()
cell_widget.setLayout(hori_layout)
@@ -106,7 +118,9 @@ def roi_box_changed(
self,
roi_id=None,
):
- region = roi_id.getArraySlice(self.parent.current_live_image, self.parent.ui.image_view.imageItem)
+ region = roi_id.getArraySlice(
+ self.parent.current_live_image, self.parent.ui.image_view.imageItem
+ )
x0 = region[0][0].start
y0 = region[0][1].start
@@ -143,13 +157,17 @@ def from_to_button_pushed(self):
o_table = TableHandler(table_ui=self.parent.ui.tableWidget)
row_selected = o_table.get_row_selected()
- current_xoffset_of_selected_row = int(o_table.get_item_str_from_cell(row=row_selected, column=1))
+ current_xoffset_of_selected_row = int(
+ o_table.get_item_str_from_cell(row=row_selected, column=1)
+ )
new_xoffset = int(current_xoffset_of_selected_row - delta_x)
self.parent.ui.tableWidget.item(row_selected, 1).setText(str(new_xoffset))
o_event = TOFEventHandler(parent=self.parent)
o_event.save_table_offset_of_this_cell(row=row_selected, column=1)
- current_yoffset_of_selected_row = int(o_table.get_item_str_from_cell(row=row_selected, column=2))
+ current_yoffset_of_selected_row = int(
+ o_table.get_item_str_from_cell(row=row_selected, column=2)
+ )
new_yoffset = current_yoffset_of_selected_row - delta_y
self.parent.ui.tableWidget.item(row_selected, 2).setText(str(new_yoffset))
o_event.save_table_offset_of_this_cell(row=row_selected, column=2)
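`from_to_button_pushed`, reformatted above, reads the current x/y offsets of the selected table row and shifts them by the delta between the "from" and "to" ROIs. The delta itself is computed earlier in the method and is not shown in the diff, so the sign convention below is an assumption; the rest mirrors the visible bookkeeping:

# Illustrative stand-ins for the two ROIs and the selected offset-table row
from_roi = {"x": 120, "y": 80}               # where the feature currently sits
to_roi = {"x": 150, "y": 60}                 # where it should end up
table_row = {"xoffset": 500, "yoffset": 300}

delta_x = from_roi["x"] - to_roi["x"]        # assumed sign convention
delta_y = from_roi["y"] - to_roi["y"]

# apply the correction to the stored offsets, as the diffed handler does
table_row["xoffset"] = int(table_row["xoffset"] - delta_x)
table_row["yoffset"] = int(table_row["yoffset"] - delta_y)
print(table_row)                             # {'xoffset': 530, 'yoffset': 280}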
diff --git a/notebooks/__code/panoramic_stitching_for_tof/gui_initialization.py b/notebooks/__code/panoramic_stitching_for_tof/gui_initialization.py
index 19ba1655..90988f86 100755
--- a/notebooks/__code/panoramic_stitching_for_tof/gui_initialization.py
+++ b/notebooks/__code/panoramic_stitching_for_tof/gui_initialization.py
@@ -12,7 +12,10 @@
from __code.panoramic_stitching.config_buttons import button
from __code.panoramic_stitching.gui_handler import GuiHandler
from __code.panoramic_stitching.mplcanvas import MplCanvas
-from __code.panoramic_stitching.utilities import make_full_file_name_to_static_folder_of, set_widgets_size
+from __code.panoramic_stitching.utilities import (
+ make_full_file_name_to_static_folder_of,
+ set_widgets_size,
+)
from __code.panoramic_stitching_for_tof.coarse_tab_handler import CoarseTabHandler
@@ -56,7 +59,9 @@ def splitter(self):
def pyqtgraph(self):
# calculate best contrast images
_view1 = pg.PlotItem()
- self.parent.ui.image_view_best_contrast = pg.ImageView(view=_view1, name="view1")
+ self.parent.ui.image_view_best_contrast = pg.ImageView(
+ view=_view1, name="view1"
+ )
self.parent.ui.image_view_best_contrast.ui.roiBtn.hide()
self.parent.ui.image_view_best_contrast.ui.menuBtn.hide()
image_layout_best_contrast = QVBoxLayout()
@@ -69,7 +74,9 @@ def pyqtgraph(self):
self.parent.ui.image_view_coarse_alignment.ui.roiBtn.hide()
self.parent.ui.image_view_coarse_alignment.ui.menuBtn.hide()
image_layout_coarse_alignment = QVBoxLayout()
- image_layout_coarse_alignment.addWidget(self.parent.ui.image_view_coarse_alignment)
+ image_layout_coarse_alignment.addWidget(
+ self.parent.ui.image_view_coarse_alignment
+ )
self.parent.ui.coarse_alignment_widget.setLayout(image_layout_coarse_alignment)
# stitch images
@@ -110,22 +117,38 @@ def widgets(self):
# move buttons
_file_path = os.path.dirname(__file__)
- up_up_arrow_file = make_full_file_name_to_static_folder_of(button["up_up"]["released"])
+ up_up_arrow_file = make_full_file_name_to_static_folder_of(
+ button["up_up"]["released"]
+ )
self.parent.ui.up_up_button.setIcon(QIcon(up_up_arrow_file))
- up_arrow_file = make_full_file_name_to_static_folder_of(button["up"]["released"])
+ up_arrow_file = make_full_file_name_to_static_folder_of(
+ button["up"]["released"]
+ )
self.parent.ui.up_button.setIcon(QIcon(up_arrow_file))
- left_left_arrow_file = make_full_file_name_to_static_folder_of(button["left_left"]["released"])
+ left_left_arrow_file = make_full_file_name_to_static_folder_of(
+ button["left_left"]["released"]
+ )
self.parent.ui.left_left_button.setIcon(QIcon(left_left_arrow_file))
- left_arrow_file = make_full_file_name_to_static_folder_of(button["left"]["released"])
+ left_arrow_file = make_full_file_name_to_static_folder_of(
+ button["left"]["released"]
+ )
self.parent.ui.left_button.setIcon(QIcon(left_arrow_file))
- right_arrow_file = make_full_file_name_to_static_folder_of(button["right"]["released"])
+ right_arrow_file = make_full_file_name_to_static_folder_of(
+ button["right"]["released"]
+ )
self.parent.ui.right_button.setIcon(QIcon(right_arrow_file))
- right_right_arrow_file = make_full_file_name_to_static_folder_of(button["right_right"]["released"])
+ right_right_arrow_file = make_full_file_name_to_static_folder_of(
+ button["right_right"]["released"]
+ )
self.parent.ui.right_right_button.setIcon(QIcon(right_right_arrow_file))
- down_arrow_file = make_full_file_name_to_static_folder_of(button["down"]["released"])
+ down_arrow_file = make_full_file_name_to_static_folder_of(
+ button["down"]["released"]
+ )
self.parent.ui.down_button.setIcon(QIcon(down_arrow_file))
- down_down_arrow_file = make_full_file_name_to_static_folder_of(button["down_down"]["released"])
+ down_down_arrow_file = make_full_file_name_to_static_folder_of(
+ button["down_down"]["released"]
+ )
self.parent.ui.down_down_button.setIcon(QIcon(down_down_arrow_file))
list_ui = [self.parent.ui.left_button, self.parent.ui.right_button]
@@ -156,14 +179,21 @@ def widgets(self):
height=self.button_size["double_vertical_arrow"]["height"],
)
- state_hori_matplotlib = self.parent.ui.enable_horizontal_profile_checkbox.isChecked()
+ state_hori_matplotlib = (
+ self.parent.ui.enable_horizontal_profile_checkbox.isChecked()
+ )
o_gui = GuiHandler(parent=self.parent)
o_gui.enabled_horizontal_profile_widgets(enabled=state_hori_matplotlib)
- state_verti_matplotlib = self.parent.ui.enable_vertical_profile_checkbox.isChecked()
+ state_verti_matplotlib = (
+ self.parent.ui.enable_vertical_profile_checkbox.isChecked()
+ )
o_gui.enabled_vertical_profile_widgets(enabled=state_verti_matplotlib)
- profile_sliders = [self.parent.ui.horizontal_profile_width_slider, self.parent.ui.vertical_profile_width_slider]
+ profile_sliders = [
+ self.parent.ui.horizontal_profile_width_slider,
+ self.parent.ui.vertical_profile_width_slider,
+ ]
for _slider in profile_sliders:
_slider.setMinimum(self.parent.width_profile["min"])
_slider.setMaximum(self.parent.width_profile["max"])
@@ -187,7 +217,10 @@ def after_loading_data(self):
self.parent.best_contrast_list_folders_combobox_changed()
# bin size of best contrast (nbr of images / 100 by default)
- bin_size = int(self.parent.nbr_files_per_folder / self.parent.default_best_contrast_bin_size_divider)
+ bin_size = int(
+ self.parent.nbr_files_per_folder
+ / self.parent.default_best_contrast_bin_size_divider
+ )
self.parent.ui.best_contrast_bin_size_value.setText(str(bin_size))
self.parent.bin_size_text_field_return_pressed()
diff --git a/notebooks/__code/panoramic_stitching_for_tof/image_handler.py b/notebooks/__code/panoramic_stitching_for_tof/image_handler.py
index cd273a7f..22dfd5f4 100755
--- a/notebooks/__code/panoramic_stitching_for_tof/image_handler.py
+++ b/notebooks/__code/panoramic_stitching_for_tof/image_handler.py
@@ -25,7 +25,9 @@ def update_contour_plot(self):
o_table = TableHandler(table_ui=self.parent.ui.tableWidget)
row_selected = o_table.get_row_selected()
- name_of_file_selected = o_table.get_item_str_from_cell(row=row_selected, column=0)
+ name_of_file_selected = o_table.get_item_str_from_cell(
+ row=row_selected, column=0
+ )
offset_dictionary = self.parent.offset_dictionary
@@ -102,8 +104,12 @@ def update_current_panoramic_image(self):
yoffset = offset_dictionary[_folder]["yoffset"]
panoramic_image[
- yoffset + VERTICAL_MARGIN : yoffset + image_height + VERTICAL_MARGIN,
- xoffset + HORIZONTAL_MARGIN : xoffset + image_width + HORIZONTAL_MARGIN,
+ yoffset + VERTICAL_MARGIN : yoffset
+ + image_height
+ + VERTICAL_MARGIN,
+ xoffset + HORIZONTAL_MARGIN : xoffset
+ + image_width
+ + HORIZONTAL_MARGIN,
] = _image
self.parent.panoramic_images = panoramic_image
@@ -115,13 +121,19 @@ def update_current_panoramic_image(self):
_view_box.setState(_state)
if not first_update:
- _histo_widget.setLevels(self.parent.histogram_level[0], self.parent.histogram_level[1])
+ _histo_widget.setLevels(
+ self.parent.histogram_level[0], self.parent.histogram_level[1]
+ )
def get_max_offset(self):
offset_dictionary = self.parent.offset_dictionary
- list_xoffset = [offset_dictionary[_key]["xoffset"] for _key in offset_dictionary.keys()]
- list_yoffset = [offset_dictionary[_key]["yoffset"] for _key in offset_dictionary.keys()]
+ list_xoffset = [
+ offset_dictionary[_key]["xoffset"] for _key in offset_dictionary.keys()
+ ]
+ list_yoffset = [
+ offset_dictionary[_key]["yoffset"] for _key in offset_dictionary.keys()
+ ]
return int(np.max(list_yoffset)), int(np.max(list_xoffset))
@@ -138,16 +150,24 @@ def update_from_to_roi(self, state=False):
from_roi = self.parent.from_roi
x = from_roi["x"]
y = from_roi["y"]
- self.parent.from_roi_id = pg.ROI([x, y], [ROI_WIDTH, ROI_HEIGHT], scaleSnap=True)
+ self.parent.from_roi_id = pg.ROI(
+ [x, y], [ROI_WIDTH, ROI_HEIGHT], scaleSnap=True
+ )
self.parent.ui.image_view.addItem(self.parent.from_roi_id)
- self.parent.from_roi_id.sigRegionChanged.connect(self.parent.from_roi_box_changed)
+ self.parent.from_roi_id.sigRegionChanged.connect(
+ self.parent.from_roi_box_changed
+ )
to_roi = self.parent.to_roi
x = to_roi["x"]
y = to_roi["y"]
- self.parent.to_roi_id = pg.ROI([x, y], [ROI_WIDTH, ROI_HEIGHT], scaleSnap=True)
+ self.parent.to_roi_id = pg.ROI(
+ [x, y], [ROI_WIDTH, ROI_HEIGHT], scaleSnap=True
+ )
self.parent.ui.image_view.addItem(self.parent.to_roi_id)
- self.parent.to_roi_id.sigRegionChanged.connect(self.parent.to_roi_box_changed)
+ self.parent.to_roi_id.sigRegionChanged.connect(
+ self.parent.to_roi_box_changed
+ )
self.update_from_label()
self.update_from_cross_line()
@@ -162,12 +182,18 @@ def update_validity_of_from_to_button(self):
o_table = TableHandler(table_ui=self.parent.ui.tableWidget)
row_selected = o_table.get_row_selected()
- name_of_file_selected = o_table.get_item_str_from_cell(row=row_selected, column=0)
+ name_of_file_selected = o_table.get_item_str_from_cell(
+ row=row_selected, column=0
+ )
offset_dictionary = self.parent.offset_dictionary
- xoffset_of_selected_image = offset_dictionary[name_of_file_selected]["xoffset"] + HORIZONTAL_MARGIN
- yoffset_of_selected_image = offset_dictionary[name_of_file_selected]["yoffset"] + VERTICAL_MARGIN
+ xoffset_of_selected_image = (
+ offset_dictionary[name_of_file_selected]["xoffset"] + HORIZONTAL_MARGIN
+ )
+ yoffset_of_selected_image = (
+ offset_dictionary[name_of_file_selected]["yoffset"] + VERTICAL_MARGIN
+ )
if (
(x < xoffset_of_selected_image)
@@ -219,11 +245,19 @@ def update_cross_line(self, roi_cross_id=None, roi=None):
line_color = (255, 0, 0, 255, 1)
lines = np.array(
[line_color for _ in np.arange(len(pos))],
- dtype=[("red", np.ubyte), ("green", np.ubyte), ("blue", np.ubyte), ("alpha", np.ubyte), ("width", float)],
+ dtype=[
+ ("red", np.ubyte),
+ ("green", np.ubyte),
+ ("blue", np.ubyte),
+ ("alpha", np.ubyte),
+ ("width", float),
+ ],
)
line_view_binning = pg.GraphItem()
self.parent.ui.image_view.addItem(line_view_binning)
- line_view_binning.setData(pos=pos, adj=adj, pen=lines, symbol=None, pxMode=False)
+ line_view_binning.setData(
+ pos=pos, adj=adj, pen=lines, symbol=None, pxMode=False
+ )
return line_view_binning
@@ -231,13 +265,17 @@ def update_from_cross_line(self):
from_roi_cross_id = self.parent.from_roi_cross_id
from_roi = self.parent.from_roi
- self.parent.from_roi_cross_id = self.update_cross_line(roi_cross_id=from_roi_cross_id, roi=from_roi)
+ self.parent.from_roi_cross_id = self.update_cross_line(
+ roi_cross_id=from_roi_cross_id, roi=from_roi
+ )
def update_to_cross_line(self):
to_roi_cross_id = self.parent.to_roi_cross_id
to_roi = self.parent.to_roi
- self.parent.to_roi_cross_id = self.update_cross_line(roi_cross_id=to_roi_cross_id, roi=to_roi)
+ self.parent.to_roi_cross_id = self.update_cross_line(
+ roi_cross_id=to_roi_cross_id, roi=to_roi
+ )
def update_label(self, label_id=None, roi=None, text=""):
if label_id:
@@ -256,9 +294,13 @@ def update_label(self, label_id=None, roi=None, text=""):
def update_from_label(self):
label_id = self.parent.from_label_id
roi = self.parent.from_roi
- self.parent.from_label_id = self.update_label(label_id=label_id, roi=roi, text="from")
+ self.parent.from_label_id = self.update_label(
+ label_id=label_id, roi=roi, text="from"
+ )
def update_to_label(self):
label_id = self.parent.to_label_id
roi = self.parent.to_roi
- self.parent.to_label_id = self.update_label(label_id=label_id, roi=roi, text="to")
+ self.parent.to_label_id = self.update_label(
+ label_id=label_id, roi=roi, text="to"
+ )
diff --git a/notebooks/__code/panoramic_stitching_for_tof/load_data.py b/notebooks/__code/panoramic_stitching_for_tof/load_data.py
index e1a1b62f..d2c1ba32 100755
--- a/notebooks/__code/panoramic_stitching_for_tof/load_data.py
+++ b/notebooks/__code/panoramic_stitching_for_tof/load_data.py
@@ -38,7 +38,9 @@ def run(self):
master_dict = OrderedDict()
integrated_images_dict = OrderedDict()
for _folder_index, _folder in enumerate(self.list_folders):
- self.parent.ui.statusbar.showMessage(f"Loading data from folder {os.path.basename(_folder)}")
+ self.parent.ui.statusbar.showMessage(
+ f"Loading data from folder {os.path.basename(_folder)}"
+ )
QtGui.QGuiApplication.processEvents()
o_norm = Normalization()
@@ -49,7 +51,9 @@ def run(self):
# record size of images
if _folder_index == 0:
- self.parent.image_height, self.parent.image_width = np.shape(o_norm.data["sample"]["data"][0])
+ self.parent.image_height, self.parent.image_width = np.shape(
+ o_norm.data["sample"]["data"][0]
+ )
local_dict = OrderedDict()
self.parent.nbr_files_per_folder = len(list_files)
@@ -76,10 +80,14 @@ def run(self):
coarse_images_dictionary = OrderedDict()
for _folder in self.parent.integrated_images.keys():
- coarse_images_dictionary[os.path.basename(_folder)] = self.parent.integrated_images[_folder]
+ coarse_images_dictionary[os.path.basename(_folder)] = (
+ self.parent.integrated_images[_folder]
+ )
self.parent.coarse_images_dictionary = coarse_images_dictionary
- self.parent.ui.statusbar.showMessage(f"Done Loading data from {nbr_folder} folders!", 5000)
+ self.parent.ui.statusbar.showMessage(
+ f"Done Loading data from {nbr_folder} folders!", 5000
+ )
QApplication.restoreOverrideCursor()
@staticmethod
diff --git a/notebooks/__code/panoramic_stitching_for_tof/panoramic_stitching_for_tof.py b/notebooks/__code/panoramic_stitching_for_tof/panoramic_stitching_for_tof.py
index 0e247f0c..ab26198f 100755
--- a/notebooks/__code/panoramic_stitching_for_tof/panoramic_stitching_for_tof.py
+++ b/notebooks/__code/panoramic_stitching_for_tof/panoramic_stitching_for_tof.py
@@ -11,7 +11,9 @@
from __code.panoramic_stitching.event_handler import EventHandler
from __code.panoramic_stitching.image_handler import HORIZONTAL_MARGIN, VERTICAL_MARGIN
from __code.panoramic_stitching_for_tof.automatically_stitch import AutomaticallyStitch
-from __code.panoramic_stitching_for_tof.best_contrast_tab_handler import BestContrastTabHandler
+from __code.panoramic_stitching_for_tof.best_contrast_tab_handler import (
+ BestContrastTabHandler,
+)
from __code.panoramic_stitching_for_tof.coarse_tab_handler import CoarseTabHandler
from __code.panoramic_stitching_for_tof.data_initialization import DataInitialization
from __code.panoramic_stitching_for_tof.event_handler import TOFEventHandler
@@ -49,9 +51,8 @@ def folder_selected(self, folder_selected):
str_list_ext = ", ".join(self.file_extension)
display(
format_html_message(
- pre_message="None of the folder selected contains the file of extension " "requested ({}}".format(
- str_list_ext
- ),
+ pre_message="None of the folder selected contains the file of extension "
+ "requested ({}}".format(str_list_ext),
spacer="",
)
)
@@ -59,7 +60,12 @@ def folder_selected(self, folder_selected):
final_list_folders.sort()
nbr_folder = len(final_list_folders)
- display(format_html_message(pre_message=f"Notebook is about to work with {nbr_folder} folders!", spacer=""))
+ display(
+ format_html_message(
+ pre_message=f"Notebook is about to work with {nbr_folder} folders!",
+ spacer="",
+ )
+ )
# gui initialization
o_interface = Interface(list_folders=final_list_folders)
@@ -277,7 +283,9 @@ def enable_vertical_profile_checked(self, state):
def left_left_button_pressed(self):
EventHandler.button_pressed(ui=self.ui.left_left_button, name="left_left")
o_event = EventHandler(parent=self)
- o_event.manual_offset_changed(direction="horizontal", nbr_pixel=-DOUBLE_MANUAL_PIXEL_CHANGE)
+ o_event.manual_offset_changed(
+ direction="horizontal", nbr_pixel=-DOUBLE_MANUAL_PIXEL_CHANGE
+ )
self.horizontal_profile_changed()
def left_left_button_released(self):
@@ -286,7 +294,9 @@ def left_left_button_released(self):
def left_button_pressed(self):
EventHandler.button_pressed(ui=self.ui.left_button, name="left")
o_event = EventHandler(parent=self)
- o_event.manual_offset_changed(direction="horizontal", nbr_pixel=-SIMPLE_MANUAL_PIXEL_CHANGE)
+ o_event.manual_offset_changed(
+ direction="horizontal", nbr_pixel=-SIMPLE_MANUAL_PIXEL_CHANGE
+ )
self.horizontal_profile_changed()
def left_button_released(self):
@@ -295,7 +305,9 @@ def left_button_released(self):
def right_right_button_pressed(self):
EventHandler.button_pressed(ui=self.ui.right_right_button, name="right_right")
o_event = EventHandler(parent=self)
- o_event.manual_offset_changed(direction="horizontal", nbr_pixel=DOUBLE_MANUAL_PIXEL_CHANGE)
+ o_event.manual_offset_changed(
+ direction="horizontal", nbr_pixel=DOUBLE_MANUAL_PIXEL_CHANGE
+ )
self.horizontal_profile_changed()
def right_right_button_released(self):
@@ -304,7 +316,9 @@ def right_right_button_released(self):
def right_button_pressed(self):
EventHandler.button_pressed(ui=self.ui.right_button, name="right")
o_event = EventHandler(parent=self)
- o_event.manual_offset_changed(direction="horizontal", nbr_pixel=SIMPLE_MANUAL_PIXEL_CHANGE)
+ o_event.manual_offset_changed(
+ direction="horizontal", nbr_pixel=SIMPLE_MANUAL_PIXEL_CHANGE
+ )
self.horizontal_profile_changed()
def right_button_released(self):
@@ -313,7 +327,9 @@ def right_button_released(self):
def up_up_button_pressed(self):
EventHandler.button_pressed(ui=self.ui.up_up_button, name="up_up")
o_event = EventHandler(parent=self)
- o_event.manual_offset_changed(direction="vertical", nbr_pixel=-DOUBLE_MANUAL_PIXEL_CHANGE)
+ o_event.manual_offset_changed(
+ direction="vertical", nbr_pixel=-DOUBLE_MANUAL_PIXEL_CHANGE
+ )
self.vertical_profile_changed()
def up_up_button_released(self):
@@ -322,7 +338,9 @@ def up_up_button_released(self):
def up_button_pressed(self):
EventHandler.button_pressed(ui=self.ui.up_button, name="up")
o_event = EventHandler(parent=self)
- o_event.manual_offset_changed(direction="vertical", nbr_pixel=-SIMPLE_MANUAL_PIXEL_CHANGE)
+ o_event.manual_offset_changed(
+ direction="vertical", nbr_pixel=-SIMPLE_MANUAL_PIXEL_CHANGE
+ )
self.vertical_profile_changed()
def up_button_released(self):
@@ -331,7 +349,9 @@ def up_button_released(self):
def down_down_button_pressed(self):
EventHandler.button_pressed(ui=self.ui.down_down_button, name="down_down")
o_event = EventHandler(parent=self)
- o_event.manual_offset_changed(direction="vertical", nbr_pixel=DOUBLE_MANUAL_PIXEL_CHANGE)
+ o_event.manual_offset_changed(
+ direction="vertical", nbr_pixel=DOUBLE_MANUAL_PIXEL_CHANGE
+ )
self.vertical_profile_changed()
def down_down_button_released(self):
@@ -340,7 +360,9 @@ def down_down_button_released(self):
def down_button_pressed(self):
EventHandler.button_pressed(ui=self.ui.down_button, name="down")
o_event = EventHandler(parent=self)
- o_event.manual_offset_changed(direction="vertical", nbr_pixel=SIMPLE_MANUAL_PIXEL_CHANGE)
+ o_event.manual_offset_changed(
+ direction="vertical", nbr_pixel=SIMPLE_MANUAL_PIXEL_CHANGE
+ )
self.vertical_profile_changed()
def down_button_released(self):
diff --git a/notebooks/__code/panoramic_stitching_for_tof/profile.py b/notebooks/__code/panoramic_stitching_for_tof/profile.py
index 5c8a852b..3997a9c0 100755
--- a/notebooks/__code/panoramic_stitching_for_tof/profile.py
+++ b/notebooks/__code/panoramic_stitching_for_tof/profile.py
@@ -2,7 +2,10 @@
from __code._utilities.table_handler import TableHandler
from __code.panoramic_stitching_for_tof.get import Get
-from __code.panoramic_stitching_for_tof.image_handler import HORIZONTAL_MARGIN, VERTICAL_MARGIN
+from __code.panoramic_stitching_for_tof.image_handler import (
+ HORIZONTAL_MARGIN,
+ VERTICAL_MARGIN,
+)
COLOR_WORKING_ROW = "red"
COLOR_NONE_WORKING_ROW = "black"
@@ -15,7 +18,9 @@ def __init__(self, parent=None):
def horizontal_profile_changed(self):
if self.parent.ui.enable_horizontal_profile_checkbox.isChecked():
roi_id = self.parent.horizontal_profile["id"]
- horizontal_roi_dimensions = Profile.get_x_y_width_height_of_roi(roi_id=roi_id)
+ horizontal_roi_dimensions = Profile.get_x_y_width_height_of_roi(
+ roi_id=roi_id
+ )
self.plot_profile(
x=horizontal_roi_dimensions["x"],
y=horizontal_roi_dimensions["y"],
@@ -36,7 +41,9 @@ def vertical_profile_changed(self):
profile_type="vertical",
)
- def plot_profile(self, x=None, y=None, width=None, height=None, profile_type="horizontal"):
+ def plot_profile(
+ self, x=None, y=None, width=None, height=None, profile_type="horizontal"
+ ):
if profile_type == "horizontal":
plot_ui = self.parent.horizontal_profile_plot
dim_to_keep = 0
@@ -87,15 +94,23 @@ def plot_profile(self, x=None, y=None, width=None, height=None, profile_type="ho
# find part of profile that is inside image
x_left_for_profile = np.max([x, left_of_image]) - left_of_image
- x_right_for_profile = np.min([x + width, left_of_image + image_width]) - left_of_image
+ x_right_for_profile = (
+ np.min([x + width, left_of_image + image_width]) - left_of_image
+ )
y_top_for_profile = np.max([y, top_of_image]) - top_of_image
- y_bottom_for_profile = np.min([y + height, top_of_image + image_height]) - top_of_image
+ y_bottom_for_profile = (
+ np.min([y + height, top_of_image + image_height]) - top_of_image
+ )
if profile_type == "horizontal":
- x_axis_of_profile = np.arange(x_left_for_profile, x_right_for_profile) + left_of_image
+ x_axis_of_profile = (
+ np.arange(x_left_for_profile, x_right_for_profile) + left_of_image
+ )
else:
- x_axis_of_profile = np.arange(y_top_for_profile, y_bottom_for_profile) + top_of_image
+ x_axis_of_profile = (
+ np.arange(y_top_for_profile, y_bottom_for_profile) + top_of_image
+ )
y_axis_of_profile = data_dictionary[_file].data[
y_top_for_profile:y_bottom_for_profile,
diff --git a/notebooks/__code/profile/display.py b/notebooks/__code/profile/display.py
index c2f7d043..e81be9e3 100755
--- a/notebooks/__code/profile/display.py
+++ b/notebooks/__code/profile/display.py
@@ -17,7 +17,8 @@ def get_image_selected(self, recalculate_image=False):
angle = self.parent.rotation_angle
# rotate all images
self.parent.data_dict["data"] = [
- transform.rotate(_image, angle) for _image in self.parent.data_dict_raw["data"]
+ transform.rotate(_image, angle)
+ for _image in self.parent.data_dict_raw["data"]
]
_image = self.parent.data_dict["data"][slider_index]
@@ -48,7 +49,9 @@ def display_images(self):
_view_box.setState(_state)
if not first_update:
- _histo_widget.setLevels(self.parent.histogram_level[0], self.parent.histogram_level[1])
+ _histo_widget.setLevels(
+ self.parent.histogram_level[0], self.parent.histogram_level[1]
+ )
def calculate_matrix_grid(self, grid_size=1, height=1, width=1):
"""calculate the matrix that defines the vertical and horizontal lines
@@ -98,12 +101,17 @@ def display_grid(self):
grid_size = self.parent.ui.grid_size_slider.value()
[height, width] = np.shape(self.parent.live_image)
- pos_adj_dict = self.calculate_matrix_grid(grid_size=grid_size, height=height, width=width)
+ pos_adj_dict = self.calculate_matrix_grid(
+ grid_size=grid_size, height=height, width=width
+ )
pos = pos_adj_dict["pos"]
adj = pos_adj_dict["adj"]
line_color = self.parent.grid_view["color"]
- _transparency_value = 255 - (float(str(self.parent.ui.transparency_slider.value())) / 100) * 255
+ _transparency_value = (
+ 255
+ - (float(str(self.parent.ui.transparency_slider.value())) / 100) * 255
+ )
_list_line_color = list(line_color)
_list_line_color[3] = _transparency_value
line_color = tuple(_list_line_color)
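For context on the grid hunks above: display_grid hands a pyqtgraph GraphItem a `pos` array of line endpoints and an `adj` array of index pairs (built in calculate_matrix_grid, whose body is not part of this patch). The sketch below is only an assumed illustration of that pos/adj layout, with made-up grid_size/width/height values; it is not the notebook's implementation.

```python
import numpy as np

# Illustrative construction of the pos/adj arrays a pyqtgraph GraphItem expects:
# pos lists the endpoints of every grid line, adj pairs up the indices of the
# two endpoints that belong to the same line.
def grid_pos_adj(grid_size, width, height):
    pos = []
    for x in np.arange(0, width + 1, grid_size):   # vertical lines
        pos += [[x, 0], [x, height]]
    for y in np.arange(0, height + 1, grid_size):  # horizontal lines
        pos += [[0, y], [width, y]]
    adj = [[i, i + 1] for i in range(0, len(pos), 2)]  # join endpoint pairs
    return np.array(pos), np.array(adj)

pos, adj = grid_pos_adj(grid_size=2, width=4, height=4)
print(pos.shape, adj.shape)  # (12, 2) (6, 2)
# These arrays would then be passed to GraphItem().setData(pos=..., adj=..., pen=...).
```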
diff --git a/notebooks/__code/profile/export.py b/notebooks/__code/profile/export.py
index cebcb946..ed823857 100755
--- a/notebooks/__code/profile/export.py
+++ b/notebooks/__code/profile/export.py
@@ -30,7 +30,9 @@ def _create_metadata(self, profile_index=0):
y_top = profile_dimension.y_top
y_bottom = profile_dimension.y_bottom
metadata.append("#Profile dimension:")
- metadata.append(f"# * [x0, y0, x1, y1] = [{x_left}, {y_top}, {x_right}, {y_bottom}]")
+ metadata.append(
+ f"# * [x0, y0, x1, y1] = [{x_left}, {y_top}, {x_right}, {y_bottom}]"
+ )
if is_x_profile_direction:
metadata.append("# * integrated over y_axis")
table_axis = ["#x_axis"]
@@ -50,7 +52,9 @@ def _create_data(self, profile_index=0):
all_profiles = []
x_axis = []
for _data in self.parent.data_dict["data"]:
- [x_axis, profile] = self.parent.get_profile(image=np.transpose(_data), profile_roi_row=profile_index)
+ [x_axis, profile] = self.parent.get_profile(
+ image=np.transpose(_data), profile_roi_row=profile_index
+ )
all_profiles.append(list(profile))
data = []
@@ -63,10 +67,17 @@ def _create_data(self, profile_index=0):
def run(self):
_nbr_profiles = self.parent.ui.tableWidget.rowCount()
for _profile_index in np.arange(_nbr_profiles):
- _output_file_name = self._create_output_file_name(profile_index=_profile_index)
+ _output_file_name = self._create_output_file_name(
+ profile_index=_profile_index
+ )
metadata = self._create_metadata(profile_index=_profile_index)
data = self._create_data(profile_index=_profile_index)
- make_ascii_file(metadata=metadata, data=data, output_file_name=_output_file_name, dim="1d")
+ make_ascii_file(
+ metadata=metadata,
+ data=data,
+ output_file_name=_output_file_name,
+ dim="1d",
+ )
display(HTML(f"Exported Profile file {_output_file_name}"))
@@ -78,7 +89,9 @@ def __init__(self, parent=None, export_folder=""):
def _create_output_file_name(self, roi_index=0):
base_name = os.path.basename(self.parent.working_dir)
- output_file_name = os.path.join(self.export_folder, f"{base_name}_profile_{roi_index + 1}.txt")
+ output_file_name = os.path.join(
+ self.export_folder, f"{base_name}_profile_{roi_index + 1}.txt"
+ )
return output_file_name
# def _create_metadata(self, profile_index=0):
diff --git a/notebooks/__code/profile/guide_and_profile_rois_handler.py b/notebooks/__code/profile/guide_and_profile_rois_handler.py
index 13487a80..82a3eb95 100755
--- a/notebooks/__code/profile/guide_and_profile_rois_handler.py
+++ b/notebooks/__code/profile/guide_and_profile_rois_handler.py
@@ -18,14 +18,19 @@ def add(self):
def update(self):
self._define_profile()
- self.parent.ui.image_view.removeItem(self.parent.list_profile_pyqt_roi[self.row])
+ self.parent.ui.image_view.removeItem(
+ self.parent.list_profile_pyqt_roi[self.row]
+ )
self.parent.list_profile_pyqt_roi[self.row] = self.__profile
def _define_guide(self):
"""define the guide"""
guide_roi = pg.RectROI(
[self.parent.default_guide_roi["x0"], self.parent.default_guide_roi["y0"]],
- [self.parent.default_guide_roi["width"], self.parent.default_guide_roi["height"]],
+ [
+ self.parent.default_guide_roi["width"],
+ self.parent.default_guide_roi["height"],
+ ],
pen=self.parent.default_guide_roi["color_activated"],
)
guide_roi.addScaleHandle([1, 1], [0, 0])
@@ -82,7 +87,13 @@ def _define_profile(self):
line_color = tuple(_list_line_color)
lines = np.array(
[line_color for n in np.arange(len(pos))],
- dtype=[("red", np.ubyte), ("green", np.ubyte), ("blue", np.ubyte), ("alpha", np.ubyte), ("width", float)],
+ dtype=[
+ ("red", np.ubyte),
+ ("green", np.ubyte),
+ ("blue", np.ubyte),
+ ("alpha", np.ubyte),
+ ("width", float),
+ ],
)
profile = pg.GraphItem()
diff --git a/notebooks/__code/profile/initialization.py b/notebooks/__code/profile/initialization.py
index e271b156..5564abd5 100755
--- a/notebooks/__code/profile/initialization.py
+++ b/notebooks/__code/profile/initialization.py
@@ -27,20 +27,28 @@ def timestamp_dict(self):
def table(self):
# init the summary table
list_files_full_name = self.parent.data_dict["file_name"]
- list_files_short_name = [os.path.basename(_file) for _file in list_files_full_name]
+ list_files_short_name = [
+ os.path.basename(_file) for _file in list_files_full_name
+ ]
list_time_stamp = self.parent.timestamp_dict["list_time_stamp"]
- list_time_stamp_user_format = self.parent.timestamp_dict["list_time_stamp_user_format"]
+ list_time_stamp_user_format = self.parent.timestamp_dict[
+ "list_time_stamp_user_format"
+ ]
time_0 = list_time_stamp[0]
for _row, _file in enumerate(list_files_short_name):
self.parent.ui.summary_table.insertRow(_row)
self.set_item_summary_table(row=_row, col=0, value=_file)
- self.set_item_summary_table(row=_row, col=1, value=list_time_stamp_user_format[_row])
+ self.set_item_summary_table(
+ row=_row, col=1, value=list_time_stamp_user_format[_row]
+ )
_offset = list_time_stamp[_row] - time_0
self.set_item_summary_table(row=_row, col=2, value=f"{_offset:0.2f}")
self.parent.ui.all_plots_file_name_table.insertRow(_row)
- self.set_item_all_plot_file_name_table(row=_row, value=os.path.basename(_file))
+ self.set_item_all_plot_file_name_table(
+ row=_row, value=os.path.basename(_file)
+ )
def parameters(self):
# init the position of the measurement ROI
@@ -49,7 +57,9 @@ def parameters(self):
self.parent.default_guide_roi["height"] = int(height / 5)
self.parent.default_guide_roi["x0"] = int(width / 2)
self.parent.default_guide_roi["y0"] = int(height / 2)
- self.parent.default_profile_width_values = [str(_value) for _value in self.parent.default_profile_width_values]
+ self.parent.default_profile_width_values = [
+ str(_value) for _value in self.parent.default_profile_width_values
+ ]
def widgets(self):
_file_path = os.path.dirname(__file__)
@@ -57,28 +67,32 @@ def widgets(self):
os.path.join(_file_path, "../static/profile/button_rotation_left_fast.png")
)
self.parent.ui.left_rotation_button_fast.setStyleSheet(
- "background-image: " "url('" + left_rotation_fast_file + "'); " + "background-repeat: no-repeat"
+ "background-image: "
+ "url('" + left_rotation_fast_file + "'); " + "background-repeat: no-repeat"
)
right_rotation_fast_file = os.path.abspath(
os.path.join(_file_path, "../static/profile/button_rotation_right_fast.png")
)
self.parent.ui.right_rotation_button_fast.setStyleSheet(
- "background-image: " "url('" + right_rotation_fast_file + "'); " + "background-repeat: no-repeat"
+ "background-image: "
+ "url('" + right_rotation_fast_file + "'); " + "background-repeat: no-repeat"
)
left_rotation_slow_file = os.path.abspath(
os.path.join(_file_path, "../static/profile/button_rotation_left_slow.png")
)
self.parent.ui.left_rotation_button_slow.setStyleSheet(
- "background-image: " "url('" + left_rotation_slow_file + "'); " + "background-repeat: no-repeat"
+ "background-image: "
+ "url('" + left_rotation_slow_file + "'); " + "background-repeat: no-repeat"
)
right_rotation_slow_file = os.path.abspath(
os.path.join(_file_path, "../static/profile/button_rotation_right_slow.png")
)
self.parent.ui.right_rotation_button_slow.setStyleSheet(
- "background-image: " "url('" + right_rotation_slow_file + "'); " + "background-repeat: no-repeat"
+ "background-image: "
+ "url('" + right_rotation_slow_file + "'); " + "background-repeat: no-repeat"
)
self.parent.ui.splitter_2.setSizes([250, 50])
@@ -92,12 +106,16 @@ def widgets(self):
# update size of table columns
nbr_columns = self.parent.ui.tableWidget.columnCount()
for _col in range(nbr_columns):
- self.parent.ui.tableWidget.setColumnWidth(_col, self.parent.guide_table_width[_col])
+ self.parent.ui.tableWidget.setColumnWidth(
+ _col, self.parent.guide_table_width[_col]
+ )
# update size of summary table
nbr_columns = self.parent.ui.summary_table.columnCount()
for _col in range(nbr_columns):
- self.parent.ui.summary_table.setColumnWidth(_col, self.parent.summary_table_width[_col])
+ self.parent.ui.summary_table.setColumnWidth(
+ _col, self.parent.summary_table_width[_col]
+ )
self.parent.display_ui = [
self.parent.ui.display_size_label,
diff --git a/notebooks/__code/profile/profile.py b/notebooks/__code/profile/profile.py
index 6c095879..ed15465e 100755
--- a/notebooks/__code/profile/profile.py
+++ b/notebooks/__code/profile/profile.py
@@ -82,7 +82,8 @@ def __init__(self, parent=None, working_dir="", data_dict=None):
QMainWindow.__init__(self, parent=parent)
ui_full_path = os.path.join(
- os.path.dirname(os.path.dirname(os.path.dirname(__file__))), os.path.join("ui", "ui_profile.ui")
+ os.path.dirname(os.path.dirname(os.path.dirname(__file__))),
+ os.path.join("ui", "ui_profile.ui"),
)
self.ui = load_ui(ui_full_path, baseinstance=self)
self.setWindowTitle("Profile")
@@ -165,7 +166,9 @@ def update_all_plots(self):
nbr_profile = len(list_index_profile_selected)
nbr_file_selected = len(list_index_file_selected)
color = Color()
- list_rgb_profile_color = color.get_list_rgb(nbr_color=(nbr_profile * nbr_file_selected))
+ list_rgb_profile_color = color.get_list_rgb(
+ nbr_color=(nbr_profile * nbr_file_selected)
+ )
self.ui.all_plots_view.clear()
if nbr_profile == 0:
@@ -175,10 +178,16 @@ def update_all_plots(self):
for _color_index_file, _index_file in enumerate(list_index_file_selected):
_data = self.data_dict["data"][_index_file]
- for _color_index_profile, _index_profile in enumerate(list_index_profile_selected):
+ for _color_index_profile, _index_profile in enumerate(
+ list_index_profile_selected
+ ):
legend = f"File #{_index_file} - Profile #{_index_profile}"
- _color = list_rgb_profile_color[_color_index_file + _color_index_profile * nbr_file_selected]
- [x_axis, y_axis] = self.get_profile(image=np.transpose(_data), profile_roi_row=_index_profile)
+ _color = list_rgb_profile_color[
+ _color_index_file + _color_index_profile * nbr_file_selected
+ ]
+ [x_axis, y_axis] = self.get_profile(
+ image=np.transpose(_data), profile_roi_row=_index_profile
+ )
self.ui.all_plots_view.plot(x_axis, y_axis, name=legend, pen=_color)
def display_image(self, recalculate_image=False):
@@ -247,7 +256,9 @@ def is_row_enabled(self, row=-1):
def update_guide_table_using_guide_rois(self):
for _row, _roi in enumerate(self.list_guide_pyqt_roi):
if self.is_row_enabled(row=_row):
- region = _roi.getArraySlice(self.live_image, self.ui.image_view.imageItem)
+ region = _roi.getArraySlice(
+ self.live_image, self.ui.image_view.imageItem
+ )
x0 = region[0][0].start
x1 = region[0][0].stop
@@ -320,7 +331,9 @@ def rename_all_plots_profiles_table(self):
"""rename all the profile name"""
nbr_row = self.ui.tableWidget.rowCount()
for _row in np.arange(nbr_row):
- self.ui.all_plots_profiles_table.item(_row, 0).setText(f"Profile # {_row + 1}")
+ self.ui.all_plots_profiles_table.item(_row, 0).setText(
+ f"Profile # {_row + 1}"
+ )
# setter
def set_item_all_plots_profile_table(self, row=0):
@@ -329,12 +342,16 @@ def set_item_all_plots_profile_table(self, row=0):
self.ui.all_plots_profiles_table.setItem(row, 0, item)
def set_item_profile_table(self, row=0):
- spacerItem_left = QSpacerItem(408, 20, QSizePolicy.Expanding, QSizePolicy.Expanding)
+ spacerItem_left = QSpacerItem(
+ 408, 20, QSizePolicy.Expanding, QSizePolicy.Expanding
+ )
widget = QComboBox()
widget.addItems(self.default_profile_width_values)
widget.blockSignals(True)
widget.currentIndexChanged.connect(self.profile_width_changed)
- spacerItem_right = QSpacerItem(408, 20, QSizePolicy.Expanding, QSizePolicy.Expanding)
+ spacerItem_right = QSpacerItem(
+ 408, 20, QSizePolicy.Expanding, QSizePolicy.Expanding
+ )
hori_layout = QHBoxLayout()
hori_layout.addItem(spacerItem_left)
hori_layout.addWidget(widget)
@@ -346,12 +363,16 @@ def set_item_profile_table(self, row=0):
def set_item_main_table(self, row=0, col=0, value=""):
if col == 0:
- spacerItem_left = QSpacerItem(408, 20, QSizePolicy.Expanding, QSizePolicy.Expanding)
+ spacerItem_left = QSpacerItem(
+ 408, 20, QSizePolicy.Expanding, QSizePolicy.Expanding
+ )
widget = QCheckBox()
widget.blockSignals(True)
self.list_table_widget_checkbox.insert(row, widget)
widget.stateChanged.connect(self.guide_state_changed)
- spacerItem_right = QSpacerItem(408, 20, QSizePolicy.Expanding, QSizePolicy.Expanding)
+ spacerItem_right = QSpacerItem(
+ 408, 20, QSizePolicy.Expanding, QSizePolicy.Expanding
+ )
hori_layout = QHBoxLayout()
hori_layout.addItem(spacerItem_left)
hori_layout.addWidget(widget)
@@ -399,7 +420,9 @@ def get_profile_dimensions(self, row=-1):
y_top = y0
y_bottom = y0 + height
- Profile = collections.namedtuple("Profile", ["x_left", "x_right", "y_top", "y_bottom", "profile_center"])
+ Profile = collections.namedtuple(
+ "Profile", ["x_left", "x_right", "y_top", "y_bottom", "profile_center"]
+ )
result = Profile(x_left, x_right, y_top, y_bottom, profile_center)
return result
@@ -420,7 +443,9 @@ def get_profile(self, image=[], profile_roi_row=-1):
mean_axis = 0
x_axis = np.arange(y_top, y_bottom)
- _data = image[x_left:x_right, y_top:y_bottom] # because pyqtgrpah display transpose images
+ _data = image[
+ x_left:x_right, y_top:y_bottom
+        ] # because pyqtgraph displays transposed images
profile = np.mean(_data, axis=mean_axis)
return [x_axis, profile]
@@ -479,7 +504,9 @@ def highlight_guide_profile_pyqt_rois(self, row=-1):
return
try:
- self._highlights_guide_profile_pyqt_roi(row=previous_active_row, status="deactivated")
+ self._highlights_guide_profile_pyqt_roi(
+ row=previous_active_row, status="deactivated"
+ )
self._highlights_guide_profile_pyqt_roi(row=row, status="activated")
except:
pass
@@ -634,7 +661,10 @@ def profile_along_axis_changed(self):
def export_button_clicked(self):
_export_folder = QFileDialog.getExistingDirectory(
- self, directory=self.working_dir, caption="Select Output Folder", options=QFileDialog.ShowDirsOnly
+ self,
+ directory=self.working_dir,
+ caption="Select Output Folder",
+ options=QFileDialog.ShowDirsOnly,
)
if _export_folder:
o_export = ExportProfiles(parent=self, export_folder=_export_folder)
@@ -656,7 +686,9 @@ def next_image_button_clicked(self):
def help_button_clicked(self):
import webbrowser
- webbrowser.open("https://neutronimaging.pages.ornl.gov/en/tutorial/notebooks/profile/")
+ webbrowser.open(
+ "https://neutronimaging.pages.ornl.gov/en/tutorial/notebooks/profile/"
+ )
def closeEvent(self, event=None):
pass
diff --git a/notebooks/__code/profile_vs_file_index.py b/notebooks/__code/profile_vs_file_index.py
index 2747f008..75e202f2 100755
--- a/notebooks/__code/profile_vs_file_index.py
+++ b/notebooks/__code/profile_vs_file_index.py
@@ -34,7 +34,9 @@ def __init__(self, working_dir="./"):
def select_images(self):
self.folder_ui = ipywe.fileselector.FileSelectorPanel(
- instruction="Select Input Images ...", start_dir=self.working_dir, multiple=True
+ instruction="Select Input Images ...",
+ start_dir=self.working_dir,
+ multiple=True,
)
self.folder_ui.show()
@@ -43,7 +45,11 @@ def load_images(self):
try:
self.list_data_files = self.folder_ui.selected
except:
- display(HTML('Please Select a set of Images First!'))
+ display(
+ HTML(
+ 'Please Select a set of Images First!'
+ )
+ )
return
self.o_load = Normalization()
@@ -71,26 +77,62 @@ def select_profile(self, roi_left=0, roi_top=0, roi_height=-1, roi_width=-1):
if roi_width == -1:
roi_width = self.width - 1
- def plot_images_with_roi(x_left, y_top, width, height, contrast_min, contrast_max):
+ def plot_images_with_roi(
+ x_left, y_top, width, height, contrast_min, contrast_max
+ ):
plt.figure(figsize=(5, 5))
ax_img = plt.subplot(111)
ax_img.imshow(
- self.integrated_data, cmap="rainbow", interpolation=None, vmin=contrast_min, vmax=contrast_max
+ self.integrated_data,
+ cmap="rainbow",
+ interpolation=None,
+ vmin=contrast_min,
+ vmax=contrast_max,
)
- ax_img.add_patch(patches.Rectangle((x_left, y_top), width, height, fill=False))
+ ax_img.add_patch(
+ patches.Rectangle((x_left, y_top), width, height, fill=False)
+ )
return [x_left, y_top, width, height]
self.profile = interact(
plot_images_with_roi,
- x_left=widgets.IntSlider(min=0, max=self.width - 1, step=1, value=roi_left, continuous_update=False),
- y_top=widgets.IntSlider(min=0, max=self.height - 1, step=1, value=roi_top, continuous_update=False),
- width=widgets.IntSlider(min=0, max=self.width - 1, step=1, value=roi_width, continuous_update=False),
- height=widgets.IntSlider(min=0, max=self.height - 1, step=1, value=roi_height, continuous_update=False),
- contrast_min=widgets.FloatSlider(min=0, max=1, step=0.1, value=0, continuous_update=False),
- contrast_max=widgets.FloatSlider(min=0, max=2, value=1, step=0.1, continuous_update=False),
+ x_left=widgets.IntSlider(
+ min=0,
+ max=self.width - 1,
+ step=1,
+ value=roi_left,
+ continuous_update=False,
+ ),
+ y_top=widgets.IntSlider(
+ min=0,
+ max=self.height - 1,
+ step=1,
+ value=roi_top,
+ continuous_update=False,
+ ),
+ width=widgets.IntSlider(
+ min=0,
+ max=self.width - 1,
+ step=1,
+ value=roi_width,
+ continuous_update=False,
+ ),
+ height=widgets.IntSlider(
+ min=0,
+ max=self.height - 1,
+ step=1,
+ value=roi_height,
+ continuous_update=False,
+ ),
+ contrast_min=widgets.FloatSlider(
+ min=0, max=1, step=0.1, value=0, continuous_update=False
+ ),
+ contrast_max=widgets.FloatSlider(
+ min=0, max=2, value=1, step=0.1, continuous_update=False
+ ),
)
def calculate_integrated_profile(self):
@@ -108,7 +150,9 @@ def calculate_integrated_profile(self):
profile_array = []
for _image in sample_data:
- _profile_image = _image[roi_top : roi_top + roi_height, roi_left : roi_left + roi_width]
+ _profile_image = _image[
+ roi_top : roi_top + roi_height, roi_left : roi_left + roi_width
+ ]
_value = np.mean(_profile_image)
profile_array.append(_value)
w.value = index
@@ -121,7 +165,9 @@ def select_vertical_pixel_binning(self):
[
widgets.Label("Vertical Binning", layout=widgets.Layout(width="20%")),
widgets.Dropdown(
- options=["1", "2", "3", "4", "5", "6", "7", "8", "9"], value="1", layout=widgets.Layout(width="20%")
+ options=["1", "2", "3", "4", "5", "6", "7", "8", "9"],
+ value="1",
+ layout=widgets.Layout(width="20%"),
),
]
)
@@ -142,9 +188,13 @@ def calculate_profile(self):
profile_1d = []
for _index, _array in enumerate(sample_data):
- _roi_array = _array[roi_top : roi_top + roi_height, roi_left : roi_left + roi_width]
+ _roi_array = _array[
+ roi_top : roi_top + roi_height, roi_left : roi_left + roi_width
+ ]
_width_profile = np.mean(_roi_array, 1)
- rebin_width_profile = [np.mean(_width_profile[x : x + self.rebin]) for x in self.rebin_range]
+ rebin_width_profile = [
+ np.mean(_width_profile[x : x + self.rebin]) for x in self.rebin_range
+ ]
profile_1d.append(rebin_width_profile)
_index += 1
w.value = _index
@@ -171,19 +221,30 @@ def plot_profile(file_index):
ax_img = plt.subplot(212)
ax_img.imshow(data_2d, cmap="rainbow", interpolation=None)
- ax_img.add_patch(patches.Rectangle((roi_left, roi_top), roi_width, roi_height, fill=False))
+ ax_img.add_patch(
+ patches.Rectangle(
+ (roi_left, roi_top), roi_width, roi_height, fill=False
+ )
+ )
number_of_files = len(self.sample_data)
_ = interact(
plot_profile,
file_index=widgets.IntSlider(
- min=0, max=number_of_files - 1, value=0, step=1, description="Image Index", continuous_update=False
+ min=0,
+ max=number_of_files - 1,
+ value=0,
+ step=1,
+ description="Image Index",
+ continuous_update=False,
),
)
def select_file_name_vs_time_stamp(self):
self.file_ui = ipywe.fileselector.FileSelectorPanel(
- instruction="Select file_name_vs_time_stamp File ...", start_dir=self.working_dir, multiple=False
+ instruction="Select file_name_vs_time_stamp File ...",
+ start_dir=self.working_dir,
+ multiple=False,
)
self.file_ui.show()
@@ -204,12 +265,16 @@ def load_file_name_vs_time_stamp_file(self):
def select_output_folder(self):
self.output_folder_ui = ipywe.fileselector.FileSelectorPanel(
- instruction="Select Output Folder ...", start_dir=self.working_dir, type="directory"
+ instruction="Select Output Folder ...",
+ start_dir=self.working_dir,
+ type="directory",
)
self.output_folder_ui.show()
def __get_time_stamp(self, file_name):
- _index_time_stamp = self.list_data_files_short.index(os.path.basename(file_name))
+ _index_time_stamp = self.list_data_files_short.index(
+ os.path.basename(file_name)
+ )
return self.list_time_stamp[_index_time_stamp]
def output_profiles(self):
@@ -225,7 +290,9 @@ def output_profiles(self):
[roi_left, roi_top, roi_width, roi_height] = self.roi
- self.list_data_files_short = [os.path.basename(_file) for _file in self.df["#filename"]]
+ self.list_data_files_short = [
+ os.path.basename(_file) for _file in self.df["#filename"]
+ ]
time_0 = self.__get_time_stamp(self.list_data_files[0])
w = widgets.IntProgress()
@@ -243,7 +310,9 @@ def output_profiles(self):
)
metadata.append(f"#Rebin in y direction: {self.rebin}")
- _time_stamp_str = datetime.datetime.fromtimestamp(_time_stamp).strftime("%Y-%m-%d %H:%M:%S")
+ _time_stamp_str = datetime.datetime.fromtimestamp(_time_stamp).strftime(
+ "%Y-%m-%d %H:%M:%S"
+ )
metadata.append(f"#Time Stamp: {_time_stamp_str}")
_delta_time = _time_stamp - time_0
@@ -260,10 +329,21 @@ def output_profiles(self):
[base, _] = os.path.splitext(_base_file_name)
output_file_name = os.path.join(output_folder, base + ".txt")
- make_ascii_file(metadata=metadata, data=data, output_file_name=output_file_name, dim="1d")
+ make_ascii_file(
+ metadata=metadata,
+ data=data,
+ output_file_name=output_file_name,
+ dim="1d",
+ )
w.value = _index + 1
w.close()
- display(HTML('Files created in folder ' + output_folder + ""))
+ display(
+ HTML(
+ 'Files created in folder '
+ + output_folder
+ + ""
+ )
+ )
diff --git a/notebooks/__code/radial_profile/display.py b/notebooks/__code/radial_profile/display.py
index 63de6c01..7e5c05e2 100755
--- a/notebooks/__code/radial_profile/display.py
+++ b/notebooks/__code/radial_profile/display.py
@@ -58,11 +58,19 @@ def grid(self):
)
lines = np.array(
[line_color for n in np.arange(len(pos))],
- dtype=[("red", np.ubyte), ("green", np.ubyte), ("blue", np.ubyte), ("alpha", np.ubyte), ("width", float)],
+ dtype=[
+ ("red", np.ubyte),
+ ("green", np.ubyte),
+ ("blue", np.ubyte),
+ ("alpha", np.ubyte),
+ ("width", float),
+ ],
)
line_view_binning = pg.GraphItem()
self.parent.ui.image_view.addItem(line_view_binning)
- line_view_binning.setData(pos=pos, adj=adj, pen=lines, symbol=None, pxMode=False)
+ line_view_binning.setData(
+ pos=pos, adj=adj, pen=lines, symbol=None, pxMode=False
+ )
self.parent.line_view_binning = line_view_binning
diff --git a/notebooks/__code/radial_profile/event_handler.py b/notebooks/__code/radial_profile/event_handler.py
index 69b91eb5..cad6dd43 100755
--- a/notebooks/__code/radial_profile/event_handler.py
+++ b/notebooks/__code/radial_profile/event_handler.py
@@ -27,7 +27,9 @@ def file_index_changed(self):
_view_box.setState(_state)
if not first_update:
- _histo_widget.setLevels(self.parent.histogram_level[0], self.parent.histogram_level[1])
+ _histo_widget.setLevels(
+ self.parent.histogram_level[0], self.parent.histogram_level[1]
+ )
def guide_color_changed(self):
red = self.parent.ui.guide_red_slider.value()
@@ -69,14 +71,22 @@ def circle_center_changed(self):
lines = np.array(
[(255, 0, 0, 255, 2), (255, 0, 0, 0, 1), (255, 0, 0, 255, 2)],
- dtype=[("red", np.ubyte), ("green", np.ubyte), ("blue", np.ubyte), ("alpha", np.ubyte), ("width", float)],
+ dtype=[
+ ("red", np.ubyte),
+ ("green", np.ubyte),
+ ("blue", np.ubyte),
+ ("alpha", np.ubyte),
+ ("width", float),
+ ],
)
if self.parent.sector_g:
self.parent.ui.image_view.removeItem(self.parent.sector_g)
self.parent.sector_g = pg.GraphItem()
self.parent.ui.image_view.addItem(self.parent.sector_g)
- self.parent.sector_g.setData(pos=pos, adj=adj, pen=lines, size=1, symbol=symbols, pxMode=False)
+ self.parent.sector_g.setData(
+ pos=pos, adj=adj, pen=lines, size=1, symbol=symbols, pxMode=False
+ )
def update_angle_label_position(self):
x0 = int(str(self.parent.ui.circle_x.text()))
diff --git a/notebooks/__code/radial_profile/initialization.py b/notebooks/__code/radial_profile/initialization.py
index 5f3ddc77..490da424 100755
--- a/notebooks/__code/radial_profile/initialization.py
+++ b/notebooks/__code/radial_profile/initialization.py
@@ -1,6 +1,15 @@
import pyqtgraph as pg
from qtpy import QtCore
-from qtpy.QtWidgets import QHBoxLayout, QLabel, QProgressBar, QSizePolicy, QSlider, QSpacerItem, QVBoxLayout, QWidget
+from qtpy.QtWidgets import (
+ QHBoxLayout,
+ QLabel,
+ QProgressBar,
+ QSizePolicy,
+ QSlider,
+ QSpacerItem,
+ QVBoxLayout,
+ QWidget,
+)
from __code._utilities.parent import Parent
from __code.radial_profile.event_handler import EventHandler
@@ -64,9 +73,15 @@ def widgets(self):
# self.parent.ui.lineEdit.setText(str(self.parent.grid_size))
self.parent.ui.guide_red_slider.setValue(self.parent.guide_color_slider["red"])
- self.parent.ui.guide_green_slider.setValue(self.parent.guide_color_slider["green"])
- self.parent.ui.guide_blue_slider.setValue(self.parent.guide_color_slider["blue"])
- self.parent.ui.guide_alpha_slider.setValue(self.parent.guide_color_slider["alpha"])
+ self.parent.ui.guide_green_slider.setValue(
+ self.parent.guide_color_slider["green"]
+ )
+ self.parent.ui.guide_blue_slider.setValue(
+ self.parent.guide_color_slider["blue"]
+ )
+ self.parent.ui.guide_alpha_slider.setValue(
+ self.parent.guide_color_slider["alpha"]
+ )
self.parent.ui.sector_from_value.setText(str(self.parent.sector_range["from"]))
self.parent.ui.sector_to_value.setText(str(self.parent.sector_range["to"]))
diff --git a/notebooks/__code/radial_profile/radial_profile.py b/notebooks/__code/radial_profile/radial_profile.py
index 74d216f3..e56e27bf 100755
--- a/notebooks/__code/radial_profile/radial_profile.py
+++ b/notebooks/__code/radial_profile/radial_profile.py
@@ -36,7 +36,9 @@ def __init__(self, parent_ui=None, data=None, list_files=None, working_dir=""):
self.short_list_files = [os.path.basename(_file) for _file in list_files]
color = Color()
- self.list_rgb_profile_color = color.get_list_rgb(nbr_color=len(self.working_data))
+ self.list_rgb_profile_color = color.get_list_rgb(
+ nbr_color=len(self.working_data)
+ )
def calculate(self, center=None, angle_range=None, max_radius=None):
QApplication.setOverrideCursor(QtCore.Qt.WaitCursor)
@@ -68,7 +70,9 @@ def calculate(self, center=None, angle_range=None, max_radius=None):
for _index in np.arange(nbr_files):
o_calculation = CalculateRadialProfile(data=self.working_data[_index])
- o_calculation.add_params(center=center, angle_range=angle_range, radius=max_radius)
+ o_calculation.add_params(
+ center=center, angle_range=angle_range, radius=max_radius
+ )
o_calculation.calculate()
_short_file_name = self.short_list_files[_index]
@@ -105,13 +109,18 @@ def export(self, output_folder=""):
make_or_reset_folder(output_folder)
for _index, _file in enumerate(self.list_images):
- time_stamp_of_that_file = MetadataHandler.get_time_stamp(file_name=_file)
+ time_stamp_of_that_file = MetadataHandler.get_time_stamp(
+ file_name=_file
+ )
[input_image_base_name, ext] = os.path.splitext(os.path.basename(_file))
output_file_name = os.path.join(
output_folder,
input_image_base_name
+ "_profile_c_x{}_y{}_angle_{}_to_{}".format(
- self.center["x0"], self.center["y0"], self.angle_range["from"], self.angle_range["to"]
+ self.center["x0"],
+ self.center["y0"],
+ self.angle_range["from"],
+ self.angle_range["to"],
),
)
if self.max_radius:
@@ -121,7 +130,11 @@ def export(self, output_folder=""):
text = [f"# source image: {_file}"]
text.append(f"# timestamp: {time_stamp_of_that_file}")
- text.append("# center [x0, y0]: [{},{}]".format(self.center["x0"], self.center["y0"]))
+ text.append(
+ "# center [x0, y0]: [{},{}]".format(
+ self.center["x0"], self.center["y0"]
+ )
+ )
text.append(
"# angular range from {}degrees to {}degrees".format(
self.angle_range["from"], self.angle_range["to"]
@@ -129,11 +142,21 @@ def export(self, output_folder=""):
)
text.append("")
text.append("#pixel_from_center, Average_counts")
- data = list(zip(np.arange(len(self.profile_data[_index])), self.profile_data[_index], strict=False))
+ data = list(
+ zip(
+ np.arange(len(self.profile_data[_index])),
+ self.profile_data[_index],
+ strict=False,
+ )
+ )
- file_handler.make_ascii_file(metadata=text, data=data, output_file_name=output_file_name)
+ file_handler.make_ascii_file(
+ metadata=text, data=data, output_file_name=output_file_name
+ )
- self.parent_ui.ui.statusbar.showMessage(f"Profiles Exported in {output_folder}!", 10000)
+ self.parent_ui.ui.statusbar.showMessage(
+ f"Profiles Exported in {output_folder}!", 10000
+ )
self.parent_ui.ui.statusbar.setStyleSheet("color: green")
@@ -152,7 +175,12 @@ class SelectRadialParameters(QMainWindow):
sector_range = {"from": 0, "to": 90}
- corners = {"top_right": np.nan, "bottom_right": np.nan, "bottom_left": np.nan, "top_left": np.nan}
+ corners = {
+ "top_right": np.nan,
+ "bottom_right": np.nan,
+ "bottom_left": np.nan,
+ "top_left": np.nan,
+ }
hLine = None
vLine = None
@@ -184,7 +212,8 @@ def __init__(self, parent=None, working_dir="", data_dict=None):
super(QMainWindow, self).__init__(parent)
ui_full_path = os.path.join(
- os.path.dirname(os.path.dirname(os.path.dirname(__file__))), os.path.join("ui", "ui_radial_profile.ui")
+ os.path.dirname(os.path.dirname(os.path.dirname(__file__))),
+ os.path.join("ui", "ui_radial_profile.ui"),
)
self.ui = load_ui(ui_full_path, baseinstance=self)
self.setWindowTitle("Define center and sector of profile")
@@ -226,7 +255,9 @@ def __init__(self, parent=None, working_dir="", data_dict=None):
def help_button_clicked(self):
import webbrowser
- webbrowser.open("https://neutronimaging.pages.ornl.gov/tutorial/notebooks/radial_profile/")
+ webbrowser.open(
+ "https://neutronimaging.pages.ornl.gov/tutorial/notebooks/radial_profile/"
+ )
def grid_slider_moved(self, value):
self.grid_size_changed()
@@ -282,10 +313,19 @@ def grid_size_changed(self):
def calculate_profiles_clicked(self):
o_profile = RadialProfile(
- parent_ui=self, data=self.working_data, list_files=self.list_images, working_dir=self.working_dir
+ parent_ui=self,
+ data=self.working_data,
+ list_files=self.list_images,
+ working_dir=self.working_dir,
+ )
+ radius = (
+ self.ui.max_radius_slider.value()
+ if self.ui.max_radius_radioButton.isChecked()
+ else None
+ )
+ o_profile.calculate(
+ center=self.center, angle_range=self.angle_range, max_radius=radius
)
- radius = self.ui.max_radius_slider.value() if self.ui.max_radius_radioButton.isChecked() else None
- o_profile.calculate(center=self.center, angle_range=self.angle_range, max_radius=radius)
self.profile_data = o_profile.profile_data
self.o_profile = o_profile
@@ -293,7 +333,10 @@ def calculate_profiles_clicked(self):
def export_profiles_clicked(self):
_export_folder = QFileDialog.getExistingDirectory(
- self, directory=self.working_dir, caption="Select Output Folder", options=QFileDialog.ShowDirsOnly
+ self,
+ directory=self.working_dir,
+ caption="Select Output Folder",
+ options=QFileDialog.ShowDirsOnly,
)
QApplication.processEvents()
if _export_folder:
diff --git a/notebooks/__code/registration/calculate.py b/notebooks/__code/registration/calculate.py
index 0f9b37d4..a9545d5e 100755
--- a/notebooks/__code/registration/calculate.py
+++ b/notebooks/__code/registration/calculate.py
@@ -59,7 +59,10 @@ def intermediates_points(p1, p2):
x_spacing = (p2[0] - p1[0]) / (nb_points + 1)
y_spacing = (p2[1] - p1[1]) / (nb_points + 1)
- full_array = [[int(p1[0] + i * x_spacing), int(p1[1] + i * y_spacing)] for i in range(1, nb_points + 1)]
+ full_array = [
+ [int(p1[0] + i * x_spacing), int(p1[1] + i * y_spacing)]
+ for i in range(1, nb_points + 1)
+ ]
clean_array = []
for _points in full_array:
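The comprehension rewrapped above samples evenly spaced integer points between two endpoints. A minimal standalone sketch of the same idea follows; `nb_points` is assumed here to be the larger coordinate span (the notebook computes it earlier, outside this hunk), and the clean_array de-duplication that follows is omitted.

```python
def intermediate_points(p1, p2):
    # Assumed choice: one sample per pixel of the longer axis span.
    nb_points = int(max(abs(p2[0] - p1[0]), abs(p2[1] - p1[1])))
    x_spacing = (p2[0] - p1[0]) / (nb_points + 1)
    y_spacing = (p2[1] - p1[1]) / (nb_points + 1)
    return [
        [int(p1[0] + i * x_spacing), int(p1[1] + i * y_spacing)]
        for i in range(1, nb_points + 1)
    ]

print(intermediate_points((0, 0), (4, 2)))  # [[0, 0], [1, 0], [2, 1], [3, 1]]
```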
diff --git a/notebooks/__code/registration/calculate_profiles_difference.py b/notebooks/__code/registration/calculate_profiles_difference.py
index 61de2239..6b6ce45d 100755
--- a/notebooks/__code/registration/calculate_profiles_difference.py
+++ b/notebooks/__code/registration/calculate_profiles_difference.py
@@ -38,7 +38,9 @@ def run(self):
for _key in self.roi["horizontal"]["profiles"].keys():
_profile = self.roi["horizontal"]["profiles"][_key]["profile"]
offset_found = CalculateProfilesDifference.calculate_pixel_offset(
- profile_reference=reference_profile, working_profile=_profile, max_pixel_range=MAX_PIXEL_RANGE
+ profile_reference=reference_profile,
+ working_profile=_profile,
+ max_pixel_range=MAX_PIXEL_RANGE,
)
self.parent.offset["horizontal"].append(-offset_found)
@@ -49,7 +51,9 @@ def run(self):
for _key in self.roi["vertical"]["profiles"].keys():
_profile = self.roi["vertical"]["profiles"][_key]["profile"]
offset_found = CalculateProfilesDifference.calculate_pixel_offset(
- profile_reference=reference_profile, working_profile=_profile, max_pixel_range=MAX_PIXEL_RANGE
+ profile_reference=reference_profile,
+ working_profile=_profile,
+ max_pixel_range=MAX_PIXEL_RANGE,
)
self.parent.offset["vertical"].append(-offset_found)
@@ -60,14 +64,18 @@ def sum_abs_diff(profile_a, profile_b):
return np.sum(abs_list_diff)
@staticmethod
- def calculate_pixel_offset(profile_reference=None, working_profile=None, max_pixel_range=20):
+ def calculate_pixel_offset(
+ profile_reference=None, working_profile=None, max_pixel_range=20
+ ):
list_profiles = []
for _offset in np.arange(-max_pixel_range, max_pixel_range):
list_profiles.append(np.roll(working_profile, _offset))
list_sum_abs_diff = []
for _profile in list_profiles:
- list_sum_abs_diff.append(CalculateProfilesDifference.sum_abs_diff(_profile, profile_reference))
+ list_sum_abs_diff.append(
+ CalculateProfilesDifference.sum_abs_diff(_profile, profile_reference)
+ )
min_value = np.min(list_sum_abs_diff)
min_index = np.where(min_value == list_sum_abs_diff)[0][0]
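calculate_pixel_offset, reformatted above, slides the working profile across the +/- max_pixel_range window and keeps the shift whose summed absolute difference against the reference profile is smallest (the caller then negates the result). A self-contained sketch of that search, with illustrative names and toy data rather than the notebook's class wiring:

```python
import numpy as np

def pixel_offset(profile_reference, working_profile, max_pixel_range=20):
    # Try every shift in [-max_pixel_range, max_pixel_range) and score it
    # by the sum of absolute differences against the reference.
    offsets = np.arange(-max_pixel_range, max_pixel_range)
    scores = [
        np.sum(np.abs(np.roll(working_profile, int(shift)) - profile_reference))
        for shift in offsets
    ]
    return int(offsets[int(np.argmin(scores))])

reference = np.array([0.0, 0.0, 1.0, 5.0, 1.0, 0.0, 0.0])
working = np.roll(reference, 2)          # simulate a profile shifted by 2 pixels
print(pixel_offset(reference, working))  # -2: roll back by two pixels to realign
```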
diff --git a/notebooks/__code/registration/display.py b/notebooks/__code/registration/display.py
index d56bf215..5dfeec19 100755
--- a/notebooks/__code/registration/display.py
+++ b/notebooks/__code/registration/display.py
@@ -19,7 +19,9 @@ def image(self):
_image = o_get.image_selected()
else: # display selected images according to slider position
# retrieve slider infos
- slider_index = self.parent.ui.opacity_selection_slider.sliderPosition() / 100
+ slider_index = (
+ self.parent.ui.opacity_selection_slider.sliderPosition() / 100
+ )
from_index = int(slider_index)
to_index = int(slider_index + 1)
@@ -33,7 +35,9 @@ def image(self):
_from_coefficient = np.abs(to_index - slider_index)
_to_coefficient = np.abs(slider_index - from_index)
- _image = _from_image * _from_coefficient + _to_image * _to_coefficient
+ _image = (
+ _from_image * _from_coefficient + _to_image * _to_coefficient
+ )
else: # only 1 row selected
_image = o_get.image_selected()
@@ -54,7 +58,9 @@ def image(self):
_histo_widget = self.parent.ui.image_view.getHistogramWidget()
self.parent.histogram_level = _histo_widget.getLevels()
- _opacity_coefficient = self.parent.ui.opacity_slider.value() # betwween 0 and 100
+ _opacity_coefficient = (
+ self.parent.ui.opacity_slider.value()
+        ) # between 0 and 100
_opacity_image = _opacity_coefficient / 100.0
_image = np.transpose(_image) * _opacity_image
@@ -67,7 +73,9 @@ def image(self):
_view_box.setState(_state)
if not first_update:
- _histo_widget.setLevels(self.parent.histogram_level[0], self.parent.histogram_level[1])
+ _histo_widget.setLevels(
+ self.parent.histogram_level[0], self.parent.histogram_level[1]
+ )
def display_only_reference_image(self):
self.parent.ui.selection_reference_opacity_groupBox.setVisible(False)
@@ -88,7 +96,9 @@ def display_only_reference_image(self):
_view_box.setState(_state)
if not first_update:
- _histo_widget.setLevels(self.parent.histogram_level[0], self.parent.histogram_level[1])
+ _histo_widget.setLevels(
+ self.parent.histogram_level[0], self.parent.histogram_level[1]
+ )
def live_image(self):
"""no calculation will be done. This will only display the reference image
@@ -108,7 +118,9 @@ def live_image(self):
_view_box.setState(_state)
if not first_update:
- _histo_widget.setLevels(self.parent.histogram_level[0], self.parent.histogram_level[1])
+ _histo_widget.setLevels(
+ self.parent.histogram_level[0], self.parent.histogram_level[1]
+ )
# we do not want a grid on top
if self.parent.grid_view["item"]:
@@ -120,14 +132,22 @@ def live_image(self):
grid_size = self.parent.ui.grid_size_slider.value()
[width, height] = np.shape(live_image)
- pos_adj_dict = Calculate.calculate_matrix_grid(grid_size=grid_size, height=height, width=width)
+ pos_adj_dict = Calculate.calculate_matrix_grid(
+ grid_size=grid_size, height=height, width=width
+ )
pos = pos_adj_dict["pos"]
adj = pos_adj_dict["adj"]
line_color = self.parent.grid_view["color"]
lines = np.array(
[line_color for n in np.arange(len(pos))],
- dtype=[("red", np.ubyte), ("green", np.ubyte), ("blue", np.ubyte), ("alpha", np.ubyte), ("width", float)],
+ dtype=[
+ ("red", np.ubyte),
+ ("green", np.ubyte),
+ ("blue", np.ubyte),
+ ("alpha", np.ubyte),
+ ("width", float),
+ ],
)
grid = pg.GraphItem()
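Earlier in this display.py patch, the two-image blend weights the neighbouring images by how far the selection slider sits between them (the /100 in the hunk suggests 100 slider units per image). A numeric sketch of that weighting with illustrative values; the notebook reads the real images from data_dict:

```python
import numpy as np

slider_position = 230                     # assumed: slider runs at 100 units per image
slider_index = slider_position / 100      # 2.3 -> between images 2 and 3
from_index, to_index = int(slider_index), int(slider_index) + 1

from_coefficient = abs(to_index - slider_index)  # ~0.7, weight of image 2
to_coefficient = abs(slider_index - from_index)  # ~0.3, weight of image 3

image_from = np.zeros((2, 2))  # stand-ins for data_dict["data"][from_index]
image_to = np.ones((2, 2))     # and data_dict["data"][to_index]
blended = image_from * from_coefficient + image_to * to_coefficient
print(blended)  # every pixel ~0.3: the "to" image contributes 30%, the "from" image 70%
```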
diff --git a/notebooks/__code/registration/event_handler.py b/notebooks/__code/registration/event_handler.py
index db2ff873..5e5e5693 100755
--- a/notebooks/__code/registration/event_handler.py
+++ b/notebooks/__code/registration/event_handler.py
@@ -68,7 +68,9 @@ def paste_value_copied(self, column=1):
self.parent.ui.tableWidget.blockSignals(True)
for _row in row_selected:
- o_table.set_item_with_str(row=_row, column=column, cell_str=self.parent.value_to_copy)
+ o_table.set_item_with_str(
+ row=_row, column=column, cell_str=self.parent.value_to_copy
+ )
self.parent.ui.tableWidget.blockSignals(False)
@@ -77,7 +79,9 @@ def update_table_according_to_filter(self):
o_table = TableHandler(table_ui=self.parent.ui.tableWidget)
- def should_row_be_visible(row_value=None, filter_algo_selected="<=", filter_value=None):
+ def should_row_be_visible(
+ row_value=None, filter_algo_selected="<=", filter_value=None
+ ):
if is_float(filter_value):
o_table.set_all_row_hidden(False)
return
@@ -91,7 +95,9 @@ def should_row_be_visible(row_value=None, filter_algo_selected="<=", filter_valu
if filter_flag:
# select only rows according to filter
- filter_column_selected = self.parent.ui.filter_column_name_comboBox.currentText()
+ filter_column_selected = (
+ self.parent.ui.filter_column_name_comboBox.currentText()
+ )
filter_algo_selected = self.parent.ui.filter_logic_comboBox.currentText()
filter_value = self.parent.ui.filter_value.text()
@@ -106,9 +112,13 @@ def should_row_be_visible(row_value=None, filter_algo_selected="<=", filter_valu
nbr_row = o_table.row_count()
for _row in np.arange(nbr_row):
- _row_value = float(o_table.get_item_str_from_cell(row=_row, column=filter_column_index))
+ _row_value = float(
+ o_table.get_item_str_from_cell(row=_row, column=filter_column_index)
+ )
_should_row_be_visible = should_row_be_visible(
- row_value=_row_value, filter_algo_selected=filter_algo_selected, filter_value=filter_value
+ row_value=_row_value,
+ filter_algo_selected=filter_algo_selected,
+ filter_value=filter_value,
)
o_table.set_row_hidden(_row, not _should_row_be_visible)
else:
@@ -144,7 +154,9 @@ def profile_line_moved(self):
self.parent.legend = self.parent.ui.profile.addLegend()
- region = self.parent.ui.profile_line.getArraySlice(self.parent.live_image, self.parent.ui.image_view.imageItem)
+ region = self.parent.ui.profile_line.getArraySlice(
+ self.parent.live_image, self.parent.ui.image_view.imageItem
+ )
x0 = region[0][0].start + 3
x1 = region[0][0].stop - 3
@@ -169,20 +181,36 @@ def profile_line_moved(self):
continue
_data = np.transpose(self.parent.data_dict["data"][_index])
- _filename = os.path.basename(self.parent.data_dict["file_name"][_index])
- _profile = [_data[_point[0], _point[1]] for _point in intermediate_points]
+ _filename = os.path.basename(
+ self.parent.data_dict["file_name"][_index]
+ )
+ _profile = [
+ _data[_point[0], _point[1]] for _point in intermediate_points
+ ]
self.parent.ui.profile.plot(
- xaxis, _profile, name=_filename, pen=self.parent.list_rgb_profile_color[_index]
+ xaxis,
+ _profile,
+ name=_filename,
+ pen=self.parent.list_rgb_profile_color[_index],
)
else: # selection slider
- slider_index = self.parent.ui.opacity_selection_slider.sliderPosition() / 100
+ slider_index = (
+ self.parent.ui.opacity_selection_slider.sliderPosition() / 100
+ )
from_index = int(slider_index)
_data = np.transpose(self.parent.data_dict["data"][from_index])
- _filename = os.path.basename(self.parent.data_dict["file_name"][from_index])
- _profile = [_data[_point[0], _point[1]] for _point in intermediate_points]
+ _filename = os.path.basename(
+ self.parent.data_dict["file_name"][from_index]
+ )
+ _profile = [
+ _data[_point[0], _point[1]] for _point in intermediate_points
+ ]
self.parent.ui.profile.plot(
- xaxis, _profile, name=_filename, pen=self.parent.list_rgb_profile_color[from_index]
+ xaxis,
+ _profile,
+ name=_filename,
+ pen=self.parent.list_rgb_profile_color[from_index],
)
if from_index == slider_index:
@@ -191,10 +219,17 @@ def profile_line_moved(self):
else:
to_index = int(slider_index + 1)
_data = np.transpose(self.parent.data_dict["data"][to_index])
- _filename = os.path.basename(self.parent.data_dict["file_name"][to_index])
- _profile = [_data[_point[0], _point[1]] for _point in intermediate_points]
+ _filename = os.path.basename(
+ self.parent.data_dict["file_name"][to_index]
+ )
+ _profile = [
+ _data[_point[0], _point[1]] for _point in intermediate_points
+ ]
self.parent.ui.profile.plot(
- xaxis, _profile, name=_filename, pen=self.parent.list_rgb_profile_color[to_index]
+ xaxis,
+ _profile,
+ name=_filename,
+ pen=self.parent.list_rgb_profile_color[to_index],
)
else:
@@ -205,10 +240,17 @@ def profile_line_moved(self):
if not row_selected == self.parent.reference_image_index:
_data = np.transpose(self.parent.data_dict["data"][row_selected])
- _filename = os.path.basename(self.parent.data_dict["file_name"][row_selected])
- _profile = [_data[_point[0], _point[1]] for _point in intermediate_points]
+ _filename = os.path.basename(
+ self.parent.data_dict["file_name"][row_selected]
+ )
+ _profile = [
+ _data[_point[0], _point[1]] for _point in intermediate_points
+ ]
self.parent.ui.profile.plot(
- xaxis, _profile, name=_filename, pen=self.parent.list_rgb_profile_color[row_selected]
+ xaxis,
+ _profile,
+ name=_filename,
+ pen=self.parent.list_rgb_profile_color[row_selected],
)
# selected_image = self.parent.live_image
@@ -219,11 +261,18 @@ def profile_line_moved(self):
# Always display profile reference
reference_image = np.transpose(self.parent.reference_image)
- profile_reference = [reference_image[_point[0], _point[1]] for _point in intermediate_points]
+ profile_reference = [
+ reference_image[_point[0], _point[1]] for _point in intermediate_points
+ ]
- reference_file_name = os.path.basename(self.parent.data_dict["file_name"][self.parent.reference_image_index])
+ reference_file_name = os.path.basename(
+ self.parent.data_dict["file_name"][self.parent.reference_image_index]
+ )
self.parent.ui.profile.plot(
- xaxis, profile_reference, pen=self.parent.color_reference_profile, name=f"Ref.: {reference_file_name}"
+ xaxis,
+ profile_reference,
+ pen=self.parent.color_reference_profile,
+ name=f"Ref.: {reference_file_name}",
)
def modified_images(self, list_row=[], all_row=False):
diff --git a/notebooks/__code/registration/export.py b/notebooks/__code/registration/export.py
index 996f74b9..0ed82d19 100755
--- a/notebooks/__code/registration/export.py
+++ b/notebooks/__code/registration/export.py
@@ -17,15 +17,24 @@ def __init__(self, parent=None):
def run(self):
_export_folder = QFileDialog.getExistingDirectory(
- self.parent, directory=self.working_dir, caption="Select Output Folder", options=QFileDialog.ShowDirsOnly
+ self.parent,
+ directory=self.working_dir,
+ caption="Select Output Folder",
+ options=QFileDialog.ShowDirsOnly,
)
if _export_folder:
# add custom folder name
working_dir_basename = os.path.basename(self.working_dir)
# append "registered" and "time_stamp"
- full_output_folder_name = os.path.join(_export_folder, working_dir_basename + "_registered")
- full_output_folder_name = make_or_increment_folder_name(full_output_folder_name)
- o_export = ExportRegistration(parent=self.parent, export_folder=full_output_folder_name)
+ full_output_folder_name = os.path.join(
+ _export_folder, working_dir_basename + "_registered"
+ )
+ full_output_folder_name = make_or_increment_folder_name(
+ full_output_folder_name
+ )
+ o_export = ExportRegistration(
+ parent=self.parent, export_folder=full_output_folder_name
+ )
o_export.run()
QApplication.processEvents()
@@ -47,12 +56,19 @@ def run(self):
for _row, _data in enumerate(data_dict_raw["data"]):
_filename = list_file_names[_row]
if not _row == self.parent.reference_image_index:
- _xoffset = int(np.floor(float(self.parent.ui.tableWidget.item(_row, 1).text())))
- _yoffset = int(np.floor(float(self.parent.ui.tableWidget.item(_row, 2).text())))
+ _xoffset = int(
+ np.floor(float(self.parent.ui.tableWidget.item(_row, 1).text()))
+ )
+ _yoffset = int(
+ np.floor(float(self.parent.ui.tableWidget.item(_row, 2).text()))
+ )
_rotation = float(self.parent.ui.tableWidget.item(_row, 3).text())
_data_registered = self.registered_data(
- raw_data=_data, xoffset=_xoffset, yoffset=_yoffset, rotation=_rotation
+ raw_data=_data,
+ xoffset=_xoffset,
+ yoffset=_yoffset,
+ rotation=_rotation,
)
else:
_data_registered = _data
diff --git a/notebooks/__code/registration/file_selection.py b/notebooks/__code/registration/file_selection.py
index 2145a3ec..979ce6f5 100755
--- a/notebooks/__code/registration/file_selection.py
+++ b/notebooks/__code/registration/file_selection.py
@@ -12,7 +12,9 @@ def __init__(self, working_dir="./"):
self.working_dir = working_dir
def select_file_help(self, value):
- webbrowser.open("https://neutronimaging.ornl.gov/tutorials/imaging-notebooks/file-selector-tool/")
+ webbrowser.open(
+ "https://neutronimaging.ornl.gov/tutorials/imaging-notebooks/file-selector-tool/"
+ )
def load_files(self, files):
files.sort()
@@ -26,7 +28,10 @@ def select_data(self):
display(help_ui)
self.files_ui = fileselector.FileSelectorPanel(
- instruction="Select Images ...", start_dir=self.working_dir, next=self.load_files, multiple=True
+ instruction="Select Images ...",
+ start_dir=self.working_dir,
+ next=self.load_files,
+ multiple=True,
)
self.files_ui.show()
diff --git a/notebooks/__code/registration/get.py b/notebooks/__code/registration/get.py
index a31394dd..50bc0c16 100755
--- a/notebooks/__code/registration/get.py
+++ b/notebooks/__code/registration/get.py
@@ -14,7 +14,9 @@ def get_list_short_file_selected(self):
list_row_selected = self.list_row_selected()
full_list_files = np.array(self.parent.data_dict["file_name"])
list_file_selected = full_list_files[list_row_selected]
- list_short_file_selected = [os.path.basename(_file) for _file in list_file_selected]
+ list_short_file_selected = [
+ os.path.basename(_file) for _file in list_file_selected
+ ]
return list_short_file_selected
def list_row_selected(self):
@@ -40,7 +42,9 @@ def image_selected(self):
return []
table_selection = table_selection[0]
- top_row = table_selection.topRow() # offset because first image is reference image
+ top_row = (
+ table_selection.topRow()
+ ) # offset because first image is reference image
bottom_row = table_selection.bottomRow() + 1
_image = np.mean(self.parent.data_dict["data"][top_row:bottom_row], axis=0)
diff --git a/notebooks/__code/registration/initialization.py b/notebooks/__code/registration/initialization.py
index d15c876c..bbcee9ea 100755
--- a/notebooks/__code/registration/initialization.py
+++ b/notebooks/__code/registration/initialization.py
@@ -50,7 +50,9 @@ def pyqtgrpah(self):
self.parent.ui.profile_line = pg.LineSegmentROI([[50, 50], [100, 100]], pen="r")
self.parent.ui.image_view.addItem(self.parent.ui.profile_line)
d1.addWidget(self.parent.ui.image_view)
- self.parent.ui.profile_line.sigRegionChanged.connect(self.parent.profile_line_moved)
+ self.parent.ui.profile_line.sigRegionChanged.connect(
+ self.parent.profile_line_moved
+ )
# profile
self.parent.ui.profile = pg.PlotWidget(title="Profile")
@@ -71,7 +73,9 @@ def widgets(self):
# update size of table columns
nbr_columns = self.parent.ui.tableWidget.columnCount()
for _col in range(nbr_columns):
- self.parent.ui.tableWidget.setColumnWidth(_col, self.parent.table_column_width[_col])
+ self.parent.ui.tableWidget.setColumnWidth(
+ _col, self.parent.table_column_width[_col]
+ )
# update slide widget of files
nbr_files = len(self.parent.data_dict["file_name"])
diff --git a/notebooks/__code/registration/manual.py b/notebooks/__code/registration/manual.py
index 6a44120a..f34b532d 100755
--- a/notebooks/__code/registration/manual.py
+++ b/notebooks/__code/registration/manual.py
@@ -38,7 +38,8 @@ def __init__(self, parent=None):
super(QMainWindow, self).__init__(parent)
ui_full_path = os.path.join(
- os.path.dirname(os.path.dirname(os.path.dirname(__file__))), os.path.join("ui", "ui_registration_tool.ui")
+ os.path.dirname(os.path.dirname(os.path.dirname(__file__))),
+ os.path.join("ui", "ui_registration_tool.ui"),
)
self.ui = load_ui(ui_full_path, baseinstance=self)
self.parent = parent
@@ -48,45 +49,72 @@ def __init__(self, parent=None):
def initialize_widgets(self):
_file_path = os.path.dirname(__file__)
- up_arrow_file = os.path.abspath(os.path.join(_file_path, "../static/up_arrow.png"))
+ up_arrow_file = os.path.abspath(
+ os.path.join(_file_path, "../static/up_arrow.png")
+ )
self.ui.up_button.setIcon(QIcon(up_arrow_file))
- down_arrow_file = os.path.abspath(os.path.join(_file_path, "../static/down_arrow.png"))
+ down_arrow_file = os.path.abspath(
+ os.path.join(_file_path, "../static/down_arrow.png")
+ )
self.ui.down_button.setIcon(QIcon(down_arrow_file))
- right_arrow_file = os.path.abspath(os.path.join(_file_path, "../static/right_arrow.png"))
+ right_arrow_file = os.path.abspath(
+ os.path.join(_file_path, "../static/right_arrow.png")
+ )
self.ui.right_button.setIcon(QIcon(right_arrow_file))
- left_arrow_file = os.path.abspath(os.path.join(_file_path, "../static/left_arrow.png"))
+ left_arrow_file = os.path.abspath(
+ os.path.join(_file_path, "../static/left_arrow.png")
+ )
self.ui.left_button.setIcon(QIcon(left_arrow_file))
- rotate_left_file = os.path.abspath(os.path.join(_file_path, "../static/rotate_left.png"))
+ rotate_left_file = os.path.abspath(
+ os.path.join(_file_path, "../static/rotate_left.png")
+ )
self.ui.rotate_left_button.setIcon(QIcon(rotate_left_file))
- rotate_right_file = os.path.abspath(os.path.join(_file_path, "../static/rotate_right.png"))
+ rotate_right_file = os.path.abspath(
+ os.path.join(_file_path, "../static/rotate_right.png")
+ )
self.ui.rotate_right_button.setIcon(QIcon(rotate_right_file))
- small_rotate_left_file = os.path.abspath(os.path.join(_file_path, "../static/small_rotate_left.png"))
+ small_rotate_left_file = os.path.abspath(
+ os.path.join(_file_path, "../static/small_rotate_left.png")
+ )
self.ui.small_rotate_left_button.setIcon(QIcon(small_rotate_left_file))
- small_rotate_right_file = os.path.abspath(os.path.join(_file_path, "../static/small_rotate_right.png"))
+ small_rotate_right_file = os.path.abspath(
+ os.path.join(_file_path, "../static/small_rotate_right.png")
+ )
self.ui.small_rotate_right_button.setIcon(QIcon(small_rotate_right_file))
- self.list_arrow_widgets = [self.ui.up_button, self.ui.down_button, self.ui.left_button, self.ui.right_button]
+ self.list_arrow_widgets = [
+ self.ui.up_button,
+ self.ui.down_button,
+ self.ui.left_button,
+ self.ui.right_button,
+ ]
self._set_widgets_size(
widgets=self.list_arrow_widgets,
width=self.button_size["arrow"]["width"],
height=self.button_size["arrow"]["height"],
)
- self.list_rotate_widgets = [self.ui.rotate_left_button, self.ui.rotate_right_button]
+ self.list_rotate_widgets = [
+ self.ui.rotate_left_button,
+ self.ui.rotate_right_button,
+ ]
self._set_widgets_size(
widgets=self.list_rotate_widgets,
width=self.button_size["rotate"]["width"],
height=self.button_size["rotate"]["height"],
)
- self.list_small_rotate_widgets = [self.ui.small_rotate_left_button, self.ui.small_rotate_right_button]
+ self.list_small_rotate_widgets = [
+ self.ui.small_rotate_left_button,
+ self.ui.small_rotate_right_button,
+ ]
self._set_widgets_size(
widgets=self.list_small_rotate_widgets,
width=self.button_size["small_rotate"]["width"],
diff --git a/notebooks/__code/registration/marker_handler.py b/notebooks/__code/registration/marker_handler.py
index 2a02b17c..f8b92d57 100755
--- a/notebooks/__code/registration/marker_handler.py
+++ b/notebooks/__code/registration/marker_handler.py
@@ -13,8 +13,12 @@ def display_markers(self, all=False):
return
if all is False:
- _current_tab = self.parent.registration_markers_ui.ui.tabWidget.currentIndex()
- _tab_title = self.parent.registration_markers_ui.ui.tabWidget.tabText(_current_tab)
+ _current_tab = (
+ self.parent.registration_markers_ui.ui.tabWidget.currentIndex()
+ )
+ _tab_title = self.parent.registration_markers_ui.ui.tabWidget.tabText(
+ _current_tab
+ )
self.display_markers_of_tab(marker_name=_tab_title)
else:
for _index, _marker_name in enumerate(self.parent.markers_table.keys()):
@@ -60,11 +64,19 @@ def display_markers_of_tab(self, marker_name=""):
_marker_ui.removeHandle(0)
_marker_ui.sigRegionChanged.connect(self.parent.marker_has_been_moved)
- if nbr_file_selected > 1: # more than 1 file selected, we need to add the index of the file
+ if (
+ nbr_file_selected > 1
+ ): # more than 1 file selected, we need to add the index of the file
text_ui = self.parent.add_marker_label(
- file_index=list_row_selected[_index], marker_index=marker_name, x=x, y=y, color=_color_marker
+ file_index=list_row_selected[_index],
+ marker_index=marker_name,
+ x=x,
+ y=y,
+ color=_color_marker,
+ )
+ self.parent.markers_table[marker_name]["data"][_file]["label_ui"] = (
+ text_ui
)
- self.parent.markers_table[marker_name]["data"][_file]["label_ui"] = text_ui
_marker_data["marker_ui"] = _marker_ui
@@ -81,7 +93,9 @@ def marker_has_been_moved(self):
_marker_data = self.parent.markers_table[_marker_name]["data"][_file]
marker_ui = _marker_data["marker_ui"]
- region = marker_ui.getArraySlice(self.parent.live_image, self.parent.ui.image_view.imageItem)
+ region = marker_ui.getArraySlice(
+ self.parent.live_image, self.parent.ui.image_view.imageItem
+ )
x0 = region[0][0].start
y0 = region[0][1].start
@@ -89,7 +103,9 @@ def marker_has_been_moved(self):
self.parent.markers_table[_marker_name]["data"][_file]["x"] = x0
self.parent.markers_table[_marker_name]["data"][_file]["y"] = y0
- self.parent.registration_markers_ui.update_markers_table_entry(marker_name=_marker_name, file=_file)
+ self.parent.registration_markers_ui.update_markers_table_entry(
+ marker_name=_marker_name, file=_file
+ )
if nbr_file_selected > 1:
_label_ui = _marker_data["label_ui"]
@@ -102,12 +118,19 @@ def marker_has_been_moved(self):
color=_color_marker,
)
self.parent.ui.image_view.addItem(_label_ui)
- self.parent.markers_table[_marker_name]["data"][_file]["label_ui"] = _label_ui
+ self.parent.markers_table[_marker_name]["data"][_file][
+ "label_ui"
+ ] = _label_ui
def add_marker_label(self, file_index=0, marker_index=1, x=0, y=0, color="white"):
html_color = MarkerDefaultSettings.color_html[color]
html_text = 'Marker#:'
-        html_text += '' + str(int(marker_index) + 1)
+ html_text += (
+ ''
+ + str(int(marker_index) + 1)
+ )
html_text += " - File#:"
html_text += '' + str(file_index)
html_text += ""
diff --git a/notebooks/__code/registration/registration.py b/notebooks/__code/registration/registration.py
index 9101b31d..a5b927a2 100755
--- a/notebooks/__code/registration/registration.py
+++ b/notebooks/__code/registration/registration.py
@@ -18,7 +18,9 @@
from __code.registration.manual import ManualLauncher
from __code.registration.marker_handler import MarkerHandler
from __code.registration.registration_auto import RegistrationAuto
-from __code.registration.registration_auto_confirmation import RegistrationAutoConfirmationLauncher
+from __code.registration.registration_auto_confirmation import (
+ RegistrationAutoConfirmationLauncher,
+)
from __code.registration.registration_marker import RegistrationMarkersLauncher
from __code.registration.registration_profile import RegistrationProfileLauncher
from __code.registration.table_handler import TableHandler
@@ -85,7 +87,8 @@ def __init__(self, parent=None, data_dict=None):
)
)
ui_full_path = os.path.join(
- os.path.dirname(os.path.dirname(os.path.dirname(__file__))), os.path.join("ui", "ui_registration.ui")
+ os.path.dirname(os.path.dirname(os.path.dirname(__file__))),
+ os.path.join("ui", "ui_registration.ui"),
)
self.ui = load_ui(ui_full_path, baseinstance=self)
@@ -100,7 +103,9 @@ def __init__(self, parent=None, data_dict=None):
self.data_dict_raw = copy.deepcopy(data_dict)
self.reference_image = self.data_dict["data"][self.reference_image_index]
self.working_dir = os.path.dirname(self.data_dict["file_name"][0])
- self.reference_image_short_name = str(os.path.basename(self.data_dict["file_name"][0]))
+ self.reference_image_short_name = str(
+ os.path.basename(self.data_dict["file_name"][0])
+ )
# initialization
o_init = Initialization(parent=self)
@@ -116,7 +121,11 @@ def __init__(self, parent=None, data_dict=None):
) # because by default first row = reference selected
def filter_checkbox_clicked(self):
- list_ui = [self.ui.filter_column_name_comboBox, self.ui.filter_logic_comboBox, self.ui.filter_value]
+ list_ui = [
+ self.ui.filter_column_name_comboBox,
+ self.ui.filter_logic_comboBox,
+ self.ui.filter_value,
+ ]
for _ui in list_ui:
_ui.setEnabled(self.ui.filter_checkBox.isChecked())
@@ -230,7 +239,9 @@ def slider_file_changed(self, index_selected):
self.ui.tableWidget.blockSignals(False)
def help_button_clicked(self):
- webbrowser.open("https://neutronimaging.pages.ornl.gov/tutorial/notebooks/registration/")
+ webbrowser.open(
+ "https://neutronimaging.pages.ornl.gov/tutorial/notebooks/registration/"
+ )
def ok_button_clicked(self):
self.close()
@@ -258,7 +269,11 @@ def next_image_button_clicked(self):
def selection_all_clicked(self):
_is_checked = self.ui.selection_all.isChecked()
- list_widgets = [self.ui.top_row_label, self.ui.bottom_row_label, self.ui.opacity_selection_slider]
+ list_widgets = [
+ self.ui.top_row_label,
+ self.ui.bottom_row_label,
+ self.ui.opacity_selection_slider,
+ ]
for _widget in list_widgets:
_widget.setEnabled(not _is_checked)
self.display_image()
@@ -294,7 +309,9 @@ def manual_registration_button_clicked(self):
)
def auto_registration_button_clicked(self):
- o_registration_auto_confirmed = RegistrationAutoConfirmationLauncher(parent=self)
+ o_registration_auto_confirmed = RegistrationAutoConfirmationLauncher(
+ parent=self
+ )
def markers_registration_button_clicked(self):
o_markers_registration = RegistrationMarkersLauncher(parent=self)
@@ -320,7 +337,9 @@ def profiler_registration_button_clicked(self):
def start_auto_registration(self):
o_auto_register = RegistrationAuto(
- parent=self, reference_image=self.reference_image, floating_images=self.data_dict["data"]
+ parent=self,
+ reference_image=self.reference_image,
+ floating_images=self.data_dict["data"],
)
o_auto_register.auto_align()
diff --git a/notebooks/__code/registration/registration_auto.py b/notebooks/__code/registration/registration_auto.py
index 8582cbfc..b5893b4f 100755
--- a/notebooks/__code/registration/registration_auto.py
+++ b/notebooks/__code/registration/registration_auto.py
@@ -25,7 +25,9 @@ def auto_align(self):
self.parent.eventProgress.setVisible(True)
for _row, _image in enumerate(_list_images):
- [yoffset, xoffset], error, diffphase = register_translation(_ref_image, _image)
+ [yoffset, xoffset], error, diffphase = register_translation(
+ _ref_image, _image
+ )
if not _row == self.parent.reference_image_index:
self.parent.set_item(row=_row, col=1, value=xoffset)
self.parent.set_item(row=_row, col=2, value=yoffset)
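The auto-alignment above delegates the shift estimation to register_translation, scikit-image's phase cross-correlation routine (renamed skimage.registration.phase_cross_correlation in recent releases, with the same return values). A minimal sketch of estimating and applying the per-image translation, assuming the newer import path and scipy.ndimage for the resampling:

    from scipy.ndimage import shift as nd_shift
    from skimage.registration import phase_cross_correlation

    def align_to_reference(ref_image, floating_image):
        # Estimate the (y, x) translation that registers floating_image onto ref_image.
        (yoffset, xoffset), error, diffphase = phase_cross_correlation(ref_image, floating_image)
        # Apply the estimated shift so the floating image overlays the reference.
        aligned = nd_shift(floating_image, shift=(yoffset, xoffset))
        return aligned, xoffset, yoffset

In the hunk above only the offsets are written back to the table (set_item on columns 1 and 2); the resampling itself is not part of this loop.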
diff --git a/notebooks/__code/registration/registration_auto_confirmation.py b/notebooks/__code/registration/registration_auto_confirmation.py
index 0d83070d..e884120a 100755
--- a/notebooks/__code/registration/registration_auto_confirmation.py
+++ b/notebooks/__code/registration/registration_auto_confirmation.py
@@ -35,7 +35,9 @@ def __init__(self, parent=None):
def initialize_widgets(self):
_file_path = os.path.dirname(__file__)
- warning_image_file = os.path.abspath(os.path.join(_file_path, "../static/warning_icon.png"))
+ warning_image_file = os.path.abspath(
+ os.path.join(_file_path, "../static/warning_icon.png")
+ )
warning_image = QPixmap(warning_image_file)
self.ui.warning_label.setPixmap(warning_image)
diff --git a/notebooks/__code/registration/registration_marker.py b/notebooks/__code/registration/registration_marker.py
index 7a59d279..685b3f76 100755
--- a/notebooks/__code/registration/registration_marker.py
+++ b/notebooks/__code/registration/registration_marker.py
@@ -70,7 +70,9 @@ def resizing_column(self, index_column, old_size, new_size):
_table_ui = self.parent.markers_table[_key]["ui"]
if not (_table_ui == _live_table_ui):
for _col, _size in enumerate(self.parent.markers_table_column_width):
- _table_ui.setColumnWidth(_col, self.parent.markers_table_column_width[_col])
+ _table_ui.setColumnWidth(
+ _col, self.parent.markers_table_column_width[_col]
+ )
def init_widgets(self):
if self.parent.markers_table == {}:
@@ -109,14 +111,20 @@ def populate_using_markers_table(self):
selected_row = o_main_table.get_row_selected()
for _col, _size in enumerate(self.parent.markers_table_column_width):
- _table.setColumnWidth(_col, self.parent.markers_table_column_width[_col])
+ _table.setColumnWidth(
+ _col, self.parent.markers_table_column_width[_col]
+ )
_table.horizontalHeader().sectionResized.connect(self.resizing_column)
_table.cellClicked.connect(
- lambda row=0, column=0, tab_index=_key_tab_name: self.table_row_clicked(row, column, tab_index)
+ lambda row=0, column=0, tab_index=_key_tab_name: self.table_row_clicked(
+ row, column, tab_index
+ )
)
_table.itemSelectionChanged.connect(
- lambda key_tab_name=_key_tab_name: self.cell_clicked(key_tab_name=_key_tab_name)
+ lambda key_tab_name=_key_tab_name: self.cell_clicked(
+ key_tab_name=_key_tab_name
+ )
)
_data_dict = self.parent.markers_table[_key_tab_name]["data"]
@@ -251,7 +259,9 @@ def copy_cell(self, row_selected=-1, column_selected=-1):
cell_value = str(table_ui.item(row_selected, column_selected).text())
self.parent.marker_table_buffer_cell = cell_value
- def paste_cell(self, top_row_selected=-1, bottom_row_selected=-1, column_selected=-1):
+ def paste_cell(
+ self, top_row_selected=-1, bottom_row_selected=-1, column_selected=-1
+ ):
cell_contain_to_copy = self.parent.marker_table_buffer_cell
table_ui = self.get_current_table_ui()
markers_table = self.parent.markers_table
@@ -262,7 +272,9 @@ def paste_cell(self, top_row_selected=-1, bottom_row_selected=-1, column_selecte
marker_axis = "y"
for _row in np.arange(top_row_selected, bottom_row_selected + 1):
_file = str(table_ui.item(_row, 0).text())
- markers_table[marker_name]["data"][_file][marker_axis] = cell_contain_to_copy
+ markers_table[marker_name]["data"][_file][marker_axis] = (
+ cell_contain_to_copy
+ )
table_ui.item(_row, column_selected).setText(str(cell_contain_to_copy))
self.parent.markers_table = markers_table
@@ -328,7 +340,9 @@ def table_right_click(self, position):
menu.addSeparator()
self.start_marker = menu.addAction("Set marker interpolation initial position")
- self.end_marker = menu.addAction("Set marker interpolation final position and process intermediate markers")
+ self.end_marker = menu.addAction(
+ "Set marker interpolation final position and process intermediate markers"
+ )
if self.parent.markers_initial_position["row"] is None:
self.end_marker.setEnabled(False)
@@ -338,7 +352,9 @@ def table_right_click(self, position):
action = menu.exec_(QtGui.QCursor.pos())
if action == copy_cell:
- self.copy_cell(row_selected=top_row_selected, column_selected=left_column_selected)
+ self.copy_cell(
+ row_selected=top_row_selected, column_selected=left_column_selected
+ )
elif action == paste_cell:
self.paste_cell(
@@ -359,7 +375,9 @@ def start_marker_initialized(self):
self.parent.markers_initial_position["row"] = row_selected
self.parent.markers_initial_position["tab_name"] = tab_selected
o_table = TableHandler(table_ui=self.parent.markers_table[tab_selected]["ui"])
- o_table.set_item_with_str(row=row_selected, column=3, cell_str="Interpolation starting position")
+ o_table.set_item_with_str(
+ row=row_selected, column=3, cell_str="Interpolation starting position"
+ )
def end_marker_initialized(self):
tab_selected = self.get_current_active_tab()
@@ -384,10 +402,14 @@ def end_marker_initialized(self):
coeff = 1
for _row in np.arange(from_row + 1, to_row):
xoffset_value = int(np.round(xoffset_from + coeff * delta_xoffset))
- o_table.set_item_with_str(row=_row, column=1, cell_str=str(xoffset_value))
+ o_table.set_item_with_str(
+ row=_row, column=1, cell_str=str(xoffset_value)
+ )
yoffset_value = int(np.round(yoffset_from + coeff * delta_yoffset))
- o_table.set_item_with_str(row=_row, column=2, cell_str=str(yoffset_value))
+ o_table.set_item_with_str(
+ row=_row, column=2, cell_str=str(yoffset_value)
+ )
coeff += 1
self.parent.markers_initial_position["row"] = None
@@ -443,7 +465,12 @@ def add_marker_button_clicked(self):
x = self.parent.o_MarkerDefaultSettings.x
y = self.parent.o_MarkerDefaultSettings.y
self.__populate_table_row(table, _row, _short_file, x, y)
- _data_dict[_short_file] = {"x": x, "y": y, "marker_ui": None, "label_ui": None}
+ _data_dict[_short_file] = {
+ "x": x,
+ "y": y,
+ "marker_ui": None,
+ "label_ui": None,
+ }
_marker_dict["data"] = _data_dict
@@ -452,7 +479,9 @@ def add_marker_button_clicked(self):
self.ui.tabWidget.setCurrentIndex(number_of_tabs - 1)
table.itemChanged.connect(self.table_cell_modified)
table.itemSelectionChanged.connect(
- lambda key_tab_name=new_marker_name: self.cell_clicked(key_tab_name=new_marker_name)
+ lambda key_tab_name=new_marker_name: self.cell_clicked(
+ key_tab_name=new_marker_name
+ )
)
self.parent.markers_table[new_marker_name] = _marker_dict
@@ -520,8 +549,12 @@ def run_registration_button_clicked(self):
for _marker in markers_table.keys():
_list_files = markers_table[_marker]["data"]
for _file in _list_files:
- markers_list[_file]["x"].append(markers_table[_marker]["data"][_file]["x"])
- markers_list[_file]["y"].append(markers_table[_marker]["data"][_file]["y"])
+ markers_list[_file]["x"].append(
+ markers_table[_marker]["data"][_file]["x"]
+ )
+ markers_list[_file]["y"].append(
+ markers_table[_marker]["data"][_file]["y"]
+ )
step += 1
self.parent.eventProgress.setValue(step)
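The end_marker_initialized hunk fills the table rows between the interpolation start and end markers with linearly interpolated offsets (xoffset_from + coeff * delta_xoffset, rounded to integers). A standalone sketch of the same idea, assuming the delta is simply (offset_to - offset_from) divided by the number of rows spanned, which is not shown in the hunk:

    import numpy as np

    def interpolate_offsets(from_row, to_row, offset_from, offset_to):
        # Offsets for rows strictly between from_row and to_row,
        # linearly interpolated and rounded like the table code above.
        delta = (offset_to - offset_from) / (to_row - from_row)
        return {
            row: int(np.round(offset_from + (row - from_row) * delta))
            for row in range(from_row + 1, to_row)
        }

    # interpolate_offsets(2, 6, 0, 10) -> {3: 2, 4: 5, 5: 8}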
diff --git a/notebooks/__code/registration/registration_profile.py b/notebooks/__code/registration/registration_profile.py
index 45ad5ed7..62bf1091 100755
--- a/notebooks/__code/registration/registration_profile.py
+++ b/notebooks/__code/registration/registration_profile.py
@@ -28,7 +28,9 @@
from __code._utilities.color import Color
from __code._utilities.file import make_or_reset_folder
from __code.registration import interact_me_style, normal_style
-from __code.registration.calculate_profiles_difference import CalculateProfilesDifference
+from __code.registration.calculate_profiles_difference import (
+ CalculateProfilesDifference,
+)
class RegistrationProfileLauncher:
@@ -370,7 +372,9 @@ def init_reference_image(self):
self.reference_image_short_name = self.parent.reference_image_short_name
else:
self.reference_image = self.data_dict["data"][self.reference_image_index]
- self.reference_image_short_name = os.path.basename(self.data_dict["file_name"][self.reference_image_index])
+ self.reference_image_short_name = os.path.basename(
+ self.data_dict["file_name"][self.reference_image_index]
+ )
def init_table(self):
data_dict = self.data_dict
@@ -554,7 +558,9 @@ def replot_profile_lines(self, is_horizontal=False):
def update_selected_file_profile_plots(self, is_horizontal=True):
index_selected = self._get_selected_row()
- self.update_single_profile(file_selected=index_selected, is_horizontal=is_horizontal)
+ self.update_single_profile(
+ file_selected=index_selected, is_horizontal=is_horizontal
+ )
def update_single_profile(self, file_selected=-1, is_horizontal=True):
if is_horizontal:
@@ -568,7 +574,9 @@ def update_single_profile(self, file_selected=-1, is_horizontal=True):
profile_2d_ui.clear()
# always display the reference image
- [xaxis, ref_profile] = self.get_profile(image_index=self.reference_image_index, is_horizontal=is_horizontal)
+ [xaxis, ref_profile] = self.get_profile(
+ image_index=self.reference_image_index, is_horizontal=is_horizontal
+ )
try:
profile_2d_ui.plot(xaxis, ref_profile, pen=self.roi[label]["color-peak"])
@@ -576,10 +584,16 @@ def update_single_profile(self, file_selected=-1, is_horizontal=True):
pass
if file_selected != self.reference_image_index:
- [xaxis, selected_profile] = self.get_profile(image_index=file_selected, is_horizontal=is_horizontal)
+ [xaxis, selected_profile] = self.get_profile(
+ image_index=file_selected, is_horizontal=is_horizontal
+ )
try:
- profile_2d_ui.plot(xaxis, selected_profile, pen=self.list_rgb_profile_color[file_selected])
+ profile_2d_ui.plot(
+ xaxis,
+ selected_profile,
+ pen=self.list_rgb_profile_color[file_selected],
+ )
except Exception:
pass
@@ -628,10 +642,14 @@ def calculate_all_profiles(self):
def calculate_profile(self, file_index=-1, is_horizontal=True):
if is_horizontal:
- [xaxis, profile] = self.get_profile(image_index=file_index, is_horizontal=True)
+ [xaxis, profile] = self.get_profile(
+ image_index=file_index, is_horizontal=True
+ )
label = "horizontal"
else:
- [xaxis, profile] = self.get_profile(image_index=file_index, is_horizontal=False)
+ [xaxis, profile] = self.get_profile(
+ image_index=file_index, is_horizontal=False
+ )
label = "vertical"
_profile = {}
@@ -744,7 +762,9 @@ def register_images(self):
self.eventProgress.setVisible(False)
QApplication.processEvents()
- def calculate_and_display_current_peak(self, force_recalculation=True, is_horizontal=True):
+ def calculate_and_display_current_peak(
+ self, force_recalculation=True, is_horizontal=True
+ ):
if is_horizontal:
label = "horizontal"
else:
@@ -765,8 +785,12 @@ def calculate_and_display_current_peak(self, force_recalculation=True, is_horizo
self.display_current_peak(is_horizontal=is_horizontal)
if force_recalculation:
- self.calculate_profile(file_index=index_selected, is_horizontal=is_horizontal)
- self.recalculate_current_peak(file_index=index_selected, is_horizontal=is_horizontal)
+ self.calculate_profile(
+ file_index=index_selected, is_horizontal=is_horizontal
+ )
+ self.recalculate_current_peak(
+ file_index=index_selected, is_horizontal=is_horizontal
+ )
self.display_current_peak(is_horizontal=is_horizontal)
@@ -802,8 +826,12 @@ def display_current_peak(self, is_horizontal=True):
self.verti_infinite_line_ui = infinite_line_ui
def calculate_and_display_hori_and_verti_peaks(self, force_recalculation=True):
- self.calculate_and_display_current_peak(force_recalculation=force_recalculation, is_horizontal=True)
- self.calculate_and_display_current_peak(force_recalculation=force_recalculation, is_horizontal=False)
+ self.calculate_and_display_current_peak(
+ force_recalculation=force_recalculation, is_horizontal=True
+ )
+ self.calculate_and_display_current_peak(
+ force_recalculation=force_recalculation, is_horizontal=False
+ )
def copy_register_parameters_to_main_table(self):
nbr_row = self.ui.tableWidget.rowCount()
@@ -825,7 +853,9 @@ def full_reset(self):
def vertical_roi_moved(self):
"""when the vertical roi is moved, we need to make sure the width stays within the max we defined
and we need refresh the peak calculation"""
- region = self.vertical_profile.getArraySlice(self.live_image, self.ui.image_view.imageItem)
+ region = self.vertical_profile.getArraySlice(
+ self.live_image, self.ui.image_view.imageItem
+ )
x0 = region[0][0].start
x1 = region[0][0].stop
@@ -846,7 +876,9 @@ def vertical_roi_moved(self):
def horizontal_roi_moved(self):
"""when the horizontal roi is moved, we need to make sure the height stays within the max we defined
and we need to refresh the peak calculation"""
- region = self.horizontal_profile.getArraySlice(self.live_image, self.ui.image_view.imageItem)
+ region = self.horizontal_profile.getArraySlice(
+ self.live_image, self.ui.image_view.imageItem
+ )
x0 = region[0][0].start
x1 = region[0][0].stop
@@ -891,7 +923,9 @@ def calculate_profiles_differences(self):
def help_button_clicked(self):
import webbrowser
- webbrowser.open("https://neutronimaging.ornl.gov/tutorials/imaging-notebooks/registration/")
+ webbrowser.open(
+ "https://neutronimaging.ornl.gov/tutorials/imaging-notebooks/registration/"
+ )
def slider_file_changed(self, value):
self._select_table_row(value)
@@ -939,10 +973,17 @@ def export_button_clicked(self):
"""save registered images back to the main UI"""
# self.registered_all_images_button_clicked()
_export_folder = QFileDialog.getExistingDirectory(
- self, directory=self.working_dir, caption="Select Output Folder", options=QFileDialog.ShowDirsOnly
+ self,
+ directory=self.working_dir,
+ caption="Select Output Folder",
+ options=QFileDialog.ShowDirsOnly,
)
if _export_folder:
- o_export = ExportRegistration(parent=self, input_working_dir=self.working_dir, export_folder=_export_folder)
+ o_export = ExportRegistration(
+ parent=self,
+ input_working_dir=self.working_dir,
+ export_folder=_export_folder,
+ )
o_export.run()
QApplication.processEvents()
@@ -1058,7 +1099,9 @@ def run(self):
self.parent.eventProgress.setValue(0)
self.parent.eventProgress.setVisible(True)
- export_folder = os.path.join(self.export_folder, self.input_dir_name + "_registered")
+ export_folder = os.path.join(
+ self.export_folder, self.input_dir_name + "_registered"
+ )
make_or_reset_folder(export_folder)
for _row, _data in enumerate(data_dict["data"]):
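Both ROI handlers above read the region bounds through pyqtgraph's ROI.getArraySlice, which returns a tuple of slices plus a transform; the profile code keeps only the slice starts and stops. A minimal sketch of that lookup with an optional width clamp, assuming the same axis ordering as the code above and a hypothetical max_width rule (the exact clamping used upstream is not shown in these hunks):

    def get_clamped_roi_bounds(roi, image, image_item, max_width=None):
        # getArraySlice returns (slices, transform); slices[0]/slices[1] map to the
        # x/y bounds exactly as in vertical_roi_moved / horizontal_roi_moved above.
        slices, _transform = roi.getArraySlice(image, image_item)
        x0, x1 = slices[0].start, slices[0].stop
        y0, y1 = slices[1].start, slices[1].stop
        if max_width is not None and (x1 - x0) > max_width:
            x1 = x0 + max_width  # keep the region within the allowed width
        return x0, x1, y0, y1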
diff --git a/notebooks/__code/rename_files.py b/notebooks/__code/rename_files.py
index 4bb93a70..4016b4a0 100755
--- a/notebooks/__code/rename_files.py
+++ b/notebooks/__code/rename_files.py
@@ -55,8 +55,12 @@ def __init__(self, o_format=None):
self.working_dir = o_format.working_dir
if self.list_files:
- _random_input_list = utilities.get_n_random_element(input_list=self.list_files, n=10)
- self.random_input_list = [os.path.basename(_file) for _file in _random_input_list]
+ _random_input_list = utilities.get_n_random_element(
+ input_list=self.list_files, n=10
+ )
+ self.random_input_list = [
+ os.path.basename(_file) for _file in _random_input_list
+ ]
self.basename = os.path.basename(self.list_files[0])
@@ -127,16 +131,22 @@ def show(self):
# current schema name
self.box2 = widgets.HBox(
[
- widgets.Label("Pre. Index Separator", layout=widgets.Layout(width="15%")),
+ widgets.Label(
+ "Pre. Index Separator", layout=widgets.Layout(width="15%")
+ ),
widgets.Text(value="_", layout=widgets.Layout(width="5%")),
]
)
self.box2b = widgets.HBox(
[
- widgets.Label("Untouched filename part:", layout=widgets.Layout(width="20%")),
+ widgets.Label(
+ "Untouched filename part:", layout=widgets.Layout(width="20%")
+ ),
widgets.Label("", layout=widgets.Layout(width="40%")),
- widgets.IntRangeSlider(value=[0, 2], min=0, max=len(self.basename), step=1),
+ widgets.IntRangeSlider(
+ value=[0, 2], min=0, max=len(self.basename), step=1
+ ),
]
)
self.int_range_slider = self.box2b.children[2]
@@ -145,11 +155,17 @@ def show(self):
self.box4 = widgets.HBox(
[
- widgets.Label("Current Name Schema: ", layout=widgets.Layout(width="20%")),
- widgets.Label(self.current_naming_schema(), layout=widgets.Layout(width="30%")),
+ widgets.Label(
+ "Current Name Schema: ", layout=widgets.Layout(width="20%")
+ ),
+ widgets.Label(
+ self.current_naming_schema(), layout=widgets.Layout(width="30%")
+ ),
widgets.Label("Random Input:", layout=widgets.Layout(width="15%")),
widgets.Dropdown(
- options=self.random_input_list, value=self.random_input_list[0], layout=widgets.Layout(width="50%")
+ options=self.random_input_list,
+ value=self.random_input_list[0],
+ layout=widgets.Layout(width="50%"),
),
]
)
@@ -157,36 +173,54 @@ def show(self):
self.box2.children[1].on_trait_change(self.pre_index_text_changed, "value")
before = widgets.VBox([self.box2, self.box2b, self.box4])
self.random_input_checkbox = self.box4.children[3]
- self.random_input_checkbox.observe(self.random_input_checkbox_value_changed, "value")
+ self.random_input_checkbox.observe(
+ self.random_input_checkbox_value_changed, "value"
+ )
# new naming schema
box_text_width = "10%"
self.box1 = widgets.HBox(
[
- widgets.Label("New prefix File Name", layout=widgets.Layout(width="10%")),
+ widgets.Label(
+ "New prefix File Name", layout=widgets.Layout(width="10%")
+ ),
widgets.Checkbox(
- value=True, description="Use previous prefix name", layout=widgets.Layout(width="30%")
+ value=True,
+ description="Use previous prefix name",
+ layout=widgets.Layout(width="30%"),
),
]
)
self.use_previous_prefix_widget = self.box1.children[1]
- self.box1.children[1].observe(self.changed_use_previous_prefix_name, names="value")
+ self.box1.children[1].observe(
+ self.changed_use_previous_prefix_name, names="value"
+ )
self.box1b = widgets.HBox(
[
widgets.Label("", layout=widgets.Layout(width="10%")),
- widgets.Checkbox(value=False, description="Use new prefix", layout=widgets.Layout(width="20%")),
- widgets.Text(value="image", disabled=True, layout=widgets.Layout(width="25%")),
+ widgets.Checkbox(
+ value=False,
+ description="Use new prefix",
+ layout=widgets.Layout(width="20%"),
+ ),
+ widgets.Text(
+ value="image", disabled=True, layout=widgets.Layout(width="25%")
+ ),
]
)
self.box1b.children[2].observe(self.changed_use_new_prefix_name, names="value")
self.new_prefix_text_widget = self.box1b.children[2]
self.user_new_prefix_widget = self.box1b.children[1]
- self.user_new_prefix_widget.observe(self.changed_use_new_prefix_name, names="value")
+ self.user_new_prefix_widget.observe(
+ self.changed_use_new_prefix_name, names="value"
+ )
self.box5 = widgets.HBox(
[
- widgets.Label("New Index Separator", layout=widgets.Layout(width="15%")),
+ widgets.Label(
+ "New Index Separator", layout=widgets.Layout(width="15%")
+ ),
widgets.Text(value="_", layout=widgets.Layout(width=box_text_width)),
]
)
@@ -208,7 +242,9 @@ def show(self):
self.box6 = widgets.HBox(
[
widgets.Label("New Name Schema: ", layout=widgets.Layout(width="20%")),
- widgets.Label(self.new_naming_schema(), layout=widgets.Layout(width="40%")),
+ widgets.Label(
+ self.new_naming_schema(), layout=widgets.Layout(width="40%")
+ ),
]
)
@@ -217,13 +253,17 @@ def show(self):
self.box7.children[1].on_trait_change(self.post_text_changed, "value")
self.box8.children[1].on_trait_change(self.post_text_changed, "value")
- after = widgets.VBox([self.box1, self.box1b, self.box5, self.box7, self.box8, self.box6])
+ after = widgets.VBox(
+ [self.box1, self.box1b, self.box5, self.box7, self.box8, self.box6]
+ )
accordion = widgets.Accordion(children=[before, after])
accordion.set_title(0, "Current Schema Name")
accordion.set_title(1, "New Naming Schema")
- output_ui_1 = widgets.HBox([widgets.Label("Example of naming: ", layout=widgets.Layout(width="20%"))])
+ output_ui_1 = widgets.HBox(
+ [widgets.Label("Example of naming: ", layout=widgets.Layout(width="20%"))]
+ )
self.output_ui_2 = widgets.HBox(
[
@@ -240,7 +280,9 @@ def show(self):
)
self.output_ui_3.children[1].add_class("result_label")
- vbox = widgets.VBox([accordion, output_ui_1, self.output_ui_2, self.output_ui_3])
+ vbox = widgets.VBox(
+ [accordion, output_ui_1, self.output_ui_2, self.output_ui_3]
+ )
display(vbox)
self.demo_output_file_name()
@@ -248,7 +290,9 @@ def show(self):
self.changed_use_new_prefix_name()
def demo_output_file_name(self):
- input_file = self.get_basename_of_current_dropdown_selected_file(is_with_ext=True)
+ input_file = self.get_basename_of_current_dropdown_selected_file(
+ is_with_ext=True
+ )
self.output_ui_2.children[1].value = input_file
old_index_separator = self.get_old_index_separator()
@@ -340,7 +384,12 @@ def generate_new_file_name(
try:
_index = int(_name_separated[-1]) + offset
- new_name = prefix + new_index_separator + "{:0{}}".format(_index, new_number_of_digits) + self.ext
+ new_name = (
+ prefix
+ + new_index_separator
+ + "{:0{}}".format(_index, new_number_of_digits)
+ + self.ext
+ )
except ValueError:
_index = _name_separated[-1]
new_name = prefix + new_index_separator + _index + self.ext
@@ -361,7 +410,9 @@ def get_dict_old_new_filenames(self):
new_number_of_digits = self.get_new_number_of_digits()
offset = self.box8.children[1].value
- list_of_input_basename_files = [os.path.basename(_file) for _file in list_of_input_files]
+ list_of_input_basename_files = [
+ os.path.basename(_file) for _file in list_of_input_files
+ ]
new_list = {}
for _file_index, _file in enumerate(list_of_input_basename_files):
@@ -392,7 +443,11 @@ def select_export_folder(self):
type="directory",
)
else:
- display(HTML('You need to fix the namig convention first!'))
+ display(
+ HTML(
+ 'You need to fix the namig convention first!'
+ )
+ )
def export(self, selected):
self.output_folder_ui.shortcut_buttons.close()
@@ -401,7 +456,9 @@ def export(self, selected):
new_output_folder = os.path.abspath(selected)
utilities.copy_files(
- dict_old_new_names=dict_old_new_names, new_output_folder=new_output_folder, overwrite=False
+ dict_old_new_names=dict_old_new_names,
+ new_output_folder=new_output_folder,
+ overwrite=False,
)
self.new_list_files = dict_old_new_names
@@ -420,7 +477,9 @@ def display_renaming_result(self, selected):
[
widgets.Label("Renmaing results: ", layout=widgets.Layout(width="20%")),
widgets.Dropdown(
- options=self.renaming_result, value=self.renaming_result[0], layout=widgets.Layout(width="80%")
+ options=self.renaming_result,
+ value=self.renaming_result[0],
+ layout=widgets.Layout(width="80%"),
),
]
)
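The generate_new_file_name hunk builds the renamed file with a nested format specification, "{:0{}}".format(index, digits), which zero-pads the running index to a chosen width. A tiny standalone example of that pattern (the helper name here is illustrative):

    def build_new_name(prefix, separator, index, number_of_digits, ext):
        # "{:0{}}" zero-pads `index` to `number_of_digits` characters, e.g. 7 -> "0007".
        return prefix + separator + "{:0{}}".format(index, number_of_digits) + ext

    assert build_new_name("image", "_", 7, 4, ".tif") == "image_0007.tif"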
diff --git a/notebooks/__code/rename_files/rename_files.py b/notebooks/__code/rename_files/rename_files.py
index 6dd23a9a..2654779e 100755
--- a/notebooks/__code/rename_files/rename_files.py
+++ b/notebooks/__code/rename_files/rename_files.py
@@ -15,7 +15,9 @@ def __init__(self, working_dir=""):
def select_input_files(self):
self.input_files_ui = fileselector.FileSelectorPanel(
- instruction="Select List of Files", start_dir=self.working_dir, multiple=True
+ instruction="Select List of Files",
+ start_dir=self.working_dir,
+ multiple=True,
)
self.input_files_ui.show()
@@ -80,8 +82,12 @@ def __init__(self, o_format=None):
raise ValueError("FormatFileNameIndex object is missing!")
if self.list_files:
- _random_input_list = utilities.get_n_random_element(input_list=self.list_files, n=10)
- self.random_input_list = [os.path.basename(_file) for _file in _random_input_list]
+ _random_input_list = utilities.get_n_random_element(
+ input_list=self.list_files, n=10
+ )
+ self.random_input_list = [
+ os.path.basename(_file) for _file in _random_input_list
+ ]
self.basename = os.path.basename(self.list_files[0])
@@ -172,16 +178,22 @@ def show(self):
# current schema name
self.box2 = widgets.HBox(
[
- widgets.Label("Pre. Index Separator", layout=widgets.Layout(width="15%")),
+ widgets.Label(
+ "Pre. Index Separator", layout=widgets.Layout(width="15%")
+ ),
widgets.Text(value="_", layout=widgets.Layout(width="5%")),
]
)
self.box2b = widgets.HBox(
[
- widgets.Label("Untouched filename part:", layout=widgets.Layout(width="20%")),
+ widgets.Label(
+ "Untouched filename part:", layout=widgets.Layout(width="20%")
+ ),
widgets.Label("", layout=widgets.Layout(width="40%")),
- widgets.IntRangeSlider(value=[0, 2], min=0, max=len(self.basename), step=1),
+ widgets.IntRangeSlider(
+ value=[0, 2], min=0, max=len(self.basename), step=1
+ ),
]
)
self.int_range_slider = self.box2b.children[2]
@@ -190,11 +202,17 @@ def show(self):
self.box4 = widgets.HBox(
[
- widgets.Label("Current Name Schema: ", layout=widgets.Layout(width="20%")),
- widgets.Label(self.current_naming_schema(), layout=widgets.Layout(width="30%")),
+ widgets.Label(
+ "Current Name Schema: ", layout=widgets.Layout(width="20%")
+ ),
+ widgets.Label(
+ self.current_naming_schema(), layout=widgets.Layout(width="30%")
+ ),
widgets.Label("Random Input:", layout=widgets.Layout(width="15%")),
widgets.Dropdown(
- options=self.random_input_list, value=self.random_input_list[0], layout=widgets.Layout(width="50%")
+ options=self.random_input_list,
+ value=self.random_input_list[0],
+ layout=widgets.Layout(width="50%"),
),
]
)
@@ -202,57 +220,93 @@ def show(self):
self.box2.children[1].on_trait_change(self.pre_index_text_changed, "value")
before = widgets.VBox([self.box2, self.box2b, self.box4])
self.random_input_checkbox = self.box4.children[3]
- self.random_input_checkbox.observe(self.random_input_checkbox_value_changed, "value")
+ self.random_input_checkbox.observe(
+ self.random_input_checkbox_value_changed, "value"
+ )
# new naming schema
box_text_width = "10%"
self.box1 = widgets.HBox(
[
- widgets.Label("New prefix File Name", layout=widgets.Layout(width="20%")),
+ widgets.Label(
+ "New prefix File Name", layout=widgets.Layout(width="20%")
+ ),
widgets.Checkbox(
- value=True, description="Use previous prefix name", layout=widgets.Layout(width="30%")
+ value=True,
+ description="Use previous prefix name",
+ layout=widgets.Layout(width="30%"),
),
]
)
self.use_previous_prefix_widget = self.box1.children[1]
- self.box1.children[1].observe(self.changed_use_previous_prefix_name, names="value")
+ self.box1.children[1].observe(
+ self.changed_use_previous_prefix_name, names="value"
+ )
self.box1b = widgets.HBox(
[
widgets.Label("", layout=widgets.Layout(width="20%")),
- widgets.Checkbox(value=False, description="Use new prefix", layout=widgets.Layout(width="20%")),
- widgets.Text(value="image", disabled=True, layout=widgets.Layout(width="25%")),
+ widgets.Checkbox(
+ value=False,
+ description="Use new prefix",
+ layout=widgets.Layout(width="20%"),
+ ),
+ widgets.Text(
+ value="image", disabled=True, layout=widgets.Layout(width="25%")
+ ),
]
)
self.box1b.children[2].observe(self.changed_use_new_prefix_name, names="value")
self.new_prefix_text_widget = self.box1b.children[2]
self.user_new_prefix_widget = self.box1b.children[1]
- self.user_new_prefix_widget.observe(self.changed_use_new_prefix_name, names="value")
+ self.user_new_prefix_widget.observe(
+ self.changed_use_new_prefix_name, names="value"
+ )
self.suffix_box1 = widgets.HBox(
[
- widgets.Label("New suffix File Name", layout=widgets.Layout(width="20%")),
- widgets.Checkbox(value=True, description="Use digit suffix", layout=widgets.Layout(width="30%")),
+ widgets.Label(
+ "New suffix File Name", layout=widgets.Layout(width="20%")
+ ),
+ widgets.Checkbox(
+ value=True,
+ description="Use digit suffix",
+ layout=widgets.Layout(width="30%"),
+ ),
]
)
self.use_digit_suffix_widget = self.suffix_box1.children[1]
- self.suffix_box1.children[1].observe(self.changed_use_digit_suffix_name, names="value")
+ self.suffix_box1.children[1].observe(
+ self.changed_use_digit_suffix_name, names="value"
+ )
self.suffix_box2 = widgets.HBox(
[
widgets.Label("", layout=widgets.Layout(width="20%")),
- widgets.Checkbox(value=False, description="Use new suffix", layout=widgets.Layout(width="20%")),
- widgets.Text(value="", disabled=True, layout=widgets.Layout(width="25%")),
+ widgets.Checkbox(
+ value=False,
+ description="Use new suffix",
+ layout=widgets.Layout(width="20%"),
+ ),
+ widgets.Text(
+ value="", disabled=True, layout=widgets.Layout(width="25%")
+ ),
]
)
- self.suffix_box2.children[2].observe(self.changed_use_new_suffix_name, names="value")
+ self.suffix_box2.children[2].observe(
+ self.changed_use_new_suffix_name, names="value"
+ )
self.user_new_suffix_widget = self.suffix_box2.children[1]
self.new_suffix_text_widget = self.suffix_box2.children[2]
- self.user_new_suffix_widget.observe(self.changed_use_new_suffix_name, names="value")
+ self.user_new_suffix_widget.observe(
+ self.changed_use_new_suffix_name, names="value"
+ )
self.box5 = widgets.HBox(
[
- widgets.Label("New Index Separator", layout=widgets.Layout(width="15%")),
+ widgets.Label(
+ "New Index Separator", layout=widgets.Layout(width="15%")
+ ),
widgets.Text(value="_", layout=widgets.Layout(width=box_text_width)),
]
)
@@ -275,14 +329,18 @@ def show(self):
self.box9 = widgets.HBox(
[
widgets.Label("Extension", layout=widgets.Layout(width="15%")),
- widgets.Text(value=current_ext, layout=widgets.Layout(width=box_text_width)),
+ widgets.Text(
+ value=current_ext, layout=widgets.Layout(width=box_text_width)
+ ),
]
)
self.box6 = widgets.HBox(
[
widgets.Label("New Name Schema: ", layout=widgets.Layout(width="20%")),
- widgets.Label(self.new_naming_schema(), layout=widgets.Layout(width="40%")),
+ widgets.Label(
+ self.new_naming_schema(), layout=widgets.Layout(width="40%")
+ ),
]
)
@@ -310,7 +368,9 @@ def show(self):
accordion.set_title(0, "Current Schema Name")
accordion.set_title(1, "New Naming Schema")
- output_ui_1 = widgets.HBox([widgets.Label("Example of naming: ", layout=widgets.Layout(width="20%"))])
+ output_ui_1 = widgets.HBox(
+ [widgets.Label("Example of naming: ", layout=widgets.Layout(width="20%"))]
+ )
self.output_ui_2 = widgets.HBox(
[
@@ -327,7 +387,9 @@ def show(self):
)
self.output_ui_3.children[1].add_class("result_label")
- vbox = widgets.VBox([accordion, output_ui_1, self.output_ui_2, self.output_ui_3])
+ vbox = widgets.VBox(
+ [accordion, output_ui_1, self.output_ui_2, self.output_ui_3]
+ )
display(vbox)
self.demo_output_file_name()
@@ -335,7 +397,9 @@ def show(self):
self.changed_use_new_prefix_name()
def demo_output_file_name(self):
- input_file = self.get_basename_of_current_dropdown_selected_file(is_with_ext=False)
+ input_file = self.get_basename_of_current_dropdown_selected_file(
+ is_with_ext=False
+ )
self.output_ui_2.children[1].value = input_file
new_name = self.box6.children[1].value
@@ -452,7 +516,12 @@ def generate_new_file_name(
if suffix_flag:
new_name = prefix + new_index_separator + suffix + ext
else:
- new_name = prefix + new_index_separator + "{:0{}}".format(file_index, new_number_of_digits) + ext
+ new_name = (
+ prefix
+ + new_index_separator
+ + "{:0{}}".format(file_index, new_number_of_digits)
+ + ext
+ )
# except ValueError:
# # print(f"Could not parse index from {_name_separated[-1]}")
@@ -484,7 +553,9 @@ def get_dict_old_new_filenames(self):
else:
suffix = ""
- list_of_input_basename_files = [os.path.basename(_file) for _file in list_of_input_files]
+ list_of_input_basename_files = [
+ os.path.basename(_file) for _file in list_of_input_files
+ ]
new_list = {}
for _file_index, _file in enumerate(list_of_input_basename_files):
@@ -511,8 +582,14 @@ def get_dict_old_new_filenames(self):
def check_new_names(self):
dict_old_new_names = self.get_dict_old_new_filenames()
- old_names_new_names = [f"{os.path.basename(_key)} -> {_value}" for _key, _value in dict_old_new_names.items()]
- select_widget = widgets.Select(options=old_names_new_names, layout=widgets.Layout(width="100%", height="400px"))
+ old_names_new_names = [
+ f"{os.path.basename(_key)} -> {_value}"
+ for _key, _value in dict_old_new_names.items()
+ ]
+ select_widget = widgets.Select(
+ options=old_names_new_names,
+ layout=widgets.Layout(width="100%", height="400px"),
+ )
display(select_widget)
def select_export_folder(self):
@@ -527,7 +604,11 @@ def select_export_folder(self):
type="directory",
)
else:
- display(HTML('You need to fix the namig convention first!'))
+ display(
+ HTML(
+ 'You need to fix the namig convention first!'
+ )
+ )
def export(self, selected):
input_folder = os.path.abspath(self.input_folder)
@@ -535,10 +616,14 @@ def export(self, selected):
self.output_folder_ui.shortcut_buttons.close()
dict_old_new_names = self.get_dict_old_new_filenames()
- new_output_folder = os.path.join(os.path.abspath(selected), input_folder_renamed)
+ new_output_folder = os.path.join(
+ os.path.abspath(selected), input_folder_renamed
+ )
utilities.copy_files(
- dict_old_new_names=dict_old_new_names, new_output_folder=new_output_folder, overwrite=False
+ dict_old_new_names=dict_old_new_names,
+ new_output_folder=new_output_folder,
+ overwrite=False,
)
self.new_list_files = dict_old_new_names
diff --git a/notebooks/__code/resonance_fitting/__init__.py b/notebooks/__code/resonance_fitting/__init__.py
index 6accc17d..a60c77f1 100644
--- a/notebooks/__code/resonance_fitting/__init__.py
+++ b/notebooks/__code/resonance_fitting/__init__.py
@@ -44,11 +44,12 @@ class DetectorType:
},
}
-VENUS_RES_FUNC=Path("/SNS/VENUS/shared/instrument/resonance/_fts_bl10_0p5meV_1keV_25pts.txt")
-SAMMY_EXE_PATH=Path("/SNS/software/sammy/bin/sammy")
+VENUS_RES_FUNC = Path(
+ "/SNS/VENUS/shared/instrument/resonance/_fts_bl10_0p5meV_1keV_25pts.txt"
+)
+SAMMY_EXE_PATH = Path("/SNS/software/sammy/bin/sammy")
-class Parent:
+class Parent:
def __init__(self, parent=None):
self.parent = parent
-
\ No newline at end of file
diff --git a/notebooks/__code/resonance_fitting/get.py b/notebooks/__code/resonance_fitting/get.py
index 79e325bb..bc301e6f 100644
--- a/notebooks/__code/resonance_fitting/get.py
+++ b/notebooks/__code/resonance_fitting/get.py
@@ -2,19 +2,18 @@
class Get(Parent):
-
def full_name_of_element_from_abreviation(self, abbreviation: str) -> str:
"""Get the full name of an element from its abbreviation.
Args:
- abbreviation (str): The abbreviation of the element (e.g., 'H' for Hydrogen).
-
+ abbreviation (str): The abbreviation of the element (e.g., 'H' for Hydrogen).
+
Returns:
str: The full name of the element.
"""
dict_elements = self.parent.dict_elements
for _element_name in dict_elements.keys():
- _abbreviation = dict_elements[_element_name]['symbol']
+ _abbreviation = dict_elements[_element_name]["symbol"]
if _abbreviation == abbreviation:
return _element_name
raise ValueError(f"Element with abbreviation '{abbreviation}' not found.")
diff --git a/notebooks/__code/resonance_fitting/normalization_for_timepix.py b/notebooks/__code/resonance_fitting/normalization_for_timepix.py
index 245f8c22..c4d9b694 100644
--- a/notebooks/__code/resonance_fitting/normalization_for_timepix.py
+++ b/notebooks/__code/resonance_fitting/normalization_for_timepix.py
@@ -73,7 +73,9 @@ def _worker(fl):
return (imread(fl).astype(LOAD_DTYPE)).swapaxes(0, 1)
-def load_data_using_multithreading(list_tif: list = None, combine_tof: bool = False) -> np.ndarray:
+def load_data_using_multithreading(
+ list_tif: list = None, combine_tof: bool = False
+) -> np.ndarray:
"""load data using multithreading"""
with mp.Pool(processes=40) as pool:
data = pool.map(_worker, list_tif)
@@ -92,7 +94,9 @@ def retrieve_list_of_tif(folder: str) -> list:
def create_x_axis_file(
- lambda_array: np.ndarray = None, energy_array: np.ndarray = None, output_folder: str = "./"
+ lambda_array: np.ndarray = None,
+ energy_array: np.ndarray = None,
+ output_folder: str = "./",
) -> str:
"""create x axis file with lambda, energy and tof arrays"""
x_axis_data = {
@@ -160,10 +164,16 @@ def normalization_with_list_of_runs(
export_corrected_stack_of_sample_data = export_mode.get("sample_stack", False)
export_corrected_stack_of_ob_data = export_mode.get("ob_stack", False)
- export_corrected_stack_of_normalized_data = export_mode.get("normalized_stack", False)
- export_corrected_integrated_sample_data = export_mode.get("sample_integrated", False)
+ export_corrected_stack_of_normalized_data = export_mode.get(
+ "normalized_stack", False
+ )
+ export_corrected_integrated_sample_data = export_mode.get(
+ "sample_integrated", False
+ )
export_corrected_integrated_ob_data = export_mode.get("ob_integrated", False)
- export_corrected_integrated_normalized_data = export_mode.get("normalized_integrated", False)
+ export_corrected_integrated_normalized_data = export_mode.get(
+ "normalized_integrated", False
+ )
export_x_axis = export_mode.get("x_axis", True)
logging.info(f"{export_corrected_stack_of_sample_data = }")
@@ -181,13 +191,18 @@ def normalization_with_list_of_runs(
nexus_root_path=nexus_path,
)
ob_master_dict, ob_status_metadata = create_master_dict(
- list_run_numbers=ob_run_numbers, data_type=DataType.ob, instrument=instrument, nexus_root_path=nexus_path
+ list_run_numbers=ob_run_numbers,
+ data_type=DataType.ob,
+ instrument=instrument,
+ nexus_root_path=nexus_path,
)
# only for SNAP
if instrument == "SNAP":
for _run in sample_master_dict.keys():
- sample_master_dict[_run][MasterDictKeys.detector_delay_us] = detector_delay_us
+ sample_master_dict[_run][MasterDictKeys.detector_delay_us] = (
+ detector_delay_us
+ )
for _run in ob_master_dict.keys():
ob_master_dict[_run][MasterDictKeys.detector_delay_us] = detector_delay_us
@@ -197,25 +212,32 @@ def normalization_with_list_of_runs(
logging.info(f"loading ob# {_ob_run_number} ... ")
if verbose:
display(HTML(f"Loading ob# {_ob_run_number} ..."))
- ob_master_dict[_ob_run_number][MasterDictKeys.data] = load_data_using_multithreading(
- ob_master_dict[_ob_run_number][MasterDictKeys.list_tif], combine_tof=False
+ ob_master_dict[_ob_run_number][MasterDictKeys.data] = (
+ load_data_using_multithreading(
+ ob_master_dict[_ob_run_number][MasterDictKeys.list_tif],
+ combine_tof=False,
+ )
)
logging.info(f"ob# {_ob_run_number} loaded!")
logging.info(f"{ob_master_dict[_ob_run_number][MasterDictKeys.data].shape = }")
if verbose:
display(HTML(f"ob# {_ob_run_number} loaded!"))
- display(HTML(f"{ob_master_dict[_ob_run_number][MasterDictKeys.data].shape = }"))
+ display(
+ HTML(f"{ob_master_dict[_ob_run_number][MasterDictKeys.data].shape = }")
+ )
if proton_charge_flag:
normalized_by_proton_charge = (
- sample_status_metadata.all_proton_charge_found and ob_status_metadata.all_proton_charge_found
+ sample_status_metadata.all_proton_charge_found
+ and ob_status_metadata.all_proton_charge_found
)
else:
normalized_by_proton_charge = False
if shutter_counts_flag:
normalized_by_shutter_counts = (
- sample_status_metadata.all_shutter_counts_found and ob_status_metadata.all_shutter_counts_found
+ sample_status_metadata.all_shutter_counts_found
+ and ob_status_metadata.all_shutter_counts_found
)
else:
normalized_by_shutter_counts = False
@@ -248,7 +270,9 @@ def normalization_with_list_of_runs(
export_corrected_stack_of_ob_data,
export_corrected_integrated_ob_data,
ob_data_combined,
- spectra_file_name=ob_master_dict[_ob_run_number][MasterDictKeys.spectra_file_name],
+ spectra_file_name=ob_master_dict[_ob_run_number][
+ MasterDictKeys.spectra_file_name
+ ],
)
# load sample images
@@ -256,22 +280,33 @@ def normalization_with_list_of_runs(
logging.info(f"loading sample# {_sample_run_number} ... ")
if verbose:
display(HTML(f"Loading sample# {_sample_run_number} ..."))
- sample_master_dict[_sample_run_number][MasterDictKeys.data] = load_data_using_multithreading(
- sample_master_dict[_sample_run_number][MasterDictKeys.list_tif], combine_tof=False
+ sample_master_dict[_sample_run_number][MasterDictKeys.data] = (
+ load_data_using_multithreading(
+ sample_master_dict[_sample_run_number][MasterDictKeys.list_tif],
+ combine_tof=False,
+ )
)
logging.info(f"sample# {_sample_run_number} loaded!")
- logging.info(f"{sample_master_dict[_sample_run_number][MasterDictKeys.data].shape = }")
+ logging.info(
+ f"{sample_master_dict[_sample_run_number][MasterDictKeys.data].shape = }"
+ )
if verbose:
display(HTML(f"sample# {_sample_run_number} loaded!"))
- display(HTML(f"{sample_master_dict[_sample_run_number][MasterDictKeys.data].shape = }"))
+ display(
+ HTML(
+ f"{sample_master_dict[_sample_run_number][MasterDictKeys.data].shape = }"
+ )
+ )
if correct_chips_alignment_flag:
logging.info("Correcting chips alignment ...")
if verbose:
display(HTML("Correcting chips alignment ..."))
for _sample_run_number in sample_master_dict.keys():
- sample_master_dict[_sample_run_number][MasterDictKeys.data] = correct_chips_alignment(
- sample_master_dict[_sample_run_number][MasterDictKeys.data]
+ sample_master_dict[_sample_run_number][MasterDictKeys.data] = (
+ correct_chips_alignment(
+ sample_master_dict[_sample_run_number][MasterDictKeys.data]
+ )
)
logging.info("Chips alignment corrected!")
if verbose:
@@ -303,17 +338,25 @@ def normalization_with_list_of_runs(
logging.info("**********************************")
if normalized_by_proton_charge:
- proton_charge = sample_master_dict[_sample_run_number][MasterDictKeys.proton_charge]
+ proton_charge = sample_master_dict[_sample_run_number][
+ MasterDictKeys.proton_charge
+ ]
_sample_data = _sample_data / proton_charge
if normalized_by_shutter_counts:
list_shutter_values_for_each_image = produce_list_shutter_for_each_image(
- list_time_spectra=ob_master_dict[_ob_run_number][MasterDictKeys.list_spectra],
- list_shutter_counts=sample_master_dict[_sample_run_number][MasterDictKeys.shutter_counts],
+ list_time_spectra=ob_master_dict[_ob_run_number][
+ MasterDictKeys.list_spectra
+ ],
+ list_shutter_counts=sample_master_dict[_sample_run_number][
+ MasterDictKeys.shutter_counts
+ ],
)
sample_data = []
- for _sample, _shutter_value in zip(_sample_data, list_shutter_values_for_each_image, strict=False):
+ for _sample, _shutter_value in zip(
+ _sample_data, list_shutter_values_for_each_image, strict=False
+ ):
sample_data.append(_sample / _shutter_value)
_sample_data = np.array(sample_data)
@@ -323,14 +366,19 @@ def normalization_with_list_of_runs(
logging.info(f"{ob_data_combined.dtype = }")
# export sample data after correction if requested
- if export_corrected_stack_of_sample_data or export_corrected_integrated_sample_data:
+ if (
+ export_corrected_stack_of_sample_data
+ or export_corrected_integrated_sample_data
+ ):
export_sample_images(
output_folder,
export_corrected_stack_of_sample_data,
export_corrected_integrated_sample_data,
_sample_run_number,
_sample_data,
- spectra_file_name=sample_master_dict[_sample_run_number][MasterDictKeys.spectra_file_name],
+ spectra_file_name=sample_master_dict[_sample_run_number][
+ MasterDictKeys.spectra_file_name
+ ],
)
# _sample_data = np.divide(_sample_data, ob_data_combined, out=np.zeros_like(_sample_data), where=ob_data_combined!=0)
@@ -350,8 +398,12 @@ def normalization_with_list_of_runs(
logging.info(f"{normalized_data[_sample_run_number].shape = }")
logging.info(f"{normalized_data[_sample_run_number].dtype = }")
- detector_delay_us = sample_master_dict[_sample_run_number][MasterDictKeys.detector_delay_us]
- time_spectra = sample_master_dict[_sample_run_number][MasterDictKeys.list_spectra]
+ detector_delay_us = sample_master_dict[_sample_run_number][
+ MasterDictKeys.detector_delay_us
+ ]
+ time_spectra = sample_master_dict[_sample_run_number][
+ MasterDictKeys.list_spectra
+ ]
lambda_array = convert_array_from_time_to_lambda(
time_array=time_spectra,
@@ -374,11 +426,15 @@ def normalization_with_list_of_runs(
if preview:
# display preview of normalized data
- fig, axs1 = plt.subplots(1, 2, figsize=(2 * PLOT_SIZE.width, PLOT_SIZE.height))
+ fig, axs1 = plt.subplots(
+ 1, 2, figsize=(2 * PLOT_SIZE.width, PLOT_SIZE.height)
+ )
sample_data_integrated = np.nanmean(_sample_data, axis=0)
im0 = axs1[0].imshow(sample_data_integrated, cmap="gray")
plt.colorbar(im0, ax=axs1[0])
- axs1[0].set_title(f"Sample data: {_sample_run_number} | detector delay: {detector_delay_us:.2f} us")
+ axs1[0].set_title(
+ f"Sample data: {_sample_run_number} | detector delay: {detector_delay_us:.2f} us"
+ )
sample_integrated1 = np.nansum(_sample_data, axis=1)
sample_integrated = np.nansum(sample_integrated1, axis=1)
@@ -387,7 +443,9 @@ def normalization_with_list_of_runs(
axs1[1].set_ylabel("mean of full image")
plt.tight_layout
- fig, axs2 = plt.subplots(1, 2, figsize=(2 * PLOT_SIZE.width, PLOT_SIZE.height))
+ fig, axs2 = plt.subplots(
+ 1, 2, figsize=(2 * PLOT_SIZE.width, PLOT_SIZE.height)
+ )
ob_data_integrated = np.nanmean(ob_data_combined, axis=0)
im1 = axs2[0].imshow(ob_data_integrated, cmap="gray")
plt.colorbar(im1, ax=axs2[0])
@@ -400,8 +458,12 @@ def normalization_with_list_of_runs(
axs2[1].set_ylabel("mean of full image")
plt.tight_layout()
- fig, axs3 = plt.subplots(1, 2, figsize=(2 * PLOT_SIZE.width, PLOT_SIZE.height))
- normalized_data_integrated = np.nanmean(normalized_data[_sample_run_number], axis=0)
+ fig, axs3 = plt.subplots(
+ 1, 2, figsize=(2 * PLOT_SIZE.width, PLOT_SIZE.height)
+ )
+ normalized_data_integrated = np.nanmean(
+ normalized_data[_sample_run_number], axis=0
+ )
im2 = axs3[0].imshow(normalized_data_integrated, cmap="gray")
plt.colorbar(im2, ax=axs3[0])
axs3[0].set_title(f"Normalized data {_sample_run_number}")
@@ -413,7 +475,9 @@ def normalization_with_list_of_runs(
axs3[1].set_ylabel("mean of full image")
plt.tight_layout()
- fig, axs4 = plt.subplots(1, 2, figsize=(2 * PLOT_SIZE.width, PLOT_SIZE.height))
+ fig, axs4 = plt.subplots(
+ 1, 2, figsize=(2 * PLOT_SIZE.width, PLOT_SIZE.height)
+ )
axs4[0].plot(lambda_array, profile, "*")
axs4[0].set_xlabel("Lambda (A)")
axs4[0].set_ylabel("mean of full image")
@@ -426,24 +490,36 @@ def normalization_with_list_of_runs(
plt.show()
- if export_corrected_integrated_normalized_data or export_corrected_stack_of_normalized_data:
+ if (
+ export_corrected_integrated_normalized_data
+ or export_corrected_stack_of_normalized_data
+ ):
# make up new output folder name
list_ob_runs = list(ob_master_dict.keys())
- str_ob_runs = "_".join([str(_ob_run_number) for _ob_run_number in list_ob_runs])
+ str_ob_runs = "_".join(
+ [str(_ob_run_number) for _ob_run_number in list_ob_runs]
+ )
full_output_folder = os.path.join(
- output_folder, f"normalized_sample_{_sample_run_number}_obs_{str_ob_runs}"
+ output_folder,
+ f"normalized_sample_{_sample_run_number}_obs_{str_ob_runs}",
) # issue for WEI here !
full_output_folder = os.path.abspath(full_output_folder)
os.makedirs(full_output_folder, exist_ok=True)
if export_corrected_integrated_normalized_data:
# making up the integrated sample data
- sample_data_integrated = np.nanmean(normalized_data[_sample_run_number], axis=0)
+ sample_data_integrated = np.nanmean(
+ normalized_data[_sample_run_number], axis=0
+ )
full_file_name = os.path.join(full_output_folder, "integrated.tif")
- logging.info(f"\t -> Exporting integrated normalized data to {full_file_name} ...")
+ logging.info(
+ f"\t -> Exporting integrated normalized data to {full_file_name} ..."
+ )
make_tiff(data=sample_data_integrated, filename=full_file_name)
- logging.info(f"\t -> Exporting integrated normalized data to {full_file_name} is done!")
+ logging.info(
+ f"\t -> Exporting integrated normalized data to {full_file_name} is done!"
+ )
if export_corrected_stack_of_normalized_data:
output_stack_folder = os.path.join(full_output_folder, "stack")
@@ -451,12 +527,20 @@ def normalization_with_list_of_runs(
os.makedirs(output_stack_folder, exist_ok=True)
for _index, _data in enumerate(normalized_data[_sample_run_number]):
- _output_file = os.path.join(output_stack_folder, f"image{_index:04d}.tif")
+ _output_file = os.path.join(
+ output_stack_folder, f"image{_index:04d}.tif"
+ )
make_tiff(data=_data, filename=_output_file)
- logging.info(f"\t -> Exporting normalized data to {output_stack_folder} is done!")
+ logging.info(
+ f"\t -> Exporting normalized data to {output_stack_folder} is done!"
+ )
print(f"Exported normalized tif images are in: {output_stack_folder}!")
- spectra_file = sample_master_dict[_sample_run_number][MasterDictKeys.spectra_file_name]
- logging.info(f"Exported time spectra file {spectra_file} to {output_stack_folder}!")
+ spectra_file = sample_master_dict[_sample_run_number][
+ MasterDictKeys.spectra_file_name
+ ]
+ logging.info(
+ f"Exported time spectra file {spectra_file} to {output_stack_folder}!"
+ )
shutil.copy(spectra_file, output_stack_folder)
# create x-axis file
@@ -475,7 +559,9 @@ def get_detector_offset_from_nexus(nexus_path: str) -> float:
"""get the detector offset from the nexus file"""
with h5py.File(nexus_path, "r") as hdf5_data:
try:
- detector_offset_micros = hdf5_data["entry"]["DASlogs"]["BL10:Det:TH:DSPT1:TIDelay"]["value"][0]
+ detector_offset_micros = hdf5_data["entry"]["DASlogs"][
+ "BL10:Det:TH:DSPT1:TIDelay"
+ ]["value"][0]
except KeyError:
detector_offset_micros = None
return detector_offset_micros
@@ -504,7 +590,9 @@ def export_sample_images(
make_tiff(data=_data, filename=_output_file)
logging.info(f"\t -> Exporting sample data to {output_stack_folder} is done!")
shutil.copy(spectra_file_name, os.path.join(output_stack_folder))
- logging.info(f"\t -> Exporting spectra file {spectra_file_name} to {output_stack_folder} is done!")
+ logging.info(
+ f"\t -> Exporting spectra file {spectra_file_name} to {output_stack_folder} is done!"
+ )
if export_corrected_integrated_sample_data:
# making up the integrated sample data
@@ -512,7 +600,9 @@ def export_sample_images(
full_file_name = os.path.join(sample_output_folder, "integrated.tif")
logging.info(f"\t -> Exporting integrated sample data to {full_file_name} ...")
make_tiff(data=sample_data_integrated, filename=full_file_name)
- logging.info(f"\t -> Exporting integrated sample data to {full_file_name} is done!")
+ logging.info(
+ f"\t -> Exporting integrated sample data to {full_file_name} is done!"
+ )
display(HTML(f"Created folder {output_stack_folder} for sample outputs!"))
@@ -529,10 +619,13 @@ def export_ob_images(
logging.info(f"> Exporting combined ob images to {output_folder} ...")
logging.info(f"\t{ob_run_numbers = }")
list_ob_runs_number_only = [
- str(isolate_run_number_from_full_path(_ob_run_number)) for _ob_run_number in ob_run_numbers
+ str(isolate_run_number_from_full_path(_ob_run_number))
+ for _ob_run_number in ob_run_numbers
]
if len(list_ob_runs_number_only) == 1:
- ob_output_folder = os.path.join(output_folder, f"ob_{list_ob_runs_number_only[0]}")
+ ob_output_folder = os.path.join(
+ output_folder, f"ob_{list_ob_runs_number_only[0]}"
+ )
else:
str_list_ob_runs = "_".join(list_ob_runs_number_only)
ob_output_folder = os.path.join(output_folder, f"ob_{str_list_ob_runs}")
@@ -561,12 +654,16 @@ def export_ob_images(
logging.info(f"\t -> Exporting ob data to {output_stack_folder} is done!")
# copy spectra file to the output folder
shutil.copy(spectra_file_name, os.path.join(output_stack_folder))
- logging.info(f"\t -> Exported spectra file {spectra_file_name} to {output_stack_folder}!")
+ logging.info(
+ f"\t -> Exported spectra file {spectra_file_name} to {output_stack_folder}!"
+ )
display(HTML(f"Created folder {output_stack_folder} for OB outputs!"))
-def normalization(sample_folder=None, ob_folder=None, output_folder="./", verbose=False):
+def normalization(
+ sample_folder=None, ob_folder=None, output_folder="./", verbose=False
+):
pass
@@ -646,7 +743,9 @@ def update_dict_with_shutter_counts(master_dict: dict) -> tuple[dict, bool]:
if _value == "0":
break
list_shutter_counts.append(float(_value))
- master_dict[run_number][MasterDictKeys.shutter_counts] = list_shutter_counts
+ master_dict[run_number][MasterDictKeys.shutter_counts] = (
+ list_shutter_counts
+ )
return master_dict, status_all_shutter_counts_found
@@ -677,7 +776,9 @@ def update_dict_with_proton_charge(master_dict: dict) -> tuple[dict, bool]:
_nexus_path = master_dict[_run_number][MasterDictKeys.nexus_path]
try:
with h5py.File(_nexus_path, "r") as hdf5_data:
- proton_charge = hdf5_data["entry"][MasterDictKeys.proton_charge][0] / 1e12
+ proton_charge = (
+ hdf5_data["entry"][MasterDictKeys.proton_charge][0] / 1e12
+ )
except KeyError:
proton_charge = None
status_all_proton_charge_found = False
@@ -688,7 +789,9 @@ def update_dict_with_proton_charge(master_dict: dict) -> tuple[dict, bool]:
def update_dict_with_list_of_images(master_dict: dict) -> dict:
"""update the master dict with list of images"""
for _run_number in master_dict.keys():
- list_tif = retrieve_list_of_tif(master_dict[_run_number][MasterDictKeys.data_path])
+ list_tif = retrieve_list_of_tif(
+ master_dict[_run_number][MasterDictKeys.data_path]
+ )
master_dict[_run_number][MasterDictKeys.list_tif] = list_tif
@@ -699,7 +802,9 @@ def get_list_run_number(data_folder: str) -> list:
return list_run_number
-def update_dict_with_nexus_full_path(nexus_root_path: str, instrument: str, master_dict: dict) -> dict:
+def update_dict_with_nexus_full_path(
+ nexus_root_path: str, instrument: str, master_dict: dict
+) -> dict:
"""create dict of nexus path for each run number"""
for run_number in master_dict.keys():
master_dict[run_number][MasterDictKeys.nexus_path] = os.path.join(
@@ -717,7 +822,9 @@ def update_with_nexus_metadata(master_dict: dict) -> dict:
def update_dict_with_data_full_path(data_root_path: str, master_dict: dict) -> dict:
"""create dict of data path for each run number"""
for run_number in master_dict.keys():
- master_dict[run_number][MasterDictKeys.data_path] = os.path.join(data_root_path, f"Run_{run_number}")
+ master_dict[run_number][MasterDictKeys.data_path] = os.path.join(
+ data_root_path, f"Run_{run_number}"
+ )
def create_master_dict(
@@ -775,7 +882,9 @@ def create_master_dict(
return master_dict, status_metadata
-def produce_list_shutter_for_each_image(list_time_spectra: list = None, list_shutter_counts: list = None) -> list:
+def produce_list_shutter_for_each_image(
+ list_time_spectra: list = None, list_shutter_counts: list = None
+) -> list:
"""produce list of shutter counts for each image"""
delat_time_spectra = list_time_spectra[1] - list_time_spectra[0]
@@ -785,18 +894,26 @@ def produce_list_shutter_for_each_image(list_time_spectra: list = None, list_shu
logging.info(f"\t{list_index_jump = }")
logging.info(f"\t{list_shutter_counts = }")
- list_shutter_values_for_each_image = np.zeros(len(list_time_spectra), dtype=np.float32)
+ list_shutter_values_for_each_image = np.zeros(
+ len(list_time_spectra), dtype=np.float32
+ )
if len(list_shutter_counts) == 1: # resonance mode
list_shutter_values_for_each_image.fill(list_shutter_counts[0])
return list_shutter_values_for_each_image
- list_shutter_values_for_each_image[0 : list_index_jump[0] + 1].fill(list_shutter_counts[0])
+ list_shutter_values_for_each_image[0 : list_index_jump[0] + 1].fill(
+ list_shutter_counts[0]
+ )
for _index in range(1, len(list_index_jump)):
_start = list_index_jump[_index - 1]
_end = list_index_jump[_index]
- list_shutter_values_for_each_image[_start + 1 : _end + 1].fill(list_shutter_counts[_index])
+ list_shutter_values_for_each_image[_start + 1 : _end + 1].fill(
+ list_shutter_counts[_index]
+ )
- list_shutter_values_for_each_image[list_index_jump[-1] + 1 :] = list_shutter_counts[-1]
+ list_shutter_values_for_each_image[list_index_jump[-1] + 1 :] = list_shutter_counts[
+ -1
+ ]
return list_shutter_values_for_each_image
@@ -817,7 +934,9 @@ def combine_ob_images(
for _ob_run_number in ob_master_dict.keys():
logging.info(f"Combining ob# {_ob_run_number} ...")
- ob_data = np.array(ob_master_dict[_ob_run_number][MasterDictKeys.data], dtype=np.float32)
+ ob_data = np.array(
+ ob_master_dict[_ob_run_number][MasterDictKeys.data], dtype=np.float32
+ )
# get statistics of ob data
data_shape = ob_data.shape
@@ -826,7 +945,9 @@ def combine_ob_images(
number_of_zeros = np.sum(ob_data == 0)
logging.info(f"\t ob data shape: {data_shape}")
logging.info(f"\t Number of zeros in ob data: {number_of_zeros}")
- logging.info(f"\t Percentage of zeros in ob data: {number_of_zeros / (data_shape[0] * nbr_pixels) * 100:.2f}%")
+ logging.info(
+ f"\t Percentage of zeros in ob data: {number_of_zeros / (data_shape[0] * nbr_pixels) * 100:.2f}%"
+ )
logging.info(f"\t Mean of ob data: {np.mean(ob_data)}")
logging.info(f"\t maximum of ob data: {np.max(ob_data)}")
logging.info(f"\t minimum of ob data: {np.min(ob_data)}")
@@ -842,14 +963,20 @@ def combine_ob_images(
logging.info("\t -> Normalized by shutter counts")
list_shutter_values_for_each_image = produce_list_shutter_for_each_image(
- list_time_spectra=ob_master_dict[_ob_run_number][MasterDictKeys.list_spectra],
- list_shutter_counts=ob_master_dict[_ob_run_number][MasterDictKeys.shutter_counts],
+ list_time_spectra=ob_master_dict[_ob_run_number][
+ MasterDictKeys.list_spectra
+ ],
+ list_shutter_counts=ob_master_dict[_ob_run_number][
+ MasterDictKeys.shutter_counts
+ ],
)
logging.info(f"{list_shutter_values_for_each_image.shape = }")
temp_ob_data = np.empty_like(ob_data, dtype=np.float32)
for _index in range(len(list_shutter_values_for_each_image)):
- temp_ob_data[_index] = ob_data[_index] / list_shutter_values_for_each_image[_index]
+ temp_ob_data[_index] = (
+ ob_data[_index] / list_shutter_values_for_each_image[_index]
+ )
logging.info(f"{temp_ob_data.shape = }")
ob_data = temp_ob_data.copy()
@@ -897,9 +1024,15 @@ def combine_ob_images(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
- parser.add_argument("--sample", type=str, nargs=1, help="Full path to sample run number")
- parser.add_argument("--ob", type=str, nargs=1, help="Full path to the ob run number")
- parser.add_argument("--output", type=str, nargs=1, help="Path to the output folder", default="./")
+ parser.add_argument(
+ "--sample", type=str, nargs=1, help="Full path to sample run number"
+ )
+ parser.add_argument(
+ "--ob", type=str, nargs=1, help="Full path to the ob run number"
+ )
+ parser.add_argument(
+ "--output", type=str, nargs=1, help="Path to the output folder", default="./"
+ )
args = parser.parse_args()
logging.info(f"{args = }")
@@ -948,7 +1081,9 @@ def combine_ob_images(
# normalization(sample_folder=sample_folder, ob_folder=ob_folder, output_folder=output_folder)
- print(f"Normalization is done! Check the log file {log_file_name} for more details!")
+ print(
+ f"Normalization is done! Check the log file {log_file_name} for more details!"
+ )
print(f"Exported data to {output_folder}")
# sample = /SNS/VENUS/IPTS-34808/shared/autoreduce/mcp/November17_Sample6_UA_H_Batteries_1_5_Angs_min_30Hz_5C
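The hunks above only re-wrap `produce_list_shutter_for_each_image`; its behaviour is unchanged. As a minimal standalone sketch of the mapping it performs — with made-up inputs, and assuming `list_index_jump` (the image indices where the shutter segment changes, computed earlier in the function and not shown in these hunks) is already available — the per-image shutter values can be reproduced like this:

    import numpy as np

    # Hypothetical inputs: 10 TOF images, segment changes after images 3 and 6,
    # one shutter count per segment (all values made up for illustration).
    list_time_spectra = np.linspace(0.0, 9.0, 10)   # only its length matters here
    list_index_jump = [3, 6]                        # assumed pre-computed
    list_shutter_counts = [1000.0, 2000.0, 4000.0]

    shutter_per_image = np.zeros(len(list_time_spectra), dtype=np.float32)

    if len(list_shutter_counts) == 1:               # resonance mode: single segment
        shutter_per_image.fill(list_shutter_counts[0])
    else:
        # first segment covers images 0 .. first jump (inclusive)
        shutter_per_image[0 : list_index_jump[0] + 1].fill(list_shutter_counts[0])
        # middle segments follow the same start+1 .. end+1 convention as the diff
        for i in range(1, len(list_index_jump)):
            start, end = list_index_jump[i - 1], list_index_jump[i]
            shutter_per_image[start + 1 : end + 1].fill(list_shutter_counts[i])
        # tail: every image after the last jump gets the last shutter count
        shutter_per_image[list_index_jump[-1] + 1 :] = list_shutter_counts[-1]

    print(shutter_per_image)
    # -> [1000. 1000. 1000. 1000. 2000. 2000. 2000. 4000. 4000. 4000.]

These per-image values are what the `temp_ob_data[_index] = ob_data[_index] / list_shutter_values_for_each_image[_index]` loop in the hunk then divides by.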
diff --git a/notebooks/__code/resonance_fitting/normalization_for_timepix1_timepix3.py b/notebooks/__code/resonance_fitting/normalization_for_timepix1_timepix3.py
index 0e59f51b..0d80e774 100644
--- a/notebooks/__code/resonance_fitting/normalization_for_timepix1_timepix3.py
+++ b/notebooks/__code/resonance_fitting/normalization_for_timepix1_timepix3.py
@@ -14,7 +14,6 @@
from IPython.display import HTML, display
from PIL import Image
from skimage.io import imread
-from scipy.ndimage import median_filter
# from enum import Enum
# from scipy.constants import h, c, electron_volt, m_n
@@ -82,7 +81,9 @@ def _worker(fl):
return (imread(fl).astype(LOAD_DTYPE)).swapaxes(0, 1)
-def load_data_using_multithreading(list_tif: list = None, combine_tof: bool = False) -> np.ndarray:
+def load_data_using_multithreading(
+ list_tif: list = None, combine_tof: bool = False
+) -> np.ndarray:
"""load data using multithreading"""
with mp.Pool(processes=40) as pool:
data = pool.map(_worker, list_tif)
@@ -101,7 +102,9 @@ def retrieve_list_of_tif(folder: str) -> list:
def create_x_axis_file(
- lambda_array: np.ndarray = None, energy_array: np.ndarray = None, output_folder: str = "./"
+ lambda_array: np.ndarray = None,
+ energy_array: np.ndarray = None,
+ output_folder: str = "./",
) -> str:
"""create x axis file with lambda, energy and tof arrays"""
x_axis_data = {
@@ -139,6 +142,7 @@ def correct_chips_alignment(data: np.ndarray, config: dict) -> np.ndarray:
# return data_corrected
return data
+
def normalization_with_list_of_full_path(
sample_dict: dict = None,
ob_dict: dict = None,
@@ -208,10 +212,16 @@ def normalization_with_list_of_full_path(
export_corrected_stack_of_sample_data = export_mode.get("sample_stack", False)
export_corrected_stack_of_ob_data = export_mode.get("ob_stack", False)
- export_corrected_stack_of_normalized_data = export_mode.get("normalized_stack", False)
- export_corrected_integrated_sample_data = export_mode.get("sample_integrated", False)
+ export_corrected_stack_of_normalized_data = export_mode.get(
+ "normalized_stack", False
+ )
+ export_corrected_integrated_sample_data = export_mode.get(
+ "sample_integrated", False
+ )
export_corrected_integrated_ob_data = export_mode.get("ob_integrated", False)
- export_corrected_integrated_normalized_data = export_mode.get("normalized_integrated", False)
+ export_corrected_integrated_normalized_data = export_mode.get(
+ "normalized_integrated", False
+ )
export_x_axis = export_mode.get("x_axis", True)
logging.info(f"{export_corrected_stack_of_sample_data = }")
@@ -238,32 +248,40 @@ def normalization_with_list_of_full_path(
logging.info(f"loading ob# {_ob_run_number} ... ")
if verbose:
display(HTML(f"Loading ob# {_ob_run_number} ..."))
- ob_master_dict[_ob_run_number][MasterDictKeys.data] = load_data_using_multithreading(
- ob_master_dict[_ob_run_number][MasterDictKeys.list_tif], combine_tof=False
+ ob_master_dict[_ob_run_number][MasterDictKeys.data] = (
+ load_data_using_multithreading(
+ ob_master_dict[_ob_run_number][MasterDictKeys.list_tif],
+ combine_tof=False,
+ )
)
logging.info(f"ob# {_ob_run_number} loaded!")
logging.info(f"{ob_master_dict[_ob_run_number][MasterDictKeys.data].shape = }")
if verbose:
display(HTML(f"ob# {_ob_run_number} loaded!"))
- display(HTML(f"{ob_master_dict[_ob_run_number][MasterDictKeys.data].shape = }"))
+ display(
+ HTML(f"{ob_master_dict[_ob_run_number][MasterDictKeys.data].shape = }")
+ )
if proton_charge_flag:
normalized_by_proton_charge = (
- sample_status_metadata.all_proton_charge_found and ob_status_metadata.all_proton_charge_found
+ sample_status_metadata.all_proton_charge_found
+ and ob_status_metadata.all_proton_charge_found
)
else:
normalized_by_proton_charge = False
if monitor_counts_flag:
normalized_by_monitor_counts = (
- sample_status_metadata.all_monitor_counts_found and ob_status_metadata.all_monitor_counts_found
+ sample_status_metadata.all_monitor_counts_found
+ and ob_status_metadata.all_monitor_counts_found
)
else:
normalized_by_monitor_counts = False
if shutter_counts_flag:
normalized_by_shutter_counts = (
- sample_status_metadata.all_shutter_counts_found and ob_status_metadata.all_shutter_counts_found
+ sample_status_metadata.all_shutter_counts_found
+ and ob_status_metadata.all_shutter_counts_found
)
else:
normalized_by_shutter_counts = False
@@ -280,9 +298,15 @@ def normalization_with_list_of_full_path(
max_iterations=max_iterations,
)
logging.info(f"{ob_data_combined.shape = }")
- logging.info(f"number of NaN in ob_data_combined data: {np.sum(np.isnan(ob_data_combined))}")
- logging.info(f"number of inf in ob_data_combined data: {np.sum(np.isinf(ob_data_combined))}")
- logging.info(f"number of zeros in ob_data_combined data: {np.sum(ob_data_combined == 0)} ")
+ logging.info(
+ f"number of NaN in ob_data_combined data: {np.sum(np.isnan(ob_data_combined))}"
+ )
+ logging.info(
+ f"number of inf in ob_data_combined data: {np.sum(np.isinf(ob_data_combined))}"
+ )
+ logging.info(
+ f"number of zeros in ob_data_combined data: {np.sum(ob_data_combined == 0)} "
+ )
if verbose:
display(HTML(f"{ob_data_combined.shape = }"))
@@ -291,7 +315,9 @@ def normalization_with_list_of_full_path(
logging.info("Correcting chips alignment ...")
if verbose:
display(HTML("Correcting chips alignment ..."))
- ob_data_combined = correct_chips_alignment(ob_data_combined, correct_chips_alignment_config)
+ ob_data_combined = correct_chips_alignment(
+ ob_data_combined, correct_chips_alignment_config
+ )
logging.info("Chips alignment corrected!")
if verbose:
display(HTML("Chips alignment corrected!"))
@@ -304,7 +330,9 @@ def normalization_with_list_of_full_path(
export_corrected_stack_of_ob_data,
export_corrected_integrated_ob_data,
ob_data_combined,
- spectra_file_name=ob_master_dict[_ob_run_number][MasterDictKeys.spectra_file_name],
+ spectra_file_name=ob_master_dict[_ob_run_number][
+ MasterDictKeys.spectra_file_name
+ ],
)
# load dc images
@@ -312,39 +340,56 @@ def normalization_with_list_of_full_path(
logging.info(f"loading dc# {_dc_run_number} ... ")
if verbose:
display(HTML(f"Loading dc# {_dc_run_number} ..."))
- dc_master_dict[_dc_run_number][MasterDictKeys.data] = load_data_using_multithreading(
- dc_master_dict[_dc_run_number][MasterDictKeys.list_tif], combine_tof=False
+ dc_master_dict[_dc_run_number][MasterDictKeys.data] = (
+ load_data_using_multithreading(
+ dc_master_dict[_dc_run_number][MasterDictKeys.list_tif],
+ combine_tof=False,
+ )
)
logging.info(f"dc# {_dc_run_number} loaded!")
logging.info(f"{dc_master_dict[_dc_run_number][MasterDictKeys.data].shape = }")
if verbose:
display(HTML(f"dc# {_dc_run_number} loaded!"))
- display(HTML(f"{dc_master_dict[_dc_run_number][MasterDictKeys.data].shape = }"))
+ display(
+ HTML(f"{dc_master_dict[_dc_run_number][MasterDictKeys.data].shape = }")
+ )
# combine all ob images
dc_data_combined = combine_dc_images(dc_master_dict)
-
+
# load sample images
for _sample_run_number in sample_master_dict.keys():
logging.info(f"loading sample# {_sample_run_number} ... ")
if verbose:
display(HTML(f"Loading sample# {_sample_run_number} ..."))
- sample_master_dict[_sample_run_number][MasterDictKeys.data] = load_data_using_multithreading(
- sample_master_dict[_sample_run_number][MasterDictKeys.list_tif], combine_tof=False
+ sample_master_dict[_sample_run_number][MasterDictKeys.data] = (
+ load_data_using_multithreading(
+ sample_master_dict[_sample_run_number][MasterDictKeys.list_tif],
+ combine_tof=False,
+ )
)
logging.info(f"sample# {_sample_run_number} loaded!")
- logging.info(f"{sample_master_dict[_sample_run_number][MasterDictKeys.data].shape = }")
+ logging.info(
+ f"{sample_master_dict[_sample_run_number][MasterDictKeys.data].shape = }"
+ )
if verbose:
display(HTML(f"sample# {_sample_run_number} loaded!"))
- display(HTML(f"{sample_master_dict[_sample_run_number][MasterDictKeys.data].shape = }"))
+ display(
+ HTML(
+ f"{sample_master_dict[_sample_run_number][MasterDictKeys.data].shape = }"
+ )
+ )
if correct_chips_alignment_flag:
logging.info("Correcting chips alignment ...")
if verbose:
display(HTML("Correcting chips alignment ..."))
for _sample_run_number in sample_master_dict.keys():
- sample_master_dict[_sample_run_number][MasterDictKeys.data] = correct_chips_alignment(
- sample_master_dict[_sample_run_number][MasterDictKeys.data], correct_chips_alignment_config
+ sample_master_dict[_sample_run_number][MasterDictKeys.data] = (
+ correct_chips_alignment(
+ sample_master_dict[_sample_run_number][MasterDictKeys.data],
+ correct_chips_alignment_config,
+ )
)
logging.info("Chips alignment corrected!")
if verbose:
@@ -369,7 +414,9 @@ def normalization_with_list_of_full_path(
logging.info(f"\t sample data shape: {data_shape}")
logging.info(f"\t data type of _sample_data: {_sample_data.dtype}")
logging.info(f"\t Number of zeros in sample data: {number_of_zeros}")
- logging.info(f"\t Number of nan in sample data: {np.sum(np.isnan(_sample_data))}")
+ logging.info(
+ f"\t Number of nan in sample data: {np.sum(np.isnan(_sample_data))}"
+ )
logging.info(
f"\t Percentage of zeros in sample data: {number_of_zeros / (data_shape[0] * nbr_pixels) * 100:.2f}%"
)
@@ -380,7 +427,9 @@ def normalization_with_list_of_full_path(
if normalized_by_proton_charge:
logging.info("\t -> Normalized by proton charge")
- proton_charge = sample_master_dict[_sample_run_number][MasterDictKeys.proton_charge]
+ proton_charge = sample_master_dict[_sample_run_number][
+ MasterDictKeys.proton_charge
+ ]
logging.info(f"\t\t proton charge: {proton_charge} C")
logging.info(f"\t\t{type(proton_charge) = }")
logging.info(f"\t\tbefore division: {_sample_data.dtype = }")
@@ -389,7 +438,9 @@ def normalization_with_list_of_full_path(
if normalized_by_monitor_counts:
logging.info("\t -> Normalized by monitor counts")
- monitor_counts = sample_master_dict[_sample_run_number][MasterDictKeys.monitor_counts]
+ monitor_counts = sample_master_dict[_sample_run_number][
+ MasterDictKeys.monitor_counts
+ ]
logging.info(f"\t\t monitor counts: {monitor_counts}")
logging.info(f"\t\t{type(monitor_counts) = }")
_sample_data = _sample_data / monitor_counts
@@ -397,12 +448,18 @@ def normalization_with_list_of_full_path(
if normalized_by_shutter_counts:
list_shutter_values_for_each_image = produce_list_shutter_for_each_image(
- list_time_spectra=ob_master_dict[_ob_run_number][MasterDictKeys.list_spectra],
- list_shutter_counts=sample_master_dict[_sample_run_number][MasterDictKeys.shutter_counts],
+ list_time_spectra=ob_master_dict[_ob_run_number][
+ MasterDictKeys.list_spectra
+ ],
+ list_shutter_counts=sample_master_dict[_sample_run_number][
+ MasterDictKeys.shutter_counts
+ ],
)
sample_data = []
- for _sample, _shutter_value in zip(_sample_data, list_shutter_values_for_each_image, strict=False):
+ for _sample, _shutter_value in zip(
+ _sample_data, list_shutter_values_for_each_image, strict=False
+ ):
sample_data.append(_sample / _shutter_value)
_sample_data = np.array(sample_data)
@@ -412,52 +469,74 @@ def normalization_with_list_of_full_path(
logging.info(f"{ob_data_combined.dtype = }")
# export sample data after correction if requested
- if export_corrected_stack_of_sample_data or export_corrected_integrated_sample_data:
+ if (
+ export_corrected_stack_of_sample_data
+ or export_corrected_integrated_sample_data
+ ):
export_sample_images(
output_folder,
export_corrected_stack_of_sample_data,
export_corrected_integrated_sample_data,
_sample_run_number,
_sample_data,
- spectra_file_name=sample_master_dict[_sample_run_number][MasterDictKeys.spectra_file_name],
+ spectra_file_name=sample_master_dict[_sample_run_number][
+ MasterDictKeys.spectra_file_name
+ ],
)
if dc_data_combined is not None:
- logging.info(f"normalization with DC subtraction")
- _normalized_data = np.divide(np.subtract(_sample_data, dc_data_combined), np.subtract(ob_data_combined, dc_data_combined),
- out=np.zeros_like(_sample_data),
- where=(ob_data_combined - dc_data_combined)!=0)
+ logging.info("normalization with DC subtraction")
+ _normalized_data = np.divide(
+ np.subtract(_sample_data, dc_data_combined),
+ np.subtract(ob_data_combined, dc_data_combined),
+ out=np.zeros_like(_sample_data),
+ where=(ob_data_combined - dc_data_combined) != 0,
+ )
else:
- logging.info(f"normalization without DC subtraction")
- _normalized_data = np.divide(_sample_data, ob_data_combined,
- out=np.zeros_like(_sample_data),
- where=ob_data_combined!=0)
-
+ logging.info("normalization without DC subtraction")
+ _normalized_data = np.divide(
+ _sample_data,
+ ob_data_combined,
+ out=np.zeros_like(_sample_data),
+ where=ob_data_combined != 0,
+ )
+
_normalized_data[ob_data_combined == 0] = 0
normalized_data[_sample_run_number] = _normalized_data
# normalized_data[_sample_run_number] = np.array(np.divide(_sample_data, ob_data_combined))
logging.info(f"{normalized_data[_sample_run_number].shape = }")
logging.info(f"{normalized_data[_sample_run_number].dtype = }")
- logging.info(f"number of NaN in normalized data: {np.sum(np.isnan(normalized_data[_sample_run_number]))}")
- logging.info(f"number of inf in normalized data: {np.sum(np.isinf(normalized_data[_sample_run_number]))}")
+ logging.info(
+ f"number of NaN in normalized data: {np.sum(np.isnan(normalized_data[_sample_run_number]))}"
+ )
+ logging.info(
+ f"number of inf in normalized data: {np.sum(np.isinf(normalized_data[_sample_run_number]))}"
+ )
- detector_delay_us = sample_master_dict[_sample_run_number][MasterDictKeys.detector_delay_us]
- time_spectra = sample_master_dict[_sample_run_number][MasterDictKeys.list_spectra]
+ detector_delay_us = sample_master_dict[_sample_run_number][
+ MasterDictKeys.detector_delay_us
+ ]
+ time_spectra = sample_master_dict[_sample_run_number][
+ MasterDictKeys.list_spectra
+ ]
if time_spectra is None:
- logging.info("Time spectra is None, cannot convert to lambda or energy arrays")
+ logging.info(
+ "Time spectra is None, cannot convert to lambda or energy arrays"
+ )
lambda_array = None
energy_array = None
-
- else:
- logging.info(f"We have a time_spectra!")
+ else:
+ logging.info("We have a time_spectra!")
logging.info(f"time spectra shape: {time_spectra.shape}")
-
+
if detector_delay_us is None:
detector_delay_us = 0.0
- logging.info(f"detector delay is None, setting it to {detector_delay_us} us")
+ logging.info(
+ f"detector delay is None, setting it to {detector_delay_us} us"
+ )
logging.info(f"we have a detector delay of {detector_delay_us} us")
@@ -487,26 +566,29 @@ def normalization_with_list_of_full_path(
logging.info(f"Preview: {preview = }")
if preview:
-
# display preview of normalized data
- fig, axs1 = plt.subplots(1, 2, figsize=(2 * PLOT_SIZE.width, PLOT_SIZE.height))
+ fig, axs1 = plt.subplots(
+ 1, 2, figsize=(2 * PLOT_SIZE.width, PLOT_SIZE.height)
+ )
sample_data_integrated = np.nanmean(_sample_data, axis=0)
im0 = axs1[0].imshow(sample_data_integrated, cmap="gray")
plt.colorbar(im0, ax=axs1[0])
display(HTML(f"Preview of run {_sample_run_number}
"))
display(HTML(f"detector delay: {detector_delay_us:.2f} us"))
-
- axs1[0].set_title(f"Integrated Sample data")
+
+ axs1[0].set_title("Integrated Sample data")
sample_integrated1 = np.nansum(_sample_data, axis=1)
sample_integrated = np.nansum(sample_integrated1, axis=1)
- axs1[1].plot(sample_integrated, 'o')
+ axs1[1].plot(sample_integrated, "o")
axs1[1].set_xlabel("File image index")
axs1[1].set_ylabel("mean of full image")
plt.tight_layout()
- fig, axs2 = plt.subplots(1, 2, figsize=(2 * PLOT_SIZE.width, PLOT_SIZE.height))
+ fig, axs2 = plt.subplots(
+ 1, 2, figsize=(2 * PLOT_SIZE.width, PLOT_SIZE.height)
+ )
ob_data_integrated = np.nanmean(ob_data_combined, axis=0)
im1 = axs2[0].imshow(ob_data_integrated, cmap="gray")
plt.colorbar(im1, ax=axs2[0])
@@ -514,13 +596,15 @@ def normalization_with_list_of_full_path(
ob_integrated1 = np.nansum(ob_data_combined, axis=1)
ob_integrated = np.nansum(ob_integrated1, axis=1)
- axs2[1].plot(ob_integrated, 'o')
+ axs2[1].plot(ob_integrated, "o")
axs2[1].set_xlabel("File image index")
axs2[1].set_ylabel("mean of full image")
plt.tight_layout()
if dc_data_combined is not None:
- fig, axs_dc = plt.subplots(1, 2, figsize=(2 * PLOT_SIZE.width, PLOT_SIZE.height))
+ fig, axs_dc = plt.subplots(
+ 1, 2, figsize=(2 * PLOT_SIZE.width, PLOT_SIZE.height)
+ )
dc_data_integrated = np.nanmean(dc_data_combined, axis=0)
im_dc = axs_dc[0].imshow(dc_data_integrated, cmap="gray")
plt.colorbar(im_dc, ax=axs_dc[0])
@@ -528,26 +612,32 @@ def normalization_with_list_of_full_path(
dc_integrated1 = np.nansum(dc_data_combined, axis=1)
dc_integrated = np.nansum(dc_integrated1, axis=1)
- axs_dc[1].plot(dc_integrated, 'o')
+ axs_dc[1].plot(dc_integrated, "o")
axs_dc[1].set_xlabel("File image index")
axs_dc[1].set_ylabel("mean of full image")
plt.tight_layout()
- fig, axs3 = plt.subplots(1, 2, figsize=(2 * PLOT_SIZE.width, PLOT_SIZE.height))
- normalized_data_integrated = np.nanmean(normalized_data[_sample_run_number], axis=0)
+ fig, axs3 = plt.subplots(
+ 1, 2, figsize=(2 * PLOT_SIZE.width, PLOT_SIZE.height)
+ )
+ normalized_data_integrated = np.nanmean(
+ normalized_data[_sample_run_number], axis=0
+ )
im2 = axs3[0].imshow(normalized_data_integrated, cmap="gray")
plt.colorbar(im2, ax=axs3[0])
- axs3[0].set_title(f"Integrated Normalized data")
+ axs3[0].set_title("Integrated Normalized data")
profile_step1 = np.nanmean(normalized_data[_sample_run_number], axis=1)
profile = np.nanmean(profile_step1, axis=1)
- axs3[1].plot(profile, 'o')
+ axs3[1].plot(profile, "o")
axs3[1].set_xlabel("File image index")
axs3[1].set_ylabel("mean of full image")
plt.tight_layout()
if lambda_array is not None:
- fig, axs4 = plt.subplots(1, 2, figsize=(2 * PLOT_SIZE.width, PLOT_SIZE.height))
+ fig, axs4 = plt.subplots(
+ 1, 2, figsize=(2 * PLOT_SIZE.width, PLOT_SIZE.height)
+ )
logging.info(f"{np.shape(profile) = }")
axs4[0].plot(lambda_array, profile, "*")
@@ -562,24 +652,36 @@ def normalization_with_list_of_full_path(
plt.show()
- if export_corrected_integrated_normalized_data or export_corrected_stack_of_normalized_data:
+ if (
+ export_corrected_integrated_normalized_data
+ or export_corrected_stack_of_normalized_data
+ ):
# make up new output folder name
list_ob_runs = list(ob_master_dict.keys())
- str_ob_runs = "_".join([str(_ob_run_number) for _ob_run_number in list_ob_runs])
+ str_ob_runs = "_".join(
+ [str(_ob_run_number) for _ob_run_number in list_ob_runs]
+ )
full_output_folder = os.path.join(
- output_folder, f"normalized_sample_{_sample_run_number}_obs_{str_ob_runs}"
+ output_folder,
+ f"normalized_sample_{_sample_run_number}_obs_{str_ob_runs}",
) # issue for WEI here !
full_output_folder = os.path.abspath(full_output_folder)
os.makedirs(full_output_folder, exist_ok=True)
if export_corrected_integrated_normalized_data:
# making up the integrated sample data
- sample_data_integrated = np.nanmean(normalized_data[_sample_run_number], axis=0)
+ sample_data_integrated = np.nanmean(
+ normalized_data[_sample_run_number], axis=0
+ )
full_file_name = os.path.join(full_output_folder, "integrated.tif")
- logging.info(f"\t -> Exporting integrated normalized data to {full_file_name} ...")
+ logging.info(
+ f"\t -> Exporting integrated normalized data to {full_file_name} ..."
+ )
make_tiff(data=sample_data_integrated, filename=full_file_name)
- logging.info(f"\t -> Exporting integrated normalized data to {full_file_name} is done!")
+ logging.info(
+ f"\t -> Exporting integrated normalized data to {full_file_name} is done!"
+ )
if export_corrected_stack_of_normalized_data:
output_stack_folder = os.path.join(full_output_folder, "stack")
@@ -587,14 +689,22 @@ def normalization_with_list_of_full_path(
os.makedirs(output_stack_folder, exist_ok=True)
for _index, _data in enumerate(normalized_data[_sample_run_number]):
- _output_file = os.path.join(output_stack_folder, f"image{_index:04d}.tif")
+ _output_file = os.path.join(
+ output_stack_folder, f"image{_index:04d}.tif"
+ )
make_tiff(data=_data, filename=_output_file)
- logging.info(f"\t -> Exporting normalized data to {output_stack_folder} is done!")
+ logging.info(
+ f"\t -> Exporting normalized data to {output_stack_folder} is done!"
+ )
print(f"Exported normalized tif images are in: {output_stack_folder}!")
- spectra_file = sample_master_dict[_sample_run_number][MasterDictKeys.spectra_file_name]
+ spectra_file = sample_master_dict[_sample_run_number][
+ MasterDictKeys.spectra_file_name
+ ]
if spectra_file and Path(spectra_file).exists():
- logging.info(f"Exported time spectra file {spectra_file} to {output_stack_folder}!")
+ logging.info(
+ f"Exported time spectra file {spectra_file} to {output_stack_folder}!"
+ )
shutil.copy(spectra_file, output_stack_folder)
# create x-axis file
@@ -613,7 +723,9 @@ def get_detector_offset_from_nexus(nexus_path: str) -> float:
"""get the detector offset from the nexus file"""
with h5py.File(nexus_path, "r") as hdf5_data:
try:
- detector_offset_micros = hdf5_data["entry"]["DASlogs"]["BL10:Det:TH:DSPT1:TIDelay"]["value"][0]
+ detector_offset_micros = hdf5_data["entry"]["DASlogs"][
+ "BL10:Det:TH:DSPT1:TIDelay"
+ ]["value"][0]
except KeyError:
detector_offset_micros = None
return detector_offset_micros
@@ -642,7 +754,9 @@ def export_sample_images(
make_tiff(data=_data, filename=_output_file)
logging.info(f"\t -> Exporting sample data to {output_stack_folder} is done!")
shutil.copy(spectra_file_name, os.path.join(output_stack_folder))
- logging.info(f"\t -> Exporting spectra file {spectra_file_name} to {output_stack_folder} is done!")
+ logging.info(
+ f"\t -> Exporting spectra file {spectra_file_name} to {output_stack_folder} is done!"
+ )
if export_corrected_integrated_sample_data:
# making up the integrated sample data
@@ -650,7 +764,9 @@ def export_sample_images(
full_file_name = os.path.join(sample_output_folder, "integrated.tif")
logging.info(f"\t -> Exporting integrated sample data to {full_file_name} ...")
make_tiff(data=sample_data_integrated, filename=full_file_name)
- logging.info(f"\t -> Exporting integrated sample data to {full_file_name} is done!")
+ logging.info(
+ f"\t -> Exporting integrated sample data to {full_file_name} is done!"
+ )
display(HTML(f"Created folder {output_stack_folder} for sample outputs!"))
@@ -667,10 +783,13 @@ def export_ob_images(
logging.info(f"> Exporting combined ob images to {output_folder} ...")
logging.info(f"\t{ob_run_numbers = }")
list_ob_runs_number_only = [
- str(isolate_run_number_from_full_path(_ob_run_number)) for _ob_run_number in ob_run_numbers
+ str(isolate_run_number_from_full_path(_ob_run_number))
+ for _ob_run_number in ob_run_numbers
]
if len(list_ob_runs_number_only) == 1:
- ob_output_folder = os.path.join(output_folder, f"ob_{list_ob_runs_number_only[0]}")
+ ob_output_folder = os.path.join(
+ output_folder, f"ob_{list_ob_runs_number_only[0]}"
+ )
else:
str_list_ob_runs = "_".join(list_ob_runs_number_only)
ob_output_folder = os.path.join(output_folder, f"ob_{str_list_ob_runs}")
@@ -699,7 +818,9 @@ def export_ob_images(
logging.info(f"\t -> Exporting ob data to {output_stack_folder} is done!")
# copy spectra file to the output folder
shutil.copy(spectra_file_name, os.path.join(output_stack_folder))
- logging.info(f"\t -> Exported spectra file {spectra_file_name} to {output_stack_folder}!")
+ logging.info(
+ f"\t -> Exported spectra file {spectra_file_name} to {output_stack_folder}!"
+ )
display(HTML(f"Created folder {output_stack_folder} for OB outputs!"))
@@ -790,7 +911,9 @@ def update_dict_with_shutter_counts(master_dict: dict) -> tuple[dict, bool]:
if _value == "0":
break
list_shutter_counts.append(float(_value))
- master_dict[run_number][MasterDictKeys.shutter_counts] = list_shutter_counts
+ master_dict[run_number][MasterDictKeys.shutter_counts] = (
+ list_shutter_counts
+ )
return master_dict, status_all_shutter_counts_found
@@ -827,11 +950,15 @@ def update_dict_with_proton_charge(master_dict: dict) -> tuple[dict, bool]:
try:
with h5py.File(_nexus_path, "r") as hdf5_data:
- proton_charge = hdf5_data["entry"][MasterDictKeys.proton_charge][0] / 1e12
+ proton_charge = (
+ hdf5_data["entry"][MasterDictKeys.proton_charge][0] / 1e12
+ )
except KeyError:
proton_charge = None
status_all_proton_charge_found = False
- master_dict[_run_number][MasterDictKeys.proton_charge] = np.float32(proton_charge)
+ master_dict[_run_number][MasterDictKeys.proton_charge] = np.float32(
+ proton_charge
+ )
return status_all_proton_charge_found
@@ -852,14 +979,18 @@ def update_dict_with_monitor_counts(master_dict: dict) -> bool:
except KeyError:
monitor_counts = None
status_all_monitor_counts_found = False
- master_dict[_run_number][MasterDictKeys.monitor_counts] = np.float32(monitor_counts)
+ master_dict[_run_number][MasterDictKeys.monitor_counts] = np.float32(
+ monitor_counts
+ )
return status_all_monitor_counts_found
def update_dict_with_list_of_images(master_dict: dict) -> dict:
"""update the master dict with list of images"""
for _run_number in master_dict.keys():
- list_tif = retrieve_list_of_tif(master_dict[_run_number][MasterDictKeys.data_path])
+ list_tif = retrieve_list_of_tif(
+ master_dict[_run_number][MasterDictKeys.data_path]
+ )
logging.info(f"Retrieved {len(list_tif)} tif files for run {_run_number}!")
master_dict[_run_number][MasterDictKeys.list_tif] = list_tif
@@ -871,7 +1002,9 @@ def get_list_run_number(data_folder: str) -> list:
return list_run_number
-def update_dict_with_nexus_full_path(nexus_root_path: str, instrument: str, master_dict: dict) -> dict:
+def update_dict_with_nexus_full_path(
+ nexus_root_path: str, instrument: str, master_dict: dict
+) -> dict:
"""create dict of nexus path for each run number"""
for run_number in master_dict.keys():
master_dict[run_number][MasterDictKeys.nexus_path] = os.path.join(
@@ -892,7 +1025,9 @@ def update_with_nexus_metadata(master_dict: dict) -> dict:
def update_dict_with_data_full_path(data_root_path: str, master_dict: dict) -> dict:
"""create dict of data path for each run number"""
for run_number in master_dict.keys():
- master_dict[run_number][MasterDictKeys.data_path] = os.path.join(data_root_path, f"Run_{run_number}")
+ master_dict[run_number][MasterDictKeys.data_path] = os.path.join(
+ data_root_path, f"Run_{run_number}"
+ )
def create_master_dict(
@@ -942,7 +1077,9 @@ def create_master_dict(
return master_dict, status_metadata
-def produce_list_shutter_for_each_image(list_time_spectra: list = None, list_shutter_counts: list = None) -> list:
+def produce_list_shutter_for_each_image(
+ list_time_spectra: list = None, list_shutter_counts: list = None
+) -> list:
"""produce list of shutter counts for each image"""
delat_time_spectra = list_time_spectra[1] - list_time_spectra[0]
@@ -952,31 +1089,41 @@ def produce_list_shutter_for_each_image(list_time_spectra: list = None, list_shu
logging.info(f"\t{list_index_jump = }")
logging.info(f"\t{list_shutter_counts = }")
- list_shutter_values_for_each_image = np.zeros(len(list_time_spectra), dtype=np.float32)
+ list_shutter_values_for_each_image = np.zeros(
+ len(list_time_spectra), dtype=np.float32
+ )
if len(list_shutter_counts) == 1: # resonance mode
list_shutter_values_for_each_image.fill(list_shutter_counts[0])
return list_shutter_values_for_each_image
- list_shutter_values_for_each_image[0 : list_index_jump[0] + 1].fill(list_shutter_counts[0])
+ list_shutter_values_for_each_image[0 : list_index_jump[0] + 1].fill(
+ list_shutter_counts[0]
+ )
for _index in range(1, len(list_index_jump)):
_start = list_index_jump[_index - 1]
_end = list_index_jump[_index]
- list_shutter_values_for_each_image[_start + 1 : _end + 1].fill(list_shutter_counts[_index])
+ list_shutter_values_for_each_image[_start + 1 : _end + 1].fill(
+ list_shutter_counts[_index]
+ )
- list_shutter_values_for_each_image[list_index_jump[-1] + 1 :] = list_shutter_counts[-1]
+ list_shutter_values_for_each_image[list_index_jump[-1] + 1 :] = list_shutter_counts[
+ -1
+ ]
return list_shutter_values_for_each_image
-def replace_zero_with_local_median(data: np.ndarray,
- kernel_size: Tuple[int, int, int] = (3, 3, 3),
- max_iterations: int = 10) -> np.ndarray:
+def replace_zero_with_local_median(
+ data: np.ndarray,
+ kernel_size: Tuple[int, int, int] = (3, 3, 3),
+ max_iterations: int = 10,
+) -> np.ndarray:
"""
Replace 0 values in a 3D array using local median filtering.
This function ONLY processes small neighborhoods around 0 pixels,
avoiding expensive computation on the entire dataset.
-
+
Parameters:
-----------
data : np.ndarray
@@ -987,7 +1134,7 @@ def replace_zero_with_local_median(data: np.ndarray,
max_iterations : int
Maximum number of iterations to replace 0 values
Default is 10
-
+
Returns:
--------
np.ndarray
@@ -1006,7 +1153,7 @@ def replace_zero_with_local_median(data: np.ndarray,
# Calculate padding for kernel
pad_h, pad_w, pad_d = [k // 2 for k in kernel_size]
-
+
for iteration in range(max_iterations):
# Find current 0 locations
zero_coords = np.argwhere(result == 0)
@@ -1016,13 +1163,15 @@ def replace_zero_with_local_median(data: np.ndarray,
logging.info(f"All 0 values replaced after {iteration} iterations")
break
- logging.info(f"Iteration {iteration + 1}: {current_zero_count} 0 values remaining")
+ logging.info(
+ f"Iteration {iteration + 1}: {current_zero_count} 0 values remaining"
+ )
# Process each 0 pixel individually
replaced_count = 0
for coord in zero_coords:
y, x, z = coord
-
+
# Define the local neighborhood bounds
y_min = max(0, y - pad_h)
y_max = min(result.shape[0], y + pad_h + 1)
@@ -1030,13 +1179,13 @@ def replace_zero_with_local_median(data: np.ndarray,
x_max = min(result.shape[1], x + pad_w + 1)
z_min = max(0, z - pad_d)
z_max = min(result.shape[2], z + pad_d + 1)
-
+
# Extract the local neighborhood
neighborhood = result[y_min:y_max, x_min:x_max, z_min:z_max]
-
+
# Get non-NaN values in the neighborhood
valid_values = neighborhood[~np.isnan(neighborhood)]
-
+
# If we have valid values, compute median and replace
if len(valid_values) > 0:
median_value = np.median(valid_values)
@@ -1048,30 +1197,34 @@ def replace_zero_with_local_median(data: np.ndarray,
# If no progress was made, break
if replaced_count == 0:
remaining_zero_count = np.sum(result == 0)
- logging.info(f"No progress made. {remaining_zero_count} zero values could not be replaced")
+ logging.info(
+ f"No progress made. {remaining_zero_count} zero values could not be replaced"
+ )
logging.info("(These may be in regions with no valid neighbors)")
break
final_zero_count = np.sum(result == 0)
logging.info(f"Final zero count: {final_zero_count}")
- logging.info(f"Successfully replaced {initial_zero_count - final_zero_count} zero values")
+ logging.info(
+ f"Successfully replaced {initial_zero_count - final_zero_count} zero values"
+ )
return result
def combine_dc_images(dc_master_dict: dict) -> np.ndarray:
"""combine all dc images
-
+
Parameters:
-----------
dc_master_dict : dict
master dict of dc run numbers
-
+
Returns:
--------
np.ndarray
combined dc data
-
+
"""
logging.info("Combining all dark current images")
full_dc_data = []
@@ -1082,7 +1235,9 @@ def combine_dc_images(dc_master_dict: dict) -> np.ndarray:
for _dc_run_number in dc_master_dict.keys():
logging.info(f"Combining dc# {_dc_run_number} ...")
- dc_data = np.array(dc_master_dict[_dc_run_number][MasterDictKeys.data], dtype=np.float32)
+ dc_data = np.array(
+ dc_master_dict[_dc_run_number][MasterDictKeys.data], dtype=np.float32
+ )
full_dc_data.append(dc_data)
logging.info(f"{np.shape(full_dc_data) = }")
@@ -1101,11 +1256,11 @@ def combine_ob_images(
use_shutter_counts: bool = False,
replace_ob_zeros_by_nan: bool = False,
replace_ob_zeros_by_local_median: bool = False,
- kernel_size_for_local_median: Tuple[int, int, int] = (3, 3, 3),
+ kernel_size_for_local_median: Tuple[int, int, int] = (3, 3, 3),
max_iterations: int = 10,
) -> np.ndarray:
"""combine all ob images and correct by proton charge and shutter counts
-
+
Parameters:
-----------
ob_master_dict : dict
@@ -1124,12 +1279,12 @@ def combine_ob_images(
kernel size for local median filtering
max_iterations : int
maximum number of iterations for local median filtering
-
+
Returns:
--------
np.ndarray
combined ob data
-
+
"""
logging.info("Combining all open beam images")
@@ -1137,15 +1292,21 @@ def combine_ob_images(
logging.info(f"\tcorrecting by monitor counts: {use_monitor_counts}")
logging.info(f"\tshutter counts: {use_shutter_counts}")
logging.info(f"\treplace ob zeros by nan: {replace_ob_zeros_by_nan}")
- logging.info(f"\treplace ob zeros by local median: {replace_ob_zeros_by_local_median}")
- logging.info(f"\tkernel size for local median: y:{kernel_size_for_local_median[0]}, "
- f"x:{kernel_size_for_local_median[1]}, "
- f"tof:{kernel_size_for_local_median[2]}")
+ logging.info(
+ f"\treplace ob zeros by local median: {replace_ob_zeros_by_local_median}"
+ )
+ logging.info(
+ f"\tkernel size for local median: y:{kernel_size_for_local_median[0]}, "
+ f"x:{kernel_size_for_local_median[1]}, "
+ f"tof:{kernel_size_for_local_median[2]}"
+ )
full_ob_data_corrected = []
for _ob_run_number in ob_master_dict.keys():
logging.info(f"Combining ob# {_ob_run_number} ...")
- ob_data = np.array(ob_master_dict[_ob_run_number][MasterDictKeys.data], dtype=np.float32)
+ ob_data = np.array(
+ ob_master_dict[_ob_run_number][MasterDictKeys.data], dtype=np.float32
+ )
# get statistics of ob data
data_shape = ob_data.shape
@@ -1154,7 +1315,9 @@ def combine_ob_images(
number_of_zeros = np.sum(ob_data == 0)
logging.info(f"\t ob data shape: {data_shape}")
logging.info(f"\t Number of zeros in ob data: {number_of_zeros}")
- logging.info(f"\t Percentage of zeros in ob data: {number_of_zeros / (data_shape[0] * nbr_pixels) * 100:.2f}%")
+ logging.info(
+ f"\t Percentage of zeros in ob data: {number_of_zeros / (data_shape[0] * nbr_pixels) * 100:.2f}%"
+ )
logging.info(f"\t Mean of ob data: {np.mean(ob_data)}")
logging.info(f"\t maximum of ob data: {np.max(ob_data)}")
logging.info(f"\t minimum of ob data: {np.min(ob_data)}")
@@ -1172,7 +1335,9 @@ def combine_ob_images(
if use_monitor_counts:
logging.info("\t -> Normalized by monitor counts")
- monitor_counts = ob_master_dict[_ob_run_number][MasterDictKeys.monitor_counts]
+ monitor_counts = ob_master_dict[_ob_run_number][
+ MasterDictKeys.monitor_counts
+ ]
logging.info(f"\t\t monitor counts: {monitor_counts}")
logging.info(f"\t\t{type(monitor_counts) = }")
ob_data = ob_data / monitor_counts
@@ -1182,14 +1347,20 @@ def combine_ob_images(
logging.info("\t -> Normalized by shutter counts")
list_shutter_values_for_each_image = produce_list_shutter_for_each_image(
- list_time_spectra=ob_master_dict[_ob_run_number][MasterDictKeys.list_spectra],
- list_shutter_counts=ob_master_dict[_ob_run_number][MasterDictKeys.shutter_counts],
+ list_time_spectra=ob_master_dict[_ob_run_number][
+ MasterDictKeys.list_spectra
+ ],
+ list_shutter_counts=ob_master_dict[_ob_run_number][
+ MasterDictKeys.shutter_counts
+ ],
)
logging.info(f"{list_shutter_values_for_each_image.shape = }")
temp_ob_data = np.empty_like(ob_data, dtype=np.float32)
for _index in range(len(list_shutter_values_for_each_image)):
- temp_ob_data[_index] = ob_data[_index] / list_shutter_values_for_each_image[_index]
+ temp_ob_data[_index] = (
+ ob_data[_index] / list_shutter_values_for_each_image[_index]
+ )
logging.info(f"{temp_ob_data.shape = }")
ob_data = temp_ob_data.copy()
@@ -1197,9 +1368,11 @@ def combine_ob_images(
# logging.info(f"{ob_data_combined.shape = }")
if replace_ob_zeros_by_local_median:
- ob_data = replace_zero_with_local_median(ob_data,
- kernel_size=kernel_size_for_local_median,
- max_iterations=max_iterations)
+ ob_data = replace_zero_with_local_median(
+ ob_data,
+ kernel_size=kernel_size_for_local_median,
+ max_iterations=max_iterations,
+ )
full_ob_data_corrected.append(ob_data)
logging.info(f"{np.shape(full_ob_data_corrected) = }")
@@ -1224,9 +1397,15 @@ def combine_ob_images(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
- parser.add_argument("--sample", type=str, nargs=1, help="Full path to sample run number")
- parser.add_argument("--ob", type=str, nargs=1, help="Full path to the ob run number")
- parser.add_argument("--output", type=str, nargs=1, help="Path to the output folder", default="./")
+ parser.add_argument(
+ "--sample", type=str, nargs=1, help="Full path to sample run number"
+ )
+ parser.add_argument(
+ "--ob", type=str, nargs=1, help="Full path to the ob run number"
+ )
+ parser.add_argument(
+ "--output", type=str, nargs=1, help="Path to the output folder", default="./"
+ )
args = parser.parse_args()
logging.info(f"{args = }")
@@ -1275,7 +1454,9 @@ def combine_ob_images(
# normalization(sample_folder=sample_folder, ob_folder=ob_folder, output_folder=output_folder)
- print(f"Normalization is done! Check the log file {log_file_name} for more details!")
+ print(
+ f"Normalization is done! Check the log file {log_file_name} for more details!"
+ )
print(f"Exported data to {output_folder}")
# sample = /SNS/VENUS/IPTS-34808/shared/autoreduce/mcp/November17_Sample6_UA_H_Batteries_1_5_Angs_min_30Hz_5C
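Besides the line re-wrapping, the substantive change in this file is the switch to `np.divide(..., out=..., where=...)` for the normalization, with optional dark-current subtraction, so zero-valued open-beam pixels no longer propagate inf/NaN. A minimal sketch of that pattern on toy arrays (array names and values here are illustrative only, not the module's data):

    import numpy as np

    # Toy stand-ins for the real (tof, y, x) stacks; one OB pixel is deliberately zero.
    sample = np.array([[[4.0, 9.0], [2.0, 6.0]]], dtype=np.float32)
    ob = np.array([[[2.0, 3.0], [0.0, 3.0]]], dtype=np.float32)
    dc = np.array([[[1.0, 1.0], [0.0, 1.0]]], dtype=np.float32)  # pretend dark current

    denominator = ob - dc
    normalized = np.divide(
        sample - dc,
        denominator,
        out=np.zeros_like(sample),   # pixels skipped by `where` keep this value
        where=denominator != 0,      # avoids divide-by-zero warnings and inf/NaN
    )
    normalized[ob == 0] = 0          # mirror the diff's final guard on zero-OB pixels

    print(normalized)                # the zero-OB pixel stays 0 instead of becoming inf

Pixels excluded by `where` are left at the `out` value of 0, which is why the explicit `_normalized_data[ob_data_combined == 0] = 0` line in the hunk is only a belt-and-braces guard.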
diff --git a/notebooks/__code/resonance_fitting/resonance_fitting.py b/notebooks/__code/resonance_fitting/resonance_fitting.py
index 65fdc9de..28a7f38b 100644
--- a/notebooks/__code/resonance_fitting/resonance_fitting.py
+++ b/notebooks/__code/resonance_fitting/resonance_fitting.py
@@ -1,7 +1,5 @@
-import glob
from dotenv import load_dotenv
import logging as notebook_logging
-from logging.handlers import RotatingFileHandler
import os
from loguru import logger
from pathlib import Path
@@ -11,16 +9,17 @@
import ipywidgets as widgets
import matplotlib.pyplot as plt
from IPython.display import HTML, display
-from ipywidgets import interactive
-from PIL import Image
import periodictable
import ipysheet
-from ipysheet import sheet, cell, row, column, from_dataframe, to_array, calculation
+from ipysheet import from_dataframe, to_array
# from pleiades.processing.normalization import normalization as normalization_with_pleaides
# from pleiades.processing import Roi as PleiadesRoi
# from pleiades.processing import Facility
-from pleiades.sammy.io.data_manager import convert_csv_to_sammy_twenty, validate_sammy_twenty_format
+from pleiades.sammy.io.data_manager import (
+ convert_csv_to_sammy_twenty,
+ validate_sammy_twenty_format,
+)
from pleiades.sammy.io.json_manager import JsonManager
from pleiades.sammy.io.inp_manager import InpManager
from pleiades.sammy.backends.local import LocalSammyRunner
@@ -29,26 +28,18 @@
from pleiades.sammy.results.manager import ResultsManager
from __code.resonance_fitting import VENUS_RES_FUNC, SAMMY_EXE_PATH
-from __code._utilities.list import extract_list_of_runs_from_string
-from __code._utilities.nexus import extract_file_path_from_nexus
from __code._utilities.logger import display_dictionary_in_logging
# from __code.ipywe.myfileselector import MyFileSelectorPanel
-from __code.resonance_fitting.config import DEBUG_DATA, timepix1_config, timepix3_config
+from __code.resonance_fitting.config import DEBUG_DATA
from __code.resonance_fitting.get import Get
from __code.normalization_tof.normalization_tof import NormalizationTof
from __code.ipywe.fileselector import FileSelectorPanel as FileSelectorPanel
-from __code.normalization_tof import DetectorType, autoreduce_dir, distance_source_detector_m, raw_dir
-from __code.normalization_tof.normalization_for_timepix1_timepix3 import (
- load_data_using_multithreading,
- # normalization,
- normalization_with_list_of_full_path,
- retrieve_list_of_tif,
-)
FONT_SIZE = 14
+
class FilesPaths:
logging = None
transmission = None
@@ -56,6 +47,7 @@ class FilesPaths:
json_path = None
sammy_files_multi_mode = None
+
class FolderPaths:
working = None
stagging = None
@@ -69,18 +61,17 @@ class FolderPaths:
class ResonanceFitting(NormalizationTof):
-
df_to_use = None
- horizontal_box = None # total abundance display box
+ horizontal_box = None # total abundance display box
def __init__(self, working_dir=None, debug=False):
self.folder_paths = FolderPaths()
self.files_paths = FilesPaths()
-
+
self.initialize_logging()
load_dotenv(".envrc")
-
+
if debug:
self.folder_paths.working = Path(DEBUG_DATA.working_dir)
self.folder_paths.output = Path(DEBUG_DATA.output_folder)
@@ -105,7 +96,9 @@ def __init__(self, working_dir=None, debug=False):
# self.autoreduce_dir = autoreduce_dir[_beamline][0] + str(ipts) + autoreduce_dir[_beamline][1]
# self.shared_dir = str(Path(shared_dir[self.instrument][0]) / str(ipts) / shared_dir[self.instrument][1])
- self.folder_paths.shared = Path("/") / _facility / self.instrument / str(ipts) / "shared"
+ self.folder_paths.shared = (
+ Path("/") / _facility / self.instrument / str(ipts) / "shared"
+ )
notebook_logging.info(f"Instrument: {self.instrument}")
notebook_logging.info(f"Working dir: {self.folder_paths.working}")
@@ -115,16 +108,17 @@ def __init__(self, working_dir=None, debug=False):
notebook_logging.info(f"Shared dir: {self.folder_paths.shared}")
def initialize_logging(self):
-
- logger.remove() # Remove default logger
+ logger.remove() # Remove default logger
LOG_PATH = "/SNS/VENUS/shared/log/"
base_file_name = Path(__file__).name
file_name_without_extension = Path(base_file_name).stem
user_name = os.getlogin() # add user name to the log file name
- log_file_name = LOG_PATH / Path(f"{user_name}_{str(file_name_without_extension)}.log")
+ log_file_name = LOG_PATH / Path(
+ f"{user_name}_{str(file_name_without_extension)}.log"
+ )
self.files_paths.logging = log_file_name
-
+
# rotating_handler = RotatingFileHandler(log_file_name,
# maxBytes=50*1024*1024, # 50 MB
# backupCount=5)
@@ -140,8 +134,14 @@ def initialize_logging(self):
format="[%(levelname)s] - %(asctime)s - %(message)s",
level=notebook_logging.INFO,
)
- notebook_logging.info(f"*** Starting a new script {file_name_without_extension} ***")
- display(HTML(f"Log file: {log_file_name}"))
+ notebook_logging.info(
+ f"*** Starting a new script {file_name_without_extension} ***"
+ )
+ display(
+ HTML(
+ f"Log file: {log_file_name}"
+ )
+ )
def select_normalized_text_file(self):
self.file_selector = FileSelectorPanel(
@@ -169,7 +169,7 @@ def select_isotope_and_abundance(self):
list_elements = periodictable.elements
dict_elements = {}
for _el in list_elements:
- dict_elements[_el.name.capitalize()] = {'symbol': _el.symbol}
+ dict_elements[_el.name.capitalize()] = {"symbol": _el.symbol}
list_elements_names = list(dict_elements.keys())
list_elements_names.sort()
self.dict_elements = dict_elements
@@ -177,13 +177,17 @@ def select_isotope_and_abundance(self):
if self.debug:
default_symbol_selected = DEBUG_DATA.isotope_element
for _el_name in dict_elements.keys():
- if dict_elements[_el_name]['symbol'] == default_symbol_selected:
+ if dict_elements[_el_name]["symbol"] == default_symbol_selected:
default_element_selected = _el_name
break
else:
default_element_selected = "Hydrogen"
- display(HTML(f"Select element/isotopes to use:"))
+ display(
+ HTML(
+ f"Select element/isotopes to use:"
+ )
+ )
self.list_elements_widget = widgets.Dropdown(
options=list_elements_names,
value=default_element_selected,
@@ -191,12 +195,12 @@ def select_isotope_and_abundance(self):
disabled=False,
)
display(self.list_elements_widget)
- self.list_elements_widget.observe(self._on_element_change, names='value')
-
+ self.list_elements_widget.observe(self._on_element_change, names="value")
+
self._display_tables_and_buttons()
# empty stylesheet table for now
- _df = pd.DataFrame({'Isotope': [None], 'Abundance (%)': [0]})
+ _df = pd.DataFrame({"Isotope": [None], "Abundance (%)": [0]})
self.isotope_to_use_sheet = from_dataframe(_df)
self.df_to_use = _df
display(self.isotope_to_use_sheet)
@@ -210,8 +214,12 @@ def perform_fitting(self):
self._sammy_files_multi_mode()
self._local_sammy_config()
self._multi_isotope_sammy_execution()
-
- display(HTML(f"SAMMY input files created in: {self.folder_paths.sammy_working}!") )
+
+ display(
+ HTML(
+ f"SAMMY input files created in: {self.folder_paths.sammy_working}!"
+ )
+ )
def display_results(self):
self._results_analysis()
@@ -226,7 +234,11 @@ def _output_folder_selected(self, folder_path):
def _transmitted_text_file_selected(self, file_path):
file_path = Path(file_path)
notebook_logging.info(f"Transmitted text file selected: {file_path}")
- display(HTML(f"Transmission file: {file_path.name} ... selected!"))
+ display(
+ HTML(
+ f"Transmission file: {file_path.name} ... selected!"
+ )
+ )
self.files_paths.transmission = file_path
@@ -234,36 +246,43 @@ def _transmitted_text_file_selected(self, file_path):
def _display_transmitted_data(self):
notebook_logging.info("Displaying transmitted data ...")
- df = pd.read_csv(self.files_paths.transmission,
- delim_whitespace=True,
- names=['Energy (eV)', 'Transmission', 'Uncertainty'],
- index_col=False,
- skiprows=1)
- display(HTML(f"Transmitted data preview:"))
+ df = pd.read_csv(
+ self.files_paths.transmission,
+ delim_whitespace=True,
+ names=["Energy (eV)", "Transmission", "Uncertainty"],
+ index_col=False,
+ skiprows=1,
+ )
+ display(
+ HTML(
+ f"Transmitted data preview:"
+ )
+ )
display(df.head(10))
-
+
fig, ax = plt.subplots(figsize=(10, 6))
- ax.errorbar(df['Energy (eV)'],
- df['Transmission'],
- yerr=df['Uncertainty'],
- fmt='o',
- markersize=3,
- label=str(self.files_paths.transmission.name),
- color='blue',
- ecolor='lightgray',
- elinewidth=1,
- capsize=2)
- ax.set_xlabel('Energy (eV)')
- ax.set_xscale('log')
- ax.set_yscale('log')
+ ax.errorbar(
+ df["Energy (eV)"],
+ df["Transmission"],
+ yerr=df["Uncertainty"],
+ fmt="o",
+ markersize=3,
+ label=str(self.files_paths.transmission.name),
+ color="blue",
+ ecolor="lightgray",
+ elinewidth=1,
+ capsize=2,
+ )
+ ax.set_xlabel("Energy (eV)")
+ ax.set_xscale("log")
+ ax.set_yscale("log")
ax.grid(True, which="both", ls="--", lw=0.5)
- ax.set_ylabel('Transmission')
- ax.set_title(f'Transmitted Data with Uncertainty')
+ ax.set_ylabel("Transmission")
+ ax.set_title("Transmitted Data with Uncertainty")
ax.legend()
plt.show()
def _stagging_folders_setup(self, file_path):
-
# set up various stagging folder for SAMMY
self.folder_paths.output = Path(file_path)
self.folder_paths.stagging = self.folder_paths.output / "hf_analysis"
@@ -279,32 +298,64 @@ def _stagging_folders_setup(self, file_path):
Path(self.folder_paths.sammy_working).mkdir(parents=True, exist_ok=True)
Path(self.folder_paths.sammy_output).mkdir(parents=True, exist_ok=True)
-
- notebook_logging.info("\nStagging folders setup:")
- notebook_logging.info(f"output folder: {self.folder_paths.output} ... {self.folder_paths.output.is_dir()} ")
- notebook_logging.info(f"Stagging folder: {self.folder_paths.stagging} ... {self.folder_paths.stagging.is_dir()}")
- notebook_logging.info(f"Spectra folder: {self.folder_paths.spectra} ... {self.folder_paths.spectra.is_dir()}")
- notebook_logging.info(f"Twenty folder: {self.folder_paths.twenty} ... {self.folder_paths.twenty.is_dir()}")
- notebook_logging.info(f"SAMMY working folder: {self.folder_paths.sammy_working} ... {self.folder_paths.sammy_working.is_dir()}")
- notebook_logging.info(f"SAMMY output folder: {self.folder_paths.sammy_output} ... {self.folder_paths.sammy_output.is_dir()}")
- display(HTML(f"Stagging folders created!"))
+ notebook_logging.info("\nStagging folders setup:")
+ notebook_logging.info(
+ f"output folder: {self.folder_paths.output} ... {self.folder_paths.output.is_dir()} "
+ )
+ notebook_logging.info(
+ f"Stagging folder: {self.folder_paths.stagging} ... {self.folder_paths.stagging.is_dir()}"
+ )
+ notebook_logging.info(
+ f"Spectra folder: {self.folder_paths.spectra} ... {self.folder_paths.spectra.is_dir()}"
+ )
+ notebook_logging.info(
+ f"Twenty folder: {self.folder_paths.twenty} ... {self.folder_paths.twenty.is_dir()}"
+ )
+ notebook_logging.info(
+ f"SAMMY working folder: {self.folder_paths.sammy_working} ... {self.folder_paths.sammy_working.is_dir()}"
+ )
+ notebook_logging.info(
+ f"SAMMY output folder: {self.folder_paths.sammy_output} ... {self.folder_paths.sammy_output.is_dir()}"
+ )
+ display(
+ HTML(
+ f"Stagging folders created!"
+ )
+ )
notebook_logging.info("")
def _converting_transmission_to_twenty_format(self):
- notebook_logging.info("Converting transmission data .txt to .twenty format for SAMMY ...")
- twenty_file = self.folder_paths.twenty / self.files_paths.transmission.name.replace(".txt", ".twenty")
+ notebook_logging.info(
+ "Converting transmission data .txt to .twenty format for SAMMY ..."
+ )
+ twenty_file = (
+ self.folder_paths.twenty
+ / self.files_paths.transmission.name.replace(".txt", ".twenty")
+ )
convert_csv_to_sammy_twenty(self.files_paths.transmission, twenty_file)
if validate_sammy_twenty_format(twenty_file):
- notebook_logging.info(f"Conversion successful! Twenty file created at: {twenty_file}")
- display(HTML(f"Twenty file created at: {twenty_file}"))
+ notebook_logging.info(
+ f"Conversion successful! Twenty file created at: {twenty_file}"
+ )
+ display(
+ HTML(
+ f"Twenty file created at: {twenty_file}"
+ )
+ )
else:
- notebook_logging.error("Conversion failed! The generated .twenty file is not valid.")
- display(HTML(f"Conversion failed! The generated .twenty file is not valid."))
+ notebook_logging.error(
+ "Conversion failed! The generated .twenty file is not valid."
+ )
+ display(
+ HTML(
+ f"Conversion failed! The generated .twenty file is not valid."
+ )
+ )
def _on_element_change(self, change):
notebook_logging.info(f"Element selected: {change['new']}")
self.isotope_sheet.close()
- element_symbol = self.dict_elements[change['new']]['symbol']
+ element_symbol = self.dict_elements[change["new"]]["symbol"]
self._create_and_display_isotope_table(element_symbol=element_symbol)
self.isotope_to_use_sheet.close()
@@ -321,29 +372,29 @@ def _on_element_change(self, change):
def _get_dict_isotopes(self, element_symbol):
"""
Get a dictionary of isotopes and their abundances for a given element.
-
+
dict = {[isotope_name]: {'abundance': None, 'mass': None}}
-
+
return dict
"""
# element = periodictable.elements.symbol(element_name)
_dict = {}
for _el in getattr(periodictable, element_symbol):
- _dict[str(_el)] = {'abundance': _el.abundance,
- 'mass': _el.mass}
+ _dict[str(_el)] = {"abundance": _el.abundance, "mass": _el.mass}
notebook_logging.info(f"in get_dict_isotopes: {element_symbol = }, {_dict = }")
return _dict
def _create_and_display_isotope_table(self, element_symbol):
-
dict_isotopes = self._get_dict_isotopes(element_symbol)
list_isotopes_for_this_element = dict_isotopes.keys()
notebook_logging.info(f"{list_isotopes_for_this_element}")
# list_mass_isotopes = [dict_isotopes[iso]['mass'] for iso in list_isotopes_for_this_element]
- list_abundance_isotopes = [dict_isotopes[iso]['abundance'] for iso in list_isotopes_for_this_element]
-
+ list_abundance_isotopes = [
+ dict_isotopes[iso]["abundance"] for iso in list_isotopes_for_this_element
+ ]
+
# create a boolean array of the same length as list_isotopes_for_this_element
list_use_it = np.array([False for _ in list_isotopes_for_this_element])
@@ -351,10 +402,12 @@ def _create_and_display_isotope_table(self, element_symbol):
if value > 0:
list_use_it[_index] = True
- temp_dict = {'Isotope': np.array(list_isotopes_for_this_element),
- 'Abundance (%)': np.array(list_abundance_isotopes),
- 'use it': list_use_it}
-
+ temp_dict = {
+ "Isotope": np.array(list_isotopes_for_this_element),
+ "Abundance (%)": np.array(list_abundance_isotopes),
+ "use it": list_use_it,
+ }
+
df = pd.DataFrame(temp_dict)
self.isotope_sheet = from_dataframe(df)
@@ -370,27 +423,39 @@ def on_validate_isotope_selection(self, b):
array = to_array(self.isotope_sheet)
notebook_logging.info(f"Isotope selection as array:\n{array}")
-
+
for _index, row in enumerate(array):
- notebook_logging.info(f"at {_index =}, {row[0] = }, {row[1] = }, {row[2] = }")
+ notebook_logging.info(
+ f"at {_index =}, {row[0] = }, {row[1] = }, {row[2] = }"
+ )
# retrieve the content of the isotope_to_use_sheet
df_to_use = ipysheet.to_dataframe(self.isotope_to_use_sheet)
# remove any row with 'Isotope' = None
- df_to_use = df_to_use[df_to_use['Isotope'].notna()]
+ df_to_use = df_to_use[df_to_use["Isotope"].notna()]
notebook_logging.info(f"Current isotopes to use:\n{df_to_use}")
# add it the new isotopes selected with 'use it' = True
for _index, row in enumerate(array):
notebook_logging.info(f"Processing row {_index}: {row}, {row[2] =}")
- if str(row[2]) == 'True': # 'use it' is True
+ if str(row[2]) == "True": # 'use it' is True
isotope_name = row[0]
abundance = f"{float(str(row[1])):.4f}"
- notebook_logging.info(f"Adding isotope: {isotope_name} with abundance: {abundance}")
- df_to_use = pd.concat([df_to_use, pd.DataFrame({'Isotope': [isotope_name], 'Abundance (%)': [abundance]})], ignore_index=True)
-
- # remove duplicates
- self.df_to_use = df_to_use.drop_duplicates(subset='Isotope')
+ notebook_logging.info(
+ f"Adding isotope: {isotope_name} with abundance: {abundance}"
+ )
+ df_to_use = pd.concat(
+ [
+ df_to_use,
+ pd.DataFrame(
+ {"Isotope": [isotope_name], "Abundance (%)": [abundance]}
+ ),
+ ],
+ ignore_index=True,
+ )
+
+ # remove duplicates
+ self.df_to_use = df_to_use.drop_duplicates(subset="Isotope")
self.isotope_to_use_sheet.close()
self.validate_isotope_button.close()
@@ -400,29 +465,30 @@ def on_validate_isotope_selection(self, b):
# listen to all events in this table
for cell in self.isotope_to_use_sheet.cells:
- cell.observe(self._on_isotope_to_use_table_change, names='value')
+ cell.observe(self._on_isotope_to_use_table_change, names="value")
self._display_tables_and_buttons()
display(self.isotope_to_use_sheet)
-
+
self._update_total_abundance_of_isotopes_to_use()
# disable button (to make sure only 1 element is added at a time)
- # self.validate_isotope_button.disabled = True
+ # self.validate_isotope_button.disabled = True
def _on_isotope_to_use_table_change(self, change):
self._update_total_abundance_of_isotopes_to_use()
def _update_total_abundance_of_isotopes_to_use(self):
-
if self.horizontal_box:
self.horizontal_box.close()
df_to_use = ipysheet.to_dataframe(self.isotope_to_use_sheet)
- list_abundances = df_to_use['Abundance (%)'].tolist()
+ list_abundances = df_to_use["Abundance (%)"].tolist()
list_abundances_float = [float(_value) for _value in list_abundances]
total_abundance = sum(list_abundances_float)
- notebook_logging.info(f"Total abundance of isotopes to use: {total_abundance} %")
+ notebook_logging.info(
+ f"Total abundance of isotopes to use: {total_abundance} %"
+ )
if total_abundance > 100.0:
color = "red"
@@ -436,16 +502,15 @@ def _update_total_abundance_of_isotopes_to_use(self):
display(self.horizontal_box)
def _display_tables_and_buttons(self):
- """display the isotope table. the button to validate the selection as well as the table of isotopes to use
- """
-
+ """display the isotope table. the button to validate the selection as well as the table of isotopes to use"""
+
dict_elements = self.dict_elements
full_name_of_element = self.list_elements_widget.value
- element_symbol = dict_elements[full_name_of_element]['symbol']
+ element_symbol = dict_elements[full_name_of_element]["symbol"]
self._create_and_display_isotope_table(element_symbol=element_symbol)
-
+
self.validate_isotope_button = widgets.Button(
description="Add to list of elements/isotopes to consider",
layout=widgets.Layout(width="100%"),
@@ -464,11 +529,11 @@ def _reformat_list_isotopes(self, list_isotopes):
"""
list_reformatted = []
for _iso in list_isotopes:
- if '-' not in _iso:
+ if "-" not in _iso:
list_reformatted.append(_iso)
continue
-
- parts = _iso.split('-')
+
+ parts = _iso.split("-")
if len(parts) == 2:
reformatted = f"{parts[1]}-{parts[0]}"
list_reformatted.append(reformatted)
@@ -486,10 +551,10 @@ def _create_json_manager(self):
# retrieve isotopes and abundances to use
df_to_use = ipysheet.to_dataframe(self.isotope_to_use_sheet)
- list_isotopes = df_to_use['Isotope'].tolist()
+ list_isotopes = df_to_use["Isotope"].tolist()
list_isotopes_reformatted = self._reformat_list_isotopes(list_isotopes)
- list_abundances = df_to_use['Abundance (%)'].tolist()
- list_abundances_float = [float(_value)*0.01 for _value in list_abundances]
+ list_abundances = df_to_use["Abundance (%)"].tolist()
+ list_abundances_float = [float(_value) * 0.01 for _value in list_abundances]
notebook_logging.info(f"\t{list_isotopes = }")
notebook_logging.info(f"\t{list_isotopes_reformatted =}")
@@ -501,23 +566,32 @@ def _create_json_manager(self):
isotopes=list_isotopes_reformatted,
abundances=list_abundances_float,
working_dir=self.folder_paths.stagging,
- custom_global_settings={"forceRMoore": "yes",
- "purgeSpinGroups": "yes",
- "fudge": "0.7"}
+ custom_global_settings={
+ "forceRMoore": "yes",
+ "purgeSpinGroups": "yes",
+ "fudge": "0.7",
+ },
)
self.files_paths.json_path = json_path
notebook_logging.info(f"Configuration file created at: {json_path}")
- endf_files = [f for f in os.listdir(self.folder_paths.stagging) if f.endswith('.par')]
- notebook_logging.info(f"ENDf files found in working directory: {len(endf_files)} files")
+ endf_files = [
+ f for f in os.listdir(self.folder_paths.stagging) if f.endswith(".par")
+ ]
+ notebook_logging.info(
+ f"ENDf files found in working directory: {len(endf_files)} files"
+ )
for f in sorted(endf_files):
notebook_logging.info(f"\t- {f}")
- display(HTML(f"Configuration file created at: {json_path}"))
+ display(
+ HTML(
+ f"Configuration file created at: {json_path}"
+ )
+ )
notebook_logging.info("")
def _setup_element_manager(self):
-
# from all the elements selected, let's find out the one with the most abundant isotope
table_array = to_array(self.isotope_to_use_sheet)
max_abundance = 0.0
@@ -526,92 +600,128 @@ def _setup_element_manager(self):
if _float_abundance > max_abundance:
max_abundance = _float_abundance
most_abundant_isotope = _element
-
- notebook_logging.info(f"Most abundant isotope selected: {most_abundant_isotope}")
- mass_number_str, element_symbol = most_abundant_isotope.split('-')
+ notebook_logging.info(
+ f"Most abundant isotope selected: {most_abundant_isotope}"
+ )
+
+ mass_number_str, element_symbol = most_abundant_isotope.split("-")
my_periodic_table_element = getattr(periodictable, element_symbol)
self.most_abundant_element_symbol = element_symbol
- display(HTML(f"Most Abundant Element Selected: {element_symbol}"))
+ display(
+ HTML(
+ f"Most Abundant Element Selected: {element_symbol}"
+ )
+ )
label_width = "160px"
text_width = "80px"
# mass number of the element selected
- _label_left = widgets.HTML("Mass number:
",
- layout=widgets.Layout(width=label_width))
- self.mass_number_widget = widgets.IntText(value=int(mass_number_str),
- disabled=False,
- layout=widgets.Layout(width=text_width))
- _hori_layout_1 = widgets.HBox([_label_left,
- self.mass_number_widget])
+ _label_left = widgets.HTML(
+ "Mass number:
",
+ layout=widgets.Layout(width=label_width),
+ )
+ self.mass_number_widget = widgets.IntText(
+ value=int(mass_number_str),
+ disabled=False,
+ layout=widgets.Layout(width=text_width),
+ )
+ _hori_layout_1 = widgets.HBox([_label_left, self.mass_number_widget])
display(_hori_layout_1)
# density (g/cm^3)
-
- _label_left = widgets.HTML("Density (g/cm3):
",
- layout=widgets.Layout(width=label_width))
- self.density_widget = widgets.FloatText(value=my_periodic_table_element.density,
- disabled=False,
- layout=widgets.Layout(width=text_width))
+
+ _label_left = widgets.HTML(
+ "Density (g/cm3):
",
+ layout=widgets.Layout(width=label_width),
+ )
+ self.density_widget = widgets.FloatText(
+ value=my_periodic_table_element.density,
+ disabled=False,
+ layout=widgets.Layout(width=text_width),
+ )
_hori_layout_2 = widgets.HBox([_label_left, self.density_widget])
display(_hori_layout_2)
# thickness (mm)
- _label_left = widgets.HTML("Thickness (mm):
",
- layout=widgets.Layout(width=label_width))
- self.thickness_widget = widgets.FloatText(value=0.05,
- disabled=False,
- layout=widgets.Layout(width=text_width))
+ _label_left = widgets.HTML(
+ "Thickness (mm):
",
+ layout=widgets.Layout(width=label_width),
+ )
+ self.thickness_widget = widgets.FloatText(
+ value=0.05, disabled=False, layout=widgets.Layout(width=text_width)
+ )
_hori_layout_3 = widgets.HBox([_label_left, self.thickness_widget])
display(_hori_layout_3)
# atomic mass amu
- _label_left = widgets.HTML("Atomic mass (amu):
",
- layout=widgets.Layout(width=label_width))
- self.atomic_mass_widget = widgets.FloatText(value=my_periodic_table_element.mass,
- disabled=False,
- layout=widgets.Layout(width=text_width))
+ _label_left = widgets.HTML(
+ "Atomic mass (amu):
",
+ layout=widgets.Layout(width=label_width),
+ )
+ self.atomic_mass_widget = widgets.FloatText(
+ value=my_periodic_table_element.mass,
+ disabled=False,
+ layout=widgets.Layout(width=text_width),
+ )
_hori_layout_4 = widgets.HBox([_label_left, self.atomic_mass_widget])
- display(_hori_layout_4)
+ display(_hori_layout_4)
# abundance (%)
- _label_left = widgets.HTML("Abundance (%):
",
- layout=widgets.Layout(width=label_width))
- self.abundance_widget = widgets.FloatSlider(value=100.0, min=0, max=100, step=0.1, disabled=False)
+ _label_left = widgets.HTML(
+ "Abundance (%):
",
+ layout=widgets.Layout(width=label_width),
+ )
+ self.abundance_widget = widgets.FloatSlider(
+ value=100.0, min=0, max=100, step=0.1, disabled=False
+ )
_hori_layout_5 = widgets.HBox([_label_left, self.abundance_widget])
display(_hori_layout_5)
# energy range (ev)
- _label_left = widgets.HTML("Energy range (eV):
",
- layout=widgets.Layout(width=label_width))
- self.energy_range_widget = widgets.FloatRangeSlider(value=[1.0, 200.0], min=0, max=2000, step=0.1, disabled=False)
+ _label_left = widgets.HTML(
+ "Energy range (eV):
",
+ layout=widgets.Layout(width=label_width),
+ )
+ self.energy_range_widget = widgets.FloatRangeSlider(
+ value=[1.0, 200.0], min=0, max=2000, step=0.1, disabled=False
+ )
_hori_layout_6 = widgets.HBox([_label_left, self.energy_range_widget])
display(_hori_layout_6)
# temperature (K)
- _label_left = widgets.HTML("Temperature (K):
",
- layout=widgets.Layout(width=label_width))
- self.temperature_widget = widgets.FloatSlider(value=293.6, min=0, max=1000, step=0.1, disabled=False)
+ _label_left = widgets.HTML(
+ "Temperature (K):
",
+ layout=widgets.Layout(width=label_width),
+ )
+ self.temperature_widget = widgets.FloatSlider(
+ value=293.6, min=0, max=1000, step=0.1, disabled=False
+ )
_hori_layout_7 = widgets.HBox([_label_left, self.temperature_widget])
display(_hori_layout_7)
# title
- _label_left = widgets.HTML("Title:
",
- layout=widgets.Layout(width=label_width))
+ _label_left = widgets.HTML(
+ "Title:
",
+ layout=widgets.Layout(width=label_width),
+ )
o_get = Get(parent=self)
- _element_name = o_get.full_name_of_element_from_abreviation(self.most_abundant_element_symbol)
+ _element_name = o_get.full_name_of_element_from_abreviation(
+ self.most_abundant_element_symbol
+ )
_ipts = self.ipts
_instrument = self.instrument
_title = f"{_element_name} multi-isotope transmission analysis - {_instrument} {_ipts}"
- self.title_widget = widgets.Text(value=_title,
- disabled=False,
- layout=widgets.Layout(width="100%"))
- _hori_layout_8 = widgets.HBox([_label_left, self.title_widget], layout=widgets.Layout(width="100%"))
+ self.title_widget = widgets.Text(
+ value=_title, disabled=False, layout=widgets.Layout(width="100%")
+ )
+ _hori_layout_8 = widgets.HBox(
+ [_label_left, self.title_widget], layout=widgets.Layout(width="100%")
+ )
display(_hori_layout_8)
def _create_multi_isotope_inp(self):
-
notebook_logging.info("Creating SAMMY input file for resonance fitting ...")
_element = self.most_abundant_element_symbol
mass_number = self.mass_number_widget.value
@@ -626,18 +736,19 @@ def _create_multi_isotope_inp(self):
title = self.title_widget.value
material_props = {
- 'element': _element,
- 'mass_number': mass_number,
- 'density_g_cm3': density,
- 'thickness_mm': thickness,
- 'atomic_mass_amu': atomic_mass,
- 'abundance': abundance,
- 'min_energy_eV': min_energy,
- 'max_energy_eV': max_energy,
- 'temperature_K': temperature}
-
- notebook_logging.info(f"calling InpManager.create_multi_isotope_inp ...")
- notebook_logging.info(f"material_props:")
+ "element": _element,
+ "mass_number": mass_number,
+ "density_g_cm3": density,
+ "thickness_mm": thickness,
+ "atomic_mass_amu": atomic_mass,
+ "abundance": abundance,
+ "min_energy_eV": min_energy,
+ "max_energy_eV": max_energy,
+ "temperature_K": temperature,
+ }
+
+ notebook_logging.info("calling InpManager.create_multi_isotope_inp ...")
+ notebook_logging.info("material_props:")
display_dictionary_in_logging(material_props)
inp_file = self.folder_paths.stagging / "hf_fitting.inp"
self.files_paths.inp_file = inp_file
@@ -653,13 +764,18 @@ def _create_multi_isotope_inp(self):
)
notebook_logging.info(f"SAMMY input file created at: {inp_file}")
- notebook_logging.info(f"")
+ notebook_logging.info("")
def _sammy_files_multi_mode(self):
- notebook_logging.info("Creating SAMMY files for multi-isotope resonance fitting ...")
+ notebook_logging.info(
+ "Creating SAMMY files for multi-isotope resonance fitting ..."
+ )
transmission_file_path = self.files_paths.transmission.name
notebook_logging.info(f"\t{transmission_file_path = }")
- data_file = self.folder_paths.twenty / f"{transmission_file_path.replace('.txt', '.twenty')}"
+ data_file = (
+ self.folder_paths.twenty
+ / f"{transmission_file_path.replace('.txt', '.twenty')}"
+ )
notebook_logging.info(f"\t{data_file = }")
notebook_logging.info(f"\tjson-config: {self.files_paths.json_path = }")
@@ -667,11 +783,13 @@ def _sammy_files_multi_mode(self):
input_file=self.files_paths.inp_file,
json_config_file=self.files_paths.json_path,
data_file=data_file,
- endf_directory=self.folder_paths.stagging
+ endf_directory=self.folder_paths.stagging,
)
self.files_paths.sammy_files_multi_mode = files
notebook_logging.info(f"\t{files = }")
- notebook_logging.info("SAMMY files for multi-isotope resonance fitting created.")
+ notebook_logging.info(
+ "SAMMY files for multi-isotope resonance fitting created."
+ )
notebook_logging.info("")
def _local_sammy_config(self):
@@ -683,9 +801,11 @@ def _local_sammy_config(self):
notebook_logging.info(f"\t{working_directory = }")
notebook_logging.info(f"\t{output_directory = }")
- config = LocalSammyConfig(sammy_executable=sammy_executable,
- working_dir=working_directory,
- output_dir=output_directory)
+ config = LocalSammyConfig(
+ sammy_executable=sammy_executable,
+ working_dir=working_directory,
+ output_dir=output_directory,
+ )
notebook_logging.info(f"\t{config = }")
self.runner = LocalSammyRunner(config=config)
@@ -693,22 +813,32 @@ def _local_sammy_config(self):
notebook_logging.info("")
def _multi_isotope_sammy_execution(self):
- notebook_logging.info("Starting multi-isotope SAMMY resonance fitting execution ...")
+ notebook_logging.info(
+ "Starting multi-isotope SAMMY resonance fitting execution ..."
+ )
notebook_logging.info(f"\t{self.files_paths.sammy_files_multi_mode = }")
self.runner.prepare_environment(self.files_paths.sammy_files_multi_mode)
result = self.runner.execute_sammy(self.files_paths.sammy_files_multi_mode)
notebook_logging.info(f"\tresult: {result =}")
-
+
notebook_logging.info("SAMMY resonance fitting execution completed.")
notebook_logging.info(f"Execution status: {result.success}")
notebook_logging.info(f"Runtime: {result.runtime_seconds} seconds")
-
+
if result.error_message:
notebook_logging.error(f"Error message: {result.error_message}")
- display(HTML(f"Error during SAMMY execution: {result.error_message}") )
+ display(
+ HTML(
+ f"Error during SAMMY execution: {result.error_message}"
+ )
+ )
else:
- display(HTML(f"SAMMY execution completed successfully!") )
-
+ display(
+ HTML(
+ f"SAMMY execution completed successfully!"
+ )
+ )
+
self.runner.collect_outputs(result=result)
self.runner.cleanup()
@@ -721,20 +851,33 @@ def _results_analysis(self):
lst_file_path = self.folder_paths.sammy_output / "SAMMY.LST"
self.results_manager = ResultsManager(
- lpt_file_path=lpt_file_path,
- lst_file_path=lst_file_path
+ lpt_file_path=lpt_file_path, lst_file_path=lst_file_path
)
data = self.results_manager.get_data()
# self.results_manager
- notebook_logging.info(f"\t energy range: {data.energy.min():.3e} eV to {data.energy.max():.3e} eV")
+ notebook_logging.info(
+ f"\t energy range: {data.energy.min():.3e} eV to {data.energy.max():.3e} eV"
+ )
notebook_logging.info(f"\t data points: {len(data.energy)}")
- display(HTML(f"Results summary:"))
- display(HTML(f" Energy range: {data.energy.min():.3e} eV to {data.energy.max():.3e} eV"))
- display(HTML(f" Data points: {len(data.energy)}"))
+ display(
+ HTML(
+ f"Results summary:"
+ )
+ )
+ display(
+ HTML(
+ f" Energy range: {data.energy.min():.3e} eV to {data.energy.max():.3e} eV"
+ )
+ )
+ display(
+ HTML(
+ f" Data points: {len(data.energy)}"
+ )
+ )
fig = self.results_manager.plot_transmission(
figsize=(12, 8),
@@ -753,52 +896,68 @@ def _results_analysis(self):
notebook_logging.info("")
def _multi_isotope_fitting_quality_metrics(self):
-
results_manager = self.results_manager
-
+
if results_manager.run_results.fit_results:
print(f"Fit iterations: {len(results_manager.run_results.fit_results)}")
-
+
for i, fit_result in enumerate(results_manager.run_results.fit_results):
print(f"\nIteration {i+1}:")
-
+
chi_sq = fit_result.get_chi_squared_results()
if chi_sq.chi_squared is not None:
print(f" Chi-squared: {chi_sq.chi_squared:.4f}")
print(f" Data points: {chi_sq.dof}")
print(f" Reduced chi-squared: {chi_sq.reduced_chi_squared:.6f}")
-
+
physics = fit_result.get_physics_data()
- if hasattr(physics, 'broadening_parameters'):
+ if hasattr(physics, "broadening_parameters"):
broadening = physics.broadening_parameters
- if hasattr(broadening, 'thick') and broadening.thick is not None:
+ if hasattr(broadening, "thick") and broadening.thick is not None:
print(f" Number density: {broadening.thick:.6e} atoms/barn-cm")
print(f" Temperature: {broadening.temp:.2f} K")
-
+
# Multi-isotope abundances
nuclear = fit_result.get_nuclear_data()
- if hasattr(nuclear, 'isotopes') and nuclear.isotopes:
+ if hasattr(nuclear, "isotopes") and nuclear.isotopes:
print("\n Isotopic abundances:")
- hf_isotopes = ["Hf-174", "Hf-176", "Hf-177", "Hf-178", "Hf-179", "Hf-180"]
- natural_abundances = [0.0016, 0.0526, 0.1860, 0.2728, 0.1362, 0.3508]
-
+ hf_isotopes = [
+ "Hf-174",
+ "Hf-176",
+ "Hf-177",
+ "Hf-178",
+ "Hf-179",
+ "Hf-180",
+ ]
+ natural_abundances = [
+ 0.0016,
+ 0.0526,
+ 0.1860,
+ 0.2728,
+ 0.1362,
+ 0.3508,
+ ]
+
for j, isotope in enumerate(nuclear.isotopes):
- if j < len(hf_isotopes) and hasattr(isotope, 'abundance'):
+ if j < len(hf_isotopes) and hasattr(isotope, "abundance"):
fitted = isotope.abundance
natural = natural_abundances[j]
ratio = fitted / natural if natural > 0 else 0
- print(f" {hf_isotopes[j]}: fitted={fitted:.6f}, natural={natural:.4f}, ratio={ratio:.3f}")
+ print(
+ f" {hf_isotopes[j]}: fitted={fitted:.6f}, natural={natural:.4f}, ratio={ratio:.3f}"
+ )
# Final results
if len(results_manager.run_results.fit_results) > 0:
final_fit = results_manager.run_results.fit_results[-1]
final_chi = final_fit.get_chi_squared_results()
final_phys = final_fit.get_physics_data()
-
+
print("\nFinal multi-isotope fit results:")
if final_chi.reduced_chi_squared:
print(f" Reduced chi-squared: {final_chi.reduced_chi_squared:.6f}")
- if hasattr(final_phys, 'broadening_parameters'):
- if hasattr(final_phys.broadening_parameters, 'thick'):
- print(f" Number density: {final_phys.broadening_parameters.thick:.6e} atoms/barn-cm")
-
\ No newline at end of file
+ if hasattr(final_phys, "broadening_parameters"):
+ if hasattr(final_phys.broadening_parameters, "thick"):
+ print(
+ f" Number density: {final_phys.broadening_parameters.thick:.6e} atoms/barn-cm"
+ )
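Aside on the fit-quality numbers printed by the hunk above: SAMMY's reduced chi-squared is the chi-squared divided by the degrees of freedom, and the per-isotope ratio compares the fitted abundance against the natural one. A minimal stand-alone sketch of that arithmetic (illustrative only; the numbers below are invented and none of the SAMMY result objects are used):

```python
import numpy as np

# Hypothetical measured vs. fitted transmission points with uncertainties.
measured = np.array([0.82, 0.75, 0.61, 0.55])
fitted = np.array([0.80, 0.76, 0.63, 0.54])
sigma = np.array([0.02, 0.02, 0.03, 0.02])

chi_squared = np.sum(((measured - fitted) / sigma) ** 2)
dof = measured.size  # reported as "Data points" in the notebook output
reduced_chi_squared = chi_squared / dof
print(f"chi-squared: {chi_squared:.4f}, reduced: {reduced_chi_squared:.6f}")

# Fitted/natural abundance ratio, printed per isotope in the hunk above
# (natural Hf-177 abundance paired with a made-up fitted value).
fitted_abundance, natural_abundance = 0.1795, 0.1860
print(f"Hf-177 ratio: {fitted_abundance / natural_abundance:.3f}")
```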
diff --git a/notebooks/__code/resonance_fitting/units.py b/notebooks/__code/resonance_fitting/units.py
index 8acb982f..43c1bc96 100644
--- a/notebooks/__code/resonance_fitting/units.py
+++ b/notebooks/__code/resonance_fitting/units.py
@@ -135,7 +135,9 @@ def convert_to_cross_section(from_unit, to_unit):
return conversion_factors[from_unit] / conversion_factors[to_unit]
-def convert_from_wavelength_to_energy_ev(wavelength, unit_from=DistanceUnitOptions.angstrom):
+def convert_from_wavelength_to_energy_ev(
+ wavelength, unit_from=DistanceUnitOptions.angstrom
+):
"""Convert wavelength to energy based on the given units.
Args:
@@ -177,15 +179,21 @@ def convert_array_from_time_to_lambda(
np.ndarray: Array of wavelength values.
"""
time_array_s = time_array * convert_time_units(time_unit, TimeUnitOptions.s)
- detector_offset_s = detector_offset * convert_time_units(detector_offset_unit, TimeUnitOptions.s)
+ detector_offset_s = detector_offset * convert_time_units(
+ detector_offset_unit, TimeUnitOptions.s
+ )
distance_source_detector_m = distance_source_detector * convert_distance_units(
distance_source_detector_unit, DistanceUnitOptions.m
)
h_over_mn = h / m_n
- lambda_m = h_over_mn * (time_array_s + detector_offset_s) / distance_source_detector_m
+ lambda_m = (
+ h_over_mn * (time_array_s + detector_offset_s) / distance_source_detector_m
+ )
- lambda_converted = lambda_m * convert_distance_units(DistanceUnitOptions.m, lambda_unit)
+ lambda_converted = lambda_m * convert_distance_units(
+ DistanceUnitOptions.m, lambda_unit
+ )
return lambda_converted
@@ -225,13 +233,22 @@ def convert_array_from_time_to_energy(
detector_units_factor = convert_time_units(detector_offset_unit, TimeUnitOptions.s)
detector_offset = detector_units_factor * detector_offset
- distance_source_detector_factor = convert_distance_units(distance_source_detector_unit, DistanceUnitOptions.m)
- distance_source_detector_m = distance_source_detector * distance_source_detector_factor
+ distance_source_detector_factor = convert_distance_units(
+ distance_source_detector_unit, DistanceUnitOptions.m
+ )
+ distance_source_detector_m = (
+ distance_source_detector * distance_source_detector_factor
+ )
# Calculate the energy in eV using the formula E_ev = 1/2 m_n (L/t_tof)^2 / electron_volt
full_time_array_s = time_array_s + detector_offset
- energy_array_ev = 0.5 * m_n * (distance_source_detector_m / full_time_array_s) ** 2 / electron_volt
+ energy_array_ev = (
+ 0.5
+ * m_n
+ * (distance_source_detector_m / full_time_array_s) ** 2
+ / electron_volt
+ )
energy_array_factor = convert_to_energy(EnergyUnitOptions.eV, energy_unit)
energy_array = energy_array_ev * energy_array_factor
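For readers skimming the units.py hunks above: the two reformatted conversions are the standard neutron time-of-flight relations, E = 1/2 * m_n * (L/t)^2 / e in eV and lambda = (h/m_n) * t / L. A minimal stand-alone sketch using scipy constants (illustrative only; the function names and the 25 m flight path are assumptions, not part of the patch):

```python
import numpy as np
from scipy.constants import electron_volt, h, m_n


def tof_to_energy_ev(tof_s, distance_m, detector_offset_s=0.0):
    """E = 1/2 * m_n * (L / t)**2, expressed in eV."""
    t = tof_s + detector_offset_s
    return 0.5 * m_n * (distance_m / t) ** 2 / electron_volt


def tof_to_lambda_angstrom(tof_s, distance_m, detector_offset_s=0.0):
    """lambda = (h / m_n) * t / L, converted from meters to angstroms."""
    t = tof_s + detector_offset_s
    return (h / m_n) * t / distance_m * 1e10


tof = np.array([1.0e-3, 2.0e-3, 5.0e-3])  # time of flight in seconds
print(tof_to_energy_ev(tof, distance_m=25.0))
print(tof_to_lambda_angstrom(tof, distance_m=25.0))
```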
diff --git a/notebooks/__code/roi_selection_ui.py b/notebooks/__code/roi_selection_ui.py
index 492ae61a..642bd886 100755
--- a/notebooks/__code/roi_selection_ui.py
+++ b/notebooks/__code/roi_selection_ui.py
@@ -7,7 +7,13 @@
from IPython.display import HTML, display
from NeuNorm.normalization import Normalization
from qtpy import QtGui
-from qtpy.QtWidgets import QMainWindow, QProgressBar, QTableWidgetItem, QTableWidgetSelectionRange, QVBoxLayout
+from qtpy.QtWidgets import (
+ QMainWindow,
+ QProgressBar,
+ QTableWidgetItem,
+ QTableWidgetSelectionRange,
+ QVBoxLayout,
+)
from __code import load_ui
from __code.config import (
@@ -65,7 +71,9 @@ def __init__(
self.list_of_files = list_of_files
if percentage_of_data_to_use is None:
- percentage_of_data_to_use = percentage_of_images_to_use_for_roi_selection
+ percentage_of_data_to_use = (
+ percentage_of_images_to_use_for_roi_selection
+ )
self.percentage_of_data_to_use = percentage_of_data_to_use
# method called when leaving the application, if any
@@ -73,7 +81,8 @@ def __init__(
super(QMainWindow, self).__init__(parent)
ui_full_path = os.path.join(
- os.path.dirname(os.path.dirname(__file__)), os.path.join("ui", "ui_roi_selection.ui")
+ os.path.dirname(os.path.dirname(__file__)),
+ os.path.join("ui", "ui_roi_selection.ui"),
)
self.ui = load_ui(ui_full_path, baseinstance=self)
@@ -134,7 +143,9 @@ def __built_html_table_row_3_columns(self, name, nbr, height, width):
def recap(self):
"""Display nbr of files loaded and size. This can be used to figure why a normalization failed"""
- [nbr_sample, height_sample, width_sample] = self.__get_recap(self.o_norm.data["sample"]["data"])
+ [nbr_sample, height_sample, width_sample] = self.__get_recap(
+ self.o_norm.data["sample"]["data"]
+ )
[nbr_ob, height_ob, width_ob] = self.__get_recap(self.o_norm.data["ob"]["data"])
[nbr_df, height_df, width_df] = self.__get_recap(self.o_norm.data["df"]["data"])
@@ -142,7 +153,9 @@ def recap(self):
'| Type | Number | '
+ "Size (height*width) |
"
)
- html += self.__built_html_table_row_3_columns("sample", nbr_sample, height_sample, width_sample)
+ html += self.__built_html_table_row_3_columns(
+ "sample", nbr_sample, height_sample, width_sample
+ )
html += self.__built_html_table_row_3_columns("ob", nbr_ob, height_ob, width_ob)
html += self.__built_html_table_row_3_columns("df", nbr_df, height_df, width_df)
html += "
"
@@ -169,7 +182,9 @@ def integrate_images(self):
random_list = random.sample(range(0, nbr_files), nbr_files_to_use)
if self.o_norm:
- list_data_to_use = [self.o_norm.data["sample"]["data"][_index] for _index in random_list]
+ list_data_to_use = [
+ self.o_norm.data["sample"]["data"][_index] for _index in random_list
+ ]
else:
o_norm = Normalization()
list_of_files = np.array(self.list_of_files)
@@ -367,7 +382,9 @@ def roi_manually_moved(self):
_roi = list_roi[_row]
roi_id = _roi["id"]
- region = roi_id.getArraySlice(self.integrated_image, self.ui.image_view.imageItem)
+ region = roi_id.getArraySlice(
+ self.integrated_image, self.ui.image_view.imageItem
+ )
x0 = region[0][0].start
x1 = region[0][0].stop
@@ -441,7 +458,9 @@ def add_roi_button_clicked(self):
width_int = np.abs(x0_int - int(_x1))
height_int = np.abs(y0_int - int(_y1))
- _roi_id = self.init_roi(x0=x0_int, y0=y0_int, width=width_int, height=height_int)
+ _roi_id = self.init_roi(
+ x0=x0_int, y0=y0_int, width=width_int, height=height_int
+ )
_roi["id"] = _roi_id
list_roi[_row] = _roi
diff --git a/notebooks/__code/roi_statistics_vs_stack/display.py b/notebooks/__code/roi_statistics_vs_stack/display.py
index 2111f2e4..36b8c1bf 100755
--- a/notebooks/__code/roi_statistics_vs_stack/display.py
+++ b/notebooks/__code/roi_statistics_vs_stack/display.py
@@ -26,7 +26,8 @@ def update_image_view(self, slider_value=0):
if not first_update:
_histo_widget.setLevels(
- self.parent.image_view_histogram_level[0], self.parent.image_view_histogram_level[1]
+ self.parent.image_view_histogram_level[0],
+ self.parent.image_view_histogram_level[1],
)
def get_x_axis(self):
@@ -43,27 +44,37 @@ def update_statistics_plot(self):
nbr_plot = 0
if self.parent.ui.mean_checkBox.isChecked():
y_axis_mean = self.parent.y_axis["mean"]
- self.parent.statistics_plot.axes.plot(x_axis, y_axis_mean, "bv", label="mean")
+ self.parent.statistics_plot.axes.plot(
+ x_axis, y_axis_mean, "bv", label="mean"
+ )
nbr_plot += 1
if self.parent.ui.min_checkBox.isChecked():
y_axis_mean = self.parent.y_axis["min"]
- self.parent.statistics_plot.axes.plot(x_axis, y_axis_mean, "r*", label="min")
+ self.parent.statistics_plot.axes.plot(
+ x_axis, y_axis_mean, "r*", label="min"
+ )
nbr_plot += 1
if self.parent.ui.max_checkBox.isChecked():
y_axis_mean = self.parent.y_axis["max"]
- self.parent.statistics_plot.axes.plot(x_axis, y_axis_mean, "r+", label="max")
+ self.parent.statistics_plot.axes.plot(
+ x_axis, y_axis_mean, "r+", label="max"
+ )
nbr_plot += 1
if self.parent.ui.median_checkBox.isChecked():
y_axis_mean = self.parent.y_axis["median"]
- self.parent.statistics_plot.axes.plot(x_axis, y_axis_mean, "gp", label="median")
+ self.parent.statistics_plot.axes.plot(
+ x_axis, y_axis_mean, "gp", label="median"
+ )
nbr_plot += 1
if self.parent.ui.std_checkBox.isChecked():
y_axis_mean = self.parent.y_axis["std"]
- self.parent.statistics_plot.axes.plot(x_axis, y_axis_mean, "cx", label="std")
+ self.parent.statistics_plot.axes.plot(
+ x_axis, y_axis_mean, "cx", label="std"
+ )
nbr_plot += 1
if nbr_plot > 0:
diff --git a/notebooks/__code/roi_statistics_vs_stack/event_handler.py b/notebooks/__code/roi_statistics_vs_stack/event_handler.py
index f7b08ada..e6c0d976 100755
--- a/notebooks/__code/roi_statistics_vs_stack/event_handler.py
+++ b/notebooks/__code/roi_statistics_vs_stack/event_handler.py
@@ -10,7 +10,9 @@ def __init__(self, parent=None):
self.parent = parent
def recalculate_table(self):
- region = self.parent.ui.roi.getArraySlice(self.parent.live_image, self.parent.ui.image_view.imageItem)
+ region = self.parent.ui.roi.getArraySlice(
+ self.parent.live_image, self.parent.ui.image_view.imageItem
+ )
x0 = region[0][0].start
x1 = region[0][0].stop - 1
@@ -62,10 +64,24 @@ def update_table(self):
o_table = TableHandler(table_ui=self.parent.ui.tableWidget)
for _row in data_dict.keys():
_entry = data_dict[_row]
- o_table.insert_item(row=_row, column=StatisticsColumnIndex.min, value=_entry["min"], editable=False)
- o_table.insert_item(row=_row, column=StatisticsColumnIndex.max, value=_entry["max"], editable=False)
o_table.insert_item(
- row=_row, column=StatisticsColumnIndex.mean, value=_entry["mean"], format_str="{:0.2f}", editable=False
+ row=_row,
+ column=StatisticsColumnIndex.min,
+ value=_entry["min"],
+ editable=False,
+ )
+ o_table.insert_item(
+ row=_row,
+ column=StatisticsColumnIndex.max,
+ value=_entry["max"],
+ editable=False,
+ )
+ o_table.insert_item(
+ row=_row,
+ column=StatisticsColumnIndex.mean,
+ value=_entry["mean"],
+ format_str="{:0.2f}",
+ editable=False,
)
o_table.insert_item(
row=_row,
@@ -75,7 +91,11 @@ def update_table(self):
editable=False,
)
o_table.insert_item(
- row=_row, column=StatisticsColumnIndex.std, value=_entry["std"], format_str="{:0.2f}", editable=False
+ row=_row,
+ column=StatisticsColumnIndex.std,
+ value=_entry["std"],
+ format_str="{:0.2f}",
+ editable=False,
)
def reset_table_plot(self):
diff --git a/notebooks/__code/roi_statistics_vs_stack/export.py b/notebooks/__code/roi_statistics_vs_stack/export.py
index 62373bd8..2430f1cb 100755
--- a/notebooks/__code/roi_statistics_vs_stack/export.py
+++ b/notebooks/__code/roi_statistics_vs_stack/export.py
@@ -14,12 +14,16 @@ def __init__(self, parent=None):
def export(self):
base_folder = os.path.basename(self.parent.working_folder)
_export_folder = QFileDialog.getExistingDirectory(
- self.parent, directory=os.path.dirname(base_folder), caption="Select Output Folder"
+ self.parent,
+ directory=os.path.dirname(base_folder),
+ caption="Select Output Folder",
)
if _export_folder:
output_base_file_name = str(base_folder) + "_statistics.txt"
- full_output_base_file_name = os.path.join(_export_folder, output_base_file_name)
+ full_output_base_file_name = os.path.join(
+ _export_folder, output_base_file_name
+ )
x_axis = self.parent.x_axis
time_offset_array = x_axis["time_offset"]
@@ -37,10 +41,13 @@ def export(self):
metadata = ["# Statistics created with roi_statistics_vs_stack notebook"]
metadata.append(f"# working dir: {self.parent.working_folder}")
metadata.append(
- f"# roi selected: x0:{roi['x0']}, y0:{roi['y0']}, " f"width:{roi['width']}, height:{roi['height']}"
+ f"# roi selected: x0:{roi['x0']}, y0:{roi['y0']}, "
+ f"width:{roi['width']}, height:{roi['height']}"
)
metadata.append("#")
- metadata.append("#file index, file name, time offset (s), min, max, mean, median, standard deviation")
+ metadata.append(
+ "#file index, file name, time offset (s), min, max, mean, median, standard deviation"
+ )
data = []
for _row in np.arange(len(list_of_images)):
@@ -53,12 +60,26 @@ def export(self):
_median = median_array[_row]
_std = std_array[_row]
- _row = [_file_index, _file_name, _time_offset, _min, _max, _mean, _median, _std]
+ _row = [
+ _file_index,
+ _file_name,
+ _time_offset,
+ _min,
+ _max,
+ _mean,
+ _median,
+ _std,
+ ]
_row_str = [str(_entry) for _entry in _row]
_row_str_formatted = ",".join(_row_str)
data.append(_row_str_formatted)
- make_ascii_file(metadata=metadata, data=data, output_file_name=full_output_base_file_name, dim="1d")
+ make_ascii_file(
+ metadata=metadata,
+ data=data,
+ output_file_name=full_output_base_file_name,
+ dim="1d",
+ )
show_status_message(
parent=self.parent,
diff --git a/notebooks/__code/roi_statistics_vs_stack/initialization.py b/notebooks/__code/roi_statistics_vs_stack/initialization.py
index ee192c66..e101bb7f 100755
--- a/notebooks/__code/roi_statistics_vs_stack/initialization.py
+++ b/notebooks/__code/roi_statistics_vs_stack/initialization.py
@@ -36,7 +36,9 @@ def pyqtgraph(self):
y0 = roi["y0"]
width = roi["width"]
height = roi["height"]
- self.parent.ui.roi = pg.ROI([x0, y0], [width, height], pen=(62, 13, 244), scaleSnap=True) # blue
+ self.parent.ui.roi = pg.ROI(
+ [x0, y0], [width, height], pen=(62, 13, 244), scaleSnap=True
+ ) # blue
self.parent.ui.roi.addScaleHandle([1, 1], [0, 0])
self.parent.ui.image_view.addItem(self.parent.ui.roi)
self.parent.ui.roi.sigRegionChanged.connect(self.parent.roi_changed)
@@ -63,7 +65,12 @@ def table(self):
for _row in np.arange(len(self.parent.list_of_images)):
o_table.insert_empty_row(_row)
short_file_name = os.path.basename(self.parent.list_of_images[_row])
- o_table.insert_item(row=_row, column=StatisticsColumnIndex.file_name, value=short_file_name, editable=False)
+ o_table.insert_item(
+ row=_row,
+ column=StatisticsColumnIndex.file_name,
+ value=short_file_name,
+ editable=False,
+ )
o_table.insert_item(
row=_row,
column=StatisticsColumnIndex.time_offset,
@@ -95,4 +102,6 @@ def _matplotlib(parent=None, widget=None):
widget.setLayout(layout)
return sc
- self.parent.statistics_plot = _matplotlib(parent=self.parent, widget=self.parent.ui.plot_widget)
+ self.parent.statistics_plot = _matplotlib(
+ parent=self.parent, widget=self.parent.ui.plot_widget
+ )
diff --git a/notebooks/__code/roi_statistics_vs_stack/load.py b/notebooks/__code/roi_statistics_vs_stack/load.py
index 42adfaa7..55253882 100755
--- a/notebooks/__code/roi_statistics_vs_stack/load.py
+++ b/notebooks/__code/roi_statistics_vs_stack/load.py
@@ -29,7 +29,9 @@ def data(self):
o_norm = Normalization()
o_norm.load(file=_file, auto_gamma_filter=False, manual_gamma_filter=False)
data = np.squeeze(o_norm.data["sample"]["data"][0])
- time_stamp = MetadataHandler.get_time_stamp(file_name=_file, ext=file_extension)
+ time_stamp = MetadataHandler.get_time_stamp(
+ file_name=_file, ext=file_extension
+ )
if acquisition_time_of_first_image == -1:
acquisition_time_of_first_image = time_stamp
time_stamp = 0
diff --git a/notebooks/__code/roi_statistics_vs_stack/main.py b/notebooks/__code/roi_statistics_vs_stack/main.py
index 0dcc9d80..36bd5574 100755
--- a/notebooks/__code/roi_statistics_vs_stack/main.py
+++ b/notebooks/__code/roi_statistics_vs_stack/main.py
@@ -16,13 +16,17 @@
class FileHandler(FileFolderBrowser):
def __init__(self, working_dir=""):
- super(FileHandler, self).__init__(working_dir=working_dir, next_function=self.display_status)
+ super(FileHandler, self).__init__(
+ working_dir=working_dir, next_function=self.display_status
+ )
def get_list_of_files(self):
return self.list_images_ui.selected
def select_folder(self):
- self.select_input_folder(instruction="Select folder containing images to process ...")
+ self.select_input_folder(
+ instruction="Select folder containing images to process ..."
+ )
def display_status(self, folder):
o_list = ListMostDominantExtension(working_dir=folder)
@@ -30,7 +34,13 @@ def display_status(self, folder):
result = o_list.get_files_of_selected_ext()
self.list_of_images = result.list_files
nbr_images = str(len(self.list_of_images))
- display(HTML('You have selected ' + nbr_images + " images "))
+ display(
+ HTML(
+ 'You have selected '
+ + nbr_images
+ + " images "
+ )
+ )
class ImageWindow(QMainWindow):
diff --git a/notebooks/__code/roi_statistics_vs_stack/table.py b/notebooks/__code/roi_statistics_vs_stack/table.py
index 9e0033ae..03e9ad0f 100755
--- a/notebooks/__code/roi_statistics_vs_stack/table.py
+++ b/notebooks/__code/roi_statistics_vs_stack/table.py
@@ -13,8 +13,33 @@ def reset(self):
reset_value = "NaN"
for _row in data_dict.keys():
_entry = data_dict[_row]
- o_table.insert_item(row=_row, column=StatisticsColumnIndex.min, value=reset_value, editable=False)
- o_table.insert_item(row=_row, column=StatisticsColumnIndex.max, value=reset_value, editable=False)
- o_table.insert_item(row=_row, column=StatisticsColumnIndex.mean, value=reset_value, editable=False)
- o_table.insert_item(row=_row, column=StatisticsColumnIndex.median, value=reset_value, editable=False)
- o_table.insert_item(row=_row, column=StatisticsColumnIndex.std, value=reset_value, editable=False)
+ o_table.insert_item(
+ row=_row,
+ column=StatisticsColumnIndex.min,
+ value=reset_value,
+ editable=False,
+ )
+ o_table.insert_item(
+ row=_row,
+ column=StatisticsColumnIndex.max,
+ value=reset_value,
+ editable=False,
+ )
+ o_table.insert_item(
+ row=_row,
+ column=StatisticsColumnIndex.mean,
+ value=reset_value,
+ editable=False,
+ )
+ o_table.insert_item(
+ row=_row,
+ column=StatisticsColumnIndex.median,
+ value=reset_value,
+ editable=False,
+ )
+ o_table.insert_item(
+ row=_row,
+ column=StatisticsColumnIndex.std,
+ value=reset_value,
+ editable=False,
+ )
diff --git a/notebooks/__code/rotate_and_crop_images/rotate_and_crop_images.py b/notebooks/__code/rotate_and_crop_images/rotate_and_crop_images.py
index c728f7fb..8ac49697 100755
--- a/notebooks/__code/rotate_and_crop_images/rotate_and_crop_images.py
+++ b/notebooks/__code/rotate_and_crop_images/rotate_and_crop_images.py
@@ -69,7 +69,8 @@ def __init__(self, parent=None, o_load=None):
QMainWindow.__init__(self, parent=parent)
ui_full_path = os.path.join(
- os.path.dirname(os.path.dirname(os.path.dirname(__file__))), os.path.join("ui", "ui_rotate_and_crop.ui")
+ os.path.dirname(os.path.dirname(os.path.dirname(__file__))),
+ os.path.join("ui", "ui_rotate_and_crop.ui"),
)
self.ui = load_ui(ui_full_path, baseinstance=self)
self.init_statusbar()
@@ -150,7 +151,9 @@ def get_selected_image(self, file_index=None):
file_index = self.ui.file_index_slider.value()
if self.data_dict[file_index][DataDictKeys.data] is None:
- data = RotateAndCropImages.load_data(filename=self.data_dict[file_index][DataDictKeys.filename])
+ data = RotateAndCropImages.load_data(
+ filename=self.data_dict[file_index][DataDictKeys.filename]
+ )
self.data_dict[file_index][DataDictKeys.data] = data
else:
@@ -222,12 +225,20 @@ def display_grid(self):
line_color = (255, 0, 0, 155, 0.2)
lines = np.array(
[line_color for n in np.arange(len(pos))],
- dtype=[("red", np.ubyte), ("green", np.ubyte), ("blue", np.ubyte), ("alpha", np.ubyte), ("width", float)],
+ dtype=[
+ ("red", np.ubyte),
+ ("green", np.ubyte),
+ ("blue", np.ubyte),
+ ("alpha", np.ubyte),
+ ("width", float),
+ ],
)
line_view_binning = pg.GraphItem()
self.ui.image_view.addItem(line_view_binning)
- line_view_binning.setData(pos=pos, adj=adj, pen=lines, symbol=None, pxMode=False)
+ line_view_binning.setData(
+ pos=pos, adj=adj, pen=lines, symbol=None, pxMode=False
+ )
self.line_view_binning = line_view_binning
@@ -244,7 +255,9 @@ def get_or_load_data(self, file_index=0):
array. If it's already there, just return the array
"""
if self.data_dict[file_index][DataDictKeys.data] is None:
- data = RotateAndCropImages.load_data(self.data_dict[file_index][DataDictKeys.filename])
+ data = RotateAndCropImages.load_data(
+ self.data_dict[file_index][DataDictKeys.filename]
+ )
self.data_dict[file_index][DataDictKeys.data] = data
return data
else:
@@ -267,7 +280,9 @@ def rotation_value_changed(self):
def get_crop_region(self):
data = self.get_selected_image()
# data = self.live_data
- region = self.roi.getArraySlice(np.transpose(data), self.ui.image_view.imageItem)
+ region = self.roi.getArraySlice(
+ np.transpose(data), self.ui.image_view.imageItem
+ )
x0 = region[0][0].start
x1 = region[0][0].stop - 1
@@ -295,7 +310,9 @@ def rotate_and_crop_all(self):
rotated_data = data[y0:y1, x0:x1]
self.rotated_data_dict[file_index] = {
- DataDictKeys.filename: self.data_dict[file_index][DataDictKeys.filename],
+ DataDictKeys.filename: self.data_dict[file_index][
+ DataDictKeys.filename
+ ],
DataDictKeys.data: rotated_data,
}
@@ -338,7 +355,10 @@ def select_folder(self):
)
self.output_folder_ui = fileselector.FileSelectorPanel(
- instruction="Select Output Folder ...", start_dir=self.working_dir, type="directory", next=self.export
+ instruction="Select Output Folder ...",
+ start_dir=self.working_dir,
+ type="directory",
+ next=self.export,
)
self.output_folder_ui.show()
@@ -369,4 +389,10 @@ def export(self, output_folder):
w.close()
display(HTML(""))
- display(HTML('Files created in ' + full_output_folder + ""))
+ display(
+ HTML(
+ 'Files created in '
+ + full_output_folder
+ + ""
+ )
+ )
diff --git a/notebooks/__code/sans/extract.py b/notebooks/__code/sans/extract.py
index 4d38f63d..523f8dab 100755
--- a/notebooks/__code/sans/extract.py
+++ b/notebooks/__code/sans/extract.py
@@ -15,7 +15,10 @@
from __code.sans.sans_config import biosans_parameters, gpsans_parameters
STARTING_ENTRIES = ["entry", "DASlogs"]
-LIST_SANS_INSTRUMENTS = {"GP-SANS (CG2)": {"unix_name": "CG2"}, "BIO-SANS (CG3)": {"unix_name": "CG3"}}
+LIST_SANS_INSTRUMENTS = {
+ "GP-SANS (CG2)": {"unix_name": "CG2"},
+ "BIO-SANS (CG3)": {"unix_name": "CG3"},
+}
class Initializer:
@@ -23,8 +26,13 @@ def select_instrument(self):
list_instruments = list(LIST_SANS_INSTRUMENTS.keys())
instrument_ui = widgets.HBox(
[
- widgets.Label("Select your instrument", layout=widgets.Layout(width="15%")),
- widgets.Select(options=list_instruments, layout=widgets.Layout(width="30%", height="50px")),
+ widgets.Label(
+ "Select your instrument", layout=widgets.Layout(width="15%")
+ ),
+ widgets.Select(
+ options=list_instruments,
+ layout=widgets.Layout(width="30%", height="50px"),
+ ),
]
)
display(instrument_ui)
@@ -78,19 +86,30 @@ def display_metadata(self, list_nexus):
self.list_nexus = list_nexus
self.list_keys = self.retrieve_left_widget_list_keys()
- self.list_values = self.retrieve_right_widget_list_keys(left_widget_key_selected=list(self.list_keys)[0])
+ self.list_values = self.retrieve_right_widget_list_keys(
+ left_widget_key_selected=list(self.list_keys)[0]
+ )
# search box
- search_box = widgets.HBox([widgets.Label("Search:"), widgets.Text("", layout=widgets.Layout(width="30%"))])
+ search_box = widgets.HBox(
+ [
+ widgets.Label("Search:"),
+ widgets.Text("", layout=widgets.Layout(width="30%")),
+ ]
+ )
# search_text_widget = search_box.children[1]
# search_text_widget.observe(self.search_text_changed, names='value')
# list of keys
hori_box = widgets.HBox(
[
- widgets.Select(options=self.list_keys, layout=widgets.Layout(width="400px", height=self.widget_height)),
+ widgets.Select(
+ options=self.list_keys,
+ layout=widgets.Layout(width="400px", height=self.widget_height),
+ ),
widgets.SelectMultiple(
- options=self.list_values, layout=widgets.Layout(width="400px", height=self.widget_height)
+ options=self.list_values,
+ layout=widgets.Layout(width="400px", height=self.widget_height),
),
],
)
@@ -99,7 +118,11 @@ def display_metadata(self, list_nexus):
self.left_widget_ui.observe(self.left_widget_changed, names="value")
self.right_widget_ui.observe(self.right_widget_changed, names="value")
- display(widgets.Label("Command + Click: to select more than 1 element in the right widget"))
+ display(
+ widgets.Label(
+ "Command + Click: to select more than 1 element in the right widget"
+ )
+ )
def left_widget_changed(self, new_value):
value_selected = new_value["new"]
@@ -170,10 +193,16 @@ def reformat_dict(full_dict):
return new_full_dict
def extract_all_in_one(self, output_folder):
- display(HTML('Work in progress ... '))
+ display(
+ HTML(
+ 'Work in progress ... '
+ )
+ )
self.output_folder_ui.shortcut_buttons.close()
- output_file_name = Extract.create_output_file_name(output_folder=output_folder, nbr_nexus=len(self.list_nexus))
+ output_file_name = Extract.create_output_file_name(
+ output_folder=output_folder, nbr_nexus=len(self.list_nexus)
+ )
full_list_selected = self.full_list_selected
# get list of path
@@ -194,7 +223,9 @@ def extract_all_in_one(self, output_folder):
if _index == 0:
label_of_columns.append("nexus name")
- reduction_log_dict = Extract.get_entry_value(nexus_file_name=_nexus, list_entry_path=list_entry_path)
+ reduction_log_dict = Extract.get_entry_value(
+ nexus_file_name=_nexus, list_entry_path=list_entry_path
+ )
for _key in reduction_log_dict.keys():
_value = reduction_log_dict[_key]
# print(f"-> _key:{_key}: {_value}")
diff --git a/notebooks/__code/sans/sans_config.py b/notebooks/__code/sans/sans_config.py
index 71f26c5c..5aaef869 100755
--- a/notebooks/__code/sans/sans_config.py
+++ b/notebooks/__code/sans/sans_config.py
@@ -186,7 +186,12 @@
gpsans_parameters = {
"special_parameters": {
- "list": ["sample_transmission", "background_transmission", "beam_center", "transmission_radius_used (mm)"],
+ "list": [
+ "sample_transmission",
+ "background_transmission",
+ "beam_center",
+ "transmission_radius_used (mm)",
+ ],
"path": ["reduction_information", "special_parameters"],
},
"sample_logs": {
diff --git a/notebooks/__code/scale_overlapping_images.py b/notebooks/__code/scale_overlapping_images.py
index 6907276f..3d5905e9 100755
--- a/notebooks/__code/scale_overlapping_images.py
+++ b/notebooks/__code/scale_overlapping_images.py
@@ -151,7 +151,9 @@ def update_all_plots(self):
nbr_profile = len(list_index_profile_selected)
nbr_file_selected = len(list_index_file_selected)
color = Color()
- list_rgb_profile_color = color.get_list_rgb(nbr_color=(nbr_profile * nbr_file_selected))
+ list_rgb_profile_color = color.get_list_rgb(
+ nbr_color=(nbr_profile * nbr_file_selected)
+ )
self.ui.all_plots_view.clear()
if nbr_profile == 0:
return
@@ -165,10 +167,16 @@ def update_all_plots(self):
for _color_index_file, _index_file in enumerate(list_index_file_selected):
_data = self.data_dict["data"][_index_file]
- for _color_index_profile, _index_profile in enumerate(list_index_profile_selected):
+ for _color_index_profile, _index_profile in enumerate(
+ list_index_profile_selected
+ ):
legend = f"File #{_index_file} - Profile #{_index_profile}"
- _color = list_rgb_profile_color[_color_index_file + _color_index_profile * nbr_file_selected]
- [x_axis, y_axis] = self.get_profile(image=np.transpose(_data), profile_roi_row=_index_profile)
+ _color = list_rgb_profile_color[
+ _color_index_file + _color_index_profile * nbr_file_selected
+ ]
+ [x_axis, y_axis] = self.get_profile(
+ image=np.transpose(_data), profile_roi_row=_index_profile
+ )
self.ui.all_plots_view.plot(x_axis, y_axis, name=legend, pen=_color)
def display_image(self, recalculate_image=False):
@@ -237,7 +245,9 @@ def is_row_enabled(self, row=-1):
def update_guide_table_using_guide_rois(self):
for _row, _roi in enumerate(self.list_guide_pyqt_roi):
if self.is_row_enabled(row=_row):
- region = _roi.getArraySlice(self.live_image, self.ui.image_view.imageItem)
+ region = _roi.getArraySlice(
+ self.live_image, self.ui.image_view.imageItem
+ )
x0 = region[0][0].start
x1 = region[0][0].stop
@@ -310,7 +320,9 @@ def rename_all_plots_profiles_table(self):
"""rename all the profile name"""
nbr_row = self.ui.tableWidget.rowCount()
for _row in np.arange(nbr_row):
- self.ui.all_plots_profiles_table.item(_row, 0).setText(f"Profile # {_row+1}")
+ self.ui.all_plots_profiles_table.item(_row, 0).setText(
+ f"Profile # {_row+1}"
+ )
# setter
def set_item_all_plots_profile_table(self, row=0):
@@ -319,12 +331,16 @@ def set_item_all_plots_profile_table(self, row=0):
self.ui.all_plots_profiles_table.setItem(row, 0, item)
def set_item_profile_table(self, row=0):
- spacerItem_left = QtGui.QSpacerItem(408, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
+ spacerItem_left = QtGui.QSpacerItem(
+ 408, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding
+ )
widget = QtGui.QComboBox()
widget.addItems(self.default_profile_width_values)
widget.blockSignals(True)
widget.currentIndexChanged.connect(self.profile_width_changed)
- spacerItem_right = QtGui.QSpacerItem(408, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
+ spacerItem_right = QtGui.QSpacerItem(
+ 408, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding
+ )
hori_layout = QtGui.QHBoxLayout()
hori_layout.addItem(spacerItem_left)
hori_layout.addWidget(widget)
@@ -336,12 +352,16 @@ def set_item_profile_table(self, row=0):
def set_item_main_table(self, row=0, col=0, value=""):
if col == 0:
- spacerItem_left = QtGui.QSpacerItem(408, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
+ spacerItem_left = QtGui.QSpacerItem(
+ 408, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding
+ )
widget = QtGui.QCheckBox()
widget.blockSignals(True)
self.list_table_widget_checkbox.insert(row, widget)
widget.stateChanged.connect(self.guide_state_changed)
- spacerItem_right = QtGui.QSpacerItem(408, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
+ spacerItem_right = QtGui.QSpacerItem(
+ 408, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding
+ )
hori_layout = QtGui.QHBoxLayout()
hori_layout.addItem(spacerItem_left)
hori_layout.addWidget(widget)
@@ -379,7 +399,9 @@ def get_profile_dimensions(self, row=-1):
y_top = y0
y_bottom = y0 + height
- Profile = collections.namedtuple("Profile", ["x_left", "x_right", "y_top", "y_bottom", "profile_center"])
+ Profile = collections.namedtuple(
+ "Profile", ["x_left", "x_right", "y_top", "y_bottom", "profile_center"]
+ )
result = Profile(x_left, x_right, y_top, y_bottom, profile_center)
return result
@@ -400,7 +422,9 @@ def get_profile(self, image=[], profile_roi_row=-1):
mean_axis = 0
x_axis = np.arange(y_top, y_bottom)
- _data = image[x_left:x_right, y_top:y_bottom] # because pyqtgrpah display transpose images
+ _data = image[
+ x_left:x_right, y_top:y_bottom
+ ] # because pyqtgrpah display transpose images
profile = np.mean(_data, axis=mean_axis)
return [x_axis, profile]
@@ -459,7 +483,9 @@ def highlight_guide_profile_pyqt_rois(self, row=-1):
return
try:
- self._highlights_guide_profile_pyqt_roi(row=previous_active_row, status="deactivated")
+ self._highlights_guide_profile_pyqt_roi(
+ row=previous_active_row, status="deactivated"
+ )
self._highlights_guide_profile_pyqt_roi(row=row, status="activated")
except:
pass
@@ -611,7 +637,10 @@ def profile_along_axis_changed(self):
def export_button_clicked(self):
_export_folder = QFileDialog.getExistingDirectory(
- self, directory=self.working_dir, caption="Select Output Folder", options=QFileDialog.ShowDirsOnly
+ self,
+ directory=self.working_dir,
+ caption="Select Output Folder",
+ options=QFileDialog.ShowDirsOnly,
)
if _export_folder:
o_export = ExportProfiles(parent=self, export_folder=_export_folder)
@@ -629,7 +658,9 @@ def next_image_button_clicked(self):
def help_button_clicked(self):
import webbrowser
- webbrowser.open("https://neutronimaging.pages.ornl.gov/en/tutorial/notebooks/profile/")
+ webbrowser.open(
+ "https://neutronimaging.pages.ornl.gov/en/tutorial/notebooks/profile/"
+ )
def closeEvent(self, event=None):
pass
@@ -642,7 +673,9 @@ def __init__(self, parent=None, export_folder=""):
def _create_output_file_name(self, profile_index=0):
base_name = os.path.basename(self.parent.working_dir)
- output_file_name = os.path.join(self.export_folder, f"{base_name}_profile_{profile_index+1}.txt")
+ output_file_name = os.path.join(
+ self.export_folder, f"{base_name}_profile_{profile_index+1}.txt"
+ )
return output_file_name
def _create_metadata(self, profile_index=0):
@@ -655,7 +688,9 @@ def _create_metadata(self, profile_index=0):
y_top = profile_dimension.y_top
y_bottom = profile_dimension.y_bottom
metadata.append("#Profile dimension:")
- metadata.append(f"# * [x0, y0, x1, y1] = [{x_left}, {y_top}, {x_right}, {y_bottom}]")
+ metadata.append(
+ f"# * [x0, y0, x1, y1] = [{x_left}, {y_top}, {x_right}, {y_bottom}]"
+ )
if is_x_profile_direction:
metadata.append("# * integrated over y_axis")
table_axis = ["#x_axis"]
@@ -675,7 +710,9 @@ def _create_data(self, profile_index=0):
all_profiles = []
x_axis = []
for _data in self.parent.data_dict["data"]:
- [x_axis, profile] = self.parent.get_profile(image=np.transpose(_data), profile_roi_row=profile_index)
+ [x_axis, profile] = self.parent.get_profile(
+ image=np.transpose(_data), profile_roi_row=profile_index
+ )
all_profiles.append(list(profile))
data = []
@@ -688,10 +725,17 @@ def _create_data(self, profile_index=0):
def run(self):
_nbr_profiles = self.parent.ui.tableWidget.rowCount()
for _profile_index in np.arange(_nbr_profiles):
- _output_file_name = self._create_output_file_name(profile_index=_profile_index)
+ _output_file_name = self._create_output_file_name(
+ profile_index=_profile_index
+ )
metadata = self._create_metadata(profile_index=_profile_index)
data = self._create_data(profile_index=_profile_index)
- make_ascii_file(metadata=metadata, data=data, output_file_name=_output_file_name, dim="1d")
+ make_ascii_file(
+ metadata=metadata,
+ data=data,
+ output_file_name=_output_file_name,
+ dim="1d",
+ )
display(HTML(f"Exported Profile file {_output_file_name}"))
@@ -712,14 +756,19 @@ def add(self):
def update(self):
self._define_profile()
- self.parent.ui.image_view.removeItem(self.parent.list_profile_pyqt_roi[self.row])
+ self.parent.ui.image_view.removeItem(
+ self.parent.list_profile_pyqt_roi[self.row]
+ )
self.parent.list_profile_pyqt_roi[self.row] = self.__profile
def _define_guide(self):
"""define the guide"""
guide_roi = pg.RectROI(
[self.parent.default_guide_roi["x0"], self.parent.default_guide_roi["y0"]],
- [self.parent.default_guide_roi["width"], self.parent.default_guide_roi["height"]],
+ [
+ self.parent.default_guide_roi["width"],
+ self.parent.default_guide_roi["height"],
+ ],
pen=self.parent.default_guide_roi["color_activated"],
)
guide_roi.addScaleHandle([1, 1], [0, 0])
@@ -776,7 +825,13 @@ def _define_profile(self):
line_color = tuple(_list_line_color)
lines = np.array(
[line_color for n in np.arange(len(pos))],
- dtype=[("red", np.ubyte), ("green", np.ubyte), ("blue", np.ubyte), ("alpha", np.ubyte), ("width", float)],
+ dtype=[
+ ("red", np.ubyte),
+ ("green", np.ubyte),
+ ("blue", np.ubyte),
+ ("alpha", np.ubyte),
+ ("width", float),
+ ],
)
profile = pg.GraphItem()
@@ -797,20 +852,28 @@ def timestamp_dict(self):
def table(self):
# init the summary table
list_files_full_name = self.parent.data_dict["file_name"]
- list_files_short_name = [os.path.basename(_file) for _file in list_files_full_name]
+ list_files_short_name = [
+ os.path.basename(_file) for _file in list_files_full_name
+ ]
list_time_stamp = self.parent.timestamp_dict["list_time_stamp"]
- list_time_stamp_user_format = self.parent.timestamp_dict["list_time_stamp_user_format"]
+ list_time_stamp_user_format = self.parent.timestamp_dict[
+ "list_time_stamp_user_format"
+ ]
time_0 = list_time_stamp[0]
for _row, _file in enumerate(list_files_short_name):
self.parent.ui.summary_table.insertRow(_row)
self.set_item_summary_table(row=_row, col=0, value=_file)
- self.set_item_summary_table(row=_row, col=1, value=list_time_stamp_user_format[_row])
+ self.set_item_summary_table(
+ row=_row, col=1, value=list_time_stamp_user_format[_row]
+ )
_offset = list_time_stamp[_row] - time_0
self.set_item_summary_table(row=_row, col=2, value=f"{_offset:0.2f}")
self.parent.ui.all_plots_file_name_table.insertRow(_row)
- self.set_item_all_plot_file_name_table(row=_row, value=os.path.basename(_file))
+ self.set_item_all_plot_file_name_table(
+ row=_row, value=os.path.basename(_file)
+ )
def parameters(self):
# init the position of the measurement ROI
@@ -819,7 +882,9 @@ def parameters(self):
self.parent.default_guide_roi["height"] = int(height / 5)
self.parent.default_guide_roi["x0"] = int(width / 2)
self.parent.default_guide_roi["y0"] = int(height / 2)
- self.parent.default_profile_width_values = [str(_value) for _value in self.parent.default_profile_width_values]
+ self.parent.default_profile_width_values = [
+ str(_value) for _value in self.parent.default_profile_width_values
+ ]
def widgets(self):
_file_path = os.path.dirname(__file__)
@@ -827,28 +892,32 @@ def widgets(self):
os.path.join(_file_path, "static/profile/button_rotation_left_fast.png")
)
self.parent.ui.left_rotation_button_fast.setStyleSheet(
- "background-image: " "url('" + left_rotation_fast_file + "'); " + "background-repeat: no-repeat"
+ "background-image: "
+ "url('" + left_rotation_fast_file + "'); " + "background-repeat: no-repeat"
)
right_rotation_fast_file = os.path.abspath(
os.path.join(_file_path, "static/profile/button_rotation_right_fast.png")
)
self.parent.ui.right_rotation_button_fast.setStyleSheet(
- "background-image: " "url('" + right_rotation_fast_file + "'); " + "background-repeat: no-repeat"
+ "background-image: "
+ "url('" + right_rotation_fast_file + "'); " + "background-repeat: no-repeat"
)
left_rotation_slow_file = os.path.abspath(
os.path.join(_file_path, "static/profile/button_rotation_left_slow.png")
)
self.parent.ui.left_rotation_button_slow.setStyleSheet(
- "background-image: " "url('" + left_rotation_slow_file + "'); " + "background-repeat: no-repeat"
+ "background-image: "
+ "url('" + left_rotation_slow_file + "'); " + "background-repeat: no-repeat"
)
right_rotation_slow_file = os.path.abspath(
os.path.join(_file_path, "static/profile/button_rotation_right_slow.png")
)
self.parent.ui.right_rotation_button_slow.setStyleSheet(
- "background-image: " "url('" + right_rotation_slow_file + "'); " + "background-repeat: no-repeat"
+ "background-image: "
+ "url('" + right_rotation_slow_file + "'); " + "background-repeat: no-repeat"
)
self.parent.ui.splitter_2.setSizes([250, 50])
@@ -862,12 +931,16 @@ def widgets(self):
# update size of table columns
nbr_columns = self.parent.ui.tableWidget.columnCount()
for _col in range(nbr_columns):
- self.parent.ui.tableWidget.setColumnWidth(_col, self.parent.guide_table_width[_col])
+ self.parent.ui.tableWidget.setColumnWidth(
+ _col, self.parent.guide_table_width[_col]
+ )
# update size of summary table
nbr_columns = self.parent.ui.summary_table.columnCount()
for _col in range(nbr_columns):
- self.parent.ui.summary_table.setColumnWidth(_col, self.parent.summary_table_width[_col])
+ self.parent.ui.summary_table.setColumnWidth(
+ _col, self.parent.summary_table_width[_col]
+ )
self.parent.display_ui = [
self.parent.ui.display_size_label,
@@ -926,7 +999,8 @@ def get_image_selected(self, recalculate_image=False):
angle = self.parent.rotation_angle
# rotate all images
self.parent.data_dict["data"] = [
- transform.rotate(_image, angle) for _image in self.parent.data_dict_raw["data"]
+ transform.rotate(_image, angle)
+ for _image in self.parent.data_dict_raw["data"]
]
_image = self.parent.data_dict["data"][slider_index]
@@ -950,7 +1024,9 @@ def display_images(self):
_view_box.setState(_state)
if not first_update:
- _histo_widget.setLevels(self.parent.histogram_level[0], self.parent.histogram_level[1])
+ _histo_widget.setLevels(
+ self.parent.histogram_level[0], self.parent.histogram_level[1]
+ )
def calculate_matrix_grid(self, grid_size=1, height=1, width=1):
"""calculate the matrix that defines the vertical and horizontal lines
@@ -1000,12 +1076,17 @@ def display_grid(self):
grid_size = self.parent.ui.grid_size_slider.value()
[height, width] = np.shape(self.parent.live_image)
- pos_adj_dict = self.calculate_matrix_grid(grid_size=grid_size, height=height, width=width)
+ pos_adj_dict = self.calculate_matrix_grid(
+ grid_size=grid_size, height=height, width=width
+ )
pos = pos_adj_dict["pos"]
adj = pos_adj_dict["adj"]
line_color = self.parent.grid_view["color"]
- _transparency_value = 255 - (float(str(self.parent.ui.transparency_slider.value())) / 100) * 255
+ _transparency_value = (
+ 255
+ - (float(str(self.parent.ui.transparency_slider.value())) / 100) * 255
+ )
_list_line_color = list(line_color)
_list_line_color[3] = _transparency_value
line_color = tuple(_list_line_color)
diff --git a/notebooks/__code/select_files_and_folders.py b/notebooks/__code/select_files_and_folders.py
index 066ede30..11b8c7dc 100755
--- a/notebooks/__code/select_files_and_folders.py
+++ b/notebooks/__code/select_files_and_folders.py
@@ -14,7 +14,9 @@ class SelectFiles(FileFolderBrowser):
def __init__(self, system=None):
working_dir = system.System.get_working_dir()
- super(SelectFiles, self).__init__(working_dir=working_dir, next_function=self.retrieve_list_of_files)
+ super(SelectFiles, self).__init__(
+ working_dir=working_dir, next_function=self.retrieve_list_of_files
+ )
filters = {"TIFF": "*.tif"}
default_filter = "TIFF"
@@ -25,7 +27,9 @@ def retrieve_list_of_files(self, list_of_files=""):
class SelectFolderWithDefaultPaths(FileFolderBrowser):
- def __init__(self, system=None, working_dir="", next_function=None, multiple_flag=False):
+ def __init__(
+ self, system=None, working_dir="", next_function=None, multiple_flag=False
+ ):
if working_dir == "":
if system is None:
working_dir = "/"
@@ -33,7 +37,9 @@ def __init__(self, system=None, working_dir="", next_function=None, multiple_fla
working_dir = system.System.get_working_dir()
super(SelectFolderWithDefaultPaths, self).__init__(
- working_dir=working_dir, next_function=next_function, multiple_flag=multiple_flag
+ working_dir=working_dir,
+ next_function=next_function,
+ multiple_flag=multiple_flag,
)
ipts = os.path.basename(self.working_dir)
@@ -43,9 +49,15 @@ def __init__(self, system=None, working_dir="", next_function=None, multiple_fla
hbox = widgets.HBox(
[
widgets.Button(
- description=f"Jump to {ipts} Shared Folder", button_style="success", layout=button_layout
+ description=f"Jump to {ipts} Shared Folder",
+ button_style="success",
+ layout=button_layout,
+ ),
+ widgets.Button(
+ description="Jump to My Home Folder",
+ button_style="success",
+ layout=button_layout,
),
- widgets.Button(description="Jump to My Home Folder", button_style="success", layout=button_layout),
]
)
go_to_shared_button_ui = hbox.children[0]
@@ -83,11 +95,20 @@ def display_file_selector(self, start_dir=""):
class SelectFolder(FileFolderBrowser):
- def __init__(self, system=None, next_function=None, is_input_folder=False, working_dir="", multiple_flags=False):
+ def __init__(
+ self,
+ system=None,
+ next_function=None,
+ is_input_folder=False,
+ working_dir="",
+ multiple_flags=False,
+ ):
if not working_dir:
working_dir = system.System.get_working_dir()
- super(SelectFolder, self).__init__(working_dir=working_dir, next_function=next_function)
+ super(SelectFolder, self).__init__(
+ working_dir=working_dir, next_function=next_function
+ )
if is_input_folder:
self.select_input_folder(multiple_flag=multiple_flags)
else:
@@ -100,15 +121,24 @@ class SelectAsciiFile(FileFolderBrowser):
def __init__(self, system=None, instruction=""):
working_dir = system.System.get_working_dir()
- super(SelectAsciiFile, self).__init__(working_dir=working_dir, next_function=self.done_message)
+ super(SelectAsciiFile, self).__init__(
+ working_dir=working_dir, next_function=self.done_message
+ )
if not instruction:
instruction = "Select ASCII File!"
filters = {"Text": "*.txt"}
default_filter = "Text"
- self.select_images(filters=filters, instruction=instruction, multiple_flag=False, default_filter=default_filter)
+ self.select_images(
+ filters=filters,
+ instruction=instruction,
+ multiple_flag=False,
+ default_filter=default_filter,
+ )
def done_message(self, file_selected):
self.ascii_file = file_selected
- display_html_message(title_message="Selected Ascii File:", message=file_selected)
+ display_html_message(
+ title_message="Selected Ascii File:", message=file_selected
+ )
diff --git a/notebooks/__code/select_metadata_to_display.py b/notebooks/__code/select_metadata_to_display.py
index 42155266..445a2071 100755
--- a/notebooks/__code/select_metadata_to_display.py
+++ b/notebooks/__code/select_metadata_to_display.py
@@ -28,7 +28,11 @@ def display_metadata_list(self):
self.box1 = widgets.HBox(
[
widgets.Label("Select Metadata:", layout=widgets.Layout(width="10%")),
- widgets.Dropdown(options=display_format, value=display_format[0], layout=widgets.Layout(width="50%")),
+ widgets.Dropdown(
+ options=display_format,
+ value=display_format[0],
+ layout=widgets.Layout(width="50%"),
+ ),
]
)
display(self.box1)
@@ -64,7 +68,12 @@ def export(self):
parent_folder = self.list_images[0].split(os.path.sep)[-2]
metadata_name = f"metadata#{self.key}"
- output_file_name = os.path.join(output_folder, f"{parent_folder}_{metadata_name}.txt")
+ output_file_name = os.path.join(
+ output_folder, f"{parent_folder}_{metadata_name}.txt"
+ )
file_handler.make_ascii_file(
- metadata=["#Metadata: " + self.key], data=self.export_txt, dim="1d", output_file_name=output_file_name
+ metadata=["#Metadata: " + self.key],
+ data=self.export_txt,
+ dim="1d",
+ output_file_name=output_file_name,
)
diff --git a/notebooks/__code/sequential_combine_images_using_metadata.py b/notebooks/__code/sequential_combine_images_using_metadata.py
index 754b759c..88c3a4cd 100755
--- a/notebooks/__code/sequential_combine_images_using_metadata.py
+++ b/notebooks/__code/sequential_combine_images_using_metadata.py
@@ -45,7 +45,13 @@ def select_folder(self):
self.files_list_widget.show()
def info_folder_selected(self, selected):
- display(HTML('You selected folder: ' + selected + ""))
+ display(
+ HTML(
+ 'You selected folder: '
+ + selected
+ + ""
+ )
+ )
self.folder_selected = selected
def record_file_extension(self, filename=""):
@@ -107,7 +113,8 @@ def how_to_combine(self):
)
self.combine_method = widgets.RadioButtons(
- options=["add", "arithmetic mean", "geometric mean"], value="arithmetic mean"
+ options=["add", "arithmetic mean", "geometric mean"],
+ value="arithmetic mean",
)
vertical = widgets.VBox([alge_box, geo_box, self.combine_method])
@@ -140,8 +147,15 @@ def create_merging_list(self, list_of_files=[]):
create_list_progress = widgets.HBox(
[
- widgets.Label("Creating Merging List:", layout=widgets.Layout(width="20%")),
- widgets.IntProgress(max=len(self.list_images), min=1, value=1, layout=widgets.Layout(width="80%")),
+ widgets.Label(
+ "Creating Merging List:", layout=widgets.Layout(width="20%")
+ ),
+ widgets.IntProgress(
+ max=len(self.list_images),
+ min=1,
+ value=1,
+ layout=widgets.Layout(width="80%"),
+ ),
]
)
display(create_list_progress)
@@ -164,16 +178,20 @@ def create_merging_list(self, list_of_files=[]):
_list_files = [list_of_files[0]]
_dict_metadata = {}
- _previous_metadata = MetadataHandler.get_metata(filename=list_of_files[0], list_metadata=list_of_tag_selected)
+ _previous_metadata = MetadataHandler.get_metata(
+ filename=list_of_files[0], list_metadata=list_of_tag_selected
+ )
_previous_run = self.isolate_run_text_from_filename(list_of_files[0])
for _index, _file in enumerate(list_of_files[1:]):
- _current_metadata = MetadataHandler.get_metata(filename=_file, list_metadata=list_of_tag_selected)
+ _current_metadata = MetadataHandler.get_metata(
+ filename=_file, list_metadata=list_of_tag_selected
+ )
_current_run = self.isolate_run_text_from_filename(_file)
- if self.are_metadata_within_error_range(_current_metadata, _previous_metadata) and (
- _previous_run == _current_run
- ):
+ if self.are_metadata_within_error_range(
+ _current_metadata, _previous_metadata
+ ) and (_previous_run == _current_run):
_list_files.append(_file)
else:
str_position_counter = f"{position_counter:04d}"
@@ -187,7 +205,9 @@ def create_merging_list(self, list_of_files=[]):
_list_files = [_file]
if _previous_run != _current_run:
- master_list_images_to_combine[_previous_run] = list_images_to_combine
+ master_list_images_to_combine[_previous_run] = (
+ list_images_to_combine
+ )
list_images_to_combine = collections.OrderedDict()
_previous_metadata = _current_metadata
@@ -233,7 +253,9 @@ def isolate_value_from_metadata(self, metadata_string):
def isolate_run_text_from_filename(self, full_file_name):
basename = os.path.basename(full_file_name)
- regular_expression = self.extension_to_regular_expression_dict[self.file_extension]
+ regular_expression = self.extension_to_regular_expression_dict[
+ self.file_extension
+ ]
m = re.search(regular_expression, basename)
if m is not None:
return m.group("run")
@@ -267,7 +289,10 @@ def recap_merging_list(self):
box2 = widgets.VBox(
[
- widgets.Label("List of Files for this position", layout=widgets.Layout(width="100%")),
+ widgets.Label(
+ "List of Files for this position",
+ layout=widgets.Layout(width="100%"),
+ ),
widgets.Select(
options=self.get_list_of_files_for_selected_run_position(),
layout=widgets.Layout(width="100%", height="500px"),
@@ -286,13 +311,19 @@ def recap_merging_list(self):
layout=widgets.Layout(width="300px"),
)
- str_metadata = self.get_str_metadata(metadata_dict=self.get_metadata_for_selected_run_position())
+ str_metadata = self.get_str_metadata(
+ metadata_dict=self.get_metadata_for_selected_run_position()
+ )
self.metadata_recap_textarea = box3.children[1]
self.metadata_recap_textarea.value = str_metadata
- hori_box = widgets.HBox([box0, box1, box2, box3], layout=widgets.Layout(width="100%"))
+ hori_box = widgets.HBox(
+ [box0, box1, box2, box3], layout=widgets.Layout(width="100%")
+ )
- self.list_of_positions_ui.on_trait_change(self.recap_positions_changed, name="value")
+ self.list_of_positions_ui.on_trait_change(
+ self.recap_positions_changed, name="value"
+ )
self.list_of_runs_ui.on_trait_change(self.recap_runs_changed, name="value")
display(hori_box)
@@ -325,10 +356,14 @@ def recap_positions_changed(self):
position_selected = self.list_of_positions_ui.value
run_selected = self.list_of_runs_ui.value
- list_files_of_files = self.master_list_images_to_combine[run_selected][position_selected]["list_of_files"]
+ list_files_of_files = self.master_list_images_to_combine[run_selected][
+ position_selected
+ ]["list_of_files"]
self.list_of_files_ui.options = list_files_of_files
- str_metadata = self.get_str_metadata(metadata_dict=self.get_metadata_for_selected_run_position())
+ str_metadata = self.get_str_metadata(
+ metadata_dict=self.get_metadata_for_selected_run_position()
+ )
self.metadata_recap_textarea.value = str_metadata
def recap_runs_changed(self):
@@ -406,7 +441,9 @@ def merge(self, output_folder):
merging_ui = widgets.HBox(
[
widgets.Label("Merging Progress", layout=widgets.Layout(width="20%")),
- widgets.IntProgress(max=len(merging_list.keys()), layout=widgets.Layout(width="80%")),
+ widgets.IntProgress(
+ max=len(merging_list.keys()), layout=widgets.Layout(width="80%")
+ ),
]
)
display(merging_ui)
@@ -426,14 +463,20 @@ def merge(self, output_folder):
_data = o_load.data["sample"]["data"]
_metadata = o_load.data["sample"]["metadata"][0]
- combined_data = SequentialCombineImagesUsingMetadata._merging_algorithm(algorithm, _data)
+ combined_data = SequentialCombineImagesUsingMetadata._merging_algorithm(
+ algorithm, _data
+ )
_new_name = self._define_merged_file_name(
- output_folder=output_folder, run_label=_run, position_label=_position
+ output_folder=output_folder,
+ run_label=_run,
+ position_label=_position,
)
output_file_name = os.path.join(output_folder, _new_name)
- file_handler.save_data(data=combined_data, filename=output_file_name, metadata=_metadata)
+ file_handler.save_data(
+ data=combined_data, filename=output_file_name, metadata=_metadata
+ )
_run_index += 1
progress_bar_ui.value = _run_index
@@ -442,17 +485,25 @@ def merge(self, output_folder):
del merging_ui
display(
- HTML('Files have been created in : ' + output_folder + "")
+ HTML(
+ 'Files have been created in : '
+ + output_folder
+ + ""
+ )
)
def make_output_folder(self, output_folder):
algorithm_selected = self.__get_formated_merging_algo_name()
folder_selected = os.path.basename(os.path.dirname(self.folder_selected))
- output_folder = os.path.join(output_folder, f"{folder_selected}_{algorithm_selected}")
+ output_folder = os.path.join(
+ output_folder, f"{folder_selected}_{algorithm_selected}"
+ )
file_handler.make_folder(output_folder)
return output_folder
- def _define_merged_file_name(self, output_folder="", run_label="", position_label=""):
+ def _define_merged_file_name(
+ self, output_folder="", run_label="", position_label=""
+ ):
"""Create the new merged file name using the run, position labels
ex: run_label = "run1"
diff --git a/notebooks/__code/shifting_time_offset.py b/notebooks/__code/shifting_time_offset.py
index b97da0b7..2d32cf31 100755
--- a/notebooks/__code/shifting_time_offset.py
+++ b/notebooks/__code/shifting_time_offset.py
@@ -50,7 +50,11 @@ def built_list_of_fits_files(self, input_folder):
)
self.list_of_fits_files = list_of_fits_files
else:
- display(HTML('No FITS files Found!'))
+ display(
+ HTML(
+ 'No FITS files Found!'
+ )
+ )
def retrieve_parent_folder(self, folder):
self.working_dir = Path(folder).parent
@@ -64,7 +68,13 @@ def selected_other_folders(self, list_of_other_folders):
)
)
for _folder in list_of_other_folders:
- display(HTML(' - ' + _folder + " FITS files to process!"))
+ display(
+ HTML(
+ ' - '
+ + _folder
+ + " FITS files to process!"
+ )
+ )
def retrieve_name_of_timestamp_file(self, input_folder):
timestamp_files = list(Path(input_folder).glob("*_Spectra.txt"))
@@ -79,7 +89,11 @@ def retrieve_name_of_timestamp_file(self, input_folder):
)
self.timestamp_file = timestamp_file
else:
- display(HTML('Time stamp not Found'))
+ display(
+ HTML(
+ 'Time stamp not Found'
+ )
+ )
def load_timestamp_file(self, timestamp_file):
counts_vs_time_array = pd.read_csv(timestamp_file, sep="\t")
@@ -104,7 +118,10 @@ def plot_cutoff(index):
return index
self.index_slider = interact(
- plot_cutoff, index=widgets.IntSlider(min=0, max=x_index_axis[-1], value=0, continuous_update=False)
+ plot_cutoff,
+ index=widgets.IntSlider(
+ min=0, max=x_index_axis[-1], value=0, continuous_update=False
+ ),
)
def get_file_prefix(self, file_name):
@@ -125,7 +142,9 @@ def offset_images(self):
nbr_folder = len(list_of_folders)
- progress_bar = widgets.IntProgress(max=nbr_folder, layout=widgets.Layout(width="50%"))
+ progress_bar = widgets.IntProgress(
+ max=nbr_folder, layout=widgets.Layout(width="50%")
+ )
display(progress_bar)
offset_index = self.index_slider.widget.result
@@ -134,7 +153,9 @@ def offset_images(self):
for _index, _current_working_folder in enumerate(list_of_folders):
# get full list of FITS files
- list_of_fits_files = np.array(self.get_list_of_fits_files(_current_working_folder))
+ list_of_fits_files = np.array(
+ self.get_list_of_fits_files(_current_working_folder)
+ )
if list_of_fits_files == []:
continue
@@ -145,7 +166,9 @@ def offset_images(self):
self.retrieve_name_of_timestamp_file(_current_working_folder)
timestamp_file = self.timestamp_file
if not Path(timestamp_file).exists():
- list_folder_with_error.append(f"Error in {_current_working_folder}. Timestamp file missing!")
+ list_folder_with_error.append(
+ f"Error in {_current_working_folder}. Timestamp file missing!"
+ )
continue
# rename all files starting by file at index offset_index which will become index 0
@@ -155,7 +178,9 @@ def offset_images(self):
new_output_dir = current_working_dir + "_timeoffset_corrected"
self.copy_and_renamed_fits_files(
- output_dir=new_output_dir, original_list_of_files=new_list_of_fits_files, prefix=prefix
+ output_dir=new_output_dir,
+ original_list_of_files=new_list_of_fits_files,
+ prefix=prefix,
)
# modify timestamp file
@@ -163,7 +188,9 @@ def offset_images(self):
output_dir=new_output_dir, old_timestamp_filename=timestamp_file
)
self.create_new_timestamp_file(
- timestamp_file=timestamp_file, offset=offset_index, new_timestamp_filename=new_timestamp_filename
+ timestamp_file=timestamp_file,
+ offset=offset_index,
+ new_timestamp_filename=new_timestamp_filename,
)
progress_bar.value = _index + 1
@@ -176,7 +203,9 @@ def create_new_timestamp_filename(self, output_dir="./", old_timestamp_filename=
short_old_timestamp_filename = str(Path(old_timestamp_filename).name)
return str(Path(output_dir).joinpath(short_old_timestamp_filename))
- def create_new_timestamp_file(self, timestamp_file="", offset=0, new_timestamp_filename=""):
+ def create_new_timestamp_file(
+ self, timestamp_file="", offset=0, new_timestamp_filename=""
+ ):
timestamp_array = self.load_timestamp_file(timestamp_file)
time_axis = timestamp_array[:, 0]
new_counts_axis = np.roll(np.array(timestamp_array[:, 1]), -offset)
@@ -188,18 +217,28 @@ def create_new_timestamp_file(self, timestamp_file="", offset=0, new_timestamp_f
# bring back axis together
combined_array = np.stack((new_time_axis, new_counts_axis)).T
# print("new timesamp_filename is {}".format(new_timestamp_filename))
- make_ascii_file(data=combined_array, output_file_name=new_timestamp_filename, sep="\t")
+ make_ascii_file(
+ data=combined_array, output_file_name=new_timestamp_filename, sep="\t"
+ )
def display_errors(self, list_folder_with_error=[]):
for _line in list_folder_with_error:
- display(HTML('' + _line + "!"))
+ display(
+ HTML('' + _line + "!")
+ )
- def copy_and_renamed_fits_files(self, output_dir="./", original_list_of_files=[], prefix="test"):
+ def copy_and_renamed_fits_files(
+ self, output_dir="./", original_list_of_files=[], prefix="test"
+ ):
current_working_dir = str(Path(original_list_of_files[0]).parent)
make_or_reset_folder(output_dir)
log_file = str(Path(output_dir).joinpath("renaming_log.txt"))
- renaming_log_file = [f"Renaming schema of folder {current_working_dir}", "old name -> new name", ""]
+ renaming_log_file = [
+ f"Renaming schema of folder {current_working_dir}",
+ "old name -> new name",
+ "",
+ ]
for index, _file in enumerate(original_list_of_files):
old_name = Path(_file).name
new_name = Path(output_dir).joinpath(prefix + f"_{index:05d}.fits")
diff --git a/notebooks/__code/super_user.py b/notebooks/__code/super_user.py
index b808afed..a4145d2b 100755
--- a/notebooks/__code/super_user.py
+++ b/notebooks/__code/super_user.py
@@ -8,7 +8,9 @@
from .config import debugger_folder as list_debugging_folder
from .config import debugging
from .config import password_to_unlock_config as PASSWORD
-from .config import percentage_of_images_to_use_for_roi_selection as PERCENTAGE_OF_IMAGES
+from .config import (
+ percentage_of_images_to_use_for_roi_selection as PERCENTAGE_OF_IMAGES,
+)
THIS_FILE_PATH = os.path.dirname(__file__)
CONFIG_FILE = os.path.join(THIS_FILE_PATH, "config.py")
@@ -19,15 +21,24 @@ def __init__(self):
self.launch_ui()
def launch_ui(self):
- password = widgets.Password(value="", placeholder="Enter password", description="Password", diabled=False)
+ password = widgets.Password(
+ value="",
+ placeholder="Enter password",
+ description="Password",
+            disabled=False,
+ )
password.observe(self.password_entered, names="value")
# ----
# debugging mode
- self.debugging_mode = widgets.Checkbox(value=debugging, description="Debugging Mode", disabled=True)
+ self.debugging_mode = widgets.Checkbox(
+ value=debugging, description="Debugging Mode", disabled=True
+ )
- self.debugging_folder_label = widgets.HTML("List of folders to look for when running in debugging mode.")
+ self.debugging_folder_label = widgets.HTML(
+ "List of folders to look for when running in debugging mode."
+ )
self.debugging_folder = widgets.Select(
options=list_debugging_folder,
value=list_debugging_folder[0],
@@ -46,21 +57,37 @@ def launch_ui(self):
)
self.remove_entry.on_click(self.remove_entry_clicked)
- self.new_entry_text = widgets.Text(value="", description="New folder", disabled=True)
+ self.new_entry_text = widgets.Text(
+ value="", description="New folder", disabled=True
+ )
self.add_entry = widgets.Button(
- description="", disabled=True, button_style="", icon="plus-square", layout=widgets.Layout(width="95px")
+ description="",
+ disabled=True,
+ button_style="",
+ icon="plus-square",
+ layout=widgets.Layout(width="95px"),
)
self.add_entry.on_click(self.add_entry_clicked)
hori_layout_percentage = widgets.HBox([self.new_entry_text, self.add_entry])
# percentage of images to use for roi selection
- self.percentage_roi_label = widgets.HTML("Percentage of images to use for ROI selection", disabled=True)
+ self.percentage_roi_label = widgets.HTML(
+ "Percentage of images to use for ROI selection", disabled=True
+ )
percentage_of_images = PERCENTAGE_OF_IMAGES * 100
self.percentage_roi_value = widgets.FloatText(
- value=percentage_of_images, disabled=True, layout=widgets.Layout(width="50px")
+ value=percentage_of_images,
+ disabled=True,
+ layout=widgets.Layout(width="50px"),
)
self.percentage_units = widgets.HTML("%", disabled=True)
- hori_layout = widgets.HBox([self.percentage_roi_label, self.percentage_roi_value, self.percentage_units])
+ hori_layout = widgets.HBox(
+ [
+ self.percentage_roi_label,
+ self.percentage_roi_value,
+ self.percentage_units,
+ ]
+ )
# ----
self.save_changes = widgets.Button(
@@ -159,7 +186,9 @@ def save_button_clicked(self, value):
elif "debugger_folder = " in _line:
ascii_after.append(f"debugger_folder = {str_list_folders_formatted}")
elif "percentage_of_images_to_use_for_roi_selection = " in _line:
- ascii_after.append(f"percentage_of_images_to_use_for_roi_selection = {percentage_roi_selection}")
+ ascii_after.append(
+ f"percentage_of_images_to_use_for_roi_selection = {percentage_roi_selection}"
+ )
else:
ascii_after.append(_line)
diff --git a/notebooks/__code/system.py b/notebooks/__code/system.py
index 95288602..ed393498 100755
--- a/notebooks/__code/system.py
+++ b/notebooks/__code/system.py
@@ -45,7 +45,9 @@ def select_working_dir(
""")
)
- full_list_instruments = cls.get_full_list_instrument(instrument_to_exclude=instrument_to_exclude)
+ full_list_instruments = cls.get_full_list_instrument(
+ instrument_to_exclude=instrument_to_exclude
+ )
full_list_instruments.sort()
if instrument in full_list_instruments:
default_instrument = instrument
@@ -53,16 +55,22 @@ def select_working_dir(
default_instrument = full_list_instruments[0]
start_path = cls.get_start_path(
- debugger_folder=debugger_folder, system_folder=system_folder, instrument=default_instrument
+ debugger_folder=debugger_folder,
+ system_folder=system_folder,
+ instrument=default_instrument,
)
cls.start_path = start_path
select_instrument_ui = widgets.HBox(
[
- widgets.Label("Select Instrument", layout=widgets.Layout(width="20%")),
+ widgets.Label(
+ "Select Instrument", layout=widgets.Layout(width="20%")
+ ),
widgets.Select(
- options=full_list_instruments, value=default_instrument, layout=widgets.Layout(width="20%")
+ options=full_list_instruments,
+ value=default_instrument,
+ layout=widgets.Layout(width="20%"),
),
]
)
@@ -76,7 +84,9 @@ def select_working_dir(
[
widgets.Label("IPTS-"),
widgets.Text(value="", layout=widgets.Layout(width="10%")),
- widgets.Label("DOES NOT EXIST!", layout=widgets.Layout(width="20%")),
+ widgets.Label(
+ "DOES NOT EXIST!", layout=widgets.Layout(width="20%")
+ ),
]
)
cls.result_label = top_hbox.children[2]
@@ -92,12 +102,16 @@ def select_working_dir(
[
widgets.Label("Select Folder", layout=widgets.Layout(width="20%")),
widgets.Select(
- options=user_list_folders, value=default_value, layout=widgets.Layout(height="300px")
+ options=user_list_folders,
+ value=default_value,
+ layout=widgets.Layout(height="300px"),
),
]
)
cls.user_list_folders = user_list_folders
- box = widgets.VBox([select_instrument_ui, top_hbox, or_label, bottom_hbox, help_ui])
+ box = widgets.VBox(
+ [select_instrument_ui, top_hbox, or_label, bottom_hbox, help_ui]
+ )
display(box)
cls.working_dir_ui = bottom_hbox.children[1]
@@ -114,7 +128,11 @@ def select_working_dir(
except:
cls.working_dir = os.path.expanduser("~")
display(
- HTML('working dir set to -> ' + cls.working_dir + "")
+ HTML(
+ 'working dir set to -> '
+ + cls.working_dir
+ + ""
+ )
)
cls.log_use(notebook=notebook)
@@ -151,13 +169,21 @@ def get_list_folders(cls, start_path=""):
cls.start_path = start_path
list_folders = sorted(glob.glob(os.path.join(start_path, "*")), reverse=True)
- short_list_folders = [os.path.basename(_folder) for _folder in list_folders if os.path.isdir(_folder)]
+ short_list_folders = [
+ os.path.basename(_folder)
+ for _folder in list_folders
+ if os.path.isdir(_folder)
+ ]
# short_list_folders = sorted(short_list_folders)
# if user mode, only display folder user can access
default_value = ""
if not debugging:
- user_list_folders = [os.path.basename(_folder) for _folder in list_folders if os.access(_folder, os.R_OK)]
+ user_list_folders = [
+ os.path.basename(_folder)
+ for _folder in list_folders
+ if os.access(_folder, os.R_OK)
+ ]
if len(user_list_folders) > 0:
default_value = user_list_folders[0]
else: # debugging
@@ -193,7 +219,9 @@ def get_computer_name(cls):
@classmethod
def get_facility_selected(cls):
- return cls.get_facility_from_instrument(instrument=cls.get_instrument_selected())
+ return cls.get_facility_from_instrument(
+ instrument=cls.get_instrument_selected()
+ )
@classmethod
def get_start_path(cls, debugger_folder="", system_folder="", instrument=""):
@@ -241,7 +269,9 @@ def get_start_path(cls, debugger_folder="", system_folder="", instrument=""):
def select_ipts_help(cls, value):
import webbrowser
- webbrowser.open("https://neutronimaging.pages.ornl.gov/tutorial/notebooks/select_ipts/")
+ webbrowser.open(
+ "https://neutronimaging.pages.ornl.gov/tutorial/notebooks/select_ipts/"
+ )
@classmethod
def check_instrument_input(cls, value_dict):
diff --git a/notebooks/__code/table_handler.py b/notebooks/__code/table_handler.py
index 9be94414..749177c7 100755
--- a/notebooks/__code/table_handler.py
+++ b/notebooks/__code/table_handler.py
@@ -12,7 +12,9 @@ def __init__(self, table_ui=None):
def select_everything(self, state):
nbr_row = self.table_ui.rowCount()
nbr_column = self.table_ui.columnCount()
- selection_range = QtGui.QTableWidgetSelectionRange(0, 0, nbr_row - 1, nbr_column - 1)
+ selection_range = QtGui.QTableWidgetSelectionRange(
+ 0, 0, nbr_row - 1, nbr_column - 1
+ )
self.table_ui.setRangeSelected(selection_range, state)
def select_rows(self, list_of_rows=None):
@@ -21,7 +23,9 @@ def select_rows(self, list_of_rows=None):
nbr_column = self.table_ui.columnCount()
for _row in list_of_rows:
- selection_range = QtGui.QTableWidgetSelectionRange(_row, 0, _row, nbr_column - 1)
+ selection_range = QtGui.QTableWidgetSelectionRange(
+ _row, 0, _row, nbr_column - 1
+ )
self.table_ui.setRangeSelected(selection_range, True)
def remove_all_rows(self):
diff --git a/notebooks/__code/template_ui.py b/notebooks/__code/template_ui.py
index b7c6dd9c..c2a29cf3 100755
--- a/notebooks/__code/template_ui.py
+++ b/notebooks/__code/template_ui.py
@@ -61,7 +61,9 @@ def __init__(self, parent=None, o_norm=None):
self.ui.slider.valueChanged.connect(self.file_index_changed)
# spacer
- spacer = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
+ spacer = QtGui.QSpacerItem(
+ 40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum
+ )
bottom_layout.addWidget(label_1)
bottom_layout.addWidget(self.ui.slider)
diff --git a/notebooks/__code/time_utility.py b/notebooks/__code/time_utility.py
index 725ead85..bb552105 100755
--- a/notebooks/__code/time_utility.py
+++ b/notebooks/__code/time_utility.py
@@ -25,7 +25,13 @@ def format_time_stamp(file_name=None, time_stamp=None):
[hours, minutes, seconds] = hours.split(":")
_dict_time = {"hours": hours, "minutes": minutes, "seconds": seconds}
- _dict_time_stamp = {"week_day": week_day, "month": month, "day": day, "hours": _dict_time, "year": year}
+ _dict_time_stamp = {
+ "week_day": week_day,
+ "month": month,
+ "day": day,
+ "hours": _dict_time,
+ "year": year,
+ }
return [_short_file_name, _dict_time_stamp]
@@ -99,8 +105,10 @@ def __init__(self, folder="", files=[], is_notebook=False):
self.__is_notebook = is_notebook
def _run(self):
- [list_files, ext] = file_handler.retrieve_list_of_most_dominant_extension_from_folder(
- folder=self.folder, files=self.input_list_files
+ [list_files, ext] = (
+ file_handler.retrieve_list_of_most_dominant_extension_from_folder(
+ folder=self.folder, files=self.input_list_files
+ )
)
self.output_list_files = list_files
@@ -114,8 +122,15 @@ def _run(self):
if self.__is_notebook:
box = widgets.HBox(
[
- widgets.Label("Retrieving Time Stamp", layout=widgets.Layout(width="20%")),
- widgets.IntProgress(min=0, max=len(list_files), value=0, layout=widgets.Layout(width="50%")),
+ widgets.Label(
+ "Retrieving Time Stamp", layout=widgets.Layout(width="20%")
+ ),
+ widgets.IntProgress(
+ min=0,
+ max=len(list_files),
+ value=0,
+ layout=widgets.Layout(width="50%"),
+ ),
]
)
progress_bar = box.children[1]
@@ -152,7 +167,12 @@ class TimestampFormatter:
"%Y-%m-%dT%I:%M:%S-",
]
- def __init__(self, timestamp="", input_timestamp_format=None, output_timestamp_format=TIMESTAMP_FORMAT):
+ def __init__(
+ self,
+ timestamp="",
+ input_timestamp_format=None,
+ output_timestamp_format=TIMESTAMP_FORMAT,
+ ):
self.timestamp = timestamp
if input_timestamp_format is None:
self.input_timestamp_format = self.list_input_timestamp
@@ -189,7 +209,9 @@ def convert_timestamp(self, timestamp):
o_time = None
for _input_timestamp_format in input_timestamp_format:
# print("trying this format {} with this {}".format(_input_timestamp_format, timestamp))
- o_time = TimestampFormatter.get_time_dict(timestamp=timestamp, input_time_format=_input_timestamp_format)
+ o_time = TimestampFormatter.get_time_dict(
+ timestamp=timestamp, input_time_format=_input_timestamp_format
+ )
if o_time:
break
@@ -228,11 +250,15 @@ def convert_to_second(timestamp_value, timestamp_format=TIMESTAMP_FORMAT):
class AbsoluteTimeHandler:
def __init__(self, initial_absolute_time=None):
if initial_absolute_time is None:
- raise ValueError("Please provide an initial absolute time format as 'YYYY-MM-DDTHH:MM:SS.SSSSSS-05:00")
+ raise ValueError(
+ "Please provide an initial absolute time format as 'YYYY-MM-DDTHH:MM:SS.SSSSSS-05:00"
+ )
self.formatted_initial_absolute_time = parse(initial_absolute_time)
- def get_absolute_time_for_this_delta_time_array(self, delta_time_array=None, units="seconds"):
+ def get_absolute_time_for_this_delta_time_array(
+ self, delta_time_array=None, units="seconds"
+ ):
"""
:param delta_time_array: list of time offset
@@ -254,7 +280,10 @@ def get_absolute_time_for_this_delta_time_array(self, delta_time_array=None, uni
self.delta_time_formated = delta_time_formated
- absolute_time = [delta_time + self.formatted_initial_absolute_time for delta_time in delta_time_formated]
+ absolute_time = [
+ delta_time + self.formatted_initial_absolute_time
+ for delta_time in delta_time_formated
+ ]
return absolute_time
@@ -268,7 +297,9 @@ class RelativeTimeHandler:
def __init__(self, master_initial_time=None, local_initial_time=None):
if (master_initial_time is None) or (local_initial_time is None):
- raise ValueError("Please provide an initial absolute time format as 'YYYY-MM-DDTHH:MM:SS.SSSSSS-05:00")
+ raise ValueError(
+ "Please provide an initial absolute time format as 'YYYY-MM-DDTHH:MM:SS.SSSSSS-05:00"
+ )
formatted_master_initial_time = parse(master_initial_time)
formatted_local_initial_time = parse(local_initial_time)
@@ -276,7 +307,9 @@ def __init__(self, master_initial_time=None, local_initial_time=None):
if formatted_local_initial_time < formatted_master_initial_time:
raise ValueError("Master time should be before local time!")
- time_offset_calculated = formatted_local_initial_time - formatted_master_initial_time
+ time_offset_calculated = (
+ formatted_local_initial_time - formatted_master_initial_time
+ )
self.time_offset_calculated_s = time_offset_calculated.seconds
def get_relative_time_for_this_time_array(self, time_array=None):
diff --git a/notebooks/__code/timepix3_event_hdf5_he3_detector/timepix3_event_hdf5_he3_detector.py b/notebooks/__code/timepix3_event_hdf5_he3_detector/timepix3_event_hdf5_he3_detector.py
index 53f66e85..77f0d7c7 100755
--- a/notebooks/__code/timepix3_event_hdf5_he3_detector/timepix3_event_hdf5_he3_detector.py
+++ b/notebooks/__code/timepix3_event_hdf5_he3_detector/timepix3_event_hdf5_he3_detector.py
@@ -67,14 +67,20 @@ def rebin_and_display_h3_data(self):
bin_size = hbox.children[1]
- fig, ax = plt.subplots(figsize=(8, 8), nrows=1, ncols=1, num="Histogram of He3 detector")
+ fig, ax = plt.subplots(
+ figsize=(8, 8), nrows=1, ncols=1, num="Histogram of He3 detector"
+ )
- def plot_rebinned_data(x_axis="TOF", nbrs_bins=2, dSD_m=19.855, offset_micros=0, element="Ni"):
+ def plot_rebinned_data(
+ x_axis="TOF", nbrs_bins=2, dSD_m=19.855, offset_micros=0, element="Ni"
+ ):
if element == "Ni":
_handler = BraggEdgeLibrary(material=[element], number_of_bragg_edges=5)
else: # Ta
_handler = BraggEdgeLibrary(
- new_material=[{"name": "Ta", "lattice": 3.3058, "crystal_structure": "BCC"}],
+ new_material=[
+ {"name": "Ta", "lattice": 3.3058, "crystal_structure": "BCC"}
+ ],
number_of_bragg_edges=5,
)
@@ -132,11 +138,20 @@ def plot_rebinned_data(x_axis="TOF", nbrs_bins=2, dSD_m=19.855, offset_micros=0,
options=["TOF", "lambda"],
value="lambda",
),
- nbrs_bins=widgets.IntSlider(value=10000, min=1, max=100000, continuous_update=False),
+ nbrs_bins=widgets.IntSlider(
+ value=10000, min=1, max=100000, continuous_update=False
+ ),
dSD_m=widgets.FloatSlider(
- value=19.855, min=15, max=25, step=0.001, continuous_update=False, readout_format=".3f"
+ value=19.855,
+ min=15,
+ max=25,
+ step=0.001,
+ continuous_update=False,
+ readout_format=".3f",
+ ),
+ offset_micros=widgets.IntSlider(
+ value=0, min=0, max=15000, continuous_update=False
),
- offset_micros=widgets.IntSlider(value=0, min=0, max=15000, continuous_update=False),
element=widgets.RadioButtons(options=["Ni", "Ta"], value="Ni"),
)
display(v)
diff --git a/notebooks/__code/timepix3_from_event_to_histo_hdf5/timepix3_from_event_to_histo_hdf5.py b/notebooks/__code/timepix3_from_event_to_histo_hdf5/timepix3_from_event_to_histo_hdf5.py
index 0b0c3a0f..73f15edb 100755
--- a/notebooks/__code/timepix3_from_event_to_histo_hdf5/timepix3_from_event_to_histo_hdf5.py
+++ b/notebooks/__code/timepix3_from_event_to_histo_hdf5/timepix3_from_event_to_histo_hdf5.py
@@ -103,7 +103,9 @@ def display_infos(self):
vbox = widgets.VBox(
[
widgets.Label("Metadata"),
- widgets.Textarea(value=metadata, disabled=True, layout=widgets.Layout(height="200px")),
+ widgets.Textarea(
+ value=metadata, disabled=True, layout=widgets.Layout(height="200px")
+ ),
]
)
display(vbox)
@@ -111,14 +113,18 @@ def display_infos(self):
def define_detector(self):
self.width_ui = widgets.IntText(value=1024, description="Width")
self.height_ui = widgets.IntText(value=1024, description="Height")
- vbox = widgets.VBox([widgets.Label("MCP detector size:"), self.height_ui, self.width_ui])
+ vbox = widgets.VBox(
+ [widgets.Label("MCP detector size:"), self.height_ui, self.width_ui]
+ )
display(vbox)
def select_binning_parameter(self):
self.nbr_bin_ui = widgets.IntText(value=1000, description="Nbr of bins:")
display(self.nbr_bin_ui)
- self.range_to_use = widgets.IntSlider(value=50, max=100, min=1, description="% to use")
+ self.range_to_use = widgets.IntSlider(
+ value=50, max=100, min=1, description="% to use"
+ )
display(self.range_to_use)
def bins(self):
@@ -227,15 +233,21 @@ def define_output_filename(self):
input_nexus_filename = os.path.basename(self.input_nexus_file_name)
export_id = widgets.HBox(
[
- widgets.Label("Output file name:", layout=widgets.Layout(width="150px")),
- widgets.Text(value=input_nexus_filename, layout=widgets.Layout(width="300px")),
+ widgets.Label(
+ "Output file name:", layout=widgets.Layout(width="150px")
+ ),
+ widgets.Text(
+ value=input_nexus_filename, layout=widgets.Layout(width="300px")
+ ),
]
)
display(export_id)
self.output_file_name_id = export_id.children[1]
def select_output_location(self):
- o_output_folder = FileFolderBrowser(working_dir=self.working_dir, next_function=self.export_h5)
+ o_output_folder = FileFolderBrowser(
+ working_dir=self.working_dir, next_function=self.export_h5
+ )
o_output_folder.select_output_folder(instruction="Select output folder ...")
def export_h5(self, output_folder):
@@ -260,10 +272,16 @@ def export_h5(self, output_folder):
f.create_dataset("entry/histo/number_of_bins", data=self.nbr_bins)
f.create_dataset("entry/histo/tof_ns", data=self.bins_tof)
f.create_group("entry/infos")
- f.create_dataset("entry/infos/input_nexus_filename", data=self.input_nexus_file_name)
+ f.create_dataset(
+ "entry/infos/input_nexus_filename", data=self.input_nexus_file_name
+ )
display(HTML("Writing HDF5 file .... Done!"))
display(
- HTML('hdf5 file created:' + full_output_filename + "!")
+ HTML(
+ 'hdf5 file created:'
+ + full_output_filename
+ + "!"
+ )
)
logging.info(f"hdf5 file created: {full_output_filename}")
diff --git a/notebooks/__code/timepix3_histo_hdf5_mcp_detector/fit_regions.py b/notebooks/__code/timepix3_histo_hdf5_mcp_detector/fit_regions.py
index b7eea468..0c96b232 100755
--- a/notebooks/__code/timepix3_histo_hdf5_mcp_detector/fit_regions.py
+++ b/notebooks/__code/timepix3_histo_hdf5_mcp_detector/fit_regions.py
@@ -140,7 +140,10 @@ def high_lambda(self):
self.fit_dict["a0"] = {"value": a0_value, "error": a0_error}
self.fit_dict["b0"] = {"value": b0_value, "error": b0_error}
- self.fit_dict[FittingRegions.high_lambda] = {"xaxis": xaxis, "yaxis": yaxis_fitted}
+ self.fit_dict[FittingRegions.high_lambda] = {
+ "xaxis": xaxis,
+ "yaxis": yaxis_fitted,
+ }
def low_lambda(self):
logging.info("fitting low lambda:")
@@ -181,12 +184,19 @@ def low_lambda(self):
logging.info(f"\t{bhkl_error =}")
yaxis_fitted = kropff_low_lambda(
- xaxis, self.fit_dict["a0"]["value"], self.fit_dict["b0"]["value"], ahkl_value, bhkl_value
+ xaxis,
+ self.fit_dict["a0"]["value"],
+ self.fit_dict["b0"]["value"],
+ ahkl_value,
+ bhkl_value,
)
self.fit_dict["ahkl"] = {"value": ahkl_value, "error": ahkl_error}
self.fit_dict["bhkl"] = {"value": bhkl_value, "error": bhkl_error}
- self.fit_dict[FittingRegions.low_lambda] = {"xaxis": xaxis, "yaxis": yaxis_fitted}
+ self.fit_dict[FittingRegions.low_lambda] = {
+ "xaxis": xaxis,
+ "yaxis": yaxis_fitted,
+ }
def bragg_peak(self):
self.bragg_peak_fix_lambda()
@@ -285,7 +295,9 @@ def bragg_peak(self):
def bragg_peak_fix_lambda(self):
logging.info("Fitting bragg peak with a fixed initial lambda_hkl:")
- gmodel = Model(kropff_bragg_peak_tof, nan_policy="propagate", independent_vars=["lda"])
+ gmodel = Model(
+ kropff_bragg_peak_tof, nan_policy="propagate", independent_vars=["lda"]
+ )
lambda_hkl = self.lambdahkl
tau = self.tau
@@ -294,8 +306,12 @@ def bragg_peak_fix_lambda(self):
left_peak_index = self.left_edge_index
right_peak_index = self.right_edge_index
- xaxis = copy.deepcopy(self.x_axis_to_fit)[left_peak_index : right_peak_index + 1]
- yaxis = copy.deepcopy(self.y_axis_to_fit)[left_peak_index : right_peak_index + 1]
+ xaxis = copy.deepcopy(self.x_axis_to_fit)[
+ left_peak_index : right_peak_index + 1
+ ]
+ yaxis = copy.deepcopy(self.y_axis_to_fit)[
+ left_peak_index : right_peak_index + 1
+ ]
yaxis = -np.log(yaxis)
logging.info(f"{xaxis =}")
@@ -331,9 +347,14 @@ def bragg_peak_fix_lambda(self):
logging.info(f"\t{tau_value =}")
logging.info(f"\t{tau_error =}")
- yaxis_fitted = kropff_bragg_peak_tof(xaxis, a0, b0, ahkl, bhkl, ldahkl_value, sigma_value, tau_value)
+ yaxis_fitted = kropff_bragg_peak_tof(
+ xaxis, a0, b0, ahkl, bhkl, ldahkl_value, sigma_value, tau_value
+ )
self.fit_dict["lambdahkl"] = {"value": ldahkl_value, "error": ldahkl_error}
self.fit_dict["sigma"] = {"value": sigma_value, "error": sigma_error}
self.fit_dict["tau"] = {"value": tau_value, "error": tau_error}
- self.fit_dict[FittingRegions.bragg_peak] = {"xaxis": xaxis, "yaxis": yaxis_fitted}
+ self.fit_dict[FittingRegions.bragg_peak] = {
+ "xaxis": xaxis,
+ "yaxis": yaxis_fitted,
+ }
diff --git a/notebooks/__code/timepix3_histo_hdf5_mcp_detector/timepix3_histo_hdf5_mcp_detector.py b/notebooks/__code/timepix3_histo_hdf5_mcp_detector/timepix3_histo_hdf5_mcp_detector.py
index e53c6423..ede647cd 100755
--- a/notebooks/__code/timepix3_histo_hdf5_mcp_detector/timepix3_histo_hdf5_mcp_detector.py
+++ b/notebooks/__code/timepix3_histo_hdf5_mcp_detector/timepix3_histo_hdf5_mcp_detector.py
@@ -49,7 +49,9 @@ class Timepix3HistoHdf5McpDetector:
default_parameters = {
JSONKeys.dSD_m: 19.855,
- JSONKeys.rois_selected: {0: {JSONKeys.x0: 467, JSONKeys.y0: 99, JSONKeys.x1: 975, JSONKeys.y1: 429}},
+ JSONKeys.rois_selected: {
+ 0: {JSONKeys.x0: 467, JSONKeys.y0: 99, JSONKeys.x1: 975, JSONKeys.y1: 429}
+ },
JSONKeys.offset_micros: 0,
JSONKeys.time_shift: 0,
JSONKeys.element: "Ni",
@@ -103,7 +105,11 @@ def hdf5_or_config_file_input(self):
)
display(self.toggle_button)
- validate_button = widgets.Button(description="SELECT", icon="folder-open", layout=widgets.Layout(width="310px"))
+ validate_button = widgets.Button(
+ description="SELECT",
+ icon="folder-open",
+ layout=widgets.Layout(width="310px"),
+ )
display(validate_button)
validate_button.on_click(self.input_selection_made)
@@ -174,11 +180,19 @@ def load_config(self, config_file_name):
self.default_parameters[JSONKeys.fitting_parameters][JSONKeys.a0] = float(a0)
self.default_parameters[JSONKeys.fitting_parameters][JSONKeys.b0] = float(b0)
- self.default_parameters[JSONKeys.fitting_parameters][JSONKeys.ahkl] = float(ahkl)
- self.default_parameters[JSONKeys.fitting_parameters][JSONKeys.bhkl] = float(bhkl)
- self.default_parameters[JSONKeys.fitting_parameters][JSONKeys.lambdahkl] = float(lambdahkl)
+ self.default_parameters[JSONKeys.fitting_parameters][JSONKeys.ahkl] = float(
+ ahkl
+ )
+ self.default_parameters[JSONKeys.fitting_parameters][JSONKeys.bhkl] = float(
+ bhkl
+ )
+ self.default_parameters[JSONKeys.fitting_parameters][JSONKeys.lambdahkl] = (
+ float(lambdahkl)
+ )
self.default_parameters[JSONKeys.fitting_parameters][JSONKeys.tau] = float(tau)
- self.default_parameters[JSONKeys.fitting_parameters][JSONKeys.sigma] = float(sigma)
+ self.default_parameters[JSONKeys.fitting_parameters][JSONKeys.sigma] = float(
+ sigma
+ )
display(
HTML(
@@ -197,7 +211,9 @@ def load_nexus(self, nexus_file_name=None):
with h5py.File(nexus_file_name, "r") as f:
self.stack = np.array(f["entry"]["histo"]["stack"])
- self.time_spectra = np.array(f["entry"]["histo"]["tof_ns"]) / 1000 # to convert to micros
+ self.time_spectra = (
+ np.array(f["entry"]["histo"]["tof_ns"]) / 1000
+ ) # to convert to micros
def preview_integrated_stack(self):
self.integrated_stack = self.stack.sum(axis=0)
@@ -260,7 +276,12 @@ def calculate_and_display_profile(self):
total_pixels_in_rois += width * height
_rect = patches.Rectangle(
- (x0, y0), x1 - x0, y1 - y0, linewidth=1, edgecolor=list_matplotlib_colors[_roi_index], facecolor="none"
+ (x0, y0),
+ x1 - x0,
+ y1 - y0,
+ linewidth=1,
+ edgecolor=list_matplotlib_colors[_roi_index],
+ facecolor="none",
)
rect_array.append(_rect)
@@ -273,7 +294,9 @@ def calculate_and_display_profile(self):
x1 = rois_selected[_roi_index]["x1"]
y1 = rois_selected[_roi_index]["y1"]
- total_counts_for_this_image += np.nansum(_image[y0 : y1 + 1, x0 : x1 + 1])
+ total_counts_for_this_image += np.nansum(
+ _image[y0 : y1 + 1, x0 : x1 + 1]
+ )
profile.append(total_counts_for_this_image / total_pixels_in_rois)
@@ -296,7 +319,9 @@ def plot_profile(x_axis, dSD_m, offset_micros, time_shift, element):
_handler = BraggEdgeLibrary(material=[element], number_of_bragg_edges=6)
else: # Ta
_handler = BraggEdgeLibrary(
- new_material=[{"name": "Ta", "lattice": 3.3058, "crystal_structure": "BCC"}],
+ new_material=[
+ {"name": "Ta", "lattice": 3.3058, "crystal_structure": "BCC"}
+ ],
number_of_bragg_edges=6,
)
@@ -363,7 +388,10 @@ def plot_profile(x_axis, dSD_m, offset_micros, time_shift, element):
readout_format=".3f",
),
offset_micros=widgets.IntSlider(
- value=self.default_parameters[JSONKeys.offset_micros], min=0, max=15000, continuous_update=False
+ value=self.default_parameters[JSONKeys.offset_micros],
+ min=0,
+ max=15000,
+ continuous_update=False,
),
time_shift=widgets.IntSlider(
value=self.default_parameters[JSONKeys.time_shift],
@@ -372,7 +400,9 @@ def plot_profile(x_axis, dSD_m, offset_micros, time_shift, element):
step=1,
continuous_update=False,
),
- element=widgets.RadioButtons(options=LIST_ELEMENTS, value=self.default_parameters[JSONKeys.element]),
+ element=widgets.RadioButtons(
+ options=LIST_ELEMENTS, value=self.default_parameters[JSONKeys.element]
+ ),
)
display(self.v)
@@ -382,7 +412,11 @@ def select_peak_to_fit(self):
'Full range of peak to fit (left_range, right_range)'
)
)
- display(HTML('Peak threshold (left_peak, right_peak)'))
+ display(
+ HTML(
+ 'Peak threshold (left_peak, right_peak)'
+ )
+ )
lambda_x_axis, profile_shifted = self.prepare_data()
self.lambda_x_axis = lambda_x_axis
@@ -476,17 +510,25 @@ def prepare_data(self):
def fitting(self):
# setup parameters
- display(HTML('Init parameters'))
+ display(
+ HTML('Init parameters')
+ )
text_width = "80px" # px
display(HTML('High lambda'))
default_a0 = self.default_parameters[JSONKeys.fitting_parameters][JSONKeys.a0]
self.a0_layout = widgets.HBox(
- [widgets.Label("a\u2080"), widgets.IntText(default_a0, layout=widgets.Layout(width=text_width))]
+ [
+ widgets.Label("a\u2080"),
+ widgets.IntText(default_a0, layout=widgets.Layout(width=text_width)),
+ ]
)
default_b0 = self.default_parameters[JSONKeys.fitting_parameters][JSONKeys.b0]
self.b0_layout = widgets.HBox(
- [widgets.Label("b\u2080"), widgets.IntText(default_b0, layout=widgets.Layout(width=text_width))]
+ [
+ widgets.Label("b\u2080"),
+ widgets.IntText(default_b0, layout=widgets.Layout(width=text_width)),
+ ]
)
high_layout = widgets.VBox([self.a0_layout, self.b0_layout])
display(high_layout)
@@ -494,14 +536,18 @@ def fitting(self):
display(HTML(""))
display(HTML('Low lambda'))
- default_ahkl = self.default_parameters[JSONKeys.fitting_parameters][JSONKeys.ahkl]
+ default_ahkl = self.default_parameters[JSONKeys.fitting_parameters][
+ JSONKeys.ahkl
+ ]
self.ahkl_layout = widgets.HBox(
[
widgets.Label("a\u2095\u2096\u2097"),
widgets.IntText(default_ahkl, layout=widgets.Layout(width=text_width)),
]
)
- default_bhkl = self.default_parameters[JSONKeys.fitting_parameters][JSONKeys.bhkl]
+ default_bhkl = self.default_parameters[JSONKeys.fitting_parameters][
+ JSONKeys.bhkl
+ ]
self.bhkl_layout = widgets.HBox(
[
widgets.Label("b\u2095\u2096\u2097"),
@@ -514,27 +560,45 @@ def fitting(self):
display(HTML(""))
display(HTML('Bragg peak'))
- default_lambdahkl = self.default_parameters[JSONKeys.fitting_parameters][JSONKeys.lambdahkl]
+ default_lambdahkl = self.default_parameters[JSONKeys.fitting_parameters][
+ JSONKeys.lambdahkl
+ ]
self.lambdahkl_layout = widgets.HBox(
[
widgets.Label("\u03bb\u2095\u2096\u2097"),
- widgets.FloatText(default_lambdahkl, layout=widgets.Layout(width=text_width)),
+ widgets.FloatText(
+ default_lambdahkl, layout=widgets.Layout(width=text_width)
+ ),
]
)
default_tau = self.default_parameters[JSONKeys.fitting_parameters][JSONKeys.tau]
self.tau_layout = widgets.HBox(
- [widgets.Label("\u03c4"), widgets.FloatText(default_tau, layout=widgets.Layout(width=text_width))]
+ [
+ widgets.Label("\u03c4"),
+ widgets.FloatText(default_tau, layout=widgets.Layout(width=text_width)),
+ ]
)
- default_sigma = self.default_parameters[JSONKeys.fitting_parameters][JSONKeys.sigma]
+ default_sigma = self.default_parameters[JSONKeys.fitting_parameters][
+ JSONKeys.sigma
+ ]
self.sigma_layout = widgets.HBox(
- [widgets.Label("\u03c3"), widgets.FloatText(default_sigma, layout=widgets.Layout(width=text_width))]
+ [
+ widgets.Label("\u03c3"),
+ widgets.FloatText(
+ default_sigma, layout=widgets.Layout(width=text_width)
+ ),
+ ]
+ )
+ bragg_peak_layout = widgets.VBox(
+ [self.lambdahkl_layout, self.tau_layout, self.sigma_layout]
)
- bragg_peak_layout = widgets.VBox([self.lambdahkl_layout, self.tau_layout, self.sigma_layout])
display(bragg_peak_layout)
display(widgets.HTML("
index: {left_peak}")
- logging.info(f"\tpeak right_range: {right_lambda_range}" + "\u212b " + f"-> index: {right_peak}")
- logging.info(f"\tedge left_range: {left_lambda_edge}" + "\u212b " + f"-> index: {left_edge}")
- logging.info(f"\tedge right_range: {right_lambda_edge}" + "\u212b " + f"-> index: {right_edge}")
+ logging.info(
+ f"\tpeak left_range: {left_lambda_range}"
+ + "\u212b "
+ + f"-> index: {left_peak}"
+ )
+ logging.info(
+ f"\tpeak right_range: {right_lambda_range}"
+ + "\u212b "
+ + f"-> index: {right_peak}"
+ )
+ logging.info(
+ f"\tedge left_range: {left_lambda_edge}"
+ + "\u212b "
+ + f"-> index: {left_edge}"
+ )
+ logging.info(
+ f"\tedge right_range: {right_lambda_edge}"
+ + "\u212b "
+ + f"-> index: {right_edge}"
+ )
logging.info(f"\tlambda_x_axis: {lambda_x_axis}")
logging.info(f"\tsize of profile: {len(profile_shifted)}")
@@ -604,7 +684,9 @@ def fit_peak(self, _):
# display full spectrum
list_matplotlib_colors = Color.list_matplotlib
- ax4.plot(x_axis_to_fit, -np.log(y_axis_to_fit), "*", color=list_matplotlib_colors[0])
+ ax4.plot(
+ x_axis_to_fit, -np.log(y_axis_to_fit), "*", color=list_matplotlib_colors[0]
+ )
max_counts = 0
dict_of_fit_dict = {}
@@ -630,13 +712,21 @@ def fit_peak(self, _):
# display fitting
# high lambda
- x_axis_fitted_high_lambda = o_fit_regions.fit_dict[FittingRegions.high_lambda]["xaxis"]
- y_axis_fitted_high_lambda = o_fit_regions.fit_dict[FittingRegions.high_lambda]["yaxis"]
+ x_axis_fitted_high_lambda = o_fit_regions.fit_dict[FittingRegions.high_lambda][
+ "xaxis"
+ ]
+ y_axis_fitted_high_lambda = o_fit_regions.fit_dict[FittingRegions.high_lambda][
+ "yaxis"
+ ]
ax4.plot(x_axis_fitted_high_lambda, y_axis_fitted_high_lambda, "r-")
# low lambda
- x_axis_fitted_low_lambda = o_fit_regions.fit_dict[FittingRegions.low_lambda]["xaxis"]
- y_axis_fitted_low_lambda = o_fit_regions.fit_dict[FittingRegions.low_lambda]["yaxis"]
+ x_axis_fitted_low_lambda = o_fit_regions.fit_dict[FittingRegions.low_lambda][
+ "xaxis"
+ ]
+ y_axis_fitted_low_lambda = o_fit_regions.fit_dict[FittingRegions.low_lambda][
+ "yaxis"
+ ]
ax4.plot(x_axis_fitted_low_lambda, y_axis_fitted_low_lambda, "y-")
# bragg peak
@@ -666,7 +756,10 @@ def fit_peak(self, _):
_handler = BraggEdgeLibrary(material=[element], number_of_bragg_edges=6)
else: # Ta
_handler = BraggEdgeLibrary(
- new_material=[{"name": "Ta", "lattice": 3.3058, "crystal_structure": "BCC"}], number_of_bragg_edges=6
+ new_material=[
+ {"name": "Ta", "lattice": 3.3058, "crystal_structure": "BCC"}
+ ],
+ number_of_bragg_edges=6,
)
self.bragg_edges = _handler.bragg_edges
@@ -688,7 +781,9 @@ def fit_peak(self, _):
def saving_session(self):
# select output location
- o_output_folder = FileFolderBrowser(working_dir=self.working_dir, next_function=self.export_session)
+ o_output_folder = FileFolderBrowser(
+ working_dir=self.working_dir, next_function=self.export_session
+ )
o_output_folder.select_output_folder(instruction="Select output folder ...")
def export_session(self, output_folder=None):
@@ -699,7 +794,9 @@ def export_session(self, output_folder=None):
base, _ = os.path.splitext(os.path.basename(input_nexus_filename))
current_time = get_current_time_in_special_file_name_format()
- output_file_name = os.path.abspath(os.path.join(output_folder, f"config_{base}_{current_time}.cfg"))
+ output_file_name = os.path.abspath(
+ os.path.join(output_folder, f"config_{base}_{current_time}.cfg")
+ )
# record all parameters
rois_selected = self.rois_selected
diff --git a/notebooks/__code/timepix3_image_statistics/main.py b/notebooks/__code/timepix3_image_statistics/main.py
index 7ac3a57e..adb1a808 100644
--- a/notebooks/__code/timepix3_image_statistics/main.py
+++ b/notebooks/__code/timepix3_image_statistics/main.py
@@ -7,13 +7,11 @@
import numpy as np
import logging as notebook_logging
import matplotlib.pyplot as plt
-from matplotlib.patches import Rectangle
from ipywidgets import interactive
from ipywidgets import widgets
from PIL import Image
from __code.ipywe import fileselector
-from __code._utilities.file import get_full_log_file_name
from __code.timepix3_image_statistics import config
# Setup plotting
@@ -24,11 +22,9 @@
class Timepix3ImageStatistics:
-
-
def __init__(self, working_dir=None, debug=False):
self.working_dir = working_dir
- self.debug = debug
+ self.debug = debug
self.initialize()
def initialize(self):
@@ -72,11 +68,11 @@ def load_data(self):
def display_integrated_image(self):
fig, ax = plt.subplots(figsize=(8, 8))
- ax.imshow(self.integrated_image, cmap='viridis', origin='lower')
- ax.set_title('Integrated Image')
- ax.set_xlabel('X (pixels)')
- ax.set_ylabel('Y (pixels)')
- fig.colorbar(ax.images[0], ax=ax, label='Counts')
+ ax.imshow(self.integrated_image, cmap="viridis", origin="lower")
+ ax.set_title("Integrated Image")
+ ax.set_xlabel("X (pixels)")
+ ax.set_ylabel("Y (pixels)")
+ fig.colorbar(ax.images[0], ax=ax, label="Counts")
plt.show()
def display_chips(self):
@@ -84,89 +80,89 @@ def display_chips(self):
self.chip2 = self.integrated_image[0:256, 0:256]
self.chip3 = self.integrated_image[256:, 0:256]
self.chip4 = self.integrated_image[256:, 256:]
-
- cmap = 'viridis' # 'gray', 'viridis', 'plasma', 'inferno', 'magma', 'cividis'
+
+ cmap = "viridis" # 'gray', 'viridis', 'plasma', 'inferno', 'magma', 'cividis'
fig, axs = plt.subplots(2, 2, figsize=(10, 8))
im01 = axs[0, 1].imshow(self.chip1, cmap=cmap, vmin=0)
fig.colorbar(im01, ax=axs[0, 1])
- axs[0, 1].set_title('Chip 1')
+ axs[0, 1].set_title("Chip 1")
im02 = axs[0, 0].imshow(self.chip2, cmap=cmap, vmin=0)
fig.colorbar(im02, ax=axs[0, 0])
- axs[0, 0].set_title('Chip 2')
+ axs[0, 0].set_title("Chip 2")
im03 = axs[1, 0].imshow(self.chip3, cmap=cmap, vmin=0)
fig.colorbar(im03, ax=axs[1, 0])
- axs[1, 0].set_title('Chip 3')
+ axs[1, 0].set_title("Chip 3")
im04 = axs[1, 1].imshow(self.chip4, cmap=cmap, vmin=0)
fig.colorbar(im04, ax=axs[1, 1])
- axs[1, 1].set_title('Chip 4')
+ axs[1, 1].set_title("Chip 4")
fig.tight_layout()
# compare histograms of each chips
fig, ax = plt.subplots(figsize=(8, 6))
- ax.hist(self.chip1.ravel(), bins=256, alpha=0.5, label='Chip 1')
- ax.hist(self.chip2.ravel(), bins=256, alpha=0.5, label='Chip 2')
- ax.hist(self.chip3.ravel(), bins=256, alpha=0.5, label='Chip 3')
- ax.hist(self.chip4.ravel(), bins=256, alpha=0.5, label='Chip 4')
- ax.set_yscale('log')
- ax.set_xlabel('Pixel Value')
- ax.set_ylabel('Frequency')
+ ax.hist(self.chip1.ravel(), bins=256, alpha=0.5, label="Chip 1")
+ ax.hist(self.chip2.ravel(), bins=256, alpha=0.5, label="Chip 2")
+ ax.hist(self.chip3.ravel(), bins=256, alpha=0.5, label="Chip 3")
+ ax.hist(self.chip4.ravel(), bins=256, alpha=0.5, label="Chip 4")
+ ax.set_yscale("log")
+ ax.set_xlabel("Pixel Value")
+ ax.set_ylabel("Frequency")
ax.legend()
plt.show()
def process_chips(self):
- stat = {'min': [], 'max': [], 'mean': [], 'median': [], 'std': [], 'sum': []}
+ stat = {"min": [], "max": [], "mean": [], "median": [], "std": [], "sum": []}
Timepix3ImageStatistics.chip_stats(self.chip1, stat)
Timepix3ImageStatistics.chip_stats(self.chip2, stat)
Timepix3ImageStatistics.chip_stats(self.chip3, stat)
Timepix3ImageStatistics.chip_stats(self.chip4, stat)
fig, axs = plt.subplots(2, 3, figsize=(10, 7))
- axs[0][0].plot(stat['min'], marker='o')
- axs[0][0].set_title('Min')
+ axs[0][0].plot(stat["min"], marker="o")
+ axs[0][0].set_title("Min")
axs[0][0].set_xticks([0, 1, 2, 3])
- axs[0][0].set_xticklabels(['Chip 1', 'Chip 2', 'Chip 3', 'Chip 4'])
+ axs[0][0].set_xticklabels(["Chip 1", "Chip 2", "Chip 3", "Chip 4"])
- axs[0][1].plot(stat['max'], marker='o')
- axs[0][1].set_title('Max')
+ axs[0][1].plot(stat["max"], marker="o")
+ axs[0][1].set_title("Max")
axs[0][1].set_xticks([0, 1, 2, 3])
- axs[0][1].set_xticklabels(['Chip 1', 'Chip 2', 'Chip 3', 'Chip 4'])
+ axs[0][1].set_xticklabels(["Chip 1", "Chip 2", "Chip 3", "Chip 4"])
- axs[0][2].plot(stat['mean'], marker='o')
- axs[0][2].set_title('Mean')
+ axs[0][2].plot(stat["mean"], marker="o")
+ axs[0][2].set_title("Mean")
axs[0][2].set_xticks([0, 1, 2, 3])
- axs[0][2].set_xticklabels(['Chip 1', 'Chip 2', 'Chip 3', 'Chip 4'])
+ axs[0][2].set_xticklabels(["Chip 1", "Chip 2", "Chip 3", "Chip 4"])
- axs[1][0].plot(stat['median'], marker='o')
- axs[1][0].set_title('Median')
+ axs[1][0].plot(stat["median"], marker="o")
+ axs[1][0].set_title("Median")
axs[1][0].set_xticks([0, 1, 2, 3])
- axs[1][0].set_xticklabels(['Chip 1', 'Chip 2', 'Chip 3', 'Chip 4'])
+ axs[1][0].set_xticklabels(["Chip 1", "Chip 2", "Chip 3", "Chip 4"])
- axs[1][1].plot(stat['std'], marker='o')
- axs[1][1].set_title('Standard Deviation')
+ axs[1][1].plot(stat["std"], marker="o")
+ axs[1][1].set_title("Standard Deviation")
axs[1][1].set_xticks([0, 1, 2, 3])
- axs[1][1].set_xticklabels(['Chip 1', 'Chip 2', 'Chip 3', 'Chip 4'])
+ axs[1][1].set_xticklabels(["Chip 1", "Chip 2", "Chip 3", "Chip 4"])
- axs[1][2].plot(stat['sum'], marker='o')
- axs[1][2].set_title('Sum')
+ axs[1][2].plot(stat["sum"], marker="o")
+ axs[1][2].set_title("Sum")
axs[1][2].set_xticks([0, 1, 2, 3])
- axs[1][2].set_xticklabels(['Chip 1', 'Chip 2', 'Chip 3', 'Chip 4'])
+ axs[1][2].set_xticklabels(["Chip 1", "Chip 2", "Chip 3", "Chip 4"])
fig.tight_layout()
# let's get the statistics of each chip
@staticmethod
def chip_stats(chip, stat):
- stat['min'].append(np.min(chip))
- stat['max'].append(np.max(chip))
- stat['mean'].append(np.mean(chip))
- stat['median'].append(np.median(chip))
- stat['std'].append(np.std(chip))
- stat['sum'].append(np.sum(chip))
+ stat["min"].append(np.min(chip))
+ stat["max"].append(np.max(chip))
+ stat["mean"].append(np.mean(chip))
+ stat["median"].append(np.median(chip))
+ stat["std"].append(np.std(chip))
+ stat["sum"].append(np.sum(chip))
def locate_dead_pixels(self):
# locate all the dead pixels (value = 0 )
@@ -175,7 +171,8 @@ def locate_dead_pixels(self):
dead_pixels_chip3 = np.where(self.chip3 == 0)
dead_pixels_chip4 = np.where(self.chip4 == 0)
- display(HTML(f"""
+ display(
+ HTML(f"""
Dead Pixels Information
| Chip | Number of Dead Pixels |
@@ -184,127 +181,158 @@ def locate_dead_pixels(self):
| Chip 3 | {len(dead_pixels_chip3[0])} |
| Chip 4 | {len(dead_pixels_chip4[0])} |
- """))
+ """)
+ )
# highlight the dead pixels in each chip
- cmap = 'viridis'
+ cmap = "viridis"
fig, axs = plt.subplots(2, 2, figsize=(10, 8))
im01 = axs[0, 1].imshow(self.chip1, cmap=cmap)
- axs[0, 1].scatter(dead_pixels_chip1[1], dead_pixels_chip1[0], color='y', s=1)
+ axs[0, 1].scatter(dead_pixels_chip1[1], dead_pixels_chip1[0], color="y", s=1)
fig.colorbar(im01, ax=axs[0, 1])
- axs[0, 1].set_title(f'Chip 1 ({len(dead_pixels_chip1[0])} dead pixels)')
+ axs[0, 1].set_title(f"Chip 1 ({len(dead_pixels_chip1[0])} dead pixels)")
im02 = axs[0, 0].imshow(self.chip2, cmap=cmap)
- axs[0, 0].scatter(dead_pixels_chip2[1], dead_pixels_chip2[0], color='y', s=1)
+ axs[0, 0].scatter(dead_pixels_chip2[1], dead_pixels_chip2[0], color="y", s=1)
fig.colorbar(im02, ax=axs[0, 0])
- axs[0, 0].set_title(f'Chip 2 ({len(dead_pixels_chip2[0])} dead pixels)')
+ axs[0, 0].set_title(f"Chip 2 ({len(dead_pixels_chip2[0])} dead pixels)")
im03 = axs[1, 0].imshow(self.chip3, cmap=cmap)
- axs[1, 0].scatter(dead_pixels_chip3[1], dead_pixels_chip3[0], color='y', s=1)
- fig.colorbar(im03, ax=axs[1, 0])
- axs[1, 0].set_title(f'Chip 3 ({len(dead_pixels_chip3[0])} dead pixels)')
+ axs[1, 0].scatter(dead_pixels_chip3[1], dead_pixels_chip3[0], color="y", s=1)
+ fig.colorbar(im03, ax=axs[1, 0])
+ axs[1, 0].set_title(f"Chip 3 ({len(dead_pixels_chip3[0])} dead pixels)")
im04 = axs[1, 1].imshow(self.chip4, cmap=cmap)
- axs[1, 1].scatter(dead_pixels_chip4[1], dead_pixels_chip4[0], color='y', s=1)
+ axs[1, 1].scatter(dead_pixels_chip4[1], dead_pixels_chip4[0], color="y", s=1)
fig.colorbar(im04, ax=axs[1, 1])
- axs[1, 1].set_title(f'Chip 4 ({len(dead_pixels_chip4[0])} dead pixels)')
+ axs[1, 1].set_title(f"Chip 4 ({len(dead_pixels_chip4[0])} dead pixels)")
plt.show()
def locate_high_pixels(self):
- default_threshold = 0.1 * np.max(self.integrated_image) # 10% of max value
+ default_threshold = 0.1 * np.max(self.integrated_image) # 10% of max value
def display_high_pixels(threshold):
-
fig, axs = plt.subplots(nrows=2, ncols=2, figsize=(10, 8))
- im1 = axs[0, 1].imshow(self.chip1, cmap='viridis', origin='lower')
+ im1 = axs[0, 1].imshow(self.chip1, cmap="viridis", origin="lower")
high_pixels1 = np.where(self.chip1 >= threshold)
- display(HTML(f"Number of high pixels in chip1 (>= {threshold}): {len(high_pixels1[0])}"))
- axs[0, 1].scatter(high_pixels1[1], high_pixels1[0], color='r', s=1)
- axs[0, 1].set_title(f'High Pixels (>= {threshold})')
- axs[0, 1].set_xlabel('X (pixels)')
- axs[0, 1].set_ylabel('Y (pixels)')
- axs[0, 1].set_title('Chip 1')
+ display(
+ HTML(
+ f"Number of high pixels in chip1 (>= {threshold}): {len(high_pixels1[0])}"
+ )
+ )
+ axs[0, 1].scatter(high_pixels1[1], high_pixels1[0], color="r", s=1)
+ axs[0, 1].set_title(f"High Pixels (>= {threshold})")
+ axs[0, 1].set_xlabel("X (pixels)")
+ axs[0, 1].set_ylabel("Y (pixels)")
+ axs[0, 1].set_title("Chip 1")
fig.colorbar(im1, ax=axs[0, 1])
high_pixels2 = np.where(self.chip2 >= threshold)
- display(HTML(f"Number of high pixels in chip2 (>= {threshold}): {len(high_pixels2[0])}"))
- im2 = axs[0, 0].imshow(self.chip2, cmap='viridis', origin='lower')
- axs[0, 0].scatter(high_pixels2[1], high_pixels2[0], color='r', s=1)
- axs[0, 0].set_title(f'High Pixels (>= {threshold})')
- axs[0, 0].set_xlabel('X (pixels)')
- axs[0, 0].set_ylabel('Y (pixels)')
- axs[0, 0].set_title('Chip 2')
+ display(
+ HTML(
+ f"Number of high pixels in chip2 (>= {threshold}): {len(high_pixels2[0])}"
+ )
+ )
+ im2 = axs[0, 0].imshow(self.chip2, cmap="viridis", origin="lower")
+ axs[0, 0].scatter(high_pixels2[1], high_pixels2[0], color="r", s=1)
+ axs[0, 0].set_title(f"High Pixels (>= {threshold})")
+ axs[0, 0].set_xlabel("X (pixels)")
+ axs[0, 0].set_ylabel("Y (pixels)")
+ axs[0, 0].set_title("Chip 2")
fig.colorbar(im2, ax=axs[0, 0])
- im3 = axs[1, 0].imshow(self.chip3, cmap='viridis', origin='lower')
+ im3 = axs[1, 0].imshow(self.chip3, cmap="viridis", origin="lower")
high_pixels3 = np.where(self.chip3 >= threshold)
- display(HTML(f"Number of high pixels in chip3 (>= {threshold}): {len(high_pixels3[0])}"))
- axs[1, 0].scatter(high_pixels3[1], high_pixels3[0], color='r', s=1)
- axs[1, 0].set_title(f'High Pixels (>= {threshold})')
- axs[1, 0].set_xlabel('X (pixels)')
- axs[1, 0].set_ylabel('Y (pixels)')
- axs[1, 0].set_title('Chip 3')
+ display(
+ HTML(
+ f"Number of high pixels in chip3 (>= {threshold}): {len(high_pixels3[0])}"
+ )
+ )
+ axs[1, 0].scatter(high_pixels3[1], high_pixels3[0], color="r", s=1)
+ axs[1, 0].set_title(f"High Pixels (>= {threshold})")
+ axs[1, 0].set_xlabel("X (pixels)")
+ axs[1, 0].set_ylabel("Y (pixels)")
+ axs[1, 0].set_title("Chip 3")
fig.colorbar(im3, ax=axs[1, 0])
- im4 = axs[1, 1].imshow(self.chip4, cmap='viridis', origin='lower')
+ im4 = axs[1, 1].imshow(self.chip4, cmap="viridis", origin="lower")
high_pixels4 = np.where(self.chip4 >= threshold)
- display(HTML(f"Number of high pixels in chip4 (>= {threshold}): {len(high_pixels4[0])}"))
- axs[1, 1].scatter(high_pixels4[1], high_pixels4[0], color='r', s=1)
- axs[1, 1].set_title(f'High Pixels (>= {threshold})')
- axs[1, 1].set_xlabel('X (pixels)')
- axs[1, 1].set_ylabel('Y (pixels)')
- axs[1, 1].set_title('Chip 4')
+ display(
+ HTML(
+ f"Number of high pixels in chip4 (>= {threshold}): {len(high_pixels4[0])}"
+ )
+ )
+ axs[1, 1].scatter(high_pixels4[1], high_pixels4[0], color="r", s=1)
+ axs[1, 1].set_title(f"High Pixels (>= {threshold})")
+ axs[1, 1].set_xlabel("X (pixels)")
+ axs[1, 1].set_ylabel("Y (pixels)")
+ axs[1, 1].set_title("Chip 4")
fig.colorbar(im4, ax=axs[1, 1])
plt.tight_layout()
plt.show()
- display_plot = interactive(display_high_pixels,
- threshold=widgets.IntSlider(min=0,
- max=default_threshold,
- value=np.max(self.integrated_image),
- ),
- )
+ display_plot = interactive(
+ display_high_pixels,
+ threshold=widgets.IntSlider(
+ min=0,
+ max=default_threshold,
+ value=np.max(self.integrated_image),
+ ),
+ )
display(display_plot)
-
def after_tpx3_file_selection(self, folder_name):
logging.info(f"TPX3 folder selected: {folder_name}")
sub_folder_name = self.make_sure_its_the_correct_folder(folder_name)
- logging.info(f"done running make_sure_its_the_correct_folder, Using folder: {sub_folder_name =}")
- if not (folder_name is None):
- self.get_file_infos(original_folder_name=folder_name,
- sub_folder_name=sub_folder_name)
+ logging.info(
+ f"done running make_sure_its_the_correct_folder, Using folder: {sub_folder_name =}"
+ )
+ if folder_name is not None:
+ self.get_file_infos(
+ original_folder_name=folder_name, sub_folder_name=sub_folder_name
+ )
# self.processing_tpx3(file_name=file_name)
def make_sure_its_the_correct_folder(self, folder_name):
list_of_tif_files = glob.glob(os.path.join(folder_name, "*.tif*"))
if len(list_of_tif_files) > 0:
- logging.info(f"Folder {folder_name} contains .tif files. We are good to go!")
+ logging.info(
+ f"Folder {folder_name} contains .tif files. We are good to go!"
+ )
return folder_name
else:
# trying one folder deeper
- list_of_sub_folders = [f.path for f in os.scandir(folder_name) if f.is_dir()]
+ list_of_sub_folders = [
+ f.path for f in os.scandir(folder_name) if f.is_dir()
+ ]
for sub_folder in list_of_sub_folders:
list_of_tif_files = glob.glob(os.path.join(sub_folder, "*.tif*"))
if len(list_of_tif_files) > 0:
- logging.info(f"Folder {folder_name} does not contain .tif files. Using sub-folder {sub_folder} instead")
- display(HTML(f"Folder {folder_name} does not contain .tif files. Using sub-folder {sub_folder} instead"))
+ logging.info(
+ f"Folder {folder_name} does not contain .tif files. Using sub-folder {sub_folder} instead"
+ )
+ display(
+ HTML(
+ f"Folder {folder_name} does not contain .tif files. Using sub-folder {sub_folder} instead"
+ )
+ )
return sub_folder
- logging.info(f"Folder {folder_name} does not contain .tif files. Please select another folder")
+ logging.info(
+ f"Folder {folder_name} does not contain .tif files. Please select another folder"
+ )
self.select_tpx3_folder()
return None
-
+
def get_file_infos(self, original_folder_name=None, sub_folder_name=None):
-
if sub_folder_name:
logging.info(f"Getting folder info for: {sub_folder_name}")
-
+
# Simulate getting file info
self.file_info = {
"original_folder_name": original_folder_name,
"sub_folder_name": sub_folder_name,
"base name": os.path.basename(sub_folder_name),
- "path" : os.path.dirname(sub_folder_name),
+ "path": os.path.dirname(sub_folder_name),
"size": f"{os.path.getsize(sub_folder_name) / (1024*1024):.2f} MB",
}
@@ -318,18 +346,24 @@ def get_file_infos(self, original_folder_name=None, sub_folder_name=None):
self.file_info["number of .tif files"] = len(list_of_tiff_files)
# size of images
total_size = sum(os.path.getsize(f) for f in list_of_tiff_files)
- self.file_info["total size of .tif files"] = f"{total_size / (1024*1024):.2f} MB"
+ self.file_info["total size of .tif files"] = (
+ f"{total_size / (1024*1024):.2f} MB"
+ )
# size of first image
if len(list_of_tiff_files) > 0:
- self.file_info["size of each .tif file"] = f"{os.path.getsize(list_of_tiff_files[0]) / (1024*1024):.2f} MB"
+ self.file_info["size of each .tif file"] = (
+ f"{os.path.getsize(list_of_tiff_files[0]) / (1024*1024):.2f} MB"
+ )
else:
self.file_info["size of each .tif file"] = "N/A"
- logging.info(f"File info:")
+ logging.info("File info:")
for _key in self.file_info:
logging.info(f"\t{_key}: {self.file_info[_key]}")
- display(HTML("""
+ display(
+ HTML(
+ """
TPX3 Folder Information
| Property | Value |
@@ -343,26 +377,31 @@ def get_file_infos(self, original_folder_name=None, sub_folder_name=None):
| Total size of .tif files | {} |
| Size of each .tif file | {} |
- """.format(self.file_info["original_folder_name"],
- self.file_info["sub_folder_name"],
- self.file_info["base name"],
- self.file_info["path"],
- self.file_info["size"],
- self.file_info["modified"],
- self.file_info["number of .tif files"],
- self.file_info["total size of .tif files"],
- self.file_info["size of each .tif file"])))
+ """.format(
+ self.file_info["original_folder_name"],
+ self.file_info["sub_folder_name"],
+ self.file_info["base name"],
+ self.file_info["path"],
+ self.file_info["size"],
+ self.file_info["modified"],
+ self.file_info["number of .tif files"],
+ self.file_info["total size of .tif files"],
+ self.file_info["size of each .tif file"],
+ )
+ )
+ )
else:
logging.info(f"Folder not found: {sub_folder_name}")
def processing_tpx3(self, file_name=None):
-
- logging.info(f"Processing TPX3 file: {os.path.basename(file_name)} ... (be patient!)")
+ logging.info(
+ f"Processing TPX3 file: {os.path.basename(file_name)} ... (be patient!)"
+ )
start_time = time.time()
-
+
# loading tpx3 file
- hits_view = tdc.process_tpx3(file_name, parallel=True)
+ hits_view = tdc.process_tpx3(file_name, parallel=True)
hits = np.array(hits_view, copy=False)
self.hits = hits
@@ -372,9 +411,11 @@ def processing_tpx3(self, file_name=None):
display(HTML(f"Processing completed in {processing_time:.2f} seconds."))
# display result as a table
- from_tof = hits['tof'].min() * 25 / 1e6
- to_tof = hits['tof'].max() * 25 / 1e6
- display(HTML("""
+ from_tof = hits["tof"].min() * 25 / 1e6
+ to_tof = hits["tof"].max() * 25 / 1e6
+ display(
+ HTML(
+ """
TPX3 Processing Results
| Infos | Value |
@@ -383,25 +424,35 @@ def processing_tpx3(self, file_name=None):
| Y range | {} - {} |
| TOF range (ms) | {:.3f} - {:.3f} |
- """.format(len(hits), hits['x'].min(), hits['x'].max(), hits['y'].min(), hits['y'].max(),
- from_tof, to_tof)))
+ """.format(
+ len(hits),
+ hits["x"].min(),
+ hits["x"].max(),
+ hits["y"].min(),
+ hits["y"].max(),
+ from_tof,
+ to_tof,
+ )
+ )
+ )
unique_chips, chip_counts = np.unique(hits["chip_id"], return_counts=True)
-        _text = 'Chips distribution (hits per chips)' + \
-                ''
+        _text = (
+            "Chips distribution (hits per chips)"
+            + ''
+        )
for chip, count in zip(unique_chips, chip_counts):
percentage = 100 * count / len(hits)
-            _text += f'| Chip {chip} | {count:,} hits ({percentage:.1f}%) |'
-        _text += ''
+            _text += f"| Chip {chip} | {count:,} hits ({percentage:.1f}%) |"
+        _text += ""
display(HTML(_text))
def select_sampling_percentage(self):
if len(self.hits) > 100_000:
label = widgets.Label("Select sampling percentage:")
- self.sampling_percentage_ui = widgets.FloatSlider(min=0.01,
- max=100,
- value=0.1,
- step=0.01)
+ self.sampling_percentage_ui = widgets.FloatSlider(
+ min=0.01, max=100, value=0.1, step=0.01
+ )
hori_layout = widgets.HBox([label, self.sampling_percentage_ui])
self.apply_sampling = True
display(hori_layout)
@@ -410,32 +461,36 @@ def select_sampling_percentage(self):
self.apply_sampling = False
def display_image_with_roi(self):
- self.generate_2d_hit_map()
+ self.generate_2d_hit_map()
def generate_2d_hit_map(self):
- sample_fraction = self.sampling_percentage_ui.value / 100.
+ sample_fraction = self.sampling_percentage_ui.value / 100.0
if self.apply_sampling:
n_sample = int(len(self.hits) * sample_fraction)
- sample_indices = np.random.choice(len(self.hits), size=n_sample, replace=False)
+ sample_indices = np.random.choice(
+ len(self.hits), size=n_sample, replace=False
+ )
hits_for_viz = self.hits[sample_indices]
- display(HTML(f"Using {n_sample:,} sampled hits ({sample_fraction*100:.1f}%) for visualization"))
+ display(
+ HTML(
+ f"Using {n_sample:,} sampled hits ({sample_fraction*100:.1f}%) for visualization"
+ )
+ )
else:
hits_for_viz = self.hits
display(HTML(f"Using all {len(self.hits):,} hits for visualization"))
-
+
# Create 2D histogram (bin by detector pixels)
x_bins = np.arange(0, 515, 1) # 0 to 514 pixels
y_bins = np.arange(0, 515, 1) # 0 to 514 pixels
self.hist2d, self.x_edges, self.y_edges = np.histogram2d(
- hits_for_viz["x"], hits_for_viz["y"], bins=[x_bins, y_bins])
+ hits_for_viz["x"], hits_for_viz["y"], bins=[x_bins, y_bins]
+ )
def select_roi(self):
pass
-
+
def generate_histogram_and_select_roi(self):
self.generate_2d_hit_map()
self.select_roi()
-
-
-
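Note on the hunk above: the reformatted display_chips() and chip_stats() simply split the 512x512 integrated image into four 256x256 quadrants and collect basic statistics per quadrant. A minimal standalone sketch of that pattern, using synthetic data and illustrative names rather than the notebook's actual loader:

# Sketch of the chip-quadrant statistics pattern; the image here is synthetic.
import numpy as np

integrated_image = np.random.poisson(lam=5, size=(512, 512)).astype(float)

# Same quadrant layout as display_chips(): 256-pixel row/column slices.
chips = {
    "chip1": integrated_image[0:256, 256:],
    "chip2": integrated_image[0:256, 0:256],
    "chip3": integrated_image[256:, 0:256],
    "chip4": integrated_image[256:, 256:],
}

# Accumulate the same per-chip statistics as chip_stats().
stat = {"min": [], "max": [], "mean": [], "median": [], "std": [], "sum": []}
for name, chip in chips.items():
    stat["min"].append(np.min(chip))
    stat["max"].append(np.max(chip))
    stat["mean"].append(np.mean(chip))
    stat["median"].append(np.median(chip))
    stat["std"].append(np.std(chip))
    stat["sum"].append(np.sum(chip))

print({key: [round(float(v), 2) for v in values] for key, values in stat.items()})

Dead and hot pixels are then located on these same quadrants with np.where(chip == 0) and np.where(chip >= threshold), which is what the locate_dead_pixels() and locate_high_pixels() hunks wrap in plotting code.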
diff --git a/notebooks/__code/timepix3_raw_to_profile_of_roi/main.py b/notebooks/__code/timepix3_raw_to_profile_of_roi/main.py
index 67acebf7..fcf67a0c 100644
--- a/notebooks/__code/timepix3_raw_to_profile_of_roi/main.py
+++ b/notebooks/__code/timepix3_raw_to_profile_of_roi/main.py
@@ -5,8 +5,6 @@
import time
import numpy as np
import matplotlib.pyplot as plt
-from matplotlib.patches import Rectangle
-from ipywidgets import interactive
from ipywidgets import widgets
from __code.ipywe import fileselector
@@ -21,7 +19,6 @@
class Timepix3RawToProfileOfRoi:
-
apply_sampling = False
hist2d = None
@@ -42,7 +39,6 @@ def __init__(self, working_dir=None, debug=False):
logging.info("*** Starting a new session ***")
logging.info(f"Debug mode is {self.debug}")
print(f"log file: {self.log_file_name}")
-
def select_tpx3(self):
if self.debug:
@@ -58,34 +54,34 @@ def select_tpx3(self):
)
self.nexus_ui.show()
- def after_tpx3_file_selection(self, file_name):
+ def after_tpx3_file_selection(self, file_name):
self.get_file_infos(file_name=file_name)
self.processing_tpx3(file_name=file_name)
def get_file_infos(self, file_name=None):
-
if file_name:
logging.info(f"Getting file info for: {file_name}")
# Simulate getting file info
self.file_info = {
"base name": os.path.basename(file_name),
- "path" : os.path.dirname(file_name),
+ "path": os.path.dirname(file_name),
"size": f"{os.path.getsize(file_name) / (1024*1024):.2f} MB",
"modified": os.path.getmtime(file_name),
}
- logging.info(f"File info:")
+ logging.info("File info:")
for _key in self.file_info:
logging.info(f"\t{_key}: {self.file_info[_key]}")
else:
logging.info(f"File not found: {file_name}")
def processing_tpx3(self, file_name=None):
-
- logging.info(f"Processing TPX3 file: {os.path.basename(file_name)} ... (be patient!)")
+ logging.info(
+ f"Processing TPX3 file: {os.path.basename(file_name)} ... (be patient!)"
+ )
start_time = time.time()
-
+
# loading tpx3 file
- hits_view = tdc.process_tpx3(file_name, parallel=True)
+ hits_view = tdc.process_tpx3(file_name, parallel=True)
hits = np.array(hits_view, copy=False)
self.hits = hits
@@ -95,9 +91,11 @@ def processing_tpx3(self, file_name=None):
display(HTML(f"Processing completed in {processing_time:.2f} seconds."))
# display result as a table
- from_tof = hits['tof'].min() * 25 / 1e6
- to_tof = hits['tof'].max() * 25 / 1e6
- display(HTML("""
+ from_tof = hits["tof"].min() * 25 / 1e6
+ to_tof = hits["tof"].max() * 25 / 1e6
+ display(
+ HTML(
+ """
TPX3 Processing Results
| Infos | Value |
@@ -106,25 +104,35 @@ def processing_tpx3(self, file_name=None):
| Y range | {} - {} |
| TOF range (ms) | {:.3f} - {:.3f} |
- """.format(len(hits), hits['x'].min(), hits['x'].max(), hits['y'].min(), hits['y'].max(),
- from_tof, to_tof)))
+ """.format(
+ len(hits),
+ hits["x"].min(),
+ hits["x"].max(),
+ hits["y"].min(),
+ hits["y"].max(),
+ from_tof,
+ to_tof,
+ )
+ )
+ )
unique_chips, chip_counts = np.unique(hits["chip_id"], return_counts=True)
-        _text = 'Chips distribution (hits per chips)' + \
-                ''
+        _text = (
+            "Chips distribution (hits per chips)"
+            + ''
+        )
for chip, count in zip(unique_chips, chip_counts):
percentage = 100 * count / len(hits)
-            _text += f'| Chip {chip} | {count:,} hits ({percentage:.1f}%) |'
-        _text += ''
+            _text += f"| Chip {chip} | {count:,} hits ({percentage:.1f}%) |"
+        _text += ""
display(HTML(_text))
def select_sampling_percentage(self):
if len(self.hits) > 100_000:
label = widgets.Label("Select sampling percentage:")
- self.sampling_percentage_ui = widgets.FloatSlider(min=0.01,
- max=100,
- value=0.1,
- step=0.01)
+ self.sampling_percentage_ui = widgets.FloatSlider(
+ min=0.01, max=100, value=0.1, step=0.01
+ )
hori_layout = widgets.HBox([label, self.sampling_percentage_ui])
self.apply_sampling = True
display(hori_layout)
@@ -133,32 +141,36 @@ def select_sampling_percentage(self):
self.apply_sampling = False
def display_image_with_roi(self):
- self.generate_2d_hit_map()
+ self.generate_2d_hit_map()
def generate_2d_hit_map(self):
- sample_fraction = self.sampling_percentage_ui.value / 100.
+ sample_fraction = self.sampling_percentage_ui.value / 100.0
if self.apply_sampling:
n_sample = int(len(self.hits) * sample_fraction)
- sample_indices = np.random.choice(len(self.hits), size=n_sample, replace=False)
+ sample_indices = np.random.choice(
+ len(self.hits), size=n_sample, replace=False
+ )
hits_for_viz = self.hits[sample_indices]
- display(HTML(f"Using {n_sample:,} sampled hits ({sample_fraction*100:.1f}%) for visualization"))
+ display(
+ HTML(
+ f"Using {n_sample:,} sampled hits ({sample_fraction*100:.1f}%) for visualization"
+ )
+ )
else:
hits_for_viz = self.hits
display(HTML(f"Using all {len(self.hits):,} hits for visualization"))
-
+
# Create 2D histogram (bin by detector pixels)
x_bins = np.arange(0, 515, 1) # 0 to 514 pixels
y_bins = np.arange(0, 515, 1) # 0 to 514 pixels
self.hist2d, self.x_edges, self.y_edges = np.histogram2d(
- hits_for_viz["x"], hits_for_viz["y"], bins=[x_bins, y_bins])
+ hits_for_viz["x"], hits_for_viz["y"], bins=[x_bins, y_bins]
+ )
def select_roi(self):
pass
-
+
def generate_histogram_and_select_roi(self):
self.generate_2d_hit_map()
self.select_roi()
-
-
-
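For context on the two TPX3 diffs above: after tdc.process_tpx3() returns a structured hit array, the notebook converts TOF ticks to milliseconds and bins the (optionally sampled) hits onto the 514x514 detector grid. A self-contained sketch of that flow is given below; the hit array is faked with random data, and the 25 ns tick size is inferred from the `* 25 / 1e6` factor in the diff rather than confirmed by it.

# Sketch of the hit-sampling and 2D hit-map logic reformatted above.
import numpy as np

rng = np.random.default_rng(0)
n_hits = 500_000
hits = np.zeros(n_hits, dtype=[("x", "u2"), ("y", "u2"), ("tof", "u8")])
hits["x"] = rng.integers(0, 515, n_hits)
hits["y"] = rng.integers(0, 515, n_hits)
hits["tof"] = rng.integers(0, 400_000, n_hits)

# Same conversion as the diff: TOF ticks * 25 / 1e6 -> milliseconds
# (assuming 25 ns ticks, which is what the factor implies).
from_tof_ms = hits["tof"].min() * 25 / 1e6
to_tof_ms = hits["tof"].max() * 25 / 1e6

# Optional sampling before visualization, as in generate_2d_hit_map().
sample_fraction = 0.1 / 100.0
n_sample = int(len(hits) * sample_fraction)
sample_indices = rng.choice(len(hits), size=n_sample, replace=False)
hits_for_viz = hits[sample_indices]

# Bin hits onto the 514x514 detector grid.
x_bins = np.arange(0, 515, 1)
y_bins = np.arange(0, 515, 1)
hist2d, x_edges, y_edges = np.histogram2d(
    hits_for_viz["x"], hits_for_viz["y"], bins=[x_bins, y_bins]
)
print(hist2d.shape, f"TOF range: {from_tof_ms:.3f} - {to_tof_ms:.3f} ms")

The sampling step only affects visualization; the full hit array is kept on the instance for any later ROI analysis.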
diff --git a/notebooks/__code/truncate_file_names/truncate_file_names.py b/notebooks/__code/truncate_file_names/truncate_file_names.py
index 5738d351..eaac3f9d 100755
--- a/notebooks/__code/truncate_file_names/truncate_file_names.py
+++ b/notebooks/__code/truncate_file_names/truncate_file_names.py
@@ -16,7 +16,9 @@ def __init__(self, working_dir=""):
def select_input_files(self):
self.input_files_ui = fileselector.FileSelectorPanel(
- instruction="Select List of Files", start_dir=self.working_dir, multiple=True
+ instruction="Select List of Files",
+ start_dir=self.working_dir,
+ multiple=True,
)
self.input_files_ui.show()
@@ -39,8 +41,12 @@ def __init__(self, o_truncate=None):
raise ValueError("TruncateFileNames is missing!")
if self.list_of_files:
- _random_input_list = utilities.get_n_random_element(input_list=self.list_of_files, n=10)
- self.random_input_list = [os.path.basename(_file) for _file in _random_input_list]
+ _random_input_list = utilities.get_n_random_element(
+ input_list=self.list_of_files, n=10
+ )
+ self.random_input_list = [
+ os.path.basename(_file) for _file in _random_input_list
+ ]
self.basename = os.path.basename(self.list_of_files[0])
self.working_dir = os.path.dirname(os.path.dirname(self.list_of_files[0]))
@@ -62,28 +68,40 @@ def show(self):
[
widgets.Label("Random Input:", layout=widgets.Layout(width="15%")),
widgets.Dropdown(
- options=self.random_input_list, value=self.random_input_list[0], layout=widgets.Layout(width="50%")
+ options=self.random_input_list,
+ value=self.random_input_list[0],
+ layout=widgets.Layout(width="50%"),
),
]
)
self.random_input_checkbox = self.box1.children[1]
- self.random_input_checkbox.observe(self.random_input_checkbox_value_changed, "value")
+ self.random_input_checkbox.observe(
+ self.random_input_checkbox_value_changed, "value"
+ )
- self.box2 = widgets.HBox([widgets.Label("String to remove:", layout=widgets.Layout(width="30%"))])
+ self.box2 = widgets.HBox(
+ [widgets.Label("String to remove:", layout=widgets.Layout(width="30%"))]
+ )
self.box6 = widgets.HBox(
[
- widgets.Label(value=" On the left:", layout=widgets.Layout(width="40%")),
+ widgets.Label(
+ value=" On the left:", layout=widgets.Layout(width="40%")
+ ),
widgets.Text(value="", layout=widgets.Layout(width="60%")),
]
)
self.box7 = widgets.HBox(
[
- widgets.Label(value=" On the right:", layout=widgets.Layout(width="40%")),
+ widgets.Label(
+ value=" On the right:", layout=widgets.Layout(width="40%")
+ ),
widgets.Text(value="", layout=widgets.Layout(width="60%")),
]
)
- self.box8 = widgets.VBox([self.box6, self.box7], layout=widgets.Layout(width="50%"))
+ self.box8 = widgets.VBox(
+ [self.box6, self.box7], layout=widgets.Layout(width="50%")
+ )
self.left_part_to_remove_text = self.box6.children[1]
self.right_part_to_remove_text = self.box7.children[1]
self.left_part_to_remove_text.observe(self.left_part_text_changed, "value")
@@ -99,7 +117,9 @@ def show(self):
         separate_line = widgets.HTML(value="", layout=widgets.Layout(width="100%"))
- vbox = widgets.VBox([self.box1, separate_line, self.box2, self.box8, separate_line, self.box9])
+ vbox = widgets.VBox(
+ [self.box1, separate_line, self.box2, self.box8, separate_line, self.box9]
+ )
display(vbox)
self.update_new_file_name()
@@ -127,8 +147,14 @@ def update_new_file_name(self):
def check_new_names(self):
dict_old_new_names = self.create_dict_old_new_filenames()
- old_names_new_names = [f"{os.path.basename(_key)} -> {_value}" for _key, _value in dict_old_new_names.items()]
- select_widget = widgets.Select(options=old_names_new_names, layout=widgets.Layout(width="100%", height="400px"))
+ old_names_new_names = [
+ f"{os.path.basename(_key)} -> {_value}"
+ for _key, _value in dict_old_new_names.items()
+ ]
+ select_widget = widgets.Select(
+ options=old_names_new_names,
+ layout=widgets.Layout(width="100%", height="400px"),
+ )
display(select_widget)
def create_dict_old_new_filenames(self):
@@ -165,7 +191,9 @@ def rename_and_export_files(self, output_folder=None):
input_folder = os.path.abspath(self.input_folder)
input_folder_renamed = os.path.basename(input_folder) + "_renamed"
self.output_folder_ui.shortcut_buttons.close()
- new_output_folder = os.path.join(os.path.abspath(output_folder), input_folder_renamed)
+ new_output_folder = os.path.join(
+ os.path.abspath(output_folder), input_folder_renamed
+ )
dict_old_new_names = self.create_dict_old_new_filenames()
utilities.copy_files(
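The truncate_file_names hunks above build a mapping from old basenames to new ones by removing a user-supplied substring on the left and on the right of each name. The exact helper is not shown in this diff, so the sketch below is one plausible reading, with hypothetical file names and strings:

# Illustrative sketch of the rename mapping behind create_dict_old_new_filenames().
import os

list_of_files = [
    "/data/run_0001_image_0001_corrected.tif",
    "/data/run_0001_image_0002_corrected.tif",
]
left_to_remove = "run_0001_"
right_to_remove = "_corrected"

dict_old_new_names = {}
for _file in list_of_files:
    base, ext = os.path.splitext(os.path.basename(_file))
    new_base = base
    if left_to_remove and new_base.startswith(left_to_remove):
        new_base = new_base[len(left_to_remove):]
    if right_to_remove and new_base.endswith(right_to_remove):
        new_base = new_base[: -len(right_to_remove)]
    dict_old_new_names[_file] = new_base + ext

print(dict_old_new_names)
# e.g. {'/data/run_0001_image_0001_corrected.tif': 'image_0001.tif', ...}

The notebook then copies the files into a "<input folder>_renamed" output folder using this mapping, as the rename_and_export_files() hunk shows.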
diff --git a/notebooks/__code/ui_addie.py b/notebooks/__code/ui_addie.py
index d72ac030..990f234c 100755
--- a/notebooks/__code/ui_addie.py
+++ b/notebooks/__code/ui_addie.py
@@ -102,7 +102,9 @@ def setupUi(self, MainWindow):
self.verticalLayout_2 = QtWidgets.QVBoxLayout()
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.treeWidget = QtWidgets.QTreeWidget(self.centralwidget)
- sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Expanding)
+ sizePolicy = QtWidgets.QSizePolicy(
+ QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Expanding
+ )
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.treeWidget.sizePolicy().hasHeightForWidth())
@@ -111,7 +113,9 @@ def setupUi(self, MainWindow):
self.treeWidget.setMaximumSize(QtCore.QSize(250, 16777215))
self.treeWidget.setObjectName("treeWidget")
self.verticalLayout_2.addWidget(self.treeWidget)
- spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
+ spacerItem = QtWidgets.QSpacerItem(
+ 20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding
+ )
self.verticalLayout_2.addItem(spacerItem)
self.horizontalLayout.addLayout(self.verticalLayout_2)
MainWindow.setCentralWidget(self.centralwidget)
@@ -124,7 +128,9 @@ def setupUi(self, MainWindow):
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
- self.h3_table.customContextMenuRequested["QPoint"].connect(MainWindow.h3_table_right_click)
+ self.h3_table.customContextMenuRequested["QPoint"].connect(
+ MainWindow.h3_table_right_click
+ )
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
@@ -173,4 +179,6 @@ def retranslateUi(self, MainWindow):
item = self.h3_table.item(0, 7)
item.setText(_translate("MainWindow", "8"))
self.h3_table.setSortingEnabled(__sortingEnabled)
- self.treeWidget.headerItem().setText(0, _translate("MainWindow", "Columns Visibility"))
+ self.treeWidget.headerItem().setText(
+ 0, _translate("MainWindow", "Columns Visibility")
+ )
diff --git a/notebooks/__code/ui_addie_save_config.py b/notebooks/__code/ui_addie_save_config.py
index 44c84d48..0c27ee22 100755
--- a/notebooks/__code/ui_addie_save_config.py
+++ b/notebooks/__code/ui_addie_save_config.py
@@ -11,7 +11,9 @@ class Ui_Dialog:
def setupUi(self, Dialog):
Dialog.setObjectName("Dialog")
Dialog.resize(401, 89)
- sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
+ sizePolicy = QtWidgets.QSizePolicy(
+ QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed
+ )
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(Dialog.sizePolicy().hasHeightForWidth())
@@ -32,7 +34,9 @@ def setupUi(self, Dialog):
self.verticalLayout.addLayout(self.horizontalLayout)
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
- spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
+ spacerItem = QtWidgets.QSpacerItem(
+ 40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum
+ )
self.horizontalLayout_2.addItem(spacerItem)
self.cancel_button = QtWidgets.QPushButton(Dialog)
self.cancel_button.setObjectName("cancel_button")
diff --git a/notebooks/__code/ui_bragg_edge_peak_fitting.py b/notebooks/__code/ui_bragg_edge_peak_fitting.py
index 0cef6be3..d4e91c1d 100755
--- a/notebooks/__code/ui_bragg_edge_peak_fitting.py
+++ b/notebooks/__code/ui_bragg_edge_peak_fitting.py
@@ -22,7 +22,9 @@ def setupUi(self, MainWindow):
self.verticalLayout = QtWidgets.QVBoxLayout(self.tab)
self.verticalLayout.setObjectName("verticalLayout")
self.splitter = QtWidgets.QSplitter(self.tab)
- sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
+ sizePolicy = QtWidgets.QSizePolicy(
+ QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding
+ )
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.splitter.sizePolicy().hasHeightForWidth())
@@ -41,7 +43,9 @@ def setupUi(self, MainWindow):
self.label = QtWidgets.QLabel(self.tab)
self.label.setMinimumSize(QtCore.QSize(200, 0))
self.label.setMaximumSize(QtCore.QSize(200, 16777215))
- self.label.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter)
+ self.label.setAlignment(
+ QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter
+ )
self.label.setObjectName("label")
self.gridLayout.addWidget(self.label, 0, 0, 1, 1)
self.lineEdit = QtWidgets.QLineEdit(self.tab)
@@ -57,7 +61,9 @@ def setupUi(self, MainWindow):
self.label_4 = QtWidgets.QLabel(self.tab)
self.label_4.setMinimumSize(QtCore.QSize(200, 0))
self.label_4.setMaximumSize(QtCore.QSize(200, 16777215))
- self.label_4.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter)
+ self.label_4.setAlignment(
+ QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter
+ )
self.label_4.setObjectName("label_4")
self.gridLayout.addWidget(self.label_4, 1, 0, 1, 1)
self.lineEdit_2 = QtWidgets.QLineEdit(self.tab)
@@ -71,7 +77,9 @@ def setupUi(self, MainWindow):
self.label_3.setObjectName("label_3")
self.gridLayout.addWidget(self.label_3, 1, 2, 1, 1)
self.horizontalLayout_2.addLayout(self.gridLayout)
- spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
+ spacerItem = QtWidgets.QSpacerItem(
+ 40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum
+ )
self.horizontalLayout_2.addItem(spacerItem)
self.verticalLayout.addLayout(self.horizontalLayout_2)
self.tabWidget.addTab(self.tab, "")
@@ -127,7 +135,9 @@ def setupUi(self, MainWindow):
self.cancel_button = QtWidgets.QPushButton(self.centralwidget)
self.cancel_button.setObjectName("cancel_button")
self.horizontalLayout.addWidget(self.cancel_button)
- spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
+ spacerItem1 = QtWidgets.QSpacerItem(
+ 40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum
+ )
self.horizontalLayout.addItem(spacerItem1)
self.apply_button = QtWidgets.QPushButton(self.centralwidget)
self.apply_button.setObjectName("apply_button")
@@ -155,7 +165,9 @@ def retranslateUi(self, MainWindow):
self.label_2.setText(_translate("MainWindow", "m"))
self.label_4.setText(_translate("MainWindow", "detector offset"))
self.label_3.setText(_translate("MainWindow", "micros"))
- self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), _translate("MainWindow", "Selection"))
+ self.tabWidget.setTabText(
+ self.tabWidget.indexOf(self.tab), _translate("MainWindow", "Selection")
+ )
self.groupBox.setTitle(_translate("MainWindow", "List of algorithms"))
self.radioButton_3.setText(_translate("MainWindow", "algorithm 3"))
self.radioButton_2.setText(_translate("MainWindow", "algorithm 2"))
@@ -170,6 +182,8 @@ def retranslateUi(self, MainWindow):
item.setText(_translate("MainWindow", "coeff2 value"))
item = self.tableWidget.horizontalHeaderItem(4)
item.setText(_translate("MainWindow", "coeff2 error"))
- self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2), _translate("MainWindow", "Fitting"))
+ self.tabWidget.setTabText(
+ self.tabWidget.indexOf(self.tab_2), _translate("MainWindow", "Fitting")
+ )
self.cancel_button.setText(_translate("MainWindow", "Cancel"))
self.apply_button.setText(_translate("MainWindow", "Use this peak"))
diff --git a/notebooks/__code/ui_calibrated_transmission.py b/notebooks/__code/ui_calibrated_transmission.py
index e4881d35..ee05fd2b 100755
--- a/notebooks/__code/ui_calibrated_transmission.py
+++ b/notebooks/__code/ui_calibrated_transmission.py
@@ -38,10 +38,14 @@ def setupUi(self, MainWindow):
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.pyqtgraph_widget = QtWidgets.QWidget(self.layoutWidget)
- sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
+ sizePolicy = QtWidgets.QSizePolicy(
+ QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding
+ )
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
- sizePolicy.setHeightForWidth(self.pyqtgraph_widget.sizePolicy().hasHeightForWidth())
+ sizePolicy.setHeightForWidth(
+ self.pyqtgraph_widget.sizePolicy().hasHeightForWidth()
+ )
self.pyqtgraph_widget.setSizePolicy(sizePolicy)
self.pyqtgraph_widget.setObjectName("pyqtgraph_widget")
self.horizontalLayout_2.addWidget(self.pyqtgraph_widget)
@@ -87,7 +91,9 @@ def setupUi(self, MainWindow):
self.use_calibration1_checkbox.setChecked(True)
self.use_calibration1_checkbox.setObjectName("use_calibration1_checkbox")
self.horizontalLayout_12.addWidget(self.use_calibration1_checkbox)
- spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
+ spacerItem = QtWidgets.QSpacerItem(
+ 40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum
+ )
self.horizontalLayout_12.addItem(spacerItem)
self.verticalLayout_5.addLayout(self.horizontalLayout_12)
self.gridLayout = QtWidgets.QGridLayout()
@@ -113,7 +119,9 @@ def setupUi(self, MainWindow):
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
self.calibration1_x0_label.setPalette(palette)
- self.calibration1_x0_label.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter)
+ self.calibration1_x0_label.setAlignment(
+ QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter
+ )
self.calibration1_x0_label.setObjectName("calibration1_x0_label")
self.gridLayout.addWidget(self.calibration1_x0_label, 0, 0, 1, 1)
self.calibration1_x0 = QtWidgets.QLineEdit(self.tab_3)
@@ -202,7 +210,9 @@ def setupUi(self, MainWindow):
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
self.calibration1_y0_label.setPalette(palette)
- self.calibration1_y0_label.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter)
+ self.calibration1_y0_label.setAlignment(
+ QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter
+ )
self.calibration1_y0_label.setObjectName("calibration1_y0_label")
self.gridLayout.addWidget(self.calibration1_y0_label, 1, 0, 1, 1)
self.calibration1_y0 = QtWidgets.QLineEdit(self.tab_3)
@@ -294,9 +304,13 @@ def setupUi(self, MainWindow):
self.calibration1_index.setPalette(palette)
self.calibration1_index.setObjectName("calibration1_index")
self.horizontalLayout_4.addWidget(self.calibration1_index)
- self.calibration1_display_this_file_button = QtWidgets.QPushButton(self.calibration1_groupbox)
+ self.calibration1_display_this_file_button = QtWidgets.QPushButton(
+ self.calibration1_groupbox
+ )
self.calibration1_display_this_file_button.setMinimumSize(QtCore.QSize(0, 40))
- self.calibration1_display_this_file_button.setMaximumSize(QtCore.QSize(16777215, 40))
+ self.calibration1_display_this_file_button.setMaximumSize(
+ QtCore.QSize(16777215, 40)
+ )
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(26, 30, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
@@ -317,7 +331,9 @@ def setupUi(self, MainWindow):
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
self.calibration1_display_this_file_button.setPalette(palette)
- self.calibration1_display_this_file_button.setObjectName("calibration1_display_this_file_button")
+ self.calibration1_display_this_file_button.setObjectName(
+ "calibration1_display_this_file_button"
+ )
self.horizontalLayout_4.addWidget(self.calibration1_display_this_file_button)
self.verticalLayout.addLayout(self.horizontalLayout_4)
self.label_2 = QtWidgets.QLabel(self.calibration1_groupbox)
@@ -349,9 +365,13 @@ def setupUi(self, MainWindow):
self.label_2.setAlignment(QtCore.Qt.AlignCenter)
self.label_2.setObjectName("label_2")
self.verticalLayout.addWidget(self.label_2)
- self.calibration1_use_current_file_button = QtWidgets.QPushButton(self.calibration1_groupbox)
+ self.calibration1_use_current_file_button = QtWidgets.QPushButton(
+ self.calibration1_groupbox
+ )
self.calibration1_use_current_file_button.setMinimumSize(QtCore.QSize(0, 40))
- self.calibration1_use_current_file_button.setMaximumSize(QtCore.QSize(16777215, 40))
+ self.calibration1_use_current_file_button.setMaximumSize(
+ QtCore.QSize(16777215, 40)
+ )
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(26, 30, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
@@ -372,9 +392,13 @@ def setupUi(self, MainWindow):
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
self.calibration1_use_current_file_button.setPalette(palette)
- self.calibration1_use_current_file_button.setObjectName("calibration1_use_current_file_button")
+ self.calibration1_use_current_file_button.setObjectName(
+ "calibration1_use_current_file_button"
+ )
self.verticalLayout.addWidget(self.calibration1_use_current_file_button)
- spacerItem1 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
+ spacerItem1 = QtWidgets.QSpacerItem(
+ 20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding
+ )
self.verticalLayout.addItem(spacerItem1)
self.verticalLayout_5.addWidget(self.calibration1_groupbox)
self.tabWidget_2.addTab(self.tab_3, "")
@@ -388,7 +412,9 @@ def setupUi(self, MainWindow):
self.use_calibration2_checkbox.setChecked(True)
self.use_calibration2_checkbox.setObjectName("use_calibration2_checkbox")
self.horizontalLayout_13.addWidget(self.use_calibration2_checkbox)
- spacerItem2 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
+ spacerItem2 = QtWidgets.QSpacerItem(
+ 40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum
+ )
self.horizontalLayout_13.addItem(spacerItem2)
self.verticalLayout_6.addLayout(self.horizontalLayout_13)
self.gridLayout_2 = QtWidgets.QGridLayout()
@@ -405,7 +431,9 @@ def setupUi(self, MainWindow):
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
self.calibration2_x0_label.setPalette(palette)
- self.calibration2_x0_label.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter)
+ self.calibration2_x0_label.setAlignment(
+ QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter
+ )
self.calibration2_x0_label.setObjectName("calibration2_x0_label")
self.gridLayout_2.addWidget(self.calibration2_x0_label, 0, 0, 1, 1)
self.calibration2_x0 = QtWidgets.QLineEdit(self.tab_4)
@@ -467,7 +495,9 @@ def setupUi(self, MainWindow):
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
self.calibration2_y0_label.setPalette(palette)
- self.calibration2_y0_label.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter)
+ self.calibration2_y0_label.setAlignment(
+ QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter
+ )
self.calibration2_y0_label.setObjectName("calibration2_y0_label")
self.gridLayout_2.addWidget(self.calibration2_y0_label, 1, 0, 1, 1)
self.calibration2_y0 = QtWidgets.QLineEdit(self.tab_4)
@@ -532,9 +562,13 @@ def setupUi(self, MainWindow):
self.calibration2_index.setPalette(palette)
self.calibration2_index.setObjectName("calibration2_index")
self.horizontalLayout_5.addWidget(self.calibration2_index)
- self.calibration2_display_this_file_button = QtWidgets.QPushButton(self.calibration2_groupbox)
+ self.calibration2_display_this_file_button = QtWidgets.QPushButton(
+ self.calibration2_groupbox
+ )
self.calibration2_display_this_file_button.setMinimumSize(QtCore.QSize(0, 40))
- self.calibration2_display_this_file_button.setMaximumSize(QtCore.QSize(16777215, 40))
+ self.calibration2_display_this_file_button.setMaximumSize(
+ QtCore.QSize(16777215, 40)
+ )
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(193, 22, 45))
brush.setStyle(QtCore.Qt.SolidPattern)
@@ -546,7 +580,9 @@ def setupUi(self, MainWindow):
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
self.calibration2_display_this_file_button.setPalette(palette)
- self.calibration2_display_this_file_button.setObjectName("calibration2_display_this_file_button")
+ self.calibration2_display_this_file_button.setObjectName(
+ "calibration2_display_this_file_button"
+ )
self.horizontalLayout_5.addWidget(self.calibration2_display_this_file_button)
self.verticalLayout_9.addLayout(self.horizontalLayout_5)
self.label_4 = QtWidgets.QLabel(self.calibration2_groupbox)
@@ -569,9 +605,13 @@ def setupUi(self, MainWindow):
self.label_4.setAlignment(QtCore.Qt.AlignCenter)
self.label_4.setObjectName("label_4")
self.verticalLayout_9.addWidget(self.label_4)
- self.calibration2_use_current_file_button = QtWidgets.QPushButton(self.calibration2_groupbox)
+ self.calibration2_use_current_file_button = QtWidgets.QPushButton(
+ self.calibration2_groupbox
+ )
self.calibration2_use_current_file_button.setMinimumSize(QtCore.QSize(0, 40))
- self.calibration2_use_current_file_button.setMaximumSize(QtCore.QSize(16777215, 40))
+ self.calibration2_use_current_file_button.setMaximumSize(
+ QtCore.QSize(16777215, 40)
+ )
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(193, 22, 45))
brush.setStyle(QtCore.Qt.SolidPattern)
@@ -583,10 +623,14 @@ def setupUi(self, MainWindow):
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
self.calibration2_use_current_file_button.setPalette(palette)
- self.calibration2_use_current_file_button.setObjectName("calibration2_use_current_file_button")
+ self.calibration2_use_current_file_button.setObjectName(
+ "calibration2_use_current_file_button"
+ )
self.verticalLayout_9.addWidget(self.calibration2_use_current_file_button)
self.verticalLayout_6.addWidget(self.calibration2_groupbox)
- spacerItem3 = QtWidgets.QSpacerItem(20, 305, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
+ spacerItem3 = QtWidgets.QSpacerItem(
+ 20, 305, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding
+ )
self.verticalLayout_6.addItem(spacerItem3)
self.tabWidget_2.addTab(self.tab_4, "")
self.verticalLayout_11.addWidget(self.tabWidget_2)
@@ -619,7 +663,9 @@ def setupUi(self, MainWindow):
self.remove_row.setFont(font)
self.remove_row.setObjectName("remove_row")
self.horizontalLayout_6.addWidget(self.remove_row)
- spacerItem4 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
+ spacerItem4 = QtWidgets.QSpacerItem(
+ 40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum
+ )
self.horizontalLayout_6.addItem(spacerItem4)
self.add_row = QtWidgets.QPushButton(self.page_2)
self.add_row.setMinimumSize(QtCore.QSize(50, 40))
@@ -660,7 +706,9 @@ def setupUi(self, MainWindow):
self.pushButton = QtWidgets.QPushButton(self.centralwidget)
self.pushButton.setObjectName("pushButton")
self.horizontalLayout.addWidget(self.pushButton)
- spacerItem5 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
+ spacerItem5 = QtWidgets.QSpacerItem(
+ 40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum
+ )
self.horizontalLayout.addItem(spacerItem5)
self.export_button = QtWidgets.QPushButton(self.centralwidget)
self.export_button.setObjectName("export_button")
@@ -699,25 +747,55 @@ def setupUi(self, MainWindow):
self.tabWidget_2.setCurrentIndex(0)
self.file_slider.sliderMoved["int"].connect(MainWindow.slider_file_changed)
self.file_slider.valueChanged["int"].connect(MainWindow.slider_file_changed)
- self.previous_image_button.clicked.connect(MainWindow.previous_image_button_clicked)
+ self.previous_image_button.clicked.connect(
+ MainWindow.previous_image_button_clicked
+ )
self.next_image_button.clicked.connect(MainWindow.next_image_button_clicked)
self.export_button.clicked.connect(MainWindow.export_button_clicked)
- self.use_calibration1_checkbox.clicked.connect(MainWindow.use_calibration1_checked)
+ self.use_calibration1_checkbox.clicked.connect(
+ MainWindow.use_calibration1_checked
+ )
self.remove_row.clicked.connect(MainWindow.remove_row_button_clicked)
self.add_row.clicked.connect(MainWindow.add_row_button_clicked)
- self.calibration1_use_current_file_button.clicked.connect(MainWindow.use_current_calibration1_file)
- self.calibration2_use_current_file_button.clicked.connect(MainWindow.use_current_calibration2_file)
- self.calibration1_display_this_file_button.clicked.connect(MainWindow.display_this_cal1_file)
- self.calibration2_display_this_file_button.clicked.connect(MainWindow.display_this_cal2_file)
- self.calibration1_x0.returnPressed.connect(MainWindow.calibration1_widgets_changed)
- self.calibration1_width.returnPressed.connect(MainWindow.calibration1_widgets_changed)
- self.calibration1_y0.returnPressed.connect(MainWindow.calibration1_widgets_changed)
- self.calibration1_height.returnPressed.connect(MainWindow.calibration1_widgets_changed)
- self.calibration2_x0.returnPressed.connect(MainWindow.calibration2_widgets_changed)
- self.calibration2_width.returnPressed.connect(MainWindow.calibration2_widgets_changed)
- self.calibration2_y0.returnPressed.connect(MainWindow.calibration2_widgets_changed)
- self.calibration2_height.returnPressed.connect(MainWindow.calibration2_widgets_changed)
- self.use_calibration2_checkbox.clicked.connect(MainWindow.use_calibration2_checked)
+ self.calibration1_use_current_file_button.clicked.connect(
+ MainWindow.use_current_calibration1_file
+ )
+ self.calibration2_use_current_file_button.clicked.connect(
+ MainWindow.use_current_calibration2_file
+ )
+ self.calibration1_display_this_file_button.clicked.connect(
+ MainWindow.display_this_cal1_file
+ )
+ self.calibration2_display_this_file_button.clicked.connect(
+ MainWindow.display_this_cal2_file
+ )
+ self.calibration1_x0.returnPressed.connect(
+ MainWindow.calibration1_widgets_changed
+ )
+ self.calibration1_width.returnPressed.connect(
+ MainWindow.calibration1_widgets_changed
+ )
+ self.calibration1_y0.returnPressed.connect(
+ MainWindow.calibration1_widgets_changed
+ )
+ self.calibration1_height.returnPressed.connect(
+ MainWindow.calibration1_widgets_changed
+ )
+ self.calibration2_x0.returnPressed.connect(
+ MainWindow.calibration2_widgets_changed
+ )
+ self.calibration2_width.returnPressed.connect(
+ MainWindow.calibration2_widgets_changed
+ )
+ self.calibration2_y0.returnPressed.connect(
+ MainWindow.calibration2_widgets_changed
+ )
+ self.calibration2_height.returnPressed.connect(
+ MainWindow.calibration2_widgets_changed
+ )
+ self.use_calibration2_checkbox.clicked.connect(
+ MainWindow.use_calibration2_checked
+ )
self.pushButton.clicked.connect(MainWindow.help_button_clicked)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
@@ -727,33 +805,58 @@ def retranslateUi(self, MainWindow):
self.previous_image_button.setText(_translate("MainWindow", "Prev. Image"))
self.image_slider_value.setText(_translate("MainWindow", "0"))
self.next_image_button.setText(_translate("MainWindow", "Next Image"))
- self.use_calibration1_checkbox.setText(_translate("MainWindow", "Use Calibration"))
+ self.use_calibration1_checkbox.setText(
+ _translate("MainWindow", "Use Calibration")
+ )
self.calibration1_x0_label.setText(_translate("MainWindow", "X0"))
self.calibration1_width_label.setText(_translate("MainWindow", "width"))
self.calibration1_value_label.setText(_translate("MainWindow", "Value"))
self.calibration1_y0_label.setText(_translate("MainWindow", "y0"))
self.calibration1_height_label.setText(_translate("MainWindow", "height"))
- self.calibration1_groupbox.setTitle(_translate("MainWindow", "File to Use for this Calibration"))
+ self.calibration1_groupbox.setTitle(
+ _translate("MainWindow", "File to Use for this Calibration")
+ )
self.label_38.setText(_translate("MainWindow", "Index"))
self.calibration1_index.setText(_translate("MainWindow", "0"))
- self.calibration1_display_this_file_button.setText(_translate("MainWindow", "Display This File"))
+ self.calibration1_display_this_file_button.setText(
+ _translate("MainWindow", "Display This File")
+ )
self.label_2.setText(_translate("MainWindow", "or"))
- self.calibration1_use_current_file_button.setText(_translate("MainWindow", "Use Current File"))
- self.tabWidget_2.setTabText(self.tabWidget_2.indexOf(self.tab_3), _translate("MainWindow", "Calibration 1"))
- self.use_calibration2_checkbox.setText(_translate("MainWindow", "Use Calibration"))
+ self.calibration1_use_current_file_button.setText(
+ _translate("MainWindow", "Use Current File")
+ )
+ self.tabWidget_2.setTabText(
+ self.tabWidget_2.indexOf(self.tab_3),
+ _translate("MainWindow", "Calibration 1"),
+ )
+ self.use_calibration2_checkbox.setText(
+ _translate("MainWindow", "Use Calibration")
+ )
self.calibration2_x0_label.setText(_translate("MainWindow", "X0"))
self.calibration2_width_label.setText(_translate("MainWindow", "width"))
self.calibration2_value_label.setText(_translate("MainWindow", "Value"))
self.calibration2_y0_label.setText(_translate("MainWindow", "y0"))
self.calibration2_height_label.setText(_translate("MainWindow", "height"))
- self.calibration2_groupbox.setTitle(_translate("MainWindow", "File to Use for this Calibration"))
+ self.calibration2_groupbox.setTitle(
+ _translate("MainWindow", "File to Use for this Calibration")
+ )
self.label_45.setText(_translate("MainWindow", "Index"))
self.calibration2_index.setText(_translate("MainWindow", "0"))
- self.calibration2_display_this_file_button.setText(_translate("MainWindow", "Display This File"))
+ self.calibration2_display_this_file_button.setText(
+ _translate("MainWindow", "Display This File")
+ )
self.label_4.setText(_translate("MainWindow", "or"))
- self.calibration2_use_current_file_button.setText(_translate("MainWindow", "Use Current File"))
- self.tabWidget_2.setTabText(self.tabWidget_2.indexOf(self.tab_4), _translate("MainWindow", "Calibration 2"))
- self.toolBox.setItemText(self.toolBox.indexOf(self.page), _translate("MainWindow", "Calibration Regions"))
+ self.calibration2_use_current_file_button.setText(
+ _translate("MainWindow", "Use Current File")
+ )
+ self.tabWidget_2.setTabText(
+ self.tabWidget_2.indexOf(self.tab_4),
+ _translate("MainWindow", "Calibration 2"),
+ )
+ self.toolBox.setItemText(
+ self.toolBox.indexOf(self.page),
+ _translate("MainWindow", "Calibration Regions"),
+ )
item = self.tableWidget.horizontalHeaderItem(0)
item.setText(_translate("MainWindow", "X0"))
item = self.tableWidget.horizontalHeaderItem(1)
@@ -764,20 +867,31 @@ def retranslateUi(self, MainWindow):
item.setText(_translate("MainWindow", "Height"))
self.remove_row.setText(_translate("MainWindow", "-"))
self.add_row.setText(_translate("MainWindow", "+"))
- self.toolBox.setItemText(self.toolBox.indexOf(self.page_2), _translate("MainWindow", "Measurement Regions"))
- self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), _translate("MainWindow", "Measurement"))
+ self.toolBox.setItemText(
+ self.toolBox.indexOf(self.page_2),
+ _translate("MainWindow", "Measurement Regions"),
+ )
+ self.tabWidget.setTabText(
+ self.tabWidget.indexOf(self.tab), _translate("MainWindow", "Measurement")
+ )
item = self.summary_table.horizontalHeaderItem(0)
item.setText(_translate("MainWindow", "Files Name"))
item = self.summary_table.horizontalHeaderItem(1)
item.setText(_translate("MainWindow", "Time Stamp"))
item = self.summary_table.horizontalHeaderItem(2)
item.setText(_translate("MainWindow", "Relative Time (s)"))
- self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2), _translate("MainWindow", "Summary"))
+ self.tabWidget.setTabText(
+ self.tabWidget.indexOf(self.tab_2), _translate("MainWindow", "Summary")
+ )
self.pushButton.setText(_translate("MainWindow", "Help"))
- self.export_button.setText(_translate("MainWindow", "Export Calibrated Transmission ..."))
+ self.export_button.setText(
+ _translate("MainWindow", "Export Calibrated Transmission ...")
+ )
self.actionExport_Profile.setText(_translate("MainWindow", "Profiles ..."))
self.actionWater_Intake.setText(_translate("MainWindow", "Water Intake ..."))
- self.actionImportedFilesMetadata.setText(_translate("MainWindow", "Imported Files and Metadata ..."))
+ self.actionImportedFilesMetadata.setText(
+ _translate("MainWindow", "Imported Files and Metadata ...")
+ )
self.actionBy_Time_Stamp.setText(_translate("MainWindow", "by Time Stamp"))
self.actionBy_File_Name.setText(_translate("MainWindow", "by File Name"))
self.actionDsc_files.setText(_translate("MainWindow", "dsc files ..."))
diff --git a/notebooks/__code/ui_display_counts_of_region_vs_stack.py b/notebooks/__code/ui_display_counts_of_region_vs_stack.py
index e8da981a..944338b5 100755
--- a/notebooks/__code/ui_display_counts_of_region_vs_stack.py
+++ b/notebooks/__code/ui_display_counts_of_region_vs_stack.py
@@ -17,7 +17,9 @@ def setupUi(self, MainWindow):
self.verticalLayout = QtWidgets.QVBoxLayout(self.centralwidget)
self.verticalLayout.setObjectName("verticalLayout")
self.widget = QtWidgets.QWidget(self.centralwidget)
- sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Preferred)
+ sizePolicy = QtWidgets.QSizePolicy(
+ QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Preferred
+ )
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.widget.sizePolicy().hasHeightForWidth())
@@ -49,12 +51,16 @@ def setupUi(self, MainWindow):
self.distance_source_detector_value = QtWidgets.QLineEdit(self.centralwidget)
self.distance_source_detector_value.setMinimumSize(QtCore.QSize(80, 0))
self.distance_source_detector_value.setMaximumSize(QtCore.QSize(80, 16777215))
- self.distance_source_detector_value.setObjectName("distance_source_detector_value")
+ self.distance_source_detector_value.setObjectName(
+ "distance_source_detector_value"
+ )
self.horizontalLayout_2.addWidget(self.distance_source_detector_value)
self.label_2 = QtWidgets.QLabel(self.centralwidget)
self.label_2.setObjectName("label_2")
self.horizontalLayout_2.addWidget(self.label_2)
- spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
+ spacerItem = QtWidgets.QSpacerItem(
+ 40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum
+ )
self.horizontalLayout_2.addItem(spacerItem)
self.label_3 = QtWidgets.QLabel(self.centralwidget)
self.label_3.setObjectName("label_3")
@@ -67,7 +73,9 @@ def setupUi(self, MainWindow):
self.detector_offset_units = QtWidgets.QLabel(self.centralwidget)
self.detector_offset_units.setObjectName("detector_offset_units")
self.horizontalLayout_2.addWidget(self.detector_offset_units)
- spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
+ spacerItem1 = QtWidgets.QSpacerItem(
+ 40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum
+ )
self.horizontalLayout_2.addItem(spacerItem1)
self.horizontalLayout_7.addLayout(self.horizontalLayout_2)
self.verticalLayout.addLayout(self.horizontalLayout_7)
@@ -83,7 +91,9 @@ def setupUi(self, MainWindow):
self.time_spectra_file_browse_button = QtWidgets.QPushButton(self.centralwidget)
self.time_spectra_file_browse_button.setMinimumSize(QtCore.QSize(100, 0))
self.time_spectra_file_browse_button.setMaximumSize(QtCore.QSize(100, 16777215))
- self.time_spectra_file_browse_button.setObjectName("time_spectra_file_browse_button")
+ self.time_spectra_file_browse_button.setObjectName(
+ "time_spectra_file_browse_button"
+ )
self.horizontalLayout_3.addWidget(self.time_spectra_file_browse_button)
self.verticalLayout.addLayout(self.horizontalLayout_3)
self.horizontalLayout = QtWidgets.QHBoxLayout()
@@ -103,9 +113,15 @@ def setupUi(self, MainWindow):
self.retranslateUi(MainWindow)
self.done_button.clicked.connect(MainWindow.done_button_clicked)
- self.distance_source_detector_value.editingFinished.connect(MainWindow.distance_source_detector_validated)
- self.detector_offset_value.returnPressed.connect(MainWindow.detector_offset_validated)
- self.time_spectra_file_browse_button.clicked.connect(MainWindow.time_spectra_file_browse_button_clicked)
+ self.distance_source_detector_value.editingFinished.connect(
+ MainWindow.distance_source_detector_validated
+ )
+ self.detector_offset_value.returnPressed.connect(
+ MainWindow.detector_offset_validated
+ )
+ self.time_spectra_file_browse_button.clicked.connect(
+ MainWindow.time_spectra_file_browse_button_clicked
+ )
self.file_index_ratio_button.clicked.connect(MainWindow.radio_button_clicked)
self.tof_radio_button.clicked.connect(MainWindow.radio_button_clicked)
self.lambda_radio_button.clicked.connect(MainWindow.radio_button_clicked)
@@ -124,5 +140,7 @@ def retranslateUi(self, MainWindow):
self.detector_offset_units.setText(_translate("MainWindow", "us"))
self.label_5.setText(_translate("MainWindow", "Time Spectra File:"))
self.time_spectra_file.setText(_translate("MainWindow", "N/A"))
- self.time_spectra_file_browse_button.setText(_translate("MainWindow", "Browse ..."))
+ self.time_spectra_file_browse_button.setText(
+ _translate("MainWindow", "Browse ...")
+ )
self.done_button.setText(_translate("MainWindow", "DONE"))
diff --git a/notebooks/__code/ui_dual_energy.py b/notebooks/__code/ui_dual_energy.py
index f57e15ba..c043ab35 100755
--- a/notebooks/__code/ui_dual_energy.py
+++ b/notebooks/__code/ui_dual_energy.py
@@ -16,7 +16,9 @@ def setupUi(self, MainWindow):
self.verticalLayout = QtWidgets.QVBoxLayout(self.centralwidget)
self.verticalLayout.setObjectName("verticalLayout")
self.widget = QtWidgets.QWidget(self.centralwidget)
- sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Preferred)
+ sizePolicy = QtWidgets.QSizePolicy(
+ QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Preferred
+ )
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.widget.sizePolicy().hasHeightForWidth())
@@ -50,12 +52,16 @@ def setupUi(self, MainWindow):
self.distance_source_detector_value = QtWidgets.QLineEdit(self.groupBox_4)
self.distance_source_detector_value.setMinimumSize(QtCore.QSize(80, 0))
self.distance_source_detector_value.setMaximumSize(QtCore.QSize(80, 16777215))
- self.distance_source_detector_value.setObjectName("distance_source_detector_value")
+ self.distance_source_detector_value.setObjectName(
+ "distance_source_detector_value"
+ )
self.horizontalLayout_4.addWidget(self.distance_source_detector_value)
self.label_2 = QtWidgets.QLabel(self.groupBox_4)
self.label_2.setObjectName("label_2")
self.horizontalLayout_4.addWidget(self.label_2)
- spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
+ spacerItem = QtWidgets.QSpacerItem(
+ 40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum
+ )
self.horizontalLayout_4.addItem(spacerItem)
self.label_3 = QtWidgets.QLabel(self.groupBox_4)
self.label_3.setObjectName("label_3")
@@ -68,7 +74,9 @@ def setupUi(self, MainWindow):
self.detector_offset_units = QtWidgets.QLabel(self.groupBox_4)
self.detector_offset_units.setObjectName("detector_offset_units")
self.horizontalLayout_4.addWidget(self.detector_offset_units)
- spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
+ spacerItem1 = QtWidgets.QSpacerItem(
+ 40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum
+ )
self.horizontalLayout_4.addItem(spacerItem1)
self.horizontalLayout_7.addWidget(self.groupBox_4)
self.verticalLayout.addLayout(self.horizontalLayout_7)
@@ -79,17 +87,23 @@ def setupUi(self, MainWindow):
self.horizontalLayout_6 = QtWidgets.QHBoxLayout(self.groupBox_2)
self.horizontalLayout_6.setObjectName("horizontalLayout_6")
self.time_spectra_file = QtWidgets.QLabel(self.groupBox_2)
- sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Preferred)
+ sizePolicy = QtWidgets.QSizePolicy(
+ QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Preferred
+ )
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
- sizePolicy.setHeightForWidth(self.time_spectra_file.sizePolicy().hasHeightForWidth())
+ sizePolicy.setHeightForWidth(
+ self.time_spectra_file.sizePolicy().hasHeightForWidth()
+ )
self.time_spectra_file.setSizePolicy(sizePolicy)
self.time_spectra_file.setObjectName("time_spectra_file")
self.horizontalLayout_6.addWidget(self.time_spectra_file)
self.time_spectra_file_browse_button = QtWidgets.QPushButton(self.groupBox_2)
self.time_spectra_file_browse_button.setMinimumSize(QtCore.QSize(100, 0))
self.time_spectra_file_browse_button.setMaximumSize(QtCore.QSize(100, 16777215))
- self.time_spectra_file_browse_button.setObjectName("time_spectra_file_browse_button")
+ self.time_spectra_file_browse_button.setObjectName(
+ "time_spectra_file_browse_button"
+ )
self.horizontalLayout_6.addWidget(self.time_spectra_file_browse_button)
self.horizontalLayout_5.addWidget(self.groupBox_2)
self.groupBox_3 = QtWidgets.QGroupBox(self.centralwidget)
@@ -128,12 +142,18 @@ def setupUi(self, MainWindow):
self.retranslateUi(MainWindow)
self.done_button.clicked.connect(MainWindow.done_button_clicked)
- self.distance_source_detector_value.editingFinished.connect(MainWindow.distance_source_detector_validated)
- self.detector_offset_value.returnPressed.connect(MainWindow.detector_offset_validated)
+ self.distance_source_detector_value.editingFinished.connect(
+ MainWindow.distance_source_detector_validated
+ )
+ self.detector_offset_value.returnPressed.connect(
+ MainWindow.detector_offset_validated
+ )
self.file_index_ratio_button.clicked.connect(MainWindow.radio_button_clicked)
self.tof_radio_button.clicked.connect(MainWindow.radio_button_clicked)
self.lambda_radio_button.clicked.connect(MainWindow.radio_button_clicked)
- self.time_spectra_file_browse_button.clicked.connect(MainWindow.time_spectra_file_browse_button_clicked)
+ self.time_spectra_file_browse_button.clicked.connect(
+ MainWindow.time_spectra_file_browse_button_clicked
+ )
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def retranslateUi(self, MainWindow):
@@ -150,7 +170,9 @@ def retranslateUi(self, MainWindow):
self.detector_offset_units.setText(_translate("MainWindow", "us"))
self.groupBox_2.setTitle(_translate("MainWindow", "Time Spectra File"))
self.time_spectra_file.setText(_translate("MainWindow", "N/A"))
- self.time_spectra_file_browse_button.setText(_translate("MainWindow", "Browse ..."))
+ self.time_spectra_file_browse_button.setText(
+ _translate("MainWindow", "Browse ...")
+ )
self.groupBox_3.setTitle(_translate("MainWindow", "Bin "))
self.label_4.setText(_translate("MainWindow", "Size"))
self.bin_units.setText(_translate("MainWindow", "files"))
diff --git a/notebooks/__code/ui_file_metadata_display.py b/notebooks/__code/ui_file_metadata_display.py
index 67a3ae72..24b5986a 100755
--- a/notebooks/__code/ui_file_metadata_display.py
+++ b/notebooks/__code/ui_file_metadata_display.py
@@ -21,7 +21,9 @@ def setupUi(self, MainWindow):
self.verticalLayout.setObjectName("verticalLayout")
self.horizontalLayout_3 = QtWidgets.QHBoxLayout()
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
- spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
+ spacerItem = QtWidgets.QSpacerItem(
+ 40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum
+ )
self.horizontalLayout_3.addItem(spacerItem)
self.para_1_label = QtWidgets.QLabel(self.centralwidget)
self.para_1_label.setMinimumSize(QtCore.QSize(30, 0))
@@ -39,14 +41,18 @@ def setupUi(self, MainWindow):
self.para_2_value = QtWidgets.QLabel(self.centralwidget)
self.para_2_value.setObjectName("para_2_value")
self.horizontalLayout_3.addWidget(self.para_2_value)
- spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
+ spacerItem1 = QtWidgets.QSpacerItem(
+ 40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum
+ )
self.horizontalLayout_3.addItem(spacerItem1)
self.verticalLayout.addLayout(self.horizontalLayout_3)
self.widget_2 = QtWidgets.QWidget(self.centralwidget)
self.widget_2.setObjectName("widget_2")
self.verticalLayout.addWidget(self.widget_2)
self.widget = QtWidgets.QWidget(self.centralwidget)
- sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
+ sizePolicy = QtWidgets.QSizePolicy(
+ QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding
+ )
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.widget.sizePolicy().hasHeightForWidth())
@@ -70,7 +76,9 @@ def setupUi(self, MainWindow):
self.tableWidget.setMinimumSize(QtCore.QSize(300, 0))
self.tableWidget.setMaximumSize(QtCore.QSize(300, 16777215))
self.tableWidget.setAlternatingRowColors(True)
- self.tableWidget.setSelectionMode(QtWidgets.QAbstractItemView.ContiguousSelection)
+ self.tableWidget.setSelectionMode(
+ QtWidgets.QAbstractItemView.ContiguousSelection
+ )
self.tableWidget.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)
self.tableWidget.setObjectName("tableWidget")
self.tableWidget.setColumnCount(3)
@@ -88,7 +96,9 @@ def setupUi(self, MainWindow):
self.export_button = QtWidgets.QPushButton(self.centralwidget)
self.export_button.setObjectName("export_button")
self.horizontalLayout.addWidget(self.export_button)
- spacerItem2 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
+ spacerItem2 = QtWidgets.QSpacerItem(
+ 40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum
+ )
self.horizontalLayout.addItem(spacerItem2)
self.close_button = QtWidgets.QPushButton(self.centralwidget)
self.close_button.setObjectName("close_button")
@@ -104,7 +114,9 @@ def setupUi(self, MainWindow):
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
- self.tableWidget.itemSelectionChanged.connect(MainWindow.table_widget_selection_changed)
+ self.tableWidget.itemSelectionChanged.connect(
+ MainWindow.table_widget_selection_changed
+ )
self.close_button.clicked.connect(MainWindow.close_clicked)
self.group_slider.sliderMoved["int"].connect(MainWindow.refresh_pyqtgraph)
self.export_button.clicked.connect(MainWindow.export_button_clicked)
diff --git a/notebooks/__code/ui_gamma_filtering_tool.py b/notebooks/__code/ui_gamma_filtering_tool.py
index db940382..cb490e70 100755
--- a/notebooks/__code/ui_gamma_filtering_tool.py
+++ b/notebooks/__code/ui_gamma_filtering_tool.py
@@ -20,7 +20,9 @@ def setupUi(self, MainWindow):
self.splitter.setOrientation(QtCore.Qt.Vertical)
self.splitter.setObjectName("splitter")
self.image_widget = QtWidgets.QWidget(self.splitter)
- sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Expanding)
+ sizePolicy = QtWidgets.QSizePolicy(
+ QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Expanding
+ )
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.image_widget.sizePolicy().hasHeightForWidth())
@@ -49,7 +51,9 @@ def setupUi(self, MainWindow):
self.file_index_value.setObjectName("file_index_value")
self.horizontalLayout_2.addWidget(self.file_index_value)
self.horizontalLayout_4.addLayout(self.horizontalLayout_2)
- spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
+ spacerItem = QtWidgets.QSpacerItem(
+ 40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum
+ )
self.horizontalLayout_4.addItem(spacerItem)
self.label_3 = QtWidgets.QLabel(self.layoutWidget)
self.label_3.setMinimumSize(QtCore.QSize(120, 0))
@@ -65,7 +69,9 @@ def setupUi(self, MainWindow):
self.horizontalLayout_4.addWidget(self.filtering_coefficient_value)
self.verticalLayout.addLayout(self.horizontalLayout_4)
self.tableWidget = QtWidgets.QTableWidget(self.layoutWidget)
- sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
+ sizePolicy = QtWidgets.QSizePolicy(
+ QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum
+ )
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.tableWidget.sizePolicy().hasHeightForWidth())
@@ -90,7 +96,9 @@ def setupUi(self, MainWindow):
self.cancel_button = QtWidgets.QPushButton(self.layoutWidget)
self.cancel_button.setObjectName("cancel_button")
self.horizontalLayout.addWidget(self.cancel_button)
- spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
+ spacerItem1 = QtWidgets.QSpacerItem(
+ 40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum
+ )
self.horizontalLayout.addItem(spacerItem1)
self.apply_button = QtWidgets.QPushButton(self.layoutWidget)
self.apply_button.setObjectName("apply_button")
@@ -111,7 +119,9 @@ def setupUi(self, MainWindow):
self.apply_button.clicked.connect(MainWindow.apply_clicked)
self.file_index_slider.sliderPressed.connect(MainWindow.slider_clicked)
self.file_index_slider.valueChanged["int"].connect(MainWindow.slider_moved)
- self.filtering_coefficient_value.returnPressed.connect(MainWindow.filtering_coefficient_changed)
+ self.filtering_coefficient_value.returnPressed.connect(
+ MainWindow.filtering_coefficient_changed
+ )
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def retranslateUi(self, MainWindow):
diff --git a/notebooks/__code/ui_integrated_roi_counts_vs_file_name_and_time_stamp.py b/notebooks/__code/ui_integrated_roi_counts_vs_file_name_and_time_stamp.py
index 0b497213..b76381d6 100755
--- a/notebooks/__code/ui_integrated_roi_counts_vs_file_name_and_time_stamp.py
+++ b/notebooks/__code/ui_integrated_roi_counts_vs_file_name_and_time_stamp.py
@@ -54,10 +54,14 @@ def setupUi(self, MainWindow):
self.horizontalLayout_6.addWidget(self.display_size_label)
self.grid_size_slider = QtWidgets.QSlider(self.groupBox)
self.grid_size_slider.setEnabled(False)
- sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed)
+ sizePolicy = QtWidgets.QSizePolicy(
+ QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed
+ )
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
- sizePolicy.setHeightForWidth(self.grid_size_slider.sizePolicy().hasHeightForWidth())
+ sizePolicy.setHeightForWidth(
+ self.grid_size_slider.sizePolicy().hasHeightForWidth()
+ )
self.grid_size_slider.setSizePolicy(sizePolicy)
self.grid_size_slider.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.grid_size_slider.setMinimum(1)
@@ -125,7 +129,9 @@ def setupUi(self, MainWindow):
self.verticalLayout_5.setObjectName("verticalLayout_5")
self.horizontalLayout_7 = QtWidgets.QHBoxLayout()
self.horizontalLayout_7.setObjectName("horizontalLayout_7")
- spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
+ spacerItem = QtWidgets.QSpacerItem(
+ 40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum
+ )
self.horizontalLayout_7.addItem(spacerItem)
self.label = QtWidgets.QLabel(self.groupBox_2)
self.label.setObjectName("label")
@@ -215,7 +221,9 @@ def setupUi(self, MainWindow):
self.remove_roi_button.setFont(font)
self.remove_roi_button.setObjectName("remove_roi_button")
self.horizontalLayout_4.addWidget(self.remove_roi_button)
- spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
+ spacerItem1 = QtWidgets.QSpacerItem(
+ 40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum
+ )
self.horizontalLayout_4.addItem(spacerItem1)
self.add_roi_button = QtWidgets.QPushButton(self.groupBox_2)
font = QtGui.QFont()
@@ -255,7 +263,9 @@ def setupUi(self, MainWindow):
self.pushButton = QtWidgets.QPushButton(self.centralwidget)
self.pushButton.setObjectName("pushButton")
self.horizontalLayout.addWidget(self.pushButton)
- spacerItem2 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
+ spacerItem2 = QtWidgets.QSpacerItem(
+ 40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum
+ )
self.horizontalLayout.addItem(spacerItem2)
self.export_button = QtWidgets.QPushButton(self.centralwidget)
self.export_button.setObjectName("export_button")
@@ -292,7 +302,9 @@ def setupUi(self, MainWindow):
self.tabWidget.setCurrentIndex(0)
self.file_slider.sliderMoved["int"].connect(MainWindow.slider_file_changed)
self.file_slider.valueChanged["int"].connect(MainWindow.slider_file_changed)
- self.previous_image_button.clicked.connect(MainWindow.previous_image_button_clicked)
+ self.previous_image_button.clicked.connect(
+ MainWindow.previous_image_button_clicked
+ )
self.next_image_button.clicked.connect(MainWindow.next_image_button_clicked)
self.export_button.clicked.connect(MainWindow.export_button_clicked)
self.pushButton.clicked.connect(MainWindow.help_button_clicked)
@@ -300,12 +312,24 @@ def setupUi(self, MainWindow):
self.add_roi_button.clicked.connect(MainWindow.add_row_button_clicked)
self.grid_display_checkBox.clicked.connect(MainWindow.display_grid_clicked)
self.grid_size_slider.sliderPressed.connect(MainWindow.grid_size_slider_clicked)
- self.grid_size_slider.sliderMoved["int"].connect(MainWindow.grid_size_slider_moved)
- self.transparency_slider.sliderPressed.connect(MainWindow.transparency_slider_clicked)
- self.transparency_slider.sliderMoved["int"].connect(MainWindow.transparency_slider_moved)
- self.tableWidget.itemSelectionChanged.connect(MainWindow.table_widget_selection_changed)
- self.tableWidget.cellChanged["int", "int"].connect(MainWindow.table_widget_cell_changed)
- self.grid_size_slider.sliderReleased.connect(MainWindow.grid_size_slider_released)
+ self.grid_size_slider.sliderMoved["int"].connect(
+ MainWindow.grid_size_slider_moved
+ )
+ self.transparency_slider.sliderPressed.connect(
+ MainWindow.transparency_slider_clicked
+ )
+ self.transparency_slider.sliderMoved["int"].connect(
+ MainWindow.transparency_slider_moved
+ )
+ self.tableWidget.itemSelectionChanged.connect(
+ MainWindow.table_widget_selection_changed
+ )
+ self.tableWidget.cellChanged["int", "int"].connect(
+ MainWindow.table_widget_cell_changed
+ )
+ self.grid_size_slider.sliderReleased.connect(
+ MainWindow.grid_size_slider_released
+ )
self.tabWidget.currentChanged["int"].connect(MainWindow.tab_changed)
self.add_radioButton.clicked.connect(MainWindow.algo_changed)
self.mean_readioButton.clicked.connect(MainWindow.algo_changed)
@@ -317,7 +341,9 @@ def retranslateUi(self, MainWindow):
self.groupBox.setTitle(_translate("MainWindow", "Grid"))
self.grid_display_checkBox.setText(_translate("MainWindow", "Display"))
self.display_size_label.setText(_translate("MainWindow", "Size"))
- self.display_transparency_label.setText(_translate("MainWindow", "Transparency"))
+ self.display_transparency_label.setText(
+ _translate("MainWindow", "Transparency")
+ )
self.previous_image_button.setText(_translate("MainWindow", "Prev. Image"))
self.image_slider_value.setText(_translate("MainWindow", "0"))
self.next_image_button.setText(_translate("MainWindow", "Next Image"))
@@ -335,22 +361,32 @@ def retranslateUi(self, MainWindow):
item.setText(_translate("MainWindow", "Width"))
item = self.tableWidget.horizontalHeaderItem(4)
item.setText(_translate("MainWindow", "Height"))
- self.label_3.setText(_translate("MainWindow", "ROI of selected row is displayed in RED"))
+ self.label_3.setText(
+ _translate("MainWindow", "ROI of selected row is displayed in RED")
+ )
self.remove_roi_button.setText(_translate("MainWindow", "-"))
self.add_roi_button.setText(_translate("MainWindow", "+"))
- self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), _translate("MainWindow", "Measurement"))
+ self.tabWidget.setTabText(
+ self.tabWidget.indexOf(self.tab), _translate("MainWindow", "Measurement")
+ )
item = self.summary_table.horizontalHeaderItem(0)
item.setText(_translate("MainWindow", "Files Name"))
item = self.summary_table.horizontalHeaderItem(1)
item.setText(_translate("MainWindow", "Time Stamp"))
item = self.summary_table.horizontalHeaderItem(2)
item.setText(_translate("MainWindow", "Relative Time (s)"))
- self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2), _translate("MainWindow", "Summary"))
+ self.tabWidget.setTabText(
+ self.tabWidget.indexOf(self.tab_2), _translate("MainWindow", "Summary")
+ )
self.pushButton.setText(_translate("MainWindow", "Help"))
- self.export_button.setText(_translate("MainWindow", "Export Counts vs File Name and Time Stamp ..."))
+ self.export_button.setText(
+ _translate("MainWindow", "Export Counts vs File Name and Time Stamp ...")
+ )
self.actionExport_Profile.setText(_translate("MainWindow", "Profiles ..."))
self.actionWater_Intake.setText(_translate("MainWindow", "Water Intake ..."))
- self.actionImportedFilesMetadata.setText(_translate("MainWindow", "Imported Files and Metadata ..."))
+ self.actionImportedFilesMetadata.setText(
+ _translate("MainWindow", "Imported Files and Metadata ...")
+ )
self.actionBy_Time_Stamp.setText(_translate("MainWindow", "by Time Stamp"))
self.actionBy_File_Name.setText(_translate("MainWindow", "by File Name"))
self.actionDsc_files.setText(_translate("MainWindow", "dsc files ..."))
diff --git a/notebooks/__code/ui_linear_profile.py b/notebooks/__code/ui_linear_profile.py
index 6c7f37e4..d9c4d8af 100755
--- a/notebooks/__code/ui_linear_profile.py
+++ b/notebooks/__code/ui_linear_profile.py
@@ -19,7 +19,9 @@ def setupUi(self, MainWindow):
self.splitter.setOrientation(QtCore.Qt.Vertical)
self.splitter.setObjectName("splitter")
self.widget = QtWidgets.QWidget(self.splitter)
- sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
+ sizePolicy = QtWidgets.QSizePolicy(
+ QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding
+ )
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.widget.sizePolicy().hasHeightForWidth())
@@ -40,7 +42,9 @@ def setupUi(self, MainWindow):
self.file_index_slider.setOrientation(QtCore.Qt.Horizontal)
self.file_index_slider.setObjectName("file_index_slider")
self.horizontalLayout_2.addWidget(self.file_index_slider)
- spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
+ spacerItem = QtWidgets.QSpacerItem(
+ 40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum
+ )
self.horizontalLayout_2.addItem(spacerItem)
self.verticalLayout.addLayout(self.horizontalLayout_2)
self.table_profile = QtWidgets.QTableWidget(self.layoutWidget)
@@ -87,7 +91,9 @@ def setupUi(self, MainWindow):
self.add_profile_2.setFont(font)
self.add_profile_2.setObjectName("add_profile_2")
self.horizontalLayout.addWidget(self.add_profile_2)
- spacerItem1 = QtWidgets.QSpacerItem(408, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
+ spacerItem1 = QtWidgets.QSpacerItem(
+ 408, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum
+ )
self.horizontalLayout.addItem(spacerItem1)
self.pushButton = QtWidgets.QPushButton(self.layoutWidget)
self.pushButton.setMinimumSize(QtCore.QSize(53, 46))
@@ -96,7 +102,9 @@ def setupUi(self, MainWindow):
self.horizontalLayout.addWidget(self.pushButton)
self.verticalLayout.addLayout(self.horizontalLayout)
self.verticalLayout_2.addWidget(self.splitter)
- spacerItem2 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
+ spacerItem2 = QtWidgets.QSpacerItem(
+ 20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding
+ )
self.verticalLayout_2.addItem(spacerItem2)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
@@ -144,4 +152,6 @@ def retranslateUi(self, MainWindow):
self.add_profile_2.setText(_translate("MainWindow", "-"))
self.pushButton.setText(_translate("MainWindow", "OK"))
self.menuFile.setTitle(_translate("MainWindow", "File"))
- self.actionExport_Profile.setText(_translate("MainWindow", "Export Profile ..."))
+ self.actionExport_Profile.setText(
+ _translate("MainWindow", "Export Profile ...")
+ )
diff --git a/notebooks/__code/ui_metadata_overlapping_images.py b/notebooks/__code/ui_metadata_overlapping_images.py
index ae1c6573..5e9b3301 100755
--- a/notebooks/__code/ui_metadata_overlapping_images.py
+++ b/notebooks/__code/ui_metadata_overlapping_images.py
@@ -28,10 +28,14 @@ def setupUi(self, MainWindow):
self.horizontalLayout_7 = QtWidgets.QHBoxLayout()
self.horizontalLayout_7.setObjectName("horizontalLayout_7")
self.pyqtgraph_widget = QtWidgets.QWidget(self.layoutWidget)
- sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
+ sizePolicy = QtWidgets.QSizePolicy(
+ QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding
+ )
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
- sizePolicy.setHeightForWidth(self.pyqtgraph_widget.sizePolicy().hasHeightForWidth())
+ sizePolicy.setHeightForWidth(
+ self.pyqtgraph_widget.sizePolicy().hasHeightForWidth()
+ )
self.pyqtgraph_widget.setSizePolicy(sizePolicy)
self.pyqtgraph_widget.setObjectName("pyqtgraph_widget")
self.horizontalLayout_7.addWidget(self.pyqtgraph_widget)
@@ -91,7 +95,9 @@ def setupUi(self, MainWindow):
self.scale_vertical_orientation = QtWidgets.QRadioButton(self.scale_groupbox)
self.scale_vertical_orientation.setObjectName("scale_vertical_orientation")
self.horizontalLayout_2.addWidget(self.scale_vertical_orientation)
- spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
+ spacerItem = QtWidgets.QSpacerItem(
+ 40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum
+ )
self.horizontalLayout_2.addItem(spacerItem)
self.verticalLayout_2.addLayout(self.horizontalLayout_2)
self.horizontalLayout_3 = QtWidgets.QHBoxLayout()
@@ -132,7 +138,9 @@ def setupUi(self, MainWindow):
self.scale_color_combobox.addItem("")
self.scale_color_combobox.addItem("")
self.horizontalLayout_5.addWidget(self.scale_color_combobox)
- spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
+ spacerItem1 = QtWidgets.QSpacerItem(
+ 40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum
+ )
self.horizontalLayout_5.addItem(spacerItem1)
self.label = QtWidgets.QLabel(self.scale_groupbox)
self.label.setObjectName("label")
@@ -147,7 +155,9 @@ def setupUi(self, MainWindow):
self.horizontalLayout_11.addWidget(self.scale_groupbox)
self.verticalLayout_5 = QtWidgets.QVBoxLayout()
self.verticalLayout_5.setObjectName("verticalLayout_5")
- spacerItem2 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
+ spacerItem2 = QtWidgets.QSpacerItem(
+ 20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding
+ )
self.verticalLayout_5.addItem(spacerItem2)
self.scale_position_label = QtWidgets.QLabel(self.layoutWidget1)
self.scale_position_label.setEnabled(False)
@@ -181,7 +191,9 @@ def setupUi(self, MainWindow):
self.label_9.setAlignment(QtCore.Qt.AlignCenter)
self.label_9.setObjectName("label_9")
self.verticalLayout_5.addWidget(self.scale_position_frame)
- spacerItem3 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
+ spacerItem3 = QtWidgets.QSpacerItem(
+ 20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding
+ )
self.verticalLayout_5.addItem(spacerItem3)
self.horizontalLayout_11.addLayout(self.verticalLayout_5)
self.verticalLayout_4.addLayout(self.horizontalLayout_11)
@@ -259,12 +271,16 @@ def setupUi(self, MainWindow):
self.metadata_color_combobox.addItem("")
self.metadata_color_combobox.addItem("")
self.horizontalLayout_6.addWidget(self.metadata_color_combobox)
- spacerItem4 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
+ spacerItem4 = QtWidgets.QSpacerItem(
+ 40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum
+ )
self.horizontalLayout_6.addItem(spacerItem4)
self.verticalLayout.addLayout(self.horizontalLayout_6)
self.horizontalLayout_14 = QtWidgets.QHBoxLayout()
self.horizontalLayout_14.setObjectName("horizontalLayout_14")
- spacerItem5 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
+ spacerItem5 = QtWidgets.QSpacerItem(
+ 40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum
+ )
self.horizontalLayout_14.addItem(spacerItem5)
self.import_table_button = QtWidgets.QPushButton(self.metadata_groupbox)
self.import_table_button.setObjectName("import_table_button")
@@ -273,7 +289,9 @@ def setupUi(self, MainWindow):
self.horizontalLayout_13.addWidget(self.metadata_groupbox)
self.verticalLayout_8 = QtWidgets.QVBoxLayout()
self.verticalLayout_8.setObjectName("verticalLayout_8")
- spacerItem6 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
+ spacerItem6 = QtWidgets.QSpacerItem(
+ 20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding
+ )
self.verticalLayout_8.addItem(spacerItem6)
self.metadata_position_label = QtWidgets.QLabel(self.layoutWidget1)
self.metadata_position_label.setEnabled(False)
@@ -311,7 +329,9 @@ def setupUi(self, MainWindow):
self.label_11.setAlignment(QtCore.Qt.AlignCenter)
self.label_11.setObjectName("label_11")
self.verticalLayout_8.addWidget(self.metadata_position_frame)
- spacerItem7 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
+ spacerItem7 = QtWidgets.QSpacerItem(
+ 20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding
+ )
self.verticalLayout_8.addItem(spacerItem7)
self.enable_graph_checkbox = QtWidgets.QCheckBox(self.layoutWidget1)
self.enable_graph_checkbox.setEnabled(False)
@@ -372,7 +392,9 @@ def setupUi(self, MainWindow):
self.graph_color_combobox.addItem("")
self.graph_color_combobox.addItem("")
self.horizontalLayout_10.addWidget(self.graph_color_combobox)
- spacerItem8 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
+ spacerItem8 = QtWidgets.QSpacerItem(
+ 40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum
+ )
self.horizontalLayout_10.addItem(spacerItem8)
self.verticalLayout_9.addLayout(self.horizontalLayout_10)
self.horizontalLayout_12 = QtWidgets.QHBoxLayout()
@@ -388,11 +410,15 @@ def setupUi(self, MainWindow):
self.metadata_graph_size_slider.setOrientation(QtCore.Qt.Horizontal)
self.metadata_graph_size_slider.setObjectName("metadata_graph_size_slider")
self.horizontalLayout_12.addWidget(self.metadata_graph_size_slider)
- spacerItem9 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
+ spacerItem9 = QtWidgets.QSpacerItem(
+ 40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum
+ )
self.horizontalLayout_12.addItem(spacerItem9)
self.verticalLayout_9.addLayout(self.horizontalLayout_12)
self.verticalLayout_8.addWidget(self.graph_groupBox)
- spacerItem10 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
+ spacerItem10 = QtWidgets.QSpacerItem(
+ 20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding
+ )
self.verticalLayout_8.addItem(spacerItem10)
self.horizontalLayout_13.addLayout(self.verticalLayout_8)
self.verticalLayout_6.addLayout(self.horizontalLayout_13)
@@ -402,7 +428,9 @@ def setupUi(self, MainWindow):
self.pushButton = QtWidgets.QPushButton(self.centralwidget)
self.pushButton.setObjectName("pushButton")
self.horizontalLayout.addWidget(self.pushButton)
- spacerItem11 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
+ spacerItem11 = QtWidgets.QSpacerItem(
+ 40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum
+ )
self.horizontalLayout.addItem(spacerItem11)
self.export_button = QtWidgets.QPushButton(self.centralwidget)
self.export_button.setObjectName("export_button")
@@ -439,41 +467,93 @@ def setupUi(self, MainWindow):
self.export_button.clicked.connect(MainWindow.export_button_clicked)
self.pushButton.clicked.connect(MainWindow.help_button_clicked)
self.scale_checkbox.clicked["bool"].connect(MainWindow.scale_checkbox_clicked)
- self.metadata_checkbox.clicked["bool"].connect(MainWindow.metadata_checkbox_clicked)
- self.previous_image_button.clicked.connect(MainWindow.previous_image_button_clicked)
+ self.metadata_checkbox.clicked["bool"].connect(
+ MainWindow.metadata_checkbox_clicked
+ )
+ self.previous_image_button.clicked.connect(
+ MainWindow.previous_image_button_clicked
+ )
self.next_image_button.clicked.connect(MainWindow.next_image_button_clicked)
self.file_slider.sliderPressed.connect(MainWindow.slider_file_clicked)
self.file_slider.valueChanged["int"].connect(MainWindow.slider_file_changed)
- self.select_metadata_checkbox.clicked["bool"].connect(MainWindow.select_metadata_checkbox_clicked)
- self.select_metadata_combobox.currentIndexChanged["int"].connect(MainWindow.metadata_list_changed)
- self.scale_horizontal_orientation.clicked.connect(MainWindow.scale_orientation_clicked)
- self.scale_vertical_orientation.clicked.connect(MainWindow.scale_orientation_clicked)
- self.scale_thickness.valueChanged["int"].connect(MainWindow.scale_thickness_value_changed)
- self.scale_color_combobox.currentIndexChanged["int"].connect(MainWindow.scale_color_changed)
- self.scale_size_spinbox.valueChanged["int"].connect(MainWindow.scale_size_changed)
+ self.select_metadata_checkbox.clicked["bool"].connect(
+ MainWindow.select_metadata_checkbox_clicked
+ )
+ self.select_metadata_combobox.currentIndexChanged["int"].connect(
+ MainWindow.metadata_list_changed
+ )
+ self.scale_horizontal_orientation.clicked.connect(
+ MainWindow.scale_orientation_clicked
+ )
+ self.scale_vertical_orientation.clicked.connect(
+ MainWindow.scale_orientation_clicked
+ )
+ self.scale_thickness.valueChanged["int"].connect(
+ MainWindow.scale_thickness_value_changed
+ )
+ self.scale_color_combobox.currentIndexChanged["int"].connect(
+ MainWindow.scale_color_changed
+ )
+ self.scale_size_spinbox.valueChanged["int"].connect(
+ MainWindow.scale_size_changed
+ )
self.scale_real_size.returnPressed.connect(MainWindow.scale_real_size_changed)
- self.scale_units_combobox.currentIndexChanged["int"].connect(MainWindow.scale_units_changed)
- self.scale_position_x.sliderMoved["int"].connect(MainWindow.scale_position_moved)
+ self.scale_units_combobox.currentIndexChanged["int"].connect(
+ MainWindow.scale_units_changed
+ )
+ self.scale_position_x.sliderMoved["int"].connect(
+ MainWindow.scale_position_moved
+ )
self.scale_position_x.sliderPressed.connect(MainWindow.scale_position_clicked)
- self.metadata_position_x.sliderMoved["int"].connect(MainWindow.metadata_position_moved)
- self.metadata_position_x.sliderPressed.connect(MainWindow.metadata_position_clicked)
- self.metadata_position_y.sliderMoved["int"].connect(MainWindow.metadata_position_moved)
- self.metadata_position_y.sliderPressed.connect(MainWindow.metadata_position_clicked)
- self.scale_position_y.sliderMoved["int"].connect(MainWindow.scale_position_moved)
+ self.metadata_position_x.sliderMoved["int"].connect(
+ MainWindow.metadata_position_moved
+ )
+ self.metadata_position_x.sliderPressed.connect(
+ MainWindow.metadata_position_clicked
+ )
+ self.metadata_position_y.sliderMoved["int"].connect(
+ MainWindow.metadata_position_moved
+ )
+ self.metadata_position_y.sliderPressed.connect(
+ MainWindow.metadata_position_clicked
+ )
+ self.scale_position_y.sliderMoved["int"].connect(
+ MainWindow.scale_position_moved
+ )
self.scale_position_y.sliderPressed.connect(MainWindow.scale_position_clicked)
- self.metadata_color_combobox.currentIndexChanged["int"].connect(MainWindow.metadata_color_changed)
- self.manual_metadata_name.returnPressed.connect(MainWindow.metadata_name_return_pressed)
- self.manual_metadata_units.returnPressed.connect(MainWindow.metadata_name_return_pressed)
- self.metadata_graph_size_slider.sliderPressed.connect(MainWindow.metadata_graph_size_pressed)
- self.metadata_graph_size_slider.sliderMoved["int"].connect(MainWindow.metadata_graph_size_moved)
+ self.metadata_color_combobox.currentIndexChanged["int"].connect(
+ MainWindow.metadata_color_changed
+ )
+ self.manual_metadata_name.returnPressed.connect(
+ MainWindow.metadata_name_return_pressed
+ )
+ self.manual_metadata_units.returnPressed.connect(
+ MainWindow.metadata_name_return_pressed
+ )
+ self.metadata_graph_size_slider.sliderPressed.connect(
+ MainWindow.metadata_graph_size_pressed
+ )
+ self.metadata_graph_size_slider.sliderMoved["int"].connect(
+ MainWindow.metadata_graph_size_moved
+ )
self.import_table_button.clicked.connect(MainWindow.import_table_pressed)
- self.tableWidget.customContextMenuRequested["QPoint"].connect(MainWindow.metadata_table_right_click)
+ self.tableWidget.customContextMenuRequested["QPoint"].connect(
+ MainWindow.metadata_table_right_click
+ )
self.graph_position_y.sliderPressed.connect(MainWindow.graph_position_clicked)
self.graph_position_x.sliderPressed.connect(MainWindow.graph_position_clicked)
- self.graph_position_x.sliderMoved["int"].connect(MainWindow.graph_position_moved)
- self.graph_position_y.sliderMoved["int"].connect(MainWindow.graph_position_moved)
- self.enable_graph_checkbox.stateChanged["int"].connect(MainWindow.enable_graph_button_clicked)
- self.graph_color_combobox.currentIndexChanged["int"].connect(MainWindow.graph_color_changed)
+ self.graph_position_x.sliderMoved["int"].connect(
+ MainWindow.graph_position_moved
+ )
+ self.graph_position_y.sliderMoved["int"].connect(
+ MainWindow.graph_position_moved
+ )
+ self.enable_graph_checkbox.stateChanged["int"].connect(
+ MainWindow.enable_graph_button_clicked
+ )
+ self.graph_color_combobox.currentIndexChanged["int"].connect(
+ MainWindow.graph_color_changed
+ )
self.graph_position_x.sliderReleased.connect(MainWindow.graph_position_clicked)
self.graph_position_y.sliderReleased.connect(MainWindow.graph_position_clicked)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
@@ -486,7 +566,9 @@ def retranslateUi(self, MainWindow):
self.next_image_button.setText(_translate("MainWindow", "Next Image"))
self.scale_checkbox.setText(_translate("MainWindow", "Scale"))
self.label_2.setText(_translate("MainWindow", "Orientation:"))
- self.scale_horizontal_orientation.setText(_translate("MainWindow", "horizontal"))
+ self.scale_horizontal_orientation.setText(
+ _translate("MainWindow", "horizontal")
+ )
self.scale_vertical_orientation.setText(_translate("MainWindow", "vertical"))
self.label_3.setText(_translate("MainWindow", "Size:"))
self.label_4.setText(_translate("MainWindow", "pixels =="))
@@ -502,7 +584,9 @@ def retranslateUi(self, MainWindow):
self.label_8.setText(_translate("MainWindow", "x"))
self.label_9.setText(_translate("MainWindow", "y"))
self.metadata_checkbox.setText(_translate("MainWindow", "Metadata"))
- self.select_metadata_checkbox.setText(_translate("MainWindow", "Select metadata"))
+ self.select_metadata_checkbox.setText(
+ _translate("MainWindow", "Select metadata")
+ )
self.meta_label.setText(_translate("MainWindow", "Legend:"))
self.label_12.setText(_translate("MainWindow", "... [Value] ..."))
item = self.tableWidget.horizontalHeaderItem(0)
@@ -520,7 +604,9 @@ def retranslateUi(self, MainWindow):
self.label_10.setText(_translate("MainWindow", "x"))
self.label_11.setText(_translate("MainWindow", "y"))
self.enable_graph_checkbox.setText(_translate("MainWindow", "Enable Graph"))
- self.metadata_position_label_4.setText(_translate("MainWindow", "Graph Position"))
+ self.metadata_position_label_4.setText(
+ _translate("MainWindow", "Graph Position")
+ )
self.label_15.setText(_translate("MainWindow", "x"))
self.label_16.setText(_translate("MainWindow", "y"))
self.label_13.setText(_translate("MainWindow", "Color:"))
@@ -534,7 +620,9 @@ def retranslateUi(self, MainWindow):
self.export_button.setText(_translate("MainWindow", "Export Images ..."))
self.actionExport_Profile.setText(_translate("MainWindow", "Profiles ..."))
self.actionWater_Intake.setText(_translate("MainWindow", "Water Intake ..."))
- self.actionImportedFilesMetadata.setText(_translate("MainWindow", "Imported Files and Metadata ..."))
+ self.actionImportedFilesMetadata.setText(
+ _translate("MainWindow", "Imported Files and Metadata ...")
+ )
self.actionBy_Time_Stamp.setText(_translate("MainWindow", "by Time Stamp"))
self.actionBy_File_Name.setText(_translate("MainWindow", "by File Name"))
self.actionDsc_files.setText(_translate("MainWindow", "dsc files ..."))
diff --git a/notebooks/__code/ui_metadata_overlapping_images_string_format.py b/notebooks/__code/ui_metadata_overlapping_images_string_format.py
index 33855e32..4683aca2 100755
--- a/notebooks/__code/ui_metadata_overlapping_images_string_format.py
+++ b/notebooks/__code/ui_metadata_overlapping_images_string_format.py
@@ -47,12 +47,16 @@ def setupUi(self, MainWindow):
self.pushButton_2 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_2.setObjectName("pushButton_2")
self.horizontalLayout.addWidget(self.pushButton_2)
- spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
+ spacerItem = QtWidgets.QSpacerItem(
+ 40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum
+ )
self.horizontalLayout.addItem(spacerItem)
self.pushButton_3 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_3.setObjectName("pushButton_3")
self.horizontalLayout.addWidget(self.pushButton_3)
- spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
+ spacerItem1 = QtWidgets.QSpacerItem(
+ 40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum
+ )
self.horizontalLayout.addItem(spacerItem1)
self.pushButton = QtWidgets.QPushButton(self.centralwidget)
self.pushButton.setObjectName("pushButton")
@@ -68,8 +72,12 @@ def setupUi(self, MainWindow):
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
- self.first_part_lineEdit.textChanged["QString"].connect(MainWindow.string_format_changed)
- self.second_part_lineEdit.textChanged["QString"].connect(MainWindow.string_format_changed)
+ self.first_part_lineEdit.textChanged["QString"].connect(
+ MainWindow.string_format_changed
+ )
+ self.second_part_lineEdit.textChanged["QString"].connect(
+ MainWindow.string_format_changed
+ )
self.pushButton_3.clicked.connect(MainWindow.launch_help)
self.pushButton_2.clicked.connect(MainWindow.cancel)
self.pushButton.clicked.connect(MainWindow.ok)
diff --git a/notebooks/__code/ui_panoramic_stitching.py b/notebooks/__code/ui_panoramic_stitching.py
index 3b7986d0..5ca958a0 100755
--- a/notebooks/__code/ui_panoramic_stitching.py
+++ b/notebooks/__code/ui_panoramic_stitching.py
@@ -28,8 +28,12 @@ def setupUi(self, MainWindow):
self.verticalLayout_4.setObjectName("verticalLayout_4")
self.splitter_between_previews_and_table = QtWidgets.QSplitter(self.tab)
self.splitter_between_previews_and_table.setOrientation(QtCore.Qt.Vertical)
- self.splitter_between_previews_and_table.setObjectName("splitter_between_previews_and_table")
- self.splitter_between_previews = QtWidgets.QSplitter(self.splitter_between_previews_and_table)
+ self.splitter_between_previews_and_table.setObjectName(
+ "splitter_between_previews_and_table"
+ )
+ self.splitter_between_previews = QtWidgets.QSplitter(
+ self.splitter_between_previews_and_table
+ )
self.splitter_between_previews.setOrientation(QtCore.Qt.Horizontal)
self.splitter_between_previews.setObjectName("splitter_between_previews")
self.groupBox_2 = QtWidgets.QGroupBox(self.splitter_between_previews)
@@ -52,7 +56,9 @@ def setupUi(self, MainWindow):
self.target_widget.setObjectName("target_widget")
self.horizontalLayout_2.addWidget(self.target_widget)
self.verticalLayout.addLayout(self.horizontalLayout_2)
- self.tableWidget = QtWidgets.QTableWidget(self.splitter_between_previews_and_table)
+ self.tableWidget = QtWidgets.QTableWidget(
+ self.splitter_between_previews_and_table
+ )
self.tableWidget.setMaximumSize(QtCore.QSize(16777215, 200))
self.tableWidget.setAlternatingRowColors(True)
self.tableWidget.setSelectionMode(QtWidgets.QAbstractItemView.SingleSelection)
@@ -108,7 +114,9 @@ def setupUi(self, MainWindow):
self.up_button.setFlat(True)
self.up_button.setObjectName("up_button")
self.gridLayout.addWidget(self.up_button, 1, 3, 1, 1)
- spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
+ spacerItem = QtWidgets.QSpacerItem(
+ 40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum
+ )
self.gridLayout.addItem(spacerItem, 2, 0, 1, 1)
self.left_left_button = QtWidgets.QPushButton(self.groupBox_4)
self.left_left_button.setStyleSheet("border: None")
@@ -125,7 +133,9 @@ def setupUi(self, MainWindow):
self.left_button.setFlat(True)
self.left_button.setObjectName("left_button")
self.gridLayout.addWidget(self.left_button, 2, 2, 1, 1)
- spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)
+ spacerItem1 = QtWidgets.QSpacerItem(
+ 40, 20, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum
+ )
self.gridLayout.addItem(spacerItem1, 2, 3, 1, 1)
self.right_button = QtWidgets.QPushButton(self.groupBox_4)
self.right_button.setStyleSheet("border: None")
@@ -139,7 +149,9 @@ def setupUi(self, MainWindow):
self.right_right_button.setFlat(True)
self.right_right_button.setObjectName("right_right_button")
self.gridLayout.addWidget(self.right_right_button, 2, 5, 1, 1)
- spacerItem2 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
+ spacerItem2 = QtWidgets.QSpacerItem(
+ 40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum
+ )
self.gridLayout.addItem(spacerItem2, 2, 6, 1, 1)
self.down_button = QtWidgets.QPushButton(self.groupBox_4)
self.down_button.setStyleSheet("border: None")
@@ -157,7 +169,9 @@ def setupUi(self, MainWindow):
self.verticalLayout_7.addWidget(self.groupBox_4)
self.horizontalLayout_4 = QtWidgets.QHBoxLayout()
self.horizontalLayout_4.setObjectName("horizontalLayout_4")
- spacerItem3 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
+ spacerItem3 = QtWidgets.QSpacerItem(
+ 40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum
+ )
self.horizontalLayout_4.addItem(spacerItem3)
self.label_2 = QtWidgets.QLabel(self.tab_2)
self.label_2.setObjectName("label_2")
@@ -202,7 +216,9 @@ def setupUi(self, MainWindow):
self.cancel_button = QtWidgets.QPushButton(self.centralwidget)
self.cancel_button.setObjectName("cancel_button")
self.horizontalLayout.addWidget(self.cancel_button)
- spacerItem4 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
+ spacerItem4 = QtWidgets.QSpacerItem(
+ 40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum
+ )
self.horizontalLayout.addItem(spacerItem4)
self.export_button = QtWidgets.QPushButton(self.centralwidget)
self.export_button.setObjectName("export_button")
@@ -225,8 +241,12 @@ def setupUi(self, MainWindow):
self.tabWidget.setCurrentIndex(0)
self.cancel_button.clicked.connect(MainWindow.cancel_clicked) # type: ignore
self.export_button.clicked.connect(MainWindow.apply_clicked) # type: ignore
- self.tableWidget.itemSelectionChanged.connect(MainWindow.table_widget_selection_changed) # type: ignore
- self.run_stitching_button.clicked.connect(MainWindow.run_stitching_button_clicked) # type: ignore
+ self.tableWidget.itemSelectionChanged.connect(
+ MainWindow.table_widget_selection_changed
+ ) # type: ignore
+ self.run_stitching_button.clicked.connect(
+ MainWindow.run_stitching_button_clicked
+ ) # type: ignore
self.left_button.pressed.connect(MainWindow.left_button_pressed) # type: ignore
self.left_button.released.connect(MainWindow.left_button_released) # type: ignore
self.left_left_button.pressed.connect(MainWindow.left_left_button_pressed) # type: ignore
@@ -257,12 +277,16 @@ def retranslateUi(self, MainWindow):
item = self.tableWidget.horizontalHeaderItem(2)
item.setText(_translate("MainWindow", "Status"))
self.run_stitching_button.setText(_translate("MainWindow", "Run Stitching "))
- self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), _translate("MainWindow", "Step 1"))
+ self.tabWidget.setTabText(
+ self.tabWidget.indexOf(self.tab), _translate("MainWindow", "Step 1")
+ )
self.label.setText(_translate("MainWindow", "File Names"))
self.groupBox_4.setTitle(_translate("MainWindow", "Manual Mode"))
self.label_2.setText(_translate("MainWindow", "->"))
self.label_5.setText(_translate("MainWindow", "->>"))
- self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2), _translate("MainWindow", "Step 2"))
+ self.tabWidget.setTabText(
+ self.tabWidget.indexOf(self.tab_2), _translate("MainWindow", "Step 2")
+ )
self.groupBox_3.setTitle(_translate("MainWindow", "Stiched Image"))
self.cancel_button.setText(_translate("MainWindow", "Close"))
self.export_button.setText(_translate("MainWindow", "Export ..."))
diff --git a/notebooks/__code/ui_profile.py b/notebooks/__code/ui_profile.py
index 1b012680..ac144116 100755
--- a/notebooks/__code/ui_profile.py
+++ b/notebooks/__code/ui_profile.py
@@ -55,10 +55,14 @@ def setupUi(self, MainWindow):
self.horizontalLayout_6.addWidget(self.display_size_label)
self.grid_size_slider = QtWidgets.QSlider(self.groupBox)
self.grid_size_slider.setEnabled(False)
- sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed)
+ sizePolicy = QtWidgets.QSizePolicy(
+ QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed
+ )
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
- sizePolicy.setHeightForWidth(self.grid_size_slider.sizePolicy().hasHeightForWidth())
+ sizePolicy.setHeightForWidth(
+ self.grid_size_slider.sizePolicy().hasHeightForWidth()
+ )
self.grid_size_slider.setSizePolicy(sizePolicy)
self.grid_size_slider.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.grid_size_slider.setMinimum(1)
@@ -132,7 +136,9 @@ def setupUi(self, MainWindow):
self.label_6.setObjectName("label_6")
self.horizontalLayout_8.addWidget(self.label_6)
self.filename = QtWidgets.QLabel(self.layoutWidget)
- sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Preferred)
+ sizePolicy = QtWidgets.QSizePolicy(
+ QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Preferred
+ )
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.filename.sizePolicy().hasHeightForWidth())
@@ -268,7 +274,9 @@ def setupUi(self, MainWindow):
self.pushButton_4.setFont(font)
self.pushButton_4.setObjectName("pushButton_4")
self.horizontalLayout_4.addWidget(self.pushButton_4)
- spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
+ spacerItem = QtWidgets.QSpacerItem(
+ 40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum
+ )
self.horizontalLayout_4.addItem(spacerItem)
self.groupBox_2 = QtWidgets.QGroupBox(self.layoutWidget1)
self.groupBox_2.setObjectName("groupBox_2")
@@ -283,7 +291,9 @@ def setupUi(self, MainWindow):
self.profile_direction_y_axis.setObjectName("profile_direction_y_axis")
self.horizontalLayout_7.addWidget(self.profile_direction_y_axis)
self.horizontalLayout_4.addWidget(self.groupBox_2)
- spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
+ spacerItem1 = QtWidgets.QSpacerItem(
+ 40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum
+ )
self.horizontalLayout_4.addItem(spacerItem1)
self.pushButton_5 = QtWidgets.QPushButton(self.layoutWidget1)
font = QtGui.QFont()
@@ -316,10 +326,16 @@ def setupUi(self, MainWindow):
self.all_plots_verti_splitter = QtWidgets.QSplitter(self.layoutWidget2)
self.all_plots_verti_splitter.setOrientation(QtCore.Qt.Vertical)
self.all_plots_verti_splitter.setObjectName("all_plots_verti_splitter")
- self.all_plots_file_name_table = QtWidgets.QTableWidget(self.all_plots_verti_splitter)
+ self.all_plots_file_name_table = QtWidgets.QTableWidget(
+ self.all_plots_verti_splitter
+ )
self.all_plots_file_name_table.setAlternatingRowColors(True)
- self.all_plots_file_name_table.setSelectionMode(QtWidgets.QAbstractItemView.MultiSelection)
- self.all_plots_file_name_table.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)
+ self.all_plots_file_name_table.setSelectionMode(
+ QtWidgets.QAbstractItemView.MultiSelection
+ )
+ self.all_plots_file_name_table.setSelectionBehavior(
+ QtWidgets.QAbstractItemView.SelectRows
+ )
self.all_plots_file_name_table.setObjectName("all_plots_file_name_table")
self.all_plots_file_name_table.setColumnCount(1)
self.all_plots_file_name_table.setRowCount(0)
@@ -327,10 +343,16 @@ def setupUi(self, MainWindow):
self.all_plots_file_name_table.setHorizontalHeaderItem(0, item)
self.all_plots_file_name_table.horizontalHeader().setStretchLastSection(True)
self.all_plots_file_name_table.verticalHeader().setStretchLastSection(False)
- self.all_plots_profiles_table = QtWidgets.QTableWidget(self.all_plots_verti_splitter)
+ self.all_plots_profiles_table = QtWidgets.QTableWidget(
+ self.all_plots_verti_splitter
+ )
self.all_plots_profiles_table.setAlternatingRowColors(True)
- self.all_plots_profiles_table.setSelectionMode(QtWidgets.QAbstractItemView.MultiSelection)
- self.all_plots_profiles_table.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)
+ self.all_plots_profiles_table.setSelectionMode(
+ QtWidgets.QAbstractItemView.MultiSelection
+ )
+ self.all_plots_profiles_table.setSelectionBehavior(
+ QtWidgets.QAbstractItemView.SelectRows
+ )
self.all_plots_profiles_table.setObjectName("all_plots_profiles_table")
self.all_plots_profiles_table.setColumnCount(1)
self.all_plots_profiles_table.setRowCount(0)
@@ -366,7 +388,9 @@ def setupUi(self, MainWindow):
self.pushButton = QtWidgets.QPushButton(self.centralwidget)
self.pushButton.setObjectName("pushButton")
self.horizontalLayout.addWidget(self.pushButton)
- spacerItem2 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
+ spacerItem2 = QtWidgets.QSpacerItem(
+ 40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum
+ )
self.horizontalLayout.addItem(spacerItem2)
self.export_button = QtWidgets.QPushButton(self.centralwidget)
self.export_button.setObjectName("export_button")
@@ -403,7 +427,9 @@ def setupUi(self, MainWindow):
self.tabWidget.setCurrentIndex(0)
self.file_slider.sliderMoved["int"].connect(MainWindow.slider_file_changed) # type: ignore
self.file_slider.valueChanged["int"].connect(MainWindow.slider_file_changed) # type: ignore
- self.previous_image_button.clicked.connect(MainWindow.previous_image_button_clicked) # type: ignore
+ self.previous_image_button.clicked.connect(
+ MainWindow.previous_image_button_clicked
+ ) # type: ignore
self.next_image_button.clicked.connect(MainWindow.next_image_button_clicked) # type: ignore
self.export_button.clicked.connect(MainWindow.export_button_clicked) # type: ignore
self.pushButton.clicked.connect(MainWindow.help_button_clicked) # type: ignore
@@ -411,22 +437,52 @@ def setupUi(self, MainWindow):
self.pushButton_5.clicked.connect(MainWindow.add_row_button_clicked) # type: ignore
self.grid_display_checkBox.clicked.connect(MainWindow.display_grid_clicked) # type: ignore
self.grid_size_slider.sliderPressed.connect(MainWindow.grid_size_slider_clicked) # type: ignore
- self.grid_size_slider.sliderMoved["int"].connect(MainWindow.grid_size_slider_moved) # type: ignore
- self.transparency_slider.sliderPressed.connect(MainWindow.transparency_slider_clicked) # type: ignore
- self.transparency_slider.sliderMoved["int"].connect(MainWindow.transparency_slider_moved) # type: ignore
- self.right_rotation_button_fast.clicked.connect(MainWindow.right_rotation_fast_clicked) # type: ignore
- self.right_rotation_button_slow.clicked.connect(MainWindow.right_rotation_slow_clicked) # type: ignore
- self.left_rotation_button_fast.clicked.connect(MainWindow.left_rotation_fast_clicked) # type: ignore
- self.left_rotation_button_slow.clicked.connect(MainWindow.left_rotation_slow_clicked) # type: ignore
- self.tableWidget.itemSelectionChanged.connect(MainWindow.table_widget_selection_changed) # type: ignore
- self.tableWidget_2.itemSelectionChanged.connect(MainWindow.table_widget_2_selection_changed) # type: ignore
- self.tableWidget.cellChanged["int", "int"].connect(MainWindow.table_widget_cell_changed) # type: ignore
- self.grid_size_slider.sliderReleased.connect(MainWindow.grid_size_slider_released) # type: ignore
- self.profile_direction_x_axis.clicked.connect(MainWindow.profile_along_axis_changed) # type: ignore
- self.profile_direction_y_axis.clicked.connect(MainWindow.profile_along_axis_changed) # type: ignore
+ self.grid_size_slider.sliderMoved["int"].connect(
+ MainWindow.grid_size_slider_moved
+ ) # type: ignore
+ self.transparency_slider.sliderPressed.connect(
+ MainWindow.transparency_slider_clicked
+ ) # type: ignore
+ self.transparency_slider.sliderMoved["int"].connect(
+ MainWindow.transparency_slider_moved
+ ) # type: ignore
+ self.right_rotation_button_fast.clicked.connect(
+ MainWindow.right_rotation_fast_clicked
+ ) # type: ignore
+ self.right_rotation_button_slow.clicked.connect(
+ MainWindow.right_rotation_slow_clicked
+ ) # type: ignore
+ self.left_rotation_button_fast.clicked.connect(
+ MainWindow.left_rotation_fast_clicked
+ ) # type: ignore
+ self.left_rotation_button_slow.clicked.connect(
+ MainWindow.left_rotation_slow_clicked
+ ) # type: ignore
+ self.tableWidget.itemSelectionChanged.connect(
+ MainWindow.table_widget_selection_changed
+ ) # type: ignore
+ self.tableWidget_2.itemSelectionChanged.connect(
+ MainWindow.table_widget_2_selection_changed
+ ) # type: ignore
+ self.tableWidget.cellChanged["int", "int"].connect(
+ MainWindow.table_widget_cell_changed
+ ) # type: ignore
+ self.grid_size_slider.sliderReleased.connect(
+ MainWindow.grid_size_slider_released
+ ) # type: ignore
+ self.profile_direction_x_axis.clicked.connect(
+ MainWindow.profile_along_axis_changed
+ ) # type: ignore
+ self.profile_direction_y_axis.clicked.connect(
+ MainWindow.profile_along_axis_changed
+ ) # type: ignore
self.tabWidget.currentChanged["int"].connect(MainWindow.tab_changed) # type: ignore
- self.all_plots_file_name_table.itemSelectionChanged.connect(MainWindow.update_all_plots) # type: ignore
- self.all_plots_profiles_table.itemSelectionChanged.connect(MainWindow.update_all_plots) # type: ignore
+ self.all_plots_file_name_table.itemSelectionChanged.connect(
+ MainWindow.update_all_plots
+ ) # type: ignore
+ self.all_plots_profiles_table.itemSelectionChanged.connect(
+ MainWindow.update_all_plots
+ ) # type: ignore
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
@@ -435,7 +491,9 @@ def retranslateUi(self, MainWindow):
self.groupBox.setTitle(_translate("MainWindow", "Grid"))
self.grid_display_checkBox.setText(_translate("MainWindow", "Display"))
self.display_size_label.setText(_translate("MainWindow", "Size"))
- self.display_transparency_label.setText(_translate("MainWindow", "Transparency"))
+ self.display_transparency_label.setText(
+ _translate("MainWindow", "Transparency")
+ )
self.previous_image_button.setText(_translate("MainWindow", "Prev. Image"))
self.image_slider_value.setText(_translate("MainWindow", "0"))
self.next_image_button.setText(_translate("MainWindow", "Next Image"))
@@ -455,20 +513,30 @@ def retranslateUi(self, MainWindow):
item.setText(_translate("MainWindow", "Height"))
item = self.tableWidget_2.horizontalHeaderItem(0)
item.setText(_translate("MainWindow", "Width"))
- self.label_3.setText(_translate("MainWindow", "ROI of selected row is displayed in RED"))
+ self.label_3.setText(
+ _translate("MainWindow", "ROI of selected row is displayed in RED")
+ )
self.pushButton_4.setText(_translate("MainWindow", "-"))
self.groupBox_2.setTitle(_translate("MainWindow", "Profile Direction"))
self.profile_direction_x_axis.setText(_translate("MainWindow", "x-axis"))
self.profile_direction_y_axis.setText(_translate("MainWindow", "y-axis"))
self.pushButton_5.setText(_translate("MainWindow", "+"))
- self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), _translate("MainWindow", "Measurement"))
- self.label_5.setText(_translate("MainWindow", "Select the FILE(s) and the PROFILE(s) you want to display!"))
+ self.tabWidget.setTabText(
+ self.tabWidget.indexOf(self.tab), _translate("MainWindow", "Measurement")
+ )
+ self.label_5.setText(
+ _translate(
+ "MainWindow",
+ "Select the FILE(s) and the PROFILE(s) you want to display!",
+ )
+ )
item = self.all_plots_file_name_table.horizontalHeaderItem(0)
item.setText(_translate("MainWindow", "File Names"))
item = self.all_plots_profiles_table.horizontalHeaderItem(0)
item.setText(_translate("MainWindow", "Profiles"))
self.tabWidget.setTabText(
- self.tabWidget.indexOf(self.tab_3), _translate("MainWindow", "All Profiles / All Images")
+ self.tabWidget.indexOf(self.tab_3),
+ _translate("MainWindow", "All Profiles / All Images"),
)
item = self.summary_table.horizontalHeaderItem(0)
item.setText(_translate("MainWindow", "Files Name"))
@@ -476,12 +544,16 @@ def retranslateUi(self, MainWindow):
item.setText(_translate("MainWindow", "Time Stamp"))
item = self.summary_table.horizontalHeaderItem(2)
item.setText(_translate("MainWindow", "Relative Time (s)"))
- self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2), _translate("MainWindow", "Summary"))
+ self.tabWidget.setTabText(
+ self.tabWidget.indexOf(self.tab_2), _translate("MainWindow", "Summary")
+ )
self.pushButton.setText(_translate("MainWindow", "Help"))
self.export_button.setText(_translate("MainWindow", "Export Profiles ..."))
self.actionExport_Profile.setText(_translate("MainWindow", "Profiles ..."))
self.actionWater_Intake.setText(_translate("MainWindow", "Water Intake ..."))
- self.actionImportedFilesMetadata.setText(_translate("MainWindow", "Imported Files and Metadata ..."))
+ self.actionImportedFilesMetadata.setText(
+ _translate("MainWindow", "Imported Files and Metadata ...")
+ )
self.actionBy_Time_Stamp.setText(_translate("MainWindow", "by Time Stamp"))
self.actionBy_File_Name.setText(_translate("MainWindow", "by File Name"))
self.actionDsc_files.setText(_translate("MainWindow", "dsc files ..."))
diff --git a/notebooks/__code/ui_radial_profile.py b/notebooks/__code/ui_radial_profile.py
index fa41659c..0cceda0d 100755
--- a/notebooks/__code/ui_radial_profile.py
+++ b/notebooks/__code/ui_radial_profile.py
@@ -24,7 +24,9 @@ def setupUi(self, MainWindow):
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.widget = QtWidgets.QWidget(self.tab_2)
- sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
+ sizePolicy = QtWidgets.QSizePolicy(
+ QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding
+ )
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.widget.sizePolicy().hasHeightForWidth())
@@ -34,7 +36,9 @@ def setupUi(self, MainWindow):
self.verticalLayout_3 = QtWidgets.QVBoxLayout()
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.groupBox_2 = QtWidgets.QGroupBox(self.tab_2)
- sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Preferred)
+ sizePolicy = QtWidgets.QSizePolicy(
+ QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Preferred
+ )
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.groupBox_2.sizePolicy().hasHeightForWidth())
@@ -65,7 +69,8 @@ def setupUi(self, MainWindow):
self.verticalLayout_3.addWidget(self.groupBox_2)
self.groupBox_3 = QtWidgets.QGroupBox(self.tab_2)
sizePolicy = QtWidgets.QSizePolicy(
- QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.MinimumExpanding
+ QtWidgets.QSizePolicy.MinimumExpanding,
+ QtWidgets.QSizePolicy.MinimumExpanding,
)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
@@ -91,7 +96,9 @@ def setupUi(self, MainWindow):
self.sector_from_value.setMinimumSize(QtCore.QSize(30, 0))
self.sector_from_value.setMaximumSize(QtCore.QSize(30, 16777215))
self.sector_from_value.setText("")
- self.sector_from_value.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter)
+ self.sector_from_value.setAlignment(
+ QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter
+ )
self.sector_from_value.setObjectName("sector_from_value")
self.gridLayout_2.addWidget(self.sector_from_value, 0, 2, 1, 1)
self.sector_to_label = QtWidgets.QLabel(self.groupBox_3)
@@ -102,10 +109,14 @@ def setupUi(self, MainWindow):
self.gridLayout_2.addWidget(self.sector_from_label, 0, 0, 1, 1)
self.from_angle_slider = QtWidgets.QScrollBar(self.groupBox_3)
self.from_angle_slider.setEnabled(False)
- sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed)
+ sizePolicy = QtWidgets.QSizePolicy(
+ QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed
+ )
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
- sizePolicy.setHeightForWidth(self.from_angle_slider.sizePolicy().hasHeightForWidth())
+ sizePolicy.setHeightForWidth(
+ self.from_angle_slider.sizePolicy().hasHeightForWidth()
+ )
self.from_angle_slider.setSizePolicy(sizePolicy)
self.from_angle_slider.setMaximum(360)
self.from_angle_slider.setOrientation(QtCore.Qt.Horizontal)
@@ -113,7 +124,9 @@ def setupUi(self, MainWindow):
self.gridLayout_2.addWidget(self.from_angle_slider, 0, 1, 1, 1)
self.sector_to_value = QtWidgets.QLabel(self.groupBox_3)
self.sector_to_value.setText("")
- self.sector_to_value.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter)
+ self.sector_to_value.setAlignment(
+ QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter
+ )
self.sector_to_value.setObjectName("sector_to_value")
self.gridLayout_2.addWidget(self.sector_to_value, 1, 2, 1, 1)
self.to_angle_slider = QtWidgets.QScrollBar(self.groupBox_3)
@@ -182,12 +195,16 @@ def setupUi(self, MainWindow):
self.grid_size_slider.setObjectName("grid_size_slider")
self.gridLayout_3.addWidget(self.grid_size_slider, 4, 1, 1, 1)
self.verticalLayout_3.addWidget(self.groupBox_4)
- spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
+ spacerItem = QtWidgets.QSpacerItem(
+ 20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding
+ )
self.verticalLayout_3.addItem(spacerItem)
self.horizontalLayout_2.addLayout(self.verticalLayout_3)
self.verticalLayout_4.addLayout(self.horizontalLayout_2)
self.groupBox = QtWidgets.QGroupBox(self.tab_2)
- sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Preferred)
+ sizePolicy = QtWidgets.QSizePolicy(
+ QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Preferred
+ )
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.groupBox.sizePolicy().hasHeightForWidth())
@@ -200,7 +217,9 @@ def setupUi(self, MainWindow):
self.verticalLayout.setObjectName("verticalLayout")
self.textBrowser = QtWidgets.QTextBrowser(self.groupBox)
self.textBrowser.setEnabled(False)
- sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
+ sizePolicy = QtWidgets.QSizePolicy(
+ QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding
+ )
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.textBrowser.sizePolicy().hasHeightForWidth())
@@ -219,10 +238,14 @@ def setupUi(self, MainWindow):
self.calculate_profiles_button.setObjectName("calculate_profiles_button")
self.verticalLayout_6.addWidget(self.calculate_profiles_button)
self.widget_profile = QtWidgets.QWidget(self.tab)
- sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
+ sizePolicy = QtWidgets.QSizePolicy(
+ QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding
+ )
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
- sizePolicy.setHeightForWidth(self.widget_profile.sizePolicy().hasHeightForWidth())
+ sizePolicy.setHeightForWidth(
+ self.widget_profile.sizePolicy().hasHeightForWidth()
+ )
self.widget_profile.setSizePolicy(sizePolicy)
self.widget_profile.setObjectName("widget_profile")
self.verticalLayout_6.addWidget(self.widget_profile)
@@ -237,7 +260,9 @@ def setupUi(self, MainWindow):
self.help_button = QtWidgets.QPushButton(self.centralwidget)
self.help_button.setObjectName("help_button")
self.horizontalLayout.addWidget(self.help_button)
- spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
+ spacerItem1 = QtWidgets.QSpacerItem(
+ 40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum
+ )
self.horizontalLayout.addItem(spacerItem1)
self.cancel_button = QtWidgets.QPushButton(self.centralwidget)
self.cancel_button.setObjectName("cancel_button")
@@ -258,16 +283,30 @@ def setupUi(self, MainWindow):
self.sector_full_circle.clicked.connect(MainWindow.sector_radio_button_changed)
self.sector_sector.clicked.connect(MainWindow.sector_radio_button_changed)
self.guide_red_slider.sliderMoved["int"].connect(MainWindow.guide_color_changed)
- self.guide_green_slider.sliderMoved["int"].connect(MainWindow.guide_color_changed)
- self.guide_blue_slider.sliderMoved["int"].connect(MainWindow.guide_color_changed)
- self.guide_alpha_slider.sliderMoved["int"].connect(MainWindow.guide_color_changed)
+ self.guide_green_slider.sliderMoved["int"].connect(
+ MainWindow.guide_color_changed
+ )
+ self.guide_blue_slider.sliderMoved["int"].connect(
+ MainWindow.guide_color_changed
+ )
+ self.guide_alpha_slider.sliderMoved["int"].connect(
+ MainWindow.guide_color_changed
+ )
self.grid_size_slider.sliderMoved["int"].connect(MainWindow.grid_slider_moved)
self.grid_size_slider.sliderPressed.connect(MainWindow.grid_slider_pressed)
- self.from_angle_slider.sliderMoved["int"].connect(MainWindow.sector_from_angle_moved)
- self.to_angle_slider.sliderMoved["int"].connect(MainWindow.sector_to_angle_moved)
- self.from_angle_slider.sliderPressed.connect(MainWindow.sector_from_angle_clicked)
+ self.from_angle_slider.sliderMoved["int"].connect(
+ MainWindow.sector_from_angle_moved
+ )
+ self.to_angle_slider.sliderMoved["int"].connect(
+ MainWindow.sector_to_angle_moved
+ )
+ self.from_angle_slider.sliderPressed.connect(
+ MainWindow.sector_from_angle_clicked
+ )
self.to_angle_slider.sliderPressed.connect(MainWindow.sector_to_angle_clicked)
- self.from_angle_slider.sliderReleased.connect(MainWindow.sector_from_angle_clicked)
+ self.from_angle_slider.sliderReleased.connect(
+ MainWindow.sector_from_angle_clicked
+ )
self.to_angle_slider.sliderReleased.connect(MainWindow.sector_to_angle_clicked)
self.guide_red_slider.sliderPressed.connect(MainWindow.guide_color_clicked)
self.guide_red_slider.sliderReleased.connect(MainWindow.guide_color_released)
@@ -278,9 +317,15 @@ def setupUi(self, MainWindow):
self.guide_alpha_slider.sliderPressed.connect(MainWindow.guide_color_clicked)
self.guide_alpha_slider.sliderReleased.connect(MainWindow.guide_color_released)
self.grid_size_slider.sliderReleased.connect(MainWindow.grid_slider_pressed)
- self.from_angle_slider.valueChanged["int"].connect(MainWindow.sector_from_angle_moved)
- self.to_angle_slider.valueChanged["int"].connect(MainWindow.sector_to_angle_moved)
- self.calculate_profiles_button.clicked.connect(MainWindow.calculate_profiles_clicked)
+ self.from_angle_slider.valueChanged["int"].connect(
+ MainWindow.sector_from_angle_moved
+ )
+ self.to_angle_slider.valueChanged["int"].connect(
+ MainWindow.sector_to_angle_moved
+ )
+ self.calculate_profiles_button.clicked.connect(
+ MainWindow.calculate_profiles_clicked
+ )
self.export_profiles_button.clicked.connect(MainWindow.export_profiles_clicked)
self.help_button.clicked.connect(MainWindow.help_button_clicked)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
@@ -317,9 +362,18 @@ def retranslateUi(self, MainWindow):
'* Export profiles