DUNE-DAQ
DUNE Trigger and Data Acquisition software
TCReader.py
1"""
2Reader class for TC data.
3"""
4from .HDF5Reader import HDF5Reader
5
6import daqdataformats # noqa: F401 : Not used, but needed to recognize formats.
7import trgdataformats
8
9import numpy as np
10from numpy.typing import NDArray
11
12
14 """
15 Class that reads a given HDF5 data file and can
16 process the TC fragments within.
17
18 Loading fragments appends to :self.tc_data: and :self.ta_data:.
19 NumPy dtypes of :self.tc_data: and :self.ta_data: are available
20 as :TCReader.tc_dt: and :TCReader.ta_dt:.
21
22 TC reading can print information that is relevant about the
23 loading process by specifying the verbose level. 0 for errors
24 only. 1 for warnings. 2 for all information.
25 """
    # TC data type
    tc_dt = np.dtype([
        ('algorithm', trgdataformats.TriggerCandidateData.Algorithm),
        ('detid', np.uint16),
        ('num_tas', np.uint64),  # Greedy
        ('time_candidate', np.uint64),
        ('time_end', np.uint64),
        ('time_start', np.uint64),
        ('type', trgdataformats.TriggerCandidateData.Type),
        ('version', np.uint16),
    ])

    # TA data type
    ta_dt = np.dtype([
        ('adc_integral', np.uint64),
        ('adc_peak', np.uint64),
        ('algorithm', trgdataformats.TriggerActivityData.Algorithm),
        ('channel_end', np.int32),
        ('channel_peak', np.int32),
        ('channel_start', np.int32),
        ('detid', np.uint16),
        ('time_activity', np.uint64),
        ('time_end', np.uint64),
        ('time_peak', np.uint64),
        ('time_start', np.uint64),
        ('type', trgdataformats.TriggerActivityData.Type),
        ('version', np.uint16)
    ])
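
    # Sketch of working with these structured dtypes once fragments are loaded
    # (``reader`` is an assumed TCReader instance, for illustration only):
    #     durations = reader.tc_data['time_end'] - reader.tc_data['time_start']
    #     tas_per_tc = reader.tc_data['num_tas']
    #     first_tas = reader.ta_data[0]        # np.ndarray with dtype ta_dt
    #     first_tas['adc_peak'].max()          # Largest TA ADC peak in that TC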

    def __init__(self, filename: str, verbosity: int = 0, batch_mode: bool = False) -> None:
        """
        Loads a given HDF5 file.

        Parameters:
            filename (str): HDF5 file to open.
            verbosity (int): Verbose level. 0: Only errors. 1: Warnings. 2: All.

        Returns nothing.
        """
        super().__init__(filename, verbosity, batch_mode)
        self.tc_data = np.array([], dtype=self.tc_dt)  # Will concatenate new TCs
        self.ta_data = []  # ta_data[i] will be a np.ndarray of TAs from the i-th TC
        return None

    def __getitem__(self, key: int | str) -> NDArray[tc_dt]:
        return self.tc_data[key]

    def __setitem__(self, key: int | str, value: NDArray[tc_dt]) -> None:
        self.tc_data[key] = value
        return

    def __len__(self) -> int:
        return len(self.tc_data)

    def _filter_fragment_paths(self) -> None:
        """ Filter the fragment paths for TCs. """
        fragment_paths = []

        # TC fragment paths contain their name in the path.
        for path in self._fragment_paths:
            if "Trigger_Candidate" in path:
                fragment_paths.append(path)

        self._fragment_paths = fragment_paths
        return None

    def read_fragment(self, fragment_path: str) -> NDArray:
        """
        Read from the given data fragment path.

        Returns a np.ndarray of the first TC that was read and appends
        all TCs in the fragment to :self.tc_data:.
        """
        if self._verbosity >= 2:
            print("="*60)
            print(f"INFO: Reading from the path\n{fragment_path}")

        fragment = self._h5_file.get_frag(fragment_path)
        fragment_data_size = fragment.get_data_size()

        if fragment_data_size == 0:  # Empty fragment
            self._num_empty += 1
            if self._verbosity >= 1:
                print(
                    self._BOLD_TEXT
                    + "WARNING: Empty fragment."
                    + self._END_TEXT_COLOR
                )
                print("="*60)
            return np.array([], dtype=self.tc_dt)

        tc_idx = 0  # Debugging output.
        byte_idx = 0  # Variable TC sizing, must do a while loop.
        while byte_idx < fragment_data_size:
            if self._verbosity >= 2:
                print(f"INFO: Fragment Index: {tc_idx}.")
                tc_idx += 1
                print(f"INFO: Byte Index / Frag Size: {byte_idx} / {fragment_data_size}")

            # Process TC data
            tc_datum = trgdataformats.TriggerCandidate(fragment.get_data(byte_idx))
            np_tc_datum = np.array([(
                tc_datum.data.algorithm,
                tc_datum.data.detid,
                tc_datum.n_inputs(),
                tc_datum.data.time_candidate,
                tc_datum.data.time_end,
                tc_datum.data.time_start,
                tc_datum.data.type,
                tc_datum.data.version)],
                dtype=self.tc_dt)

            self.tc_data = np.hstack((self.tc_data, np_tc_datum))

            byte_idx += tc_datum.sizeof()
            if self._verbosity >= 2:
                print(f"INFO: Upcoming byte index: {byte_idx}.")

            # Process TA data
            np_ta_data = np.zeros(np_tc_datum['num_tas'], dtype=self.ta_dt)
            for ta_idx, ta in enumerate(tc_datum):
                np_ta_data[ta_idx] = np.array([(
                    ta.adc_integral,
                    ta.adc_peak,
                    ta.algorithm,
                    ta.channel_end,
                    ta.channel_peak,
                    ta.channel_start,
                    np.uint16(ta.detid),
                    ta.time_activity,
                    ta.time_end,
                    ta.time_peak,
                    ta.time_start,
                    ta.type,
                    ta.version)],
                    dtype=self.ta_dt)
            self.ta_data.append(np_ta_data)  # Jagged array

        if self._verbosity >= 2:
            print("INFO: Finished reading.")
            print("="*60)
        return np_tc_datum

    def clear_data(self) -> None:
        """ Clear the currently loaded TC and TA data. """
        self.tc_data = np.array([], dtype=self.tc_dt)
        self.ta_data = []
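

# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). It assumes the
# parent HDF5Reader populates and filters :self._fragment_paths: during
# construction, and "example_tc_data.hdf5" is a placeholder file name.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    reader = TCReader("example_tc_data.hdf5", verbosity=1)

    # Each call appends the fragment's TCs to reader.tc_data and their TAs
    # to reader.ta_data (one np.ndarray of TAs per TC).
    for frag_path in reader._fragment_paths:
        reader.read_fragment(frag_path)

    # tc_data is a flat structured array; ta_data is a jagged list aligned with it.
    for tc, tas in zip(reader.tc_data, reader.ta_data):
        print(tc['time_candidate'], int(tc['num_tas']), tas['adc_integral'].sum())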