DUNE-DAQ
DUNE Trigger and Data Acquisition software
Loading...
Searching...
No Matches
dataclasses.py
Go to the documentation of this file.
1from dataclasses import dataclass, field
2import typing
3from datetime import datetime
4import pytz
5import numpy as np
6
7import daqdataformats
8import detdataformats
9import fddetdataformats
10import trgdataformats
11import detchannelmaps
12
14 return dts*16 //1e9
15
def dts_to_datetime(dts_timestamp):
    """Convert a DTS (DUNE timing system) timestamp into a timezone-aware UTC datetime."""
    seconds = dts_to_seconds(dts_timestamp)
    return datetime.fromtimestamp(seconds, tz=pytz.timezone("UTC"))
18
19
20
22 # Find indices where the value changes compared to the previous value, adjusting the first index to start from 0
23 change_locations = np.insert(np.where(arr[1:] != arr[:-1])[0], 0, -1) + 1
24 # Check if the array is not empty
25 if len(arr) > 0:
26 # Return locations of changes, values at these locations, and the length of the array
27 return change_locations, arr[change_locations], len(arr)
28 else:
29 # Return empty results for an empty array
30 return [], [], 0
31
def desparsify_array_diff_locs_and_vals(change_locations, change_values, arr_size):
    """Rebuild a full array from its sparse (change-location, change-value) form.

    Each value in change_values is painted from its change location up to
    (but not including) the next change location; the last value runs to the
    end of the array.
    """
    reconstructed = np.empty(arr_size, dtype=np.uint)
    n_changes = len(change_locations)
    for i, (start, value) in enumerate(zip(change_locations, change_values)):
        # Segment ends at the next change location, or at the array end for the last one.
        stop = arr_size if (i + 1) == n_changes else change_locations[i + 1]
        reconstructed[start:stop] = value
    return reconstructed
43
45 # Store the first value of the array for later reconstruction
46 arr_first = arr[0]
47 # Compute the difference of consecutive elements
48 arr_diff = np.diff(arr)
49 # Use sparsify function to find locations and values of changes in the diff array
50 arr_diff_locs, arr_diff_vals, _ = sparsify_array_diff_locs_and_vals(arr_diff)
51 # Return the first value, change locations, change values, and array size for reconstruction
52 return arr_first, arr_diff_locs, arr_diff_vals, len(arr)
53
def desparsify_array_diff_of_diff_locs_and_vals(arr_first, change_locations, change_values, arr_size):
    """Invert the diff-of-diff sparsification: rebuild the original array.

    The first-difference array (length arr_size - 1) is reconstructed from its
    sparse form, then integrated via cumulative sum and offset by the stored
    first element.
    """
    diffs = desparsify_array_diff_locs_and_vals(change_locations, change_values, arr_size - 1)
    # Prepend a zero so the cumulative sum yields arr_size elements starting at arr_first.
    padded = np.concatenate((np.array([0], dtype=np.uint), diffs))
    return padded.cumsum() + arr_first
60
61@dataclass(order=True)
63 run: int
64 trigger: int
65 sequence: int
66
67 @classmethod
68 def index_names(cls):
69 return [ "run","trigger","sequence" ]
70
71 def index_values(self):
72 return [ self.run, self.trigger, self.sequence ]
73
74 def __str__(self):
75 return f"{self.__class__.__name__}({', '.join(f'{name}={getattr(self, name)}' for name in self.index_names())})"
76
77@dataclass(order=True)
79 src_id: int
80 subsystem: int
81 subsystem_str: str
82 version: int
83
84 def __str__(self):
85 base_str = super().__str__()
86
87 additional_fields = [f"src_id={self.src_id}",
88 f"subsystem={self.subsystem} ('{self.subsystem_str}')",
89 f"version={self.version}"]
90 return f"{base_str}: [{', '.join(additional_fields)}]"
91
92@dataclass(order=True)
94 src_id: int
95
96 @classmethod
97 def index_names(cls):
98 return [ "run","trigger","sequence","src_id" ]
99
100 def index_values(self):
101 return [ self.runrun, self.triggertrigger, self.sequence, self.src_id ]
102
103
104@dataclass(order=True)
106
107 trigger_timestamp_dts: int
108 n_fragments: int
109 n_requested_components: int
110 error_bits: int
111 trigger_type: int
112 max_sequence_number: int
113 total_size_bytes: int
114 trigger_time : datetime = field(init=False)
115 trigger_type_bits: list[int] = field(init=False)
116
117 def __post_init__(self):
118 self.trigger_time = dts_to_datetime(self.trigger_timestamp_dts)
119 self.trigger_type_bits = [ trgdataformats.TriggerCandidateData.Type(i) for i in range(64) if (self.trigger_type & (1<<i))!=0 ]
120
121 def __str__(self):
122 base_str = super().__str__()
123
124 additional_fields = [f"trigger_timestamp={self.trigger_timestamp_dts} ({self.trigger_time})",
125 f"trigger_type={self.trigger_type} ({self.trigger_type_bits})",
126 f"n_fragments={self.n_fragments}",
127 f"n_requested_components={self.n_requested_components}",
128 f"max_sequence_number={self.max_sequence_number}",
129 f"total_size_bytes={self.total_size_bytes}",
130 f"error_bits={self.error_bits}"]
131 return f"{base_str}: [{', '.join(additional_fields)}]"
132
133
134@dataclass(order=True)
136
137 trigger_timestamp_dts: int
138 window_begin_dts: int
139 window_end_dts: int
140 det_id: int
141 error_bits: int
142 fragment_type: int
143 total_size_bytes: int
144 data_size_bytes: int
145 trigger_time : datetime = field(init=False)
146 window_begin_time : datetime = field(init=False)
147 window_end_time : datetime = field(init=False)
148
149 def __post_init__(self):
150 self.trigger_time = dts_to_datetime(self.trigger_timestamp_dts)
151 self.window_begin_time = dts_to_datetime(self.window_begin_dts)
152 self.window_end_time = dts_to_datetime(self.window_end_dts)
153
154 def __str__(self):
155 base_str = super().__str__()
156
157 fr_type = daqdataformats.FragmentType(self.fragment_type)
158 subdet = detdataformats.DetID.subdetector_to_string(detdataformats.DetID.Subdetector(self.det_id))
159 additional_fields = [f"trigger_timestamp={self.trigger_timestamp_dts}",
160 f"window [begin,end)=[{self.window_begin_dts},{self.window_end_dts})",
161 f"det_id={self.det_id} ('{subdet}')",
162 f"fragment_type={self.fragment_type} ('{daqdataformats.fragment_type_to_string(fr_type)}')",
163 f"total_size_bytes={self.total_size_bytes}",
164 f"data_size_bytes={self.data_size_bytes}",
165 f"error_bits={self.error_bits}"]
166 return f"{base_str}: [{', '.join(additional_fields)}]"
167
168
169@dataclass(order=True)
171
172 n_obj: int
173 version: int
174
175@dataclass(order=True)
177
178 time_start: int
179 samples_to_peak: int
180 samples_over_threshold: int
181 channel: int
182 plane: int
183 element: int
184 adc_integral: int
185 adc_peak: int
186 detid: int
187 flag: int
188 id_ta: int
189
190 def __str__(self):
191 base_str = super().__str__()
192 subdet = detdataformats.DetID.subdetector_to_string(detdataformats.DetID.Subdetector(self.detid))
193
194 additional_fields = [f"channel={self.channel}",
195 f"(plane,element)=({self.plane},{self.element})",
196 f"time_start={self.time_start}",
197 f"samples_to_peak={self.samples_to_peak}",
198 f"samples_over_threshold={self.samples_over_threshold}",
199 f"adc_integral={self.adc_integral}",
200 f"adc_peak={self.adc_peak}",
201 f"detid={self.detid} ('{subdet}')",
202 f"flag={self.flag}",
203 f"id_ta={self.id_ta}"]
204 return f"{base_str}: [{', '.join(additional_fields)}]"
205
206@dataclass(order=True)
208
209 time_start: int
210 time_end: int
211 time_peak: int
212 time_activity: int
213 channel_start: int
214 channel_end: int
215 channel_peak: int
216 plane: int
217 element: int
218 adc_integral: int
219 adc_peak: int
220 detid: int
221 ta_type: int
222 algorithm: int
223 n_tps: int
224 id: int
225 id_tc: int
226
227 def __str__(self):
228 base_str = super().__str__()
229 subdet = detdataformats.DetID.subdetector_to_string(detdataformats.DetID.Subdetector(self.detid))
230 tatype = trgdataformats.TriggerActivityData.Type(self.ta_type)
231 taalg = trgdataformats.TriggerActivityData.Algorithm(self.algorithm)
232
233 additional_fields = [f"id={self.id}",
234 f"channel (start,peak,end)=({self.channel_start},{self.channel_peak},{self.channel_end})",
235 f"(plane,element)=({self.plane},{self.element})",
236 f"time_activity={self.time_activity}",
237 f"time (start,peak,end)=({self.time_start},{self.time_peak},{self.time_end})",
238 f"adc_integral={self.adc_integral}",
239 f"adc_peak={self.adc_peak}",
240 f"detid={self.detid} ('{subdet}')",
241 f"ta_type={self.ta_type} ('{tatype})",
242 f"algorithm={self.algorithm} ('{taalg})",
243 f"n_tps={self.n_tps}",
244 f"id_tc={self.id_tc}"]
245 return f"{base_str}: [{', '.join(additional_fields)}]"
246
247@dataclass(order=True)
249
250 time_start: int
251 time_end: int
252 time_candidate: int
253 detid: int
254 tc_type: int
255 algorithm: int
256 n_tas: int
257 id: int
258
259 def __str__(self):
260 base_str = super().__str__()
261 subdet = detdataformats.DetID.subdetector_to_string(detdataformats.DetID.Subdetector(self.detid))
262 tctype = trgdataformats.TriggerCandidateData.Type(self.tc_type)
263 tcalg = trgdataformats.TriggerCandidateData.Algorithm(self.algorithm)
264
265 additional_fields = [f"id={self.id}",
266 f"time_candidate={self.time_candidate}",
267 f"time (start,end)=({self.time_start},{self.time_end})",
268 f"detid={self.detid} ('{subdet}')",
269 f"tc_type={self.ta_type} ('{tctype})",
270 f"algorithm={self.algorithm} ('{tcalg})",
271 f"n_tas={self.n_tas}"]
272 return f"{base_str}: [{', '.join(additional_fields)}]"
273
274@dataclass(order=True)
276
277 n_obj: int
278 daq_header_version: int
279 det_data_version: int
280 det_id: int
281 crate_id: int
282 slot_id: int
283 stream_id: int
284 timestamp_first_dts: int
285 timestamp_first_time: datetime = field(init=False)
286
287 def __post_init__(self):
288 self.timestamp_first_time = dts_to_datetime(self.timestamp_first_dts)
289
290 def __str__(self):
291 base_str = super().__str__()
292 subdet = detdataformats.DetID.subdetector_to_string(detdataformats.DetID.Subdetector(self.det_id))
293 additional_fields = [f"n_obj={self.n_obj}",
294 f"first_timestamp={self.timestamp_first_dts}",
295 f"det_id={self.det_id} ('{subdet}')",
296 f"(crate_id,slot_id,stream_id)=({self.crate_id},{self.slot_id},{self.stream_id})",
297 f"daq_header_version={self.daq_header_version}",
298 f"det_data_version={self.det_data_version}"]
299 return f"{base_str}: [{', '.join(additional_fields)}]"
300
301@dataclass(order=True)
303
304 #first frame only
305 femb_id: int
306 colddata_id: int
307 version: int
308
309 #_idx arrays contain indices where value has changed from previous
310 #_vals arrays contain the values at those indices
311 pulser_vals: np.ndarray
312 pulser_idx: np.ndarray
313 calibration_vals: np.ndarray
314 calibration_idx: np.ndarray
315 ready_vals: np.ndarray
316 ready_idx: np.ndarray
317 context_vals: np.ndarray
318 context_idx: np.ndarray
319
320 wib_sync_vals: np.ndarray
321 wib_sync_idx: np.ndarray
322 femb_sync_vals: np.ndarray
323 femb_sync_idx: np.ndarray
324
325 cd_vals: np.ndarray
326 cd_idx: np.ndarray
327 crc_err_vals: np.ndarray
328 crc_err_idx: np.ndarray
329 link_valid_vals: np.ndarray
330 link_valid_idx: np.ndarray
331 lol_vals: np.ndarray
332 lol_idx: np.ndarray
333
334 #these take differences between successive values,
335 #and then, as above, look for differences in those differences
336 #store first value so the full array can be reconstructed
337 colddata_timestamp_0_diff_vals: np.ndarray
338 colddata_timestamp_0_diff_idx: np.ndarray
339 colddata_timestamp_0_first: int
340
341 colddata_timestamp_1_diff_vals: np.ndarray
342 colddata_timestamp_1_diff_idx: np.ndarray
343 colddata_timestamp_1_first: int
344
345 timestamp_dts_diff_vals: np.ndarray
346 timestamp_dts_diff_idx: np.ndarray
347 timestamp_dts_first: int
348
349 n_frames: int
350 n_channels: int
351 sampling_period: int
352
353 def __str__(self):
354 base_str = super().__str__()
355 additional_fields = [f"n_frames={self.n_frames}",
356 f"n_channels={self.n_channels}",
357 f"sampling_period={self.sampling_period}",
358 f"femb_id={self.femb_id}",
359 f"coldata_id={self.colddata_id}",
360 f"version={self.version}",
361 f"first_timestamp={self.timestamp_dts_first}"]
362 additional_field_names = ["timestamp_dts_diff",
363 "colddata_timestamp_0_diff","colddata_timestamp_1_diff",
364 "cd","crc_err","link_valid","lol","wib_sync","femb_sync",
365 "pulser","calibration","ready","context"]
366 for name in additional_field_names:
367 vals_name = f'{name}_vals'
368 idx_name = f'{name}_idx'
369 additional_fields.append(f"{name}={getattr(self,vals_name)} (idx={getattr(self,idx_name)})")
370 return f"{base_str}: [{', '.join(additional_fields)}]"
371
372@dataclass(order=True)
374
375 channel: int
376 plane: int
377 element: int
378 wib_chan: int
379
380 @classmethod
381 def index_names(cls):
382 return [ "run","trigger","sequence","src_id","channel" ]
383
384 def index_values(self):
385 return [ self.runrunrun, self.triggertriggertrigger, self.sequencesequence, self.src_id, self.channel ]
386
387@dataclass(order=True)
389
390 adc_mean: float
391 adc_rms: float
392 adc_max: int
393 adc_min: int
394 adc_median: float
395
396 def __str__(self):
397 base_str = super().__str__()
398 additional_fields = [f"adc_mean={self.adc_mean}",
399 f"adc_rms={self.adc_rms}",
400 f"adc_max={self.adc_max}",
401 f"adc_min={self.adc_min}",
402 f"adc_median={self.adc_median}"]
403 return f"{base_str}: [{', '.join(additional_fields)}]"
404
405@dataclass(order=True)
407
408 timestamps: np.ndarray
409 adcs: np.ndarray
410 fft_mag: np.ndarray
411
412 def __str__(self):
413 base_str = super().__str__()
414 additional_fields = [f"timestamps={self.timestamps}",
415 f"adcs={self.adcs}",
416 f"fft_mag={self.fft_mag}"]
417 return f"{base_str}: [{', '.join(additional_fields)}]"
418
419@dataclass(order=True)
421
422 #first frame only
423 channel_id: int
424 tde_header: int
425 version: int
426
427 #_idx arrays contain indices where value has changed from previous
428 #_vals arrays contain the values at those indices
429 errors_vals: np.ndarray
430 errors_idx: np.ndarray
431
432 #these take differences between successive values,
433 #and then, as above, look for differences in those differences
434 #store first value so the full array can be reconstructed
435 timestamp_dts_diff_vals: np.ndarray[int, np.float128]
436 timestamp_dts_diff_idx: np.ndarray
437 timestamp_dts_first: int
438
439 tai_time_diff_vals: np.ndarray[int, np.float128]
440 tai_time_diff_idx: np.ndarray
441 tai_time_first: int
442
443 n_frames: int
444 n_channels: int
445 sampling_period: int
446
447 def __str__(self):
448 base_str = super().__str__()
449 additional_fields = [f"n_frames={self.n_frames}",
450 f"n_channels={self.n_channels}",
451 f"sampling_period={self.sampling_period}",
452 f"channel_id={self.channel_id}",
453 f"tde_header={self.tde_header}",
454 f"version={self.version}",
455 f"first_timestamp={self.timestamp_first_dts}",
456 f"tai_time_first={self.tai_time_first}"]
457 additional_field_names = ["timestamp_dts_diff","tai_time_diff","errors"]
458 for name in additional_field_names:
459 vals_name = f'{name}_vals'
460 idx_name = f'{name}_idx'
461 additional_fields.append(f"{name}={getattr(self,vals_name)} (idx={getattr(self,idx_name)})")
462 return f"{base_str}: [{', '.join(additional_fields)}]"
463
464@dataclass(order=True)
466
467 channel: int
468 plane: int
469 element: int
470 tde_chan: int
471
472 @classmethod
473 def index_names(cls):
474 return [ "run","trigger","sequence","src_id","channel" ]
475
476 def index_values(self):
477 return [ self.runrunrun, self.triggertriggertrigger, self.sequencesequence, self.src_id, self.channel ]
478
479@dataclass(order=True)
481
482 adc_mean: float
483 adc_rms: float
484 adc_max: int
485 adc_min: int
486 adc_median: float
487
488 def __str__(self):
489 base_str = super().__str__()
490 additional_fields = [f"adc_mean={self.adc_mean}",
491 f"adc_rms={self.adc_rms}",
492 f"adc_max={self.adc_max}",
493 f"adc_min={self.adc_min}",
494 f"adc_median={self.adc_median}"]
495 return f"{base_str}: [{', '.join(additional_fields)}]"
496
497@dataclass(order=True)
499
500 timestamps: np.ndarray[int, np.float128]
501 adcs: np.ndarray
502 fft_mag: np.ndarray
503
504 def __str__(self):
505 base_str = super().__str__()
506 additional_fields = [f"timestamps={self.timestamps}",
507 f"adcs={self.adcs}",
508 f"fft_mag={self.fft_mag}"]
509 return f"{base_str}: [{', '.join(additional_fields)}]"
510
511@dataclass(order=True)
513
514 n_channels: int
515 sampling_period: int
516 ts_diffs_vals: np.ndarray
517 ts_diffs_counts: np.ndarray
518
519 def __str__(self):
520 base_str = super().__str__()
521 additional_fields = [f"n_channels={self.n_channels}",
522 f"sampling_period={self.sampling_period}",
523 f"ts_diffs_vals={self.ts_diffs_vals} (counts={self.ts_diffs_counts})"]
524 return f"{base_str}: [{', '.join(additional_fields)}]"
525
526@dataclass(order=True)
528
529 channel: int
530 daphne_chan: int
531
532 @classmethod
533 def index_names(cls):
534 return [ "run","trigger","sequence","src_id","channel" ]
535
536 def index_values(self):
537 return [ self.runrunrun, self.triggertriggertrigger, self.sequencesequence, self.src_id, self.channel ]
538
539
540@dataclass(order=True)
542
543 adc_mean: float
544 adc_rms: float
545 adc_max: int
546 adc_min: int
547 adc_median: float
548
549 def __str__(self):
550 base_str = super().__str__()
551 additional_fields = [f"adc_mean={self.adc_mean}",
552 f"adc_rms={self.adc_rms}",
553 f"adc_max={self.adc_max}",
554 f"adc_min={self.adc_min}",
555 f"adc_median={self.adc_median}"]
556 return f"{base_str}: [{', '.join(additional_fields)}]"
557
558@dataclass(order=True)
560
561 timestamps: np.ndarray
562 adcs: np.ndarray
563 fft_mag: np.ndarray
564
565 def __str__(self):
566 base_str = super().__str__()
567 additional_fields = [f"timestamps={self.timestamps}",
568 f"adcs={self.adcs}",
569 f"fft_mag={self.fft_mag}"]
570 return f"{base_str}: [{', '.join(additional_fields)}]"
571
572@dataclass(order=True)
574
575 timestamp_dts: int
576 trigger_sample_value: int
577 threshold: float
578 baseline: float
579 adc_mean: float
580 adc_rms: float
581 adc_max: int
582 adc_min: int
583 adc_median: float
584 timestamp_max_dts: int
585 timestamp_min_dts: int
586
587 def __str__(self):
588 base_str = super().__str__()
589 additional_fields = [f"timestamp_dts={self.timestamp_dts}",
590 f"trigger_sample_value={self.trigger_sample_value}",
591 f"baseline={self.baseline}",
592 f"threshold={self.threshold}",
593 f"adc_mean={self.adc_mean}",
594 f"adc_rms={self.adc_rms}",
595 f"adc_max={self.adc_max} (timestamp={self.timestamp_max_dts})",
596 f"adc_min={self.adc_min} (timestamp={self.timestamp_min_dts})",
597 f"adc_median={self.adc_median}"]
598 return f"{base_str}: [{', '.join(additional_fields)}]"
599
600@dataclass(order=True)
602
603 timestamp_dts: int
604 timestamps: np.ndarray
605 adcs: np.ndarray
606
607 def __str__(self):
608 base_str = super().__str__()
609 additional_fields = [f"timestamp_dts={self.timestamp_dts}",
610 f"timestamps={self.timestamps}",
611 f"adcs={self.adcs}"]
612 return f"{base_str}: [{', '.join(additional_fields)}]"
desparsify_array_diff_locs_and_vals(change_locations, change_values, arr_size)
desparsify_array_diff_of_diff_locs_and_vals(arr_first, change_locations, change_values, arr_size)
sparsify_array_diff_locs_and_vals(arr)
Sparsification and desparsification utilities for arrays.