DUNE-DAQ
DUNE Trigger and Data Acquisition software
Loading...
Searching...
No Matches
SNBRequestHandlerModel.hxx
Go to the documentation of this file.
1// Declarations for SNBRequestHandlerModel
2
3namespace dunedaq {
4namespace snbmodules {
5
6template<class RDT, class LBT>
7void
// Configure the request handler from the OKS configuration object:
// source/detector IDs, latency-buffer capacity, worker-thread count,
// request timeout, Fragment output connections, and (optionally) the
// data recorder / buffered file writer.
// NOTE(review): the signature line is missing from this rendering; per the
// member list it is SNBRequestHandlerModel<RDT, LBT>::conf(const dunedaq::appmodel::DataHandlerModule* conf).
9{
10
11 auto reqh_conf = conf->get_module_configuration()->get_request_handler();
12 m_sourceid.id = conf->get_source_id();
13 m_sourceid.subsystem = RDT::subsystem;
14 m_detid = conf->get_detector_id();
15
16 m_buffer_capacity = conf->get_module_configuration()->get_latency_buffer()->get_size();
17 m_num_request_handling_threads = reqh_conf->get_handler_threads();
18 m_request_timeout_ms = reqh_conf->get_request_timeout();
19
// Scan the module's output connections for the ones carrying Fragments.
20 for (auto output : conf->get_outputs()) {
21 if (output->get_data_type() == "Fragment") {
22 m_fragment_send_timeout_ms = output->get_send_timeout_ms();
23 // 19-Dec-2024, KAB: store the names/IDs of the Fragment output connections so that
24 // we can confirm that they are ready for sending at 'start' time.
25 m_frag_out_conn_ids.push_back(output->UID());
26 }
27 }
28
// Configure recording at most once, and only if a data recorder is present
// in the configuration.
29 if (m_recording_configured == false) {
30 auto dr = reqh_conf->get_data_recorder();
31 if (dr != nullptr) {
32 m_output_file = dr->get_output_file();
// remove() returns 0 on success, i.e. a stale file from a previous run existed.
33 if (remove(m_output_file.c_str()) == 0) {
34 TLOG_DEBUG(TLVL_WORK_STEPS) << "Removed existing output file from previous run: " << m_output_file << std::endl;
35 }
36 m_stream_buffer_size = dr->get_streaming_buffer_size();
37 m_buffered_writer.open(
38 m_output_file, m_stream_buffer_size, dr->get_compression_algorithm(), dr->get_use_o_direct());
39 m_recording_configured = true;
40 }
41 }
42
43 m_warn_on_timeout = reqh_conf->get_warn_on_timeout();
44 m_warn_about_empty_buffer = reqh_conf->get_warn_on_empty_buffer();
45 m_periodic_data_transmission_ms = reqh_conf->get_periodic_data_transmission_ms();
46
// Name the utility threads after their role and this handler's source id.
47 m_recording_thread.set_name("recording", m_sourceid.id);
48 m_cleanup_thread.set_name("cleanup", m_sourceid.id);
49 m_periodic_transmission_thread.set_name("periodic", m_sourceid.id);
50
51 std::ostringstream oss;
52 oss << "RequestHandler configured. ";
53 TLOG_DEBUG(TLVL_WORK_STEPS) << oss.str();
54}
55
56template<class RDT, class LBT>
57void
58SNBRequestHandlerModel<RDT, LBT>::scrap(const appfwk::DAQModule::CommandData_t& /*args*/)
59{
60 if (m_buffered_writer.is_open()) {
61 m_buffered_writer.close();
62 }
63}
64
65template<class RDT, class LBT>
66void
// Run-start: reset all operational-monitoring counters, pre-warm the Fragment
// sender connections, create the request-handling thread pool, and launch the
// cleanup / periodic-transmission / waiting-request worker threads.
67SNBRequestHandlerModel<RDT, LBT>::start(const appfwk::DAQModule::CommandData_t& /*args*/)
68{
69 // Reset opmon variables
70 m_num_requests_found = 0;
71 m_num_requests_bad = 0;
72 m_num_requests_old_window = 0;
73 m_num_requests_delayed = 0;
74 m_num_requests_uncategorized = 0;
75 m_num_buffer_cleanups = 0;
76 m_num_requests_timed_out = 0;
77 m_handled_requests = 0;
78 m_response_time_acc = 0;
79 m_pop_reqs = 0;
80 m_pops_count = 0;
81 m_payloads_written = 0;
82 m_bytes_written = 0;
83
// Reference point for the rate calculations in the opmon collector.
84 m_t0 = std::chrono::high_resolution_clock::now();
85
86 // 19-Dec-2024, KAB: check that Fragment senders are ready to send. This is done so
87 // that the IOManager infrastructure fetches the necessary connection details from
88 // the ConnectivityService at 'start' time, instead of the first time that the sender
89 // is used to send data. This avoids delays in the sending of the first fragment in
90 // the first data-taking run in a DAQ session. Such delays can lead to undesirable
91 // system behavior like trigger inhibits.
92 for (auto frag_out_conn : m_frag_out_conn_ids) {
// NOTE(review): the line declaring 'sender' (orig. line 93) is missing from this
// rendering; presumably it obtains the sender via get_iom_sender for
// frag_out_conn -- verify against the repository source.
94 if (sender != nullptr) {
95 bool is_ready = sender->is_ready_for_sending(std::chrono::milliseconds(100));
96 TLOG_DEBUG(0) << "The Fragment sender for " << frag_out_conn << " " << (is_ready ? "is" : "is not")
97 << " ready, my source_id is [" << m_sourceid << "]";
98 }
99 }
100
// Pool that services issue_request() posts.
101 m_request_handler_thread_pool = std::make_unique<boost::asio::thread_pool>(m_num_request_handling_threads);
102
// Flag read by all worker loops; must be set before the threads start.
103 m_run_marker.store(true);
104 m_cleanup_thread.set_work(&SNBRequestHandlerModel<RDT, LBT>::periodic_cleanups, this);
105 if (m_periodic_data_transmission_ms > 0) {
106 m_periodic_transmission_thread.set_work(&SNBRequestHandlerModel<RDT, LBT>::periodic_data_transmissions, this);
107 }
108
109 m_waiting_queue_thread = std::thread(&SNBRequestHandlerModel<RDT, LBT>::check_waiting_requests, this);
110}
111
112template<class RDT, class LBT>
113void
114SNBRequestHandlerModel<RDT, LBT>::stop(const appfwk::DAQModule::CommandData_t& /*args*/)
115{
116 m_run_marker.store(false);
117 while (!m_recording_thread.get_readiness()) {
118 std::this_thread::sleep_for(std::chrono::milliseconds(10));
119 }
120 while (!m_cleanup_thread.get_readiness()) {
121 std::this_thread::sleep_for(std::chrono::milliseconds(10));
122 }
123 while (!m_periodic_transmission_thread.get_readiness()) {
124 std::this_thread::sleep_for(std::chrono::milliseconds(10));
125 }
126 TLOG() << "Latency buffer occupancy at stop: " << m_latency_buffer->occupancy();
127 m_waiting_queue_thread.join();
128 m_request_handler_thread_pool->join();
129}
130
131template<class RDT, class LBT>
132void
133SNBRequestHandlerModel<RDT, LBT>::record(const appfwk::DAQModule::CommandData_t& /*args*/)
134{
135 ers::error(datahandlinglibs::CommandError(ERS_HERE, m_sourceid, "DLH is not configured for recording"));
136 return;
137}
138
139template<class RDT, class LBT>
140void
// Trigger a buffer cleanup if one is needed: only when there are entries in
// the pop list and no other cleanup is already in flight (the atomic
// exchange on m_cleanup_requested makes at most one caller proceed). The
// winner waits until all in-flight data requests have drained before popping.
// NOTE(review): the signature line (orig. 141) is missing here; per the member
// list this is SNBRequestHandlerModel<RDT, LBT>::cleanup_check().
142{
143 std::unique_lock<std::mutex> lock(m_cv_mutex);
144 if (!m_pop_list.empty() && !m_cleanup_requested.exchange(true)) {
// Wait for outstanding requests so we never pop data a request is reading.
145 m_cv.wait(lock, [&] { return m_requests_running == 0; });
146 cleanup();
147 m_cleanup_requested = false;
// Wake requests that were blocked on m_cleanup_requested in issue_request().
148 m_cv.notify_all();
149 }
150}
151
152template<class RDT, class LBT>
153void
// Service a data request asynchronously: post a job on the thread pool that
// extracts the requested window from the latency buffer, sends the resulting
// Fragment (or re-queues the request once if data has not arrived yet), and
// updates the response-time statistics.
// NOTE(review): the signature line (orig. 154) is missing here; per the member
// list this is issue_request(dfmessages::DataRequest datarequest, bool is_retry).
155{
156 boost::asio::post(*m_request_handler_thread_pool, [&, datarequest, is_retry]() { // start a thread from pool
157 auto t_req_begin = std::chrono::high_resolution_clock::now();
158 {
// Block while a cleanup is in progress, then register as a running request
// so cleanup_check() waits for us.
159 std::unique_lock<std::mutex> lock(m_cv_mutex);
160 m_cv.wait(lock, [&] { return !m_cleanup_requested; });
161 m_requests_running++;
162 }
163 m_cv.notify_all();
164 auto result = data_request(datarequest);
165 {
166 std::lock_guard<std::mutex> lock(m_cv_mutex);
167 m_requests_running--;
168 }
// Let a pending cleanup proceed once no requests are running.
169 m_cv.notify_all();
// Data not (fully) available yet: re-queue exactly once (is_retry guards
// against endless retries) when a request timeout is configured.
170 if ((result.result_code == ResultCode::kNotYet || result.result_code == ResultCode::kPartial) &&
171 m_request_timeout_ms > 0 && is_retry == false) {
172 TLOG_DEBUG(TLVL_WORK_STEPS) << "Re-queue request. "
173 << " with timestamp=" << result.data_request.trigger_timestamp;
174 std::lock_guard<std::mutex> wait_lock_guard(m_waiting_requests_lock);
175 m_waiting_requests.push_back(RequestElement(datarequest, std::chrono::high_resolution_clock::now()));
176 } else {
177 try { // Send to fragment connection
178 TLOG_DEBUG(TLVL_WORK_STEPS) << "Sending fragment with trigger/sequence_number "
179 << result.fragment->get_trigger_number() << "."
180 << result.fragment->get_sequence_number() << ", run number "
181 << result.fragment->get_run_number() << ", and DetectorID "
182 << result.fragment->get_detector_id() << ", and SourceID "
183 << result.fragment->get_element_id() << ", and size " << result.fragment->get_size()
184 << ", and result code " << result.result_code;
185 // Send fragment
// NOTE(review): the line obtaining the sender (orig. 186) is missing from this
// rendering; presumably get_iom_sender for datarequest.data_destination -- verify
// against the repository source.
187 ->send(std::move(result.fragment), std::chrono::milliseconds(m_fragment_send_timeout_ms));
188 cleanup_check();
189
190 } catch (const ers::Issue& excpt) {
191 ers::warning(datahandlinglibs::CannotWriteToQueue(ERS_HERE, m_sourceid, datarequest.data_destination, excpt));
192 }
193 }
194
// Accumulate response-time statistics for operational monitoring.
195 auto t_req_end = std::chrono::high_resolution_clock::now();
196 auto us_req_took = std::chrono::duration_cast<std::chrono::microseconds>(t_req_end - t_req_begin);
197 TLOG_DEBUG(TLVL_WORK_STEPS) << "Responding to data request took: " << us_req_took.count() << "[us]";
198 m_response_time_acc.fetch_add(us_req_took.count());
199 if (us_req_took.count() > m_response_time_max.load())
200 m_response_time_max.store(us_req_took.count());
201 if (us_req_took.count() < m_response_time_min.load())
202 m_response_time_min.store(us_req_took.count());
203 m_handled_requests++;
204 });
205}
206
207template<class RDT, class LBT>
208void
// Operational-monitoring collector: harvest-and-reset (exchange(0)) all request
// counters and response-time statistics into an info object, log housekeeping
// rates, and publish both the request-handler info and the recording info.
// NOTE(review): the signature (orig. 209) and the declarations of 'info'
// (orig. 211) and 'rinfo' (orig. 252) are missing from this rendering --
// verify the exact method name and info types against the repository source.
210{
212
213 info.set_num_requests_handled(m_handled_requests.exchange(0));
214 info.set_num_requests_found(m_num_requests_found.exchange(0));
215 info.set_num_requests_bad(m_num_requests_bad.exchange(0));
216 info.set_num_requests_old_window(m_num_requests_old_window.exchange(0));
217 info.set_num_requests_delayed(m_num_requests_delayed.exchange(0));
218 info.set_num_requests_uncategorized(m_num_requests_uncategorized.exchange(0));
219 info.set_num_requests_timed_out(m_num_requests_timed_out.exchange(0));
220 info.set_num_requests_waiting(m_waiting_requests.size());
221
222 int new_pop_reqs = 0;
223 int new_pop_count = 0;
224 int new_occupancy = 0;
// min is reset to the maximum representable value so the next interval's
// first sample always becomes the new minimum.
225 info.set_tot_request_response_time(m_response_time_acc.exchange(0));
226 info.set_max_request_response_time(m_response_time_max.exchange(0));
227 info.set_min_request_response_time(m_response_time_min.exchange(std::numeric_limits<int>::max()));
228 auto now = std::chrono::high_resolution_clock::now();
229 new_pop_reqs = m_pop_reqs.exchange(0);
230 new_pop_count = m_pops_count.exchange(0);
231 new_occupancy = m_occupancy;
// Seconds elapsed since the previous collection (m_t0), for rate computation.
232 double seconds = std::chrono::duration_cast<std::chrono::microseconds>(now - m_t0).count() / 1000000.;
233 TLOG_DEBUG(TLVL_HOUSEKEEPING) << "Cleanup request rate: " << new_pop_reqs / seconds / 1. << " [Hz]"
234 << " Dropped: " << new_pop_count << " Occupancy: " << new_occupancy;
235
236 if (info.num_requests_handled() > 0) {
237
238 info.set_avg_request_response_time(info.tot_request_response_time() / info.num_requests_handled());
239 TLOG_DEBUG(TLVL_HOUSEKEEPING) << "Completed requests: " << info.num_requests_handled()
240 << " | Avarage response time: " << info.avg_request_response_time() << "[us]"
241 << " | Periodic sends: " << info.num_periodic_sent();
242 }
243
244 m_t0 = now;
245
246 info.set_num_buffer_cleanups(m_num_buffer_cleanups.exchange(0));
247 info.set_num_periodic_sent(m_num_periodic_sent.exchange(0));
248 info.set_num_periodic_send_failed(m_num_periodic_send_failed.exchange(0));
249
250 this->publish(std::move(info));
251
// Recording status/counters are published as a separate info object.
253 rinfo.set_recording_status(m_recording ? "Y" : "N");
254 rinfo.set_packets_recorded(m_payloads_written.exchange(0));
255 rinfo.set_bytes_recorded(m_bytes_written.exchange(0));
256 this->publish(std::move(rinfo));
257}
258
259template<class RDT, class LBT>
260std::unique_ptr<daqdataformats::Fragment>
// Build a data-less Fragment for the given request, with the kDataNotFound
// error bit set, so the requester still receives a (empty) response.
// NOTE(review): the signature line (orig. 261) is missing here; per the member
// list this is create_empty_fragment(const dfmessages::DataRequest& dr).
262{
263 auto frag_header = create_fragment_header(dr);
264 frag_header.error_bits |= (0x1 << static_cast<size_t>(daqdataformats::FragmentErrorBits::kDataNotFound));
// Empty piece list -> Fragment with a header but no payload.
265 auto fragment = std::make_unique<daqdataformats::Fragment>(std::vector<std::pair<void*, size_t>>());
266 fragment->set_header_fields(frag_header);
267 return fragment;
268}
269
270template<class RDT, class LBT>
271void
// Worker loop for the cleanup thread: while the run marker is set, attempt a
// cleanup every 50 ms via cleanup_check().
// NOTE(review): the signature line (orig. 272) is missing here; start() binds
// this worker as SNBRequestHandlerModel<RDT, LBT>::periodic_cleanups.
273{
274 while (m_run_marker.load()) {
275 cleanup_check();
276 std::this_thread::sleep_for(std::chrono::milliseconds(50));
277 }
278}
279
280template<class RDT, class LBT>
281void
// Worker loop for the periodic-transmission thread: while the run marker is
// set, call periodic_data_transmission() every m_periodic_data_transmission_ms
// milliseconds. Only started if that interval is > 0 (see start()).
// NOTE(review): the signature line (orig. 282) is missing here; start() binds
// this worker as SNBRequestHandlerModel<RDT, LBT>::periodic_data_transmissions.
283{
284 while (m_run_marker.load()) {
285 periodic_data_transmission();
286 std::this_thread::sleep_for(std::chrono::milliseconds(m_periodic_data_transmission_ms));
287 }
288}
289
290template<class RDT, class LBT>
291void
295
296template<class RDT, class LBT>
297void
// Pop already-served elements from the front of the latency buffer: as long as
// the front element's timestamp is in m_pop_list (and at least one element
// would remain), pop it and drop it from the list. Then refresh the occupancy
// and oldest-timestamp monitorables and prune the error registry.
// NOTE(review): the signature line (orig. 298) is missing here; per the member
// list this is SNBRequestHandlerModel<RDT, LBT>::cleanup().
299{
300 ++m_pop_reqs;
301 unsigned popped = 0;
302 {
303 std::lock_guard<std::mutex> lk(m_pop_list_mutex);
304 // TLOG_DEBUG(TLVL_HOUSEKEEPING) << "Cleanup requested, there are " << m_pop_list.size()
305 // << " entries in the cleanup list, first TS=" << *m_pop_list.begin()
306 // << ", buffer TS=" << m_latency_buffer->front()->get_timestamp();
// Stop either when the pop list is exhausted, when only one element would be
// left in the buffer, or at the first front element not marked for popping.
307 while (!m_pop_list.empty() && m_latency_buffer->occupancy() > 1) {
308 auto ts = m_latency_buffer->front()->get_timestamp();
309 if (m_pop_list.count(ts)) {
310 m_latency_buffer->pop(1);
311 popped++;
312 m_pop_list.erase(ts);
313 } else {
314 break;
315 }
316 }
317 }
318 m_occupancy = m_latency_buffer->occupancy();
319 m_pops_count += popped;
// NOTE(review): front() is dereferenced unconditionally below -- this assumes
// the buffer is never fully drained here (consistent with the occupancy > 1
// guard above); confirm front() cannot return nullptr at this point.
320 m_error_registry->remove_errors_until(m_latency_buffer->front()->get_timestamp());
321
322 // Update the oldest timestamp monitorable
323 m_oldest_timestamp = m_latency_buffer->front()->get_timestamp();
324 m_num_buffer_cleanups++;
325}
326
327template<class RDT, class LBT>
328void
// Worker loop that re-examines requests that were parked because their data
// had not arrived yet. A parked request is re-issued (as a retry) either when
// the buffer's newest timestamp has passed the request window's end, or when
// the request has waited longer than m_request_timeout_ms (counted as timed
// out / bad). Runs until the run marker is cleared.
// NOTE(review): the signature line (orig. 329) is missing here; start() runs
// this as SNBRequestHandlerModel<RDT, LBT>::check_waiting_requests.
330{
331 // At run stop, we wait until all waiting requests have either:
332 //
333 // 1. been serviced because an item past the end of the window arrived in the buffer
334 // 2. timed out by going past m_request_timeout_ms, and returned a partial fragment
335 while (m_run_marker.load()) {
336 if (m_waiting_requests.size() > 0) {
337
338 std::lock_guard<std::mutex> lock_guard(m_waiting_requests_lock);
339
// Newest timestamp currently stored; min() if the buffer is empty so that no
// window can appear satisfied.
340 auto last_frame = m_latency_buffer->back(); // NOLINT
341 uint64_t newest_ts = last_frame == nullptr ? std::numeric_limits<uint64_t>::min() // NOLINT(build/unsigned)
342 : last_frame->get_timestamp();
343
344 for (auto iter = m_waiting_requests.begin(); iter != m_waiting_requests.end();) {
345 if ((*iter).request.request_information.window_end <= newest_ts) {
// Data has arrived: re-issue as a retry and drop from the waiting list.
346 issue_request((*iter).request, true);
347 iter = m_waiting_requests.erase(iter);
348 } else if (std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::high_resolution_clock::now() -
349 (*iter).start_time)
350 .count() >= m_request_timeout_ms) {
// Timed out: re-issue once more (yields a partial/empty fragment) and warn.
351 issue_request((*iter).request, true);
352 if (m_warn_on_timeout) {
// NOTE(review): the line opening this call (orig. 353, presumably
// "ers::warning(") is missing from this rendering -- verify against the
// repository source.
354 dunedaq::datahandlinglibs::VerboseRequestTimedOut(ERS_HERE,
355 m_sourceid,
356 (*iter).request.trigger_number,
357 (*iter).request.sequence_number,
358 (*iter).request.run_number,
359 (*iter).request.request_information.window_begin,
360 (*iter).request.request_information.window_end,
361 (*iter).request.data_destination));
362 }
363 m_num_requests_bad++;
364 m_num_requests_timed_out++;
365 iter = m_waiting_requests.erase(iter);
366 } else {
367 ++iter;
368 }
369 }
370 }
371 std::this_thread::sleep_for(std::chrono::milliseconds(1));
372 }
373}
374
375template<class RDT, class LBT>
376std::vector<std::pair<void*, size_t>>
377SNBRequestHandlerModel<RDT, LBT>::get_fragment_pieces(uint64_t start_win_ts, uint64_t end_win_ts, RequestResult& rres)
378{
379
380 TLOG_DEBUG(TLVL_WORK_STEPS) << "Looking for frags between " << start_win_ts << " and " << end_win_ts;
381
382 std::vector<std::pair<void*, size_t>> frag_pieces;
383 // Data availability is calculated here
384 auto front_element = m_latency_buffer->front(); // NOLINT
385 auto last_element = m_latency_buffer->back(); // NOLINT
386 uint64_t last_ts = front_element->get_timestamp(); // NOLINT(build/unsigned)
387 uint64_t newest_ts = last_element->get_timestamp(); // NOLINT(build/unsigned)
388
389 if (start_win_ts > newest_ts || newest_ts < end_win_ts) {
390 // No element is as small as the start window-> request is far in the future
391 rres.result_code = ResultCode::kNotYet; // give it another chance
392 } else if (end_win_ts <= last_ts) {
393 rres.result_code = ResultCode::kTooOld;
394 } else {
395 RDT request_element = RDT();
396 auto start_timestamp = start_win_ts;
397 request_element.set_timestamp(start_timestamp);
398
399 auto start_iter = m_latency_buffer->begin();
400
401 if (!start_iter.good()) {
402 // Accessor problem
403 rres.result_code = ResultCode::kNotFound;
404 } else {
405 TLOG_DEBUG(TLVL_WORK_STEPS) << "Lower bound found " << start_iter->get_timestamp()
406 << ", --> distance from window: "
407 << int64_t(start_win_ts) - int64_t(start_iter->get_timestamp());
408
409 rres.result_code = ResultCode::kFound;
410
411 auto elements_handled = 0;
412
413 RDT* element = &(*start_iter);
414
415 while (start_iter.good() && element->get_timestamp() <= end_win_ts) {
416 std::lock_guard<std::mutex> lk(m_pop_list_mutex);
417 if (m_pop_list.count(element->get_timestamp())) {
418 TLOG_DEBUG(50) << "skip processing for current element " << element->get_timestamp()
419 << ", already included in trigger.";
420 } else if (element->get_timestamp() < start_win_ts) {
421 TLOG_DEBUG(51) << "skip processing for current element " << element->get_timestamp()
422 << ", out of readout window.";
423 }
424
425 else {
426 // TLOG_DEBUG(52) << "Add element " << element->get_timestamp();
427 // SNB mode, the whole aggregated object (e.g.: superchunk) can be copied
428 frag_pieces.emplace_back(
429 std::make_pair<void*, size_t>(static_cast<void*>((*start_iter).begin()), element->get_payload_size()));
430
431 m_pop_list.insert(element->get_timestamp());
432 }
433
434 elements_handled++;
435 ++start_iter;
436 element = &(*start_iter);
437 }
438 }
439 }
440 TLOG_DEBUG(TLVL_WORK_STEPS) << "*** Number of frames retrieved: " << frag_pieces.size();
441 return frag_pieces;
442}
443
444template<class RDT, class LBT>
// Service one DataRequest synchronously: build a FragmentHeader, gather the
// payload pieces for the requested window via get_fragment_pieces(), map the
// result code onto the appropriate Fragment error bits and opmon counters,
// and return a RequestResult carrying the assembled Fragment.
// NOTE(review): the return type and signature (orig. 445-446) are missing from
// this rendering; per the member list this is
// RequestResult data_request(dfmessages::DataRequest dr).
447{
448 // Prepare response
449 RequestResult rres(ResultCode::kUnknown, dr);
450
451 // Prepare FragmentHeader and empty Fragment pieces list
452 auto frag_header = create_fragment_header(dr);
453 std::vector<std::pair<void*, size_t>> frag_pieces;
454 std::ostringstream oss;
455
456 // bool local_data_not_found_flag = false;
// Empty buffer: answer immediately with an empty, kDataNotFound-flagged fragment.
457 if (m_latency_buffer->occupancy() == 0) {
458 if (m_warn_about_empty_buffer) {
459 ers::warning(datahandlinglibs::RequestOnEmptyBuffer(ERS_HERE, m_sourceid, "Data not found"));
460 }
461 frag_header.error_bits |= (0x1 << static_cast<size_t>(daqdataformats::FragmentErrorBits::kDataNotFound));
462 rres.result_code = ResultCode::kNotFound;
463 ++m_num_requests_bad;
464 } else {
465 frag_pieces = get_fragment_pieces(dr.request_information.window_begin, dr.request_information.window_end, rres);
466
467 auto front_element = m_latency_buffer->front(); // NOLINT
468 auto last_element = m_latency_buffer->back(); // NOLINT
469 uint64_t last_ts = front_element->get_timestamp(); // NOLINT(build/unsigned)
470 uint64_t newest_ts = last_element->get_timestamp(); // NOLINT(build/unsigned)
471 TLOG_DEBUG(TLVL_WORK_STEPS) << "Data request for trig/seq_num=" << dr.trigger_number << "." << dr.sequence_number
472 << " and SourceID[" << m_sourceid << "] with" << " Trigger TS=" << dr.trigger_timestamp
473 << " Start of window TS=" << dr.request_information.window_begin
474 << " End of window TS=" << dr.request_information.window_end;
475 TLOG_DEBUG(TLVL_WORK_STEPS) << " Oldest stored TS=" << last_ts << " Newest stored TS=" << newest_ts
476 << " Latency buffer occupancy=" << m_latency_buffer->occupancy();
477 TLOG_DEBUG(TLVL_WORK_STEPS) << " frag_pieces result_code=" << rres.result_code
478 << " number of frag_pieces=" << frag_pieces.size();
479
// Map the lookup outcome onto Fragment error bits and opmon counters.
480 switch (rres.result_code) {
481 case ResultCode::kTooOld:
482 // return empty frag
483 ++m_num_requests_old_window;
484 ++m_num_requests_bad;
485 frag_header.error_bits |= (0x1 << static_cast<size_t>(daqdataformats::FragmentErrorBits::kDataNotFound));
486 break;
487 case ResultCode::kPartiallyOld:
488 ++m_num_requests_old_window;
489 ++m_num_requests_found;
490 frag_header.error_bits |= (0x1 << static_cast<size_t>(daqdataformats::FragmentErrorBits::kIncomplete));
491 frag_header.error_bits |= (0x1 << static_cast<size_t>(daqdataformats::FragmentErrorBits::kDataNotFound));
492 break;
493 case ResultCode::kFound:
494 ++m_num_requests_found;
495 break;
496 case ResultCode::kPartial:
497 frag_header.error_bits |= (0x1 << static_cast<size_t>(daqdataformats::FragmentErrorBits::kIncomplete));
498 ++m_num_requests_delayed;
499 break;
500 case ResultCode::kNotYet:
501 frag_header.error_bits |= (0x1 << static_cast<size_t>(daqdataformats::FragmentErrorBits::kDataNotFound));
502 ++m_num_requests_delayed;
503 break;
504 default:
505 // Unknown result of data search
506 ++m_num_requests_bad;
507 frag_header.error_bits |= (0x1 << static_cast<size_t>(daqdataformats::FragmentErrorBits::kDataNotFound));
508 }
509 }
510 // Create fragment from pieces
511 rres.fragment = std::make_unique<daqdataformats::Fragment>(frag_pieces);
512
513 // Set header
514 rres.fragment->set_header_fields(frag_header);
515
516 return rres;
517}
518
519} // namespace snbmodules
520} // namespace dunedaq
#define ERS_HERE
const dunedaq::appmodel::LatencyBuffer * get_latency_buffer() const
Get "latency_buffer" relationship value.
const dunedaq::appmodel::RequestHandler * get_request_handler() const
Get "request_handler" relationship value.
uint32_t get_detector_id() const
Get "detector_id" attribute value.
const dunedaq::appmodel::DataHandlerConf * get_module_configuration() const
Get "module_configuration" relationship value.
uint32_t get_source_id() const
Get "source_id" attribute value.
uint32_t get_size() const
Get "size" attribute value.
const std::vector< const dunedaq::confmodel::Connection * > & get_outputs() const
Get "outputs" relationship value. Output connections from this module.
void set_recording_status(Arg_ &&arg, Args_... args)
void start(const appfwk::DAQModule::CommandData_t &)
typename dunedaq::datahandlinglibs::RequestHandlerConcept< ReadoutType, LatencyBufferType >::RequestResult RequestResult
RequestResult data_request(dfmessages::DataRequest dr) override
void conf(const dunedaq::appmodel::DataHandlerModule *)
std::unique_ptr< daqdataformats::Fragment > create_empty_fragment(const dfmessages::DataRequest &dr)
void issue_request(dfmessages::DataRequest datarequest, bool is_retry=false) override
Issue a data request to the request handler.
void scrap(const appfwk::DAQModule::CommandData_t &) override
void cleanup_check() override
Check if cleanup is necessary and execute it if necessary.
void stop(const appfwk::DAQModule::CommandData_t &)
std::vector< std::pair< void *, size_t > > get_fragment_pieces(uint64_t start_win_ts, uint64_t end_win_ts, RequestResult &rres)
void record(const appfwk::DAQModule::CommandData_t &args) override
virtual void periodic_data_transmission() override
Periodic data transmission - relevant for trigger in particular.
Base class for any user define issue.
Definition Issue.hpp:69
static int64_t now()
#define TLOG_DEBUG(lvl,...)
Definition Logging.hpp:112
#define TLOG(...)
Definition macro.hpp:22
@ kIncomplete
Only part of the requested data is present in the fragment.
@ kDataNotFound
The requested data was not found at all, so the fragment is empty.
Including Qt Headers.
static std::shared_ptr< iomanager::SenderConcept< Datatype > > get_iom_sender(iomanager::ConnectionId const &id)
void warning(const Issue &issue)
Definition ers.hpp:115
void error(const Issue &issue)
Definition ers.hpp:81
timestamp_t window_end
End of the data collection window.
timestamp_t window_begin
Start of the data collection window.
This message represents a request for data sent to a single component of the DAQ.
sequence_number_t sequence_number
Sequence Number of the request.
trigger_number_t trigger_number
Trigger number the request corresponds to.
timestamp_t trigger_timestamp
Timestamp of trigger.