/**
 * @file tapipe.cxx
 *
 * Standalone test pipeline: reads TriggerPrimitives from a TPStream HDF5 file
 * and runs a TriggerActivity maker over them.
 *
 * This is part of the DUNE DAQ Application Framework, copyright 2020.
 * Licensing/copyright details are in the COPYING file that you should have
 * received with this code.
 */

#include "CLI/App.hpp"
#include "CLI/Config.hpp"
#include "CLI/Formatter.hpp"

#include <fmt/core.h>
#include <fmt/format.h>
#include <fmt/ranges.h>

#include "hdf5libs/HDF5RawDataFile.hpp"
#include "trgdataformats/TriggerPrimitive.hpp"
#include "triggeralgs/HorizontalMuon/TAMakerHorizontalMuonAlgorithm.hpp"
#include "triggeralgs/TriggerObjectOverlay.hpp"

#include <nlohmann/json.hpp>

#include <cstdint>
#include <iostream>
#include <map>
#include <memory>
#include <set>
#include <string>
#include <vector>

using namespace dunedaq;

// Minimal RAII wrapper around an HDF5RawDataFile (not yet used by main() below).
class HDF5FileReader
{
private:
  std::unique_ptr<hdf5libs::HDF5RawDataFile> m_file;

public:
  HDF5FileReader(const std::string& path);
  ~HDF5FileReader();
};

HDF5FileReader::HDF5FileReader(const std::string& path)
{
  m_file = std::make_unique<hdf5libs::HDF5RawDataFile>(path);
}

HDF5FileReader::~HDF5FileReader()
{
}
//-----

// fmt formatter for daqdataformats::SourceID. No format specifiers are supported,
// so parse() just accepts the empty "{}" spec.
template <> struct fmt::formatter<dunedaq::daqdataformats::SourceID> {

  constexpr auto parse(format_parse_context& ctx) -> format_parse_context::iterator {
    return ctx.begin();
  }

  // Formats a SourceID as "(subsystem, id)".
  auto format(const dunedaq::daqdataformats::SourceID& sid, format_context& ctx) const -> format_context::iterator {
    return fmt::format_to(ctx.out(), "({}, {})", dunedaq::daqdataformats::SourceID::subsystem_to_string(sid.subsystem), sid.id);
  }
};

// fmt formatter for HDF5RawDataFile record ids ((record number, sequence number) pairs).
template <> struct fmt::formatter<hdf5libs::HDF5RawDataFile::record_id_t> {

  // No format specifiers are supported.
  constexpr auto parse(format_parse_context& ctx) -> format_parse_context::iterator {
    return ctx.begin();
  }

  // Formats a record id as "(record number, sequence number)".
  auto format(const hdf5libs::HDF5RawDataFile::record_id_t& rid, format_context& ctx) const -> format_context::iterator {
    return fmt::format_to(ctx.out(), "({}, {})", rid.first, rid.second);
  }
};
//-----
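
// With these formatter specializations in place, SourceIDs, record ids, and
// containers of either can be passed straight to fmt::print / fmt::join.
// A small illustration (not part of the pipeline below):
//
//   daqdataformats::SourceID example_sid{daqdataformats::SourceID::Subsystem::kTrigger, 0};
//   fmt::print("{}\n", example_sid);   // prints something like "(Trigger, 0)"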

int
main(int argc, char* argv[])
{
  std::string input_file;
  bool verbose = false;
  CLI::App app{"tapipe"};
  // argv = app.ensure_utf8(argv);

  app.add_option("-i", input_file, "Input TPStream file path")->required();
  app.add_flag("-v", verbose, "Verbose output");
  CLI11_PARSE(app, argc, argv);

  fmt::print("TPStream file: {}\n", input_file);

  // Pointer to the DUNE DAQ HDF5 file
  std::unique_ptr<hdf5libs::HDF5RawDataFile> tpstream_file;

  try {
    tpstream_file = std::make_unique<hdf5libs::HDF5RawDataFile>(input_file);
  } catch (const hdf5libs::FileOpenFailed& e) {
    std::cerr << "ERROR: failed to open " << input_file << '\n';
    std::cerr << e.what() << '\n';
    return -1;
  }

  fmt::print("{} opened\n", input_file);

  // Check the record type
  fmt::print("  File type: {}\n", tpstream_file->get_record_type());

  // Extract the list of timeslices in the file
  auto records = tpstream_file->get_all_record_ids();
  std::set<daqdataformats::SourceID> source_ids;
  // Find what Source IDs are available in the file (all records)
  for( const auto& rid : records ) {
    const auto& [id, slice] = rid;
    auto sids = tpstream_file->get_source_ids(rid);
    source_ids.merge(sids);
    if (verbose)
      fmt::print("TR {}:{} [{}]\n", id, slice, fmt::join(sids, ", "));
  }
  fmt::print("Source IDs [{}]\n", fmt::join(source_ids, ", "));

  // Map each source id to the records it appears in
  std::map<daqdataformats::SourceID, hdf5libs::HDF5RawDataFile::record_id_set> records_by_sid;
  for( const auto& sid: source_ids ) {
    for( const auto& rid : records ) {
      auto rec_sids = tpstream_file->get_source_ids(rid);
      if (rec_sids.find(sid) != rec_sids.end()) {
        records_by_sid[sid].insert(rid);
      }
    }
    if (verbose)
      fmt::print("Record IDs for {} : [{}]\n", sid, fmt::join(records_by_sid[sid], ", "));
  }

  // Print the number of timeslices in the file
  fmt::print("  Number of time slices in file: {}\n", records.size());

  // Get the list of records containing the TP writer source id.
  // The element id (kTrigger, 0) was found by inspecting the TPStream file by hand.
  daqdataformats::SourceID tp_writer_sid{daqdataformats::SourceID::Subsystem::kTrigger, 0};
  auto tp_records = records_by_sid[tp_writer_sid];
  if (tp_records.empty()) {
    std::cerr << "ERROR: no records found for the TP writer source id\n";
    return -1;
  }

  // Prepare the TP buffer
  std::vector<trgdataformats::TriggerPrimitive> tp_buffer;

  auto a_slice_id = *tp_records.begin();
  fmt::print("Processing TP time slice {}\n", a_slice_id);

  // auto tsl_hdr = tsl.get_header();
  auto tsl_hdr = tpstream_file->get_tsh_ptr(a_slice_id);

  // Print header info
  fmt::print("  Run number: {}\n", tsl_hdr->run_number);
  fmt::print("  TSL number: {}\n", tsl_hdr->timeslice_number);

  auto frag = tpstream_file->get_frag_ptr(a_slice_id, tp_writer_sid);

  fmt::print("  Fragment id: {} [{}]\n", frag->get_element_id().to_string(), daqdataformats::fragment_type_to_string(frag->get_fragment_type()));

  size_t n_tps = frag->get_data_size()/sizeof(trgdataformats::TriggerPrimitive);
  fmt::print("TP fragment size: {}\n", frag->get_data_size());
  fmt::print("Num TPs: {}\n", n_tps);

  // The TP fragment payload is a packed array of TriggerPrimitive structs
  trgdataformats::TriggerPrimitive* tp_array = static_cast<trgdataformats::TriggerPrimitive*>(frag->get_data());

  // Fill the TP buffer, checking that the TPs are time-ordered
  tp_buffer.reserve(tp_buffer.size()+n_tps);

  uint64_t last_ts = 0;
  for(size_t i(0); i<n_tps; ++i) {
    auto& tp = tp_array[i];
    if (tp.time_start <= last_ts) {
      fmt::print("ERROR: TP {} out of time order ({} <= {})\n", i, tp.time_start, last_ts);
    }
    last_ts = tp.time_start;
    tp_buffer.push_back(tp);
  }

  // Time span covered by the TPs, assuming 16 ns clock ticks (62.5 MHz)
  uint64_t d_ts = tp_array[n_tps-1].time_start - tp_array[0].time_start;
  fmt::print("TS gap: {} ticks = {} ms\n", d_ts, d_ts*16.0/1'000'000);

  // Finally create a TA maker
  // Waiting for A.Oranday's factory!
  triggeralgs::TAMakerHorizontalMuonAlgorithm hmta;

  // Create output buffer
  std::vector<triggeralgs::TriggerActivity> ta_buffer;

  // The algorithm should really be configured here; an empty JSON leaves it at its defaults.
  const nlohmann::json config = {};
  hmta.configure(config);

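  // A production run would pass the algorithm's parameters here instead of an empty
  // object, along these lines (purely illustrative keys, not the algorithm's actual
  // schema; see TAMakerHorizontalMuonAlgorithm for the real ones):
  //
  //   const nlohmann::json config = { {"adc_threshold", 1000}, {"window_length", 80000} };
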
  // Loop over TPs
  for( const auto& tp : tp_buffer ) {
    hmta(tp, ta_buffer);
  }

  // Count number of TAs generated
  fmt::print("ta_buffer.size() = {}\n", ta_buffer.size());
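
  // Optional per-TA summary in verbose mode. This assumes triggeralgs::TriggerActivity
  // exposes the TriggerActivityData fields time_start/time_end plus the vector of
  // contributing TPs ("inputs"); check the triggeralgs headers if this fails to build.
  if (verbose) {
    for ( const auto& ta : ta_buffer ) {
      fmt::print("  TA: time_start={} time_end={} n_inputs={}\n", ta.time_start, ta.time_end, ta.inputs.size());
    }
  }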

  // Compute the total size of the TA overlay payload
  size_t payload_size(0);
  for ( const auto& ta : ta_buffer ) {
    payload_size += triggeralgs::get_overlay_nbytes(ta);
  }

  // Report the payload size in bytes
  fmt::print("ta_buffer in bytes = {}\n", payload_size);

  // Serialise the TAs into a contiguous overlay buffer
  std::vector<char> payload(payload_size);

  size_t offset(0);
  for ( const auto& ta : ta_buffer ) {
    triggeralgs::write_overlay(ta, static_cast<void*>(payload.data()+offset));
    offset += triggeralgs::get_overlay_nbytes(ta);
  }

  // Wrap the serialised TAs in a Fragment
  daqdataformats::Fragment ta_frag(static_cast<void*>(payload.data()), payload.size());

  // How to save to file?
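  // One possible approach (untested sketch, not implemented here): move ta_frag into a
  // daqdataformats::TimeSlice together with a matching TimeSliceHeader, then write the
  // slice out through an hdf5libs::HDF5RawDataFile opened in write mode. The exact
  // writer constructor and write() overload should be checked in the hdf5libs headers.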
  return 0;
}
        
