LCOV - code coverage report
Current view: top level - dfmodules/plugins - HDF5DataStore.hpp (source / functions) Coverage Total Hit
Test: code.result Lines: 52.7 % 226 119
Test Date: 2025-12-21 13:07:08 Functions: 57.9 % 19 11

            Line data    Source code
       1              : /**
       2              :  * @file HDF5DataStore.hpp
       3              :  *
       4              :  * An implementation of the DataStore interface that uses the
       5              :  * highFive library to create objects in HDF5 Groups and datasets
       6              :  *
       7              :  * This is part of the DUNE DAQ Application Framework, copyright 2020.
       8              :  * Licensing/copyright details are in the COPYING file that you should have
       9              :  * received with this code.
      10              :  */
      11              : 
      12              : #ifndef DFMODULES_PLUGINS_HDF5DATASTORE_HPP_
      13              : #define DFMODULES_PLUGINS_HDF5DATASTORE_HPP_
      14              : 
      15              : #include "HDF5FileUtils.hpp"
      16              : #include "dfmodules/DataStore.hpp"
      17              : #include "dfmodules/opmon/DataStore.pb.h"
      18              : 
      19              : #include "hdf5libs/HDF5RawDataFile.hpp"
      20              : 
      21              : #include "appmodel/DataStoreConf.hpp"
      22              : #include "appmodel/FilenameParams.hpp"
      23              : #include "confmodel/DetectorConfig.hpp"
      24              : #include "confmodel/Session.hpp"
      25              : 
      26              : #include "appfwk/DAQModule.hpp"
      27              : #include "logging/Logging.hpp" // NOTE: if ISSUES ARE DECLARED BEFORE include logging/Logging.hpp, TLOG_DEBUG<<issue wont work.
      28              : 
      29              : #include "boost/date_time/posix_time/posix_time.hpp"
      30              : #include "boost/lexical_cast.hpp"
      31              : 
#include <atomic>
#include <cstdint>
#include <cstdlib>
#include <functional>
#include <iomanip>
#include <limits>
#include <memory>
#include <sstream>
#include <string>
#include <sys/statvfs.h>
#include <utility>
#include <vector>
      39              : 
      40              : namespace dunedaq {
      41              : 
// Disable coverage checking LCOV_EXCL_START
/**
 * @brief An ERS Issue reporting that the configured operation mode is not
 * one of the supported values.
 */
ERS_DECLARE_ISSUE_BASE(dfmodules,
                       InvalidOperationMode,
                       appfwk::GeneralDAQModuleIssue,
                       "Selected operation mode \"" << selected_operation
                                                    << "\" is NOT supported. Please update the configuration file.",
                       ((std::string)name),
                       ((std::string)selected_operation))

/**
 * @brief An ERS Issue reporting a problem opening or closing an HDF5 file.
 */
ERS_DECLARE_ISSUE_BASE(dfmodules,
                       FileOperationProblem,
                       appfwk::GeneralDAQModuleIssue,
                       "A problem was encountered when opening or closing file \"" << filename << "\"",
                       ((std::string)name),
                       ((std::string)filename))

/**
 * @brief An ERS Issue reporting that a named HDF5 Dataset in a file is invalid.
 */
ERS_DECLARE_ISSUE_BASE(dfmodules,
                       InvalidHDF5Dataset,
                       appfwk::GeneralDAQModuleIssue,
                       "The HDF5 Dataset associated with name \"" << data_set << "\" is invalid. (file = " << filename
                                                                  << ")",
                       ((std::string)name),
                       ((std::string)data_set)((std::string)filename))

/**
 * @brief An ERS Issue reporting that the configured output destination is not
 * a valid file system path on this host.
 */
ERS_DECLARE_ISSUE_BASE(dfmodules,
                       InvalidOutputPath,
                       appfwk::GeneralDAQModuleIssue,
                       "The specified output destination, \"" << output_path
                                                              << "\", is not a valid file system path on this server.",
                       ((std::string)name),
                       ((std::string)output_path))

/**
 * @brief An ERS Issue reporting that the disk holding the output path does not
 * have enough free space for the requested write.
 */
ERS_DECLARE_ISSUE_BASE(dfmodules,
                       InsufficientDiskSpace,
                       appfwk::GeneralDAQModuleIssue,
                       "There is insufficient free space on the disk associated with output file path \""
                         << path << "\". There are " << free_bytes << " bytes free, and the "
                         << "required minimum is " << needed_bytes << " bytes based on " << criteria << ".",
                       ((std::string)name),
                       ((std::string)path)((size_t)free_bytes)((size_t)needed_bytes)((std::string)criteria))

/**
 * @brief An ERS Issue reporting that a write request contained an empty list
 * of data blocks (the request is ignored).
 */
ERS_DECLARE_ISSUE_BASE(dfmodules,
                       EmptyDataBlockList,
                       appfwk::GeneralDAQModuleIssue,
                       "There was a request to write out a list of data blocks, but the list was empty. "
                         << "Ignoring this request",
                       ((std::string)name),
                       ERS_EMPTY)

// Re-enable coverage checking LCOV_EXCL_STOP
      95              : namespace dfmodules {
      96              : 
      97              : /**
      98              :  * @brief HDF5DataStore creates an HDF5 instance
      99              :  * of the DataStore class
     100              :  */
     101              : class HDF5DataStore : public DataStore
     102              : {
     103              : 
     104              : public:
     105              :   enum
     106              :   {
     107              :     TLVL_BASIC = 2,
     108              :     TLVL_FILE_SIZE = 5
     109              :   };
     110              : 
     111              :   /**
     112              :    * @brief HDF5DataStore Constructor
     113              :    * @param name, path, filename, operationMode
     114              :    *
     115              :    */
     116            5 :   explicit HDF5DataStore(std::string const& name,
     117              :                          std::shared_ptr<appfwk::ConfigurationManager> mcfg,
     118              :                          std::string const& writer_name)
     119            5 :     : DataStore(name)
     120            5 :     , m_basic_name_of_open_file("")
     121            5 :     , m_open_flags_of_open_file(0)
     122            5 :     , m_run_number(0)
     123           10 :     , m_writer_identifier(writer_name)
     124              :   {
     125            5 :     TLOG_DEBUG(TLVL_BASIC) << get_name();
     126              : 
     127            5 :     m_config_params = mcfg->get_dal<appmodel::DataStoreConf>(name);
     128            5 :     m_file_layout_params = m_config_params->get_file_layout_params();
     129            5 :     m_session = mcfg->get_session();
     130            5 :     m_operational_environment = mcfg->get_session()->get_detector_configuration()->get_op_env();
     131            5 :     m_offline_data_stream = mcfg->get_session()->get_detector_configuration()->get_offline_data_stream();
     132              : 
     133            5 :     m_operation_mode = m_config_params->get_mode();
     134            5 :     m_path = m_config_params->get_directory_path();
     135            5 :     m_max_file_size = m_config_params->get_max_file_size();
     136            5 :     m_disable_unique_suffix = m_config_params->get_disable_unique_filename_suffix();
     137            5 :     m_free_space_safety_factor_for_write = m_config_params->get_free_space_safety_factor();
     138            5 :     if (m_free_space_safety_factor_for_write < 1.1) {
     139            0 :       m_free_space_safety_factor_for_write = 1.1;
     140              :     }
     141            5 :     m_compression_level = m_config_params->get_compression_level();
     142              : 
     143            5 :     m_file_index = 0;
     144            5 :     m_recorded_size = 0;
     145            5 :     m_uncompressed_raw_data_size = 0;
     146            5 :     m_current_record_number = std::numeric_limits<size_t>::max();
     147              : 
     148            5 :     if (m_operation_mode != "one-event-per-file"
     149              :         //&& m_operation_mode != "one-fragment-per-file"
     150            5 :         && m_operation_mode != "all-per-file") {
     151              : 
     152            0 :       throw InvalidOperationMode(ERS_HERE, get_name(), m_operation_mode);
     153              :     }
     154              : 
     155              :     // 05-Apr-2022, KAB: added warning message when the output destination
     156              :     // is not a valid directory.
     157            5 :     struct statvfs vfs_results;
     158            5 :     int retval = statvfs(m_path.c_str(), &vfs_results);
     159            5 :     if (retval != 0) {
     160            0 :       ers::warning(InvalidOutputPath(ERS_HERE, get_name(), m_path));
     161              :     }
     162            5 :   }
     163              : 
  /**
   * @brief Writes a TriggerRecord into the HDF5 file, rolling over to a new
   * file when the configured maximum size or the operation mode requires it.
   * @param tr the TriggerRecord to be written
   * @throws RetryableDataStoreProblem if there is insufficient free disk space
   * @throws FileOperationProblem if the output file cannot be opened
   */
  virtual void write(const daqdataformats::TriggerRecord& tr)
  {

    // check if there is sufficient space for this record
    size_t current_free_space = get_free_space(m_path);
    size_t tr_size = tr.get_total_size_bytes();
    if (current_free_space < (m_free_space_safety_factor_for_write * tr_size)) {
      std::ostringstream msg_oss;
      msg_oss << "a safety factor of " << m_free_space_safety_factor_for_write << " times the trigger record size";
      InsufficientDiskSpace issue(ERS_HERE,
                                  get_name(),
                                  m_path,
                                  current_free_space,
                                  (m_free_space_safety_factor_for_write * tr_size),
                                  msg_oss.str());
      // The ternary guards against no file having been opened yet
      std::string msg =
        "writing a trigger record to file" + (m_file_handle ? " " + m_file_handle->get_file_name() : "");
      throw RetryableDataStoreProblem(ERS_HERE, get_name(), msg, issue);
    }

    // check if a new file should be opened for this record
    size_t size_of_next_write = tr_size;
    if (m_compression_level != 0 && m_recorded_size != 0) {
      // Without compression, the uncompressed raw data size is approximately the total file size, so it
      // serves as an approximation of what would have been written without compression
      float compression_factor = (float) m_file_handle->get_uncompressed_raw_data_size() / m_file_handle->get_total_file_size();
      size_of_next_write = tr_size / compression_factor;
    }
    if (! increment_file_index_if_needed(size_of_next_write)) {
      // In one-event-per-file mode, also roll the file index whenever the
      // trigger number changes, so each record lands in its own file.
      if (m_operation_mode == "one-event-per-file") {
        if (m_current_record_number != std::numeric_limits<size_t>::max() &&
            tr.get_header_ref().get_trigger_number() != m_current_record_number) {
          ++m_file_index;
        }
      }
    }
    m_current_record_number = tr.get_header_ref().get_trigger_number();

    // determine the filename from Storage Key + configuration parameters
    std::string full_filename = get_file_name(tr.get_header_ref().get_run_number());

    try {
      open_file_if_needed(full_filename, HighFive::File::OpenOrCreate);
    } catch (std::exception const& excpt) {
      throw FileOperationProblem(ERS_HERE, get_name(), full_filename, excpt);
    } catch (...) { // NOLINT(runtime/exceptions)
      // NOLINT here because we *ARE* re-throwing the exception!
      throw FileOperationProblem(ERS_HERE, get_name(), full_filename);
    }

    // write the record
    m_file_handle->write(tr);
    m_recorded_size = m_file_handle->get_recorded_size();
    m_uncompressed_raw_data_size = m_file_handle->get_uncompressed_raw_data_size();
    m_total_file_size = m_file_handle->get_total_file_size();

    // update the incremental opmon counters with the bytes added by this write
    m_new_bytes += m_total_file_size - m_previous_file_size;
    ++m_new_objects;
    m_previous_file_size.store(m_total_file_size.load());
  }
     231              : 
     232              :   /**
     233              :    * @brief HDF5DataStore write()
     234              :    * Method used to write constant data
     235              :    * into HDF5 format. Operational mode
     236              :    * defined in the configuration file.
     237              :    *
     238              :    */
     239            0 :   virtual void write(const daqdataformats::TimeSlice& ts)
     240              :   {
     241              : 
     242              :     // check if there is sufficient space for this record
     243            0 :     size_t current_free_space = get_free_space(m_path);
     244            0 :     size_t ts_size = ts.get_total_size_bytes();
     245            0 :     if (current_free_space < (m_free_space_safety_factor_for_write * ts_size)) {
     246            0 :       std::ostringstream msg_oss;
     247            0 :       msg_oss << "a safety factor of " << m_free_space_safety_factor_for_write << " times the time slice size";
     248            0 :       InsufficientDiskSpace issue(ERS_HERE,
     249            0 :                                   get_name(),
     250              :                                   m_path,
     251              :                                   current_free_space,
     252            0 :                                   (m_free_space_safety_factor_for_write * ts_size),
     253            0 :                                   msg_oss.str());
     254            0 :       std::string msg = "writing a time slice to file " + m_file_handle->get_file_name();
     255            0 :       throw RetryableDataStoreProblem(ERS_HERE, get_name(), msg, issue);
     256            0 :     }
     257              : 
     258              :     // check if a new file should be opened for this record
     259            0 :     size_t size_of_next_write = ts_size;
     260            0 :     if (m_compression_level != 0 && m_recorded_size != 0) {
     261              :       // Without compression, the uncompressed raw data size is approximately the total file size, so it
     262              :       // serves as an approximation of what would have been written without compression
     263            0 :       float compression_factor = (float) m_file_handle->get_uncompressed_raw_data_size() / m_file_handle->get_total_file_size();
     264            0 :       size_of_next_write = ts_size / compression_factor;
     265              :     }
     266            0 :     if (! increment_file_index_if_needed(size_of_next_write)) {
     267            0 :       if (m_operation_mode == "one-event-per-file") {
     268            0 :         if (m_current_record_number != std::numeric_limits<size_t>::max() &&
     269            0 :             ts.get_header().timeslice_number != m_current_record_number) {
     270            0 :           ++m_file_index;
     271              :         }
     272              :       }
     273              :     }
     274            0 :     m_current_record_number = ts.get_header().timeslice_number;
     275              : 
     276              :     // determine the filename from Storage Key + configuration parameters
     277            0 :     std::string full_filename = get_file_name(ts.get_header().run_number);
     278              : 
     279            0 :     try {
     280            0 :       open_file_if_needed(full_filename, HighFive::File::OpenOrCreate);
     281            0 :     } catch (std::exception const& excpt) {
     282            0 :       throw FileOperationProblem(ERS_HERE, get_name(), full_filename, excpt);
     283            0 :     } catch (...) { // NOLINT(runtime/exceptions)
     284              :       // NOLINT here because we *ARE* re-throwing the exception!
     285            0 :       throw FileOperationProblem(ERS_HERE, get_name(), full_filename);
     286            0 :     }
     287              : 
     288              :     // write the record
     289            0 :     try {
     290            0 :       m_file_handle->write(ts);
     291            0 :       m_recorded_size = m_file_handle->get_recorded_size();
     292            0 :       m_uncompressed_raw_data_size = m_file_handle->get_uncompressed_raw_data_size();
     293            0 :       m_total_file_size = m_file_handle->get_total_file_size();
     294            0 :     } catch (hdf5libs::TimeSliceAlreadyExists const& excpt) {
     295            0 :       std::string msg = "writing a time slice to file " + m_file_handle->get_file_name();
     296            0 :       throw IgnorableDataStoreProblem(ERS_HERE, get_name(), msg, excpt);
     297            0 :     }
     298              : 
     299            0 :     m_new_bytes += m_total_file_size - m_previous_file_size;
     300            0 :     ++m_new_objects;
     301            0 :     m_previous_file_size.store(m_total_file_size.load());
     302            0 :   }
     303              : 
     304              :   /**
     305              :    * @brief Informs the HDF5DataStore that writes or reads of records
     306              :    * associated with the specified run number will soon be requested.
     307              :    * This allows the DataStore to test that the output file path is valid
     308              :    * and any other checks that are useful in advance of the first data
     309              :    * blocks being written or read.
     310              :    *
     311              :    * This method may throw an exception if it finds a problem.
     312              :    */
     313            0 :   void prepare_for_run(daqdataformats::run_number_t run_number,
     314              :                        bool run_is_for_test_purposes)
     315              :   {
     316            0 :     m_run_number = run_number;
     317            0 :     m_run_is_for_test_purposes = run_is_for_test_purposes;
     318              : 
     319            0 :     struct statvfs vfs_results;
     320            0 :     TLOG_DEBUG(TLVL_BASIC) << get_name() << ": Preparing to get the statvfs results for path: \"" << m_path << "\"";
     321              : 
     322            0 :     int retval = statvfs(m_path.c_str(), &vfs_results);
     323            0 :     TLOG_DEBUG(TLVL_BASIC) << get_name() << ": statvfs return code is " << retval;
     324            0 :     if (retval != 0) {
     325            0 :       throw InvalidOutputPath(ERS_HERE, get_name(), m_path);
     326              :     }
     327              : 
     328            0 :     size_t free_space = vfs_results.f_bsize * vfs_results.f_bavail;
     329            0 :     TLOG_DEBUG(TLVL_BASIC) << get_name() << ": Free space on disk with path \"" << m_path << "\" is " << free_space
     330            0 :                            << " bytes. This will be compared with the maximum size of a single file ("
     331            0 :                            << m_max_file_size << ") as a simple test to see if there is enough free space.";
     332            0 :     if (free_space < m_max_file_size) {
     333            0 :       throw InsufficientDiskSpace(
     334            0 :         ERS_HERE, get_name(), m_path, free_space, m_max_file_size, "the configured maximum size of a single file");
     335              :     }
     336              : 
     337            0 :     m_file_index = 0;
     338            0 :     m_recorded_size = 0;
     339            0 :     m_uncompressed_raw_data_size = 0;
     340            0 :     m_current_record_number = std::numeric_limits<size_t>::max();
     341            0 :   }
     342              : 
     343              :   /**
     344              :    * @brief Informs the HD5DataStore that writes or reads of records
     345              :    * associated with the specified run number have finished, for now.
     346              :    * This allows the DataStore to close open files and do any other
     347              :    * cleanup or shutdown operations that are useful once the writes or
     348              :    * reads for a given run number have finished.
     349              :    */
     350            0 :   void finish_with_run(daqdataformats::run_number_t /*run_number*/)
     351              :   {
     352            0 :     if (m_file_handle.get() != nullptr) {
     353            0 :       std::string open_filename = m_file_handle->get_file_name();
     354            0 :       try {
     355            0 :         m_file_handle.reset();
     356            0 :         m_run_number = 0;
     357              :       } catch (std::exception const& excpt) {
     358              :         m_run_number = 0;
     359              :         throw FileOperationProblem(ERS_HERE, get_name(), open_filename, excpt);
     360              :       } catch (...) { // NOLINT(runtime/exceptions)
     361              :         m_run_number = 0;
     362              :         // NOLINT here because we *ARE* re-throwing the exception!
     363              :         throw FileOperationProblem(ERS_HERE, get_name(), open_filename);
     364              :       }
     365            0 :     }
     366            0 :   }
     367              : 
     368              : protected:
     369            0 :   void generate_opmon_data() override
     370              :   {
     371              : 
     372            0 :     opmon::HDF5DataStoreInfo info;
     373              : 
     374            0 :     info.set_new_bytes_output(m_new_bytes.exchange(0));
     375            0 :     info.set_new_written_object(m_new_objects.exchange(0));
     376            0 :     info.set_bytes_in_file(m_total_file_size.load());
     377            0 :     info.set_written_files(m_file_index.load());
     378            0 :     publish(std::move(info), { { "path", m_path } });
     379            0 :   }
     380              : 
     381              : private:
     382              :   HDF5DataStore(const HDF5DataStore&) = delete;
     383              :   HDF5DataStore& operator=(const HDF5DataStore&) = delete;
     384              :   HDF5DataStore(HDF5DataStore&&) = delete;
     385              :   HDF5DataStore& operator=(HDF5DataStore&&) = delete;
     386              : 
     387              :   std::unique_ptr<hdf5libs::HDF5RawDataFile> m_file_handle;
     388              :   const appmodel::HDF5FileLayoutParams* m_file_layout_params;
     389              :   std::string m_basic_name_of_open_file;
     390              :   unsigned m_open_flags_of_open_file;
     391              :   daqdataformats::run_number_t m_run_number;
     392              :   bool m_run_is_for_test_purposes;
     393              :   const confmodel::Session* m_session;
     394              :   std::string m_operational_environment;
     395              :   std::string m_offline_data_stream;
     396              :   std::string m_writer_identifier;
     397              : 
     398              :   // Total number of generated files
     399              :   std::atomic<size_t> m_file_index;
     400              : 
     401              :   // Size of data being written, excluding metadata
     402              :   std::atomic<size_t> m_recorded_size;
     403              : 
     404              :   // Theoretical, "uncompressed" size of data being written, excluding metadata
     405              :   std::atomic<size_t> m_uncompressed_raw_data_size;
     406              : 
     407              :   // Used for tracking the "delta" of the current write
     408              :   std::atomic<size_t> m_previous_file_size = 0;
     409              : 
     410              :   // Total size of the file, including raw data, metadata, and free space
     411              :   std::atomic<size_t> m_total_file_size;
     412              : 
     413              :   // Record number for the record that is currently being written out
     414              :   // This is only useful for long-readout windows, in which there may
     415              :   // be multiple calls to write()
     416              :   size_t m_current_record_number;
     417              : 
     418              :   // incremental written data
     419              :   std::atomic<uint64_t> m_new_bytes;
     420              :   std::atomic<uint64_t> m_new_objects;
     421              : 
     422              :   // Configuration
     423              :   const appmodel::DataStoreConf* m_config_params;
     424              :   std::string m_operation_mode;
     425              :   std::string m_path;
     426              :   size_t m_max_file_size;
     427              :   bool m_disable_unique_suffix;
     428              :   float m_free_space_safety_factor_for_write;
     429              :   unsigned m_compression_level;
     430              : 
     431              :   // std::unique_ptr<HDF5KeyTranslator> m_key_translator_ptr;
     432              : 
     433              :   /**
     434              :    * @brief Translates the specified input parameters into the appropriate filename.
     435              :    */
     436           35 :   std::string get_file_name(daqdataformats::run_number_t run_number)
     437              :   {
     438           35 :     std::ostringstream work_oss;
     439           35 :     work_oss << m_config_params->get_directory_path();
     440           35 :     if (work_oss.str().length() > 0) {
     441           35 :       work_oss << "/";
     442              :     }
     443           35 :     work_oss << m_operational_environment + "_" + m_config_params->get_filename_params()->get_file_type_prefix();
     444           35 :     if (work_oss.str().length() > 0) {
     445           35 :       work_oss << "_";
     446              :     }
     447              : 
     448           35 :     work_oss << m_config_params->get_filename_params()->get_run_number_prefix();
     449           35 :     work_oss << std::setw(m_config_params->get_filename_params()->get_digits_for_run_number()) << std::setfill('0')
     450           35 :              << run_number;
     451           35 :     work_oss << "_";
     452              : 
     453           35 :     work_oss << m_config_params->get_filename_params()->get_file_index_prefix();
     454           35 :     work_oss << std::setw(m_config_params->get_filename_params()->get_digits_for_file_index()) << std::setfill('0')
     455           35 :              << m_file_index;
     456              : 
     457           35 :     work_oss << "_" << m_writer_identifier;
     458           35 :     work_oss << ".hdf5";
     459           70 :     return work_oss.str();
     460           35 :   }
     461              : 
     462           35 :   bool increment_file_index_if_needed(size_t size_of_next_write)
     463              :   {
     464           35 :     if ((m_total_file_size + size_of_next_write) > m_max_file_size && m_recorded_size > 0) {
     465            6 :       ++m_file_index;
     466            6 :       m_recorded_size = 0;
     467            6 :       m_uncompressed_raw_data_size = 0;
     468            6 :       m_previous_file_size.store(0);
     469            6 :       return true;
     470              :     }
     471              :     return false;
     472              :   }
     473              : 
     474           35 :   void open_file_if_needed(const std::string& file_name, unsigned open_flags = HighFive::File::ReadOnly)
     475              :   {
     476              : 
     477           35 :     if (m_file_handle.get() == nullptr || m_basic_name_of_open_file.compare(file_name) ||
     478           20 :         m_open_flags_of_open_file != open_flags) {
     479              : 
     480              :       // 04-Feb-2021, KAB: adding unique substrings to the filename
     481           15 :       std::string unique_filename = file_name;
     482           15 :       time_t now = time(0);
     483           15 :       std::string file_creation_timestamp = boost::posix_time::to_iso_string(boost::posix_time::from_time_t(now));
     484           15 :       if (!m_disable_unique_suffix) {
     485              :         // timestamp substring
     486           15 :         size_t ufn_len = unique_filename.length();
     487           15 :         if (ufn_len > 6) { // len GT 6 gives us some confidence that we have at least x.hdf5
     488           15 :           std::string timestamp_substring = "_" + file_creation_timestamp;
     489           15 :           TLOG_DEBUG(TLVL_BASIC) << get_name() << ": timestamp substring for filename: " << timestamp_substring;
     490           15 :           unique_filename.insert(ufn_len - 5, timestamp_substring);
     491           15 :         }
     492              :       }
     493              : 
     494              :       // close an existing open file
     495           15 :       if (m_file_handle.get() != nullptr) {
     496           10 :         std::string open_filename = m_file_handle->get_file_name();
     497           10 :         try {
     498           10 :           m_file_handle.reset();
     499              :         } catch (std::exception const& excpt) {
     500              :           throw FileOperationProblem(ERS_HERE, get_name(), open_filename, excpt);
     501              :         } catch (...) { // NOLINT(runtime/exceptions)
     502              :           // NOLINT here because we *ARE* re-throwing the exception!
     503              :           throw FileOperationProblem(ERS_HERE, get_name(), open_filename);
     504              :         }
     505           10 :       }
     506              : 
     507              :       // opening file for the first time OR something changed in the name or the way of opening the file
     508           15 :       TLOG_DEBUG(TLVL_BASIC) << get_name() << ": going to open file " << unique_filename << " with open_flags "
     509           15 :                              << std::to_string(open_flags);
     510           15 :       m_basic_name_of_open_file = file_name;
     511           15 :       m_open_flags_of_open_file = open_flags;
     512           15 :       try {
     513           15 :         m_file_handle.reset(
     514              :           new hdf5libs::HDF5RawDataFile(unique_filename,
     515              :                                         m_run_number,
     516              :                                         m_file_index,
     517           15 :                                         m_writer_identifier,
     518              :                                         m_file_layout_params,
     519           45 :                                         hdf5libs::HDF5SourceIDHandler::make_source_id_geo_id_map(m_session),
     520              :                                         m_compression_level,
     521              :                                         ".writing",
     522           30 :                                         open_flags));
     523            0 :       } catch (std::exception const& excpt) {
     524            0 :         throw FileOperationProblem(ERS_HERE, get_name(), unique_filename, excpt);
     525            0 :       } catch (...) { // NOLINT(runtime/exceptions)
     526              :         // NOLINT here because we *ARE* re-throwing the exception!
     527            0 :         throw FileOperationProblem(ERS_HERE, get_name(), unique_filename);
     528            0 :       }
     529              : 
     530           15 :       if (open_flags == HighFive::File::ReadOnly) {
     531            0 :         TLOG_DEBUG(TLVL_BASIC) << get_name() << "Opened HDF5 file read-only.";
     532              :       } else {
     533           15 :         TLOG_DEBUG(TLVL_BASIC) << get_name() << "Created HDF5 file (" << unique_filename << ").";
     534              : 
     535              :         // write attributes that aren't being handled by the HDF5RawDataFile right now
     536              :         // m_file_handle->write_attribute("data_format_version",(int)m_key_translator_ptr->get_current_version());
     537           15 :         m_file_handle->write_attribute("operational_environment", (std::string)m_operational_environment);
     538           15 :         m_file_handle->write_attribute("offline_data_stream", (std::string)m_offline_data_stream);
     539           15 :         m_file_handle->write_attribute("run_was_for_test_purposes", (std::string)(m_run_is_for_test_purposes ? "true" : "false"));
     540              :       }
     541           15 :     } else {
     542           20 :       TLOG_DEBUG(TLVL_BASIC) << get_name() << ": Pointer file to  " << m_basic_name_of_open_file
     543           20 :                              << " was already opened with open_flags " << std::to_string(m_open_flags_of_open_file);
     544              :     }
     545           35 :   }
     546              : 
     547           35 :   size_t get_free_space(const std::string& the_path)
     548              :   {
     549           35 :     struct statvfs vfs_results;
     550           35 :     int retval = statvfs(the_path.c_str(), &vfs_results);
     551           35 :     if (retval != 0) {
     552              :       return 0;
     553              :     }
     554           35 :     return vfs_results.f_bsize * vfs_results.f_bavail;
     555              :   }
     556              : };
     557              : 
     558              : } // namespace dfmodules
     559              : } // namespace dunedaq
     560              : 
     561              : #endif // DFMODULES_PLUGINS_HDF5DATASTORE_HPP_
     562              : 
     563              : // Local Variables:
     564              : // c-basic-offset: 2
     565              : // End:
        

Generated by: LCOV version 2.0-1