diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs index b84d3191872d..f9bfb0c7c14a 100644 --- a/.git-blame-ignore-revs +++ b/.git-blame-ignore-revs @@ -11,5 +11,6 @@ # git blame --ignore-revs-file .git-blame-ignore-revs dccacdbddbc28f43a4ab9d1b63caf2fda71b1688 +80a5951cd11c70f24836b934d832ef7c265daa84 9485437150f800515afbf15ab6ef05108714a0eb a90b115e443e27eb187aba3cd794eee591079401 diff --git a/Framework/DataHandling/CMakeLists.txt b/Framework/DataHandling/CMakeLists.txt index 0ae4a55f6b70..6f664cee9a98 100644 --- a/Framework/DataHandling/CMakeLists.txt +++ b/Framework/DataHandling/CMakeLists.txt @@ -1,4 +1,5 @@ set(SRC_FILES + src/AlignAndFocusPowderSlim.cpp src/ApplyDiffCal.cpp src/BankPulseTimes.cpp src/CheckMantidVersion.cpp @@ -214,6 +215,7 @@ set(SRC_FILES ) set(INC_FILES + inc/MantidDataHandling/AlignAndFocusPowderSlim.h inc/MantidDataHandling/ApplyDiffCal.h inc/MantidDataHandling/BankPulseTimes.h inc/MantidDataHandling/BitStream.h @@ -433,6 +435,7 @@ set(INC_FILES ) set(TEST_FILES + AlignAndFocusPowderSlimTest.h ApplyDiffCalTest.h BankPulseTimesTest.h CheckMantidVersionTest.h @@ -455,8 +458,8 @@ set(TEST_FILES ExtractPolarizationEfficienciesTest.h FindDetectorsInShapeTest.h FindDetectorsParTest.h - GenerateGroupingPowderTest.h GenerateGroupingPowder2Test.h + GenerateGroupingPowderTest.h GroupDetectors2Test.h GroupDetectorsTest.h ISISDataArchiveTest.h diff --git a/Framework/DataHandling/inc/MantidDataHandling/AlignAndFocusPowderSlim.h b/Framework/DataHandling/inc/MantidDataHandling/AlignAndFocusPowderSlim.h new file mode 100644 index 000000000000..f20ba7e9a4ba --- /dev/null +++ b/Framework/DataHandling/inc/MantidDataHandling/AlignAndFocusPowderSlim.h @@ -0,0 +1,70 @@ +// Mantid Repository : https://github.com/mantidproject/mantid +// +// Copyright © 2024 ISIS Rutherford Appleton Laboratory UKRI, +// NScD Oak Ridge National Laboratory, European Spallation Source, +// Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS +// SPDX - License - Identifier: GPL - 3.0 + +#pragma once + +#include "MantidAPI/Algorithm.h" +#include "MantidAPI/MatrixWorkspace_fwd.h" +#include "MantidDataHandling/DllConfig.h" +#include "MantidGeometry/IDTypes.h" + +namespace NeXus { +class File; +} + +namespace Mantid { +namespace DataHandling { + +/** AlignAndFocusPowderSlim : TODO: DESCRIPTION + */ +class MANTID_DATAHANDLING_DLL AlignAndFocusPowderSlim : public API::Algorithm { +public: + const std::string name() const override; + int version() const override; + const std::string category() const override; + const std::string summary() const override; + const std::vector seeAlso() const override; + + class MANTID_DATAHANDLING_DLL BankCalibration { + public: + BankCalibration(const detid_t idmin, const detid_t idmax, const std::map &calibration_map); + const double &value(const detid_t detid) const; + const detid_t &idmin() const; + detid_t idmax() const; + + private: + std::vector m_calibration; + detid_t m_detid_offset; + }; + +private: + void init() override; + std::map validateInputs() override; + void exec() override; + + API::MatrixWorkspace_sptr createOutputWorkspace(const size_t numHist, const bool linearBins, const double x_delta); + API::MatrixWorkspace_sptr editInstrumentGeometry(API::MatrixWorkspace_sptr &wksp, const double l1, + const std::vector &polars, + const std::vector &specids, + const std::vector &l2s, + const std::vector &azimuthals); + void initCalibrationConstants(API::MatrixWorkspace_sptr &wksp, const std::vector &difc_focus); + void loadCalFile(const 
API::Workspace_sptr &inputWS, const std::string &filename, + const std::vector &difc_focus); + + std::map m_calibration; // detid: 1/difc + std::set m_masked; + bool is_time_filtered{false}; + size_t pulse_start_index{0}; + size_t pulse_stop_index{std::numeric_limits::max()}; + /// Index to load start at in the file + std::vector loadStart; + /// How much to load in the file + std::vector loadSize; +}; + +} // namespace DataHandling +} // namespace Mantid diff --git a/Framework/DataHandling/src/AlignAndFocusPowderSlim.cpp b/Framework/DataHandling/src/AlignAndFocusPowderSlim.cpp new file mode 100644 index 000000000000..dba809cdb6d5 --- /dev/null +++ b/Framework/DataHandling/src/AlignAndFocusPowderSlim.cpp @@ -0,0 +1,827 @@ +// Mantid Repository : https://github.com/mantidproject/mantid +// +// Copyright © 2024 ISIS Rutherford Appleton Laboratory UKRI, +// NScD Oak Ridge National Laboratory, European Spallation Source, +// Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS +// SPDX - License - Identifier: GPL - 3.0 + + +#include "MantidDataHandling/AlignAndFocusPowderSlim.h" +#include "MantidAPI/Axis.h" +#include "MantidAPI/FileProperty.h" +#include "MantidAPI/MatrixWorkspace.h" +#include "MantidAPI/Run.h" +#include "MantidAPI/Sample.h" +#include "MantidDataHandling/LoadEventNexus.h" +#include "MantidDataObjects/EventList.h" +#include "MantidDataObjects/MaskWorkspace.h" +#include "MantidDataObjects/TableWorkspace.h" +#include "MantidDataObjects/Workspace2D.h" +#include "MantidDataObjects/WorkspaceCreation.h" +#include "MantidGeometry/Instrument/DetectorInfo.h" +#include "MantidKernel/ArrayLengthValidator.h" +#include "MantidKernel/ArrayProperty.h" +#include "MantidKernel/BoundedValidator.h" +#include "MantidKernel/EnumeratedString.h" +#include "MantidKernel/EnumeratedStringProperty.h" +#include "MantidKernel/TimeSeriesProperty.h" +#include "MantidKernel/Timer.h" +#include "MantidKernel/Unit.h" +#include "MantidKernel/VectorHelper.h" +#include "MantidNexus/H5Util.h" +#include "tbb/parallel_for.h" +#include "tbb/parallel_reduce.h" + +#include +#include +#include + +namespace Mantid::DataHandling { +using Mantid::API::FileProperty; +using Mantid::API::ITableWorkspace_sptr; +using Mantid::API::MatrixWorkspace_sptr; +using Mantid::API::WorkspaceProperty; +using Mantid::DataObjects::MaskWorkspace_sptr; +using Mantid::DataObjects::Workspace2D; +using Mantid::Kernel::ArrayLengthValidator; +using Mantid::Kernel::ArrayProperty; +using Mantid::Kernel::Direction; +using Mantid::Kernel::EnumeratedStringProperty; +using Mantid::Kernel::TimeSeriesProperty; + +namespace { // anonymous namespace +namespace PropertyNames { +const std::string FILENAME("Filename"); +const std::string CAL_FILE("CalFileName"); +const std::string FILTER_TIMESTART("FilterByTimeStart"); +const std::string FILTER_TIMESTOP("FilterByTimeStop"); +const std::string X_MIN("XMin"); +const std::string X_MAX("XMax"); +const std::string X_DELTA("XDelta"); +const std::string BINMODE("BinningMode"); +const std::string OUTPUT_WKSP("OutputWorkspace"); +const std::string READ_BANKS_IN_THREAD("ReadBanksInThread"); +const std::string READ_SIZE_FROM_DISK("ReadSizeFromDisk"); +const std::string EVENTS_PER_THREAD("EventsPerThread"); +} // namespace PropertyNames + +namespace NxsFieldNames { +const std::string TIME_OF_FLIGHT("event_time_offset"); +const std::string DETID("event_id"); +const std::string INDEX_ID("event_index"); +} // namespace NxsFieldNames + +// this is used for unit conversion to correct units +const std::string 
MICROSEC("microseconds"); + +const std::vector binningModeNames{"Logarithmic", "Linear"}; +enum class BinningMode { LOGARITHMIC, LINEAR, enum_count }; +typedef Mantid::Kernel::EnumeratedString BINMODE; + +// TODO refactor this to use the actual grouping +double getFocussedPostion(const detid_t detid, const std::vector &difc_focus) { + // grouping taken from the IDF for VULCAN + if (detid < 0) { + throw std::runtime_error("detid < 0 is not supported"); + } else if (detid < 100000) { // bank1 0-99095 + return difc_focus[0]; + } else if (detid < 200000) { // bank2 100000-199095 + return difc_focus[1]; + } else if (detid < 300000) { // bank3 200000-289095 + return difc_focus[2]; + } else if (detid < 400000) { // bank4 300000-389095 + return difc_focus[3]; + } else if (detid < 500000) { // bank5 400000-440095 + return difc_focus[4]; + } else if (detid < 600000) { // bank6 500000-554095 + return difc_focus[5]; + } else { + throw std::runtime_error("detid > 600000 is not supported"); + } +} + +} // namespace + +// Register the algorithm into the AlgorithmFactory +DECLARE_ALGORITHM(AlignAndFocusPowderSlim) + +//---------------------------------------------------------------------------------------------- + +/// Algorithms name for identification. @see Algorithm::name +const std::string AlignAndFocusPowderSlim::name() const { return "AlignAndFocusPowderSlim"; } + +/// Algorithm's version for identification. @see Algorithm::version +int AlignAndFocusPowderSlim::version() const { return 1; } + +/// Algorithm's category for identification. @see Algorithm::category +const std::string AlignAndFocusPowderSlim::category() const { return "Workflow\\Diffraction"; } + +/// Algorithm's summary for use in the GUI and help. @see Algorithm::summary +const std::string AlignAndFocusPowderSlim::summary() const { + return "VULCAN ONLY Algorithm to focus powder diffraction data into a number of histograms according to a grouping " + "scheme defined in a CalFile."; +} + +const std::vector AlignAndFocusPowderSlim::seeAlso() const { return {"AlignAndFocusPowderFromFiles"}; } + +//---------------------------------------------------------------------------------------------- +namespace { // anonymous +std::vector calculate_difc_focused(const double l1, const std::vector &l2s, + const std::vector &polars) { + constexpr double deg2rad = M_PI / 180.; + + std::vector difc; + + std::transform(l2s.cbegin(), l2s.cend(), polars.cbegin(), std::back_inserter(difc), + [l1, deg2rad](const auto &l2, const auto &polar) { + return 1. 
/ Kernel::Units::tofToDSpacingFactor(l1, l2, deg2rad * polar, 0.); + }); + + return difc; +} + +class NexusLoader { +public: + NexusLoader(const bool is_time_filtered, const size_t pulse_start_index, const size_t pulse_stop_index) + : m_is_time_filtered(is_time_filtered), m_pulse_start_index(pulse_start_index), + m_pulse_stop_index(pulse_stop_index) {} + + static void loadPulseTimes(H5::Group &entry, std::unique_ptr> &data) { + // /entry/DASlogs/frequency/time + auto logs = entry.openGroup("DASlogs"); // type=NXcollection + auto frequency = logs.openGroup("frequency"); // type=NXlog" + + auto dataset = frequency.openDataSet("time"); + NeXus::H5Util::readArray1DCoerce(dataset, *data); // pass by reference + + // groups close themselves + } + + void loadTOF(H5::Group &event_group, std::unique_ptr> &data, + const std::pair &eventRange) { + // g_log.information(NxsFieldNames::TIME_OF_FLIGHT); + auto tof_SDS = event_group.openDataSet(NxsFieldNames::TIME_OF_FLIGHT); + + // H5Util resizes the vector + const size_t offset = eventRange.first; + const size_t slabsize = (eventRange.second == std::numeric_limits::max()) + ? std::numeric_limits::max() + : eventRange.second - eventRange.first; + NeXus::H5Util::readArray1DCoerce(tof_SDS, *data, slabsize, offset); + + // get the units + std::string tof_unit; + NeXus::H5Util::readStringAttribute(tof_SDS, "units", tof_unit); + + // Convert Tof to microseconds + if (tof_unit != MICROSEC) + Kernel::Units::timeConversionVector(*data, tof_unit, MICROSEC); + } + + void loadDetid(H5::Group &event_group, std::unique_ptr> &data, + const std::pair &eventRange) { + // g_log.information(NxsFieldNames::DETID); + auto detID_SDS = event_group.openDataSet(NxsFieldNames::DETID); + + // H5Util resizes the vector + const size_t offset = eventRange.first; + const size_t slabsize = (eventRange.second == std::numeric_limits::max()) + ? std::numeric_limits::max() + : eventRange.second - eventRange.first; + NeXus::H5Util::readArray1DCoerce(detID_SDS, *data, slabsize, offset); + } + +private: + void loadEventIndex(H5::Group &event_group, std::unique_ptr> &data) { + // g_log.information(NxsFieldNames::INDEX_ID); + auto index_SDS = event_group.openDataSet(NxsFieldNames::INDEX_ID); + NeXus::H5Util::readArray1DCoerce(index_SDS, *data); + } + +public: + std::pair getEventIndexRange(H5::Group &event_group) { + constexpr uint64_t START_DEFAULT = 0; + constexpr uint64_t STOP_DEFAULT = std::numeric_limits::max(); + + if (m_is_time_filtered) { + // TODO this should be made smarter to only read the necessary range + std::unique_ptr> event_index = std::make_unique>(); + this->loadEventIndex(event_group, event_index); + + uint64_t start_event = event_index->at(m_pulse_start_index); + uint64_t stop_event = STOP_DEFAULT; + if (m_pulse_stop_index != std::numeric_limits::max()) + stop_event = event_index->at(m_pulse_stop_index); + return {start_event, stop_event}; + } else { + return {START_DEFAULT, STOP_DEFAULT}; + } + } + +private: + const bool m_is_time_filtered; + const size_t m_pulse_start_index; + const size_t m_pulse_stop_index; +}; + +class Histogrammer { +public: + Histogrammer(const std::vector *binedges, const double width, const bool linear_bins) : m_binedges(binedges) { + m_xmin = binedges->front(); + m_xmax = binedges->back(); + + if (linear_bins) { + m_findBin = DataObjects::EventList::findLinearBin; + m_bin_divisor = 1. / width; + m_bin_offset = m_xmin * m_bin_divisor; + } else { + m_findBin = DataObjects::EventList::findLogBin; + m_bin_divisor = 1. 
/ log1p(abs(width)); // use this to do change of base + m_bin_offset = log(m_xmin) * m_bin_divisor; + } + } + + bool inRange(const double tof) const { return !(tof < m_xmin || tof >= m_xmax); } + + const std::optional findBin(const double tof) const { + return m_findBin(*m_binedges, tof, m_bin_divisor, m_bin_offset, true); + } + +private: + double m_bin_divisor; + double m_bin_offset; + double m_xmin; + double m_xmax; + const std::vector *m_binedges; + std::optional (*m_findBin)(const MantidVec &, const double, const double, const double, const bool); +}; + +template class MinMax { + const std::vector *vec; + +public: + Type minval; + Type maxval; + void operator()(const tbb::blocked_range &range) { + const auto [minele, maxele] = std::minmax_element(vec->cbegin() + range.begin(), vec->cbegin() + range.end()); + if (*minele < minval) + minval = *minele; + if (*maxele > maxval) + maxval = *maxele; + } + + MinMax(MinMax &other, tbb::split) + : vec(other.vec), minval(std::numeric_limits::max()), maxval(std::numeric_limits::min()) {} + + MinMax(const std::vector *vec) + : vec(vec), minval(std::numeric_limits::max()), maxval(std::numeric_limits::min()) {} + + void join(const MinMax &other) { + if (other.minval < minval) + minval = other.minval; + if (other.maxval > maxval) + maxval = other.maxval; + } +}; + +template std::pair parallel_minmax(const std::vector *vec, const size_t grainsize) { + if (vec->size() < grainsize) { + const auto [minval, maxval] = std::minmax_element(vec->cbegin(), vec->cend()); + return std::make_pair(*minval, *maxval); + } else { + MinMax finder(vec); + tbb::parallel_reduce(tbb::blocked_range(0, vec->size(), grainsize), finder); + return std::make_pair(finder.minval, finder.maxval); + } +} + +template class ProcessEventsTask { +public: + ProcessEventsTask(const Histogrammer *histogrammer, const std::vector *detids, + const std::vector *tofs, const AlignAndFocusPowderSlim::BankCalibration *calibration, + std::vector *y_temp, const std::set *masked) + : m_histogrammer(histogrammer), m_detids(detids), m_tofs(tofs), m_calibration(calibration), y_temp(y_temp), + masked(masked), no_mask(masked->empty()) {} + + void operator()(const tbb::blocked_range &range) const { + // both iterators need to be incremented in each loop + auto detid_ptr = m_detids->cbegin() + range.begin(); + auto tof_ptr = m_tofs->cbegin() + range.begin(); + + for (size_t i = range.begin(); i < range.end(); ++i) { + if (no_mask || (!masked->contains(*detid_ptr))) { + // focussed time-off-flight + const auto tof = static_cast(*tof_ptr) * m_calibration->value(*detid_ptr); + // increment the bin if it is found + if (m_histogrammer->inRange(tof)) { + if (const auto binnum = m_histogrammer->findBin(tof)) { + y_temp->at(binnum.value())++; + } + } + } + ++detid_ptr; + ++tof_ptr; + } + } + +private: + const Histogrammer *m_histogrammer; + const std::vector *m_detids; + const std::vector *m_tofs; + const AlignAndFocusPowderSlim::BankCalibration *m_calibration; + std::vector *y_temp; + const std::set *masked; + const bool no_mask; // whether there are any masked pixels +}; + +class ProcessBankTask { +public: + ProcessBankTask(std::vector &bankEntryNames, H5::H5File &h5file, const bool is_time_filtered, + const size_t pulse_start_index, const size_t pulse_stop_index, MatrixWorkspace_sptr &wksp, + const std::map &calibration, const std::set &masked, const double binWidth, + const bool linearBins, const size_t events_per_chunk, const size_t grainsize_event, + std::shared_ptr &progress) + : m_h5file(h5file), 
m_bankEntries(bankEntryNames), + m_loader(is_time_filtered, pulse_start_index, pulse_stop_index), m_wksp(wksp), m_calibration(calibration), + m_masked(masked), m_binWidth(binWidth), m_linearBins(linearBins), m_events_per_chunk(events_per_chunk), + m_grainsize_event(grainsize_event), m_progress(progress) { + if (false) { // H5Freopen_async(h5file.getId(), m_h5file.getId()) < 0) { + throw std::runtime_error("failed to reopen async"); + } + } + + void operator()(const tbb::blocked_range &range) const { + // re-use vectors to save malloc/free calls + std::unique_ptr> event_detid = std::make_unique>(); + std::unique_ptr> event_time_of_flight = std::make_unique>(); + + auto entry = m_h5file.openGroup("entry"); // type=NXentry + for (size_t wksp_index = range.begin(); wksp_index < range.end(); ++wksp_index) { + const auto &bankName = m_bankEntries[wksp_index]; + Kernel::Timer timer; + std::cout << bankName << " start" << std::endl; + + // open the bank + auto event_group = entry.openGroup(bankName); // type=NXevent_data + + // get filtering range + auto eventRangeFull = m_loader.getEventIndexRange(event_group); + // update range for data that is present + if (eventRangeFull.second == std::numeric_limits::max()) { + auto tof_SDS = event_group.openDataSet(NxsFieldNames::TIME_OF_FLIGHT); + const auto length_actual = static_cast(tof_SDS.getSpace().getSelectNpoints()); + eventRangeFull.second = length_actual; + } + + // create object so bank calibration can be re-used + std::unique_ptr calibration = nullptr; + + if (eventRangeFull.first == eventRangeFull.second) { + // g_log.warning() << "No data for bank " << entry_name << '\n'; + } else { + // TODO REMOVE debug print + std::cout << bankName << " has " << eventRangeFull.second << " events\n" + << " and should be read in " << (1 + (eventRangeFull.second / m_events_per_chunk)) << " chunks of " + << m_events_per_chunk << " (" << (m_events_per_chunk / 1024 / 1024) << "MB)" + << "\n"; + + // create a histogrammer to process the events + auto &spectrum = m_wksp->getSpectrum(wksp_index); + Histogrammer histogrammer(&spectrum.readX(), m_binWidth, m_linearBins); + + // std::atomic allows for multi-threaded accumulation and who cares about floats when you are just + // counting things + std::vector y_temp(spectrum.dataY().size()); + // std::vector y_temp(spectrum.dataY().size()); + + // read parts of the bank at a time + size_t event_index_start = eventRangeFull.first; + while (event_index_start < eventRangeFull.second) { + // H5Cpp will truncate correctly + const std::pair eventRangePartial{event_index_start, + event_index_start + m_events_per_chunk}; + + // load data and reuse chunk memory + event_detid->clear(); + event_time_of_flight->clear(); + m_loader.loadTOF(event_group, event_time_of_flight, eventRangePartial); + m_loader.loadDetid(event_group, event_detid, eventRangePartial); + + // process the events that were loaded + const auto [minval, maxval] = parallel_minmax(event_detid.get(), m_grainsize_event); + // only recreate if it doesn't already have the useful information + if ((!calibration) || (calibration->idmin() > static_cast(minval)) || + (calibration->idmax() < static_cast(maxval))) { + calibration = std::make_unique( + static_cast(minval), static_cast(maxval), m_calibration); + } + + const auto numEvent = event_time_of_flight->size(); + + // threaded processing of the events + ProcessEventsTask task(&histogrammer, event_detid.get(), event_time_of_flight.get(), calibration.get(), + &y_temp, &m_masked); + if (numEvent > m_grainsize_event) { + // 
use tbb + tbb::parallel_for(tbb::blocked_range(0, numEvent, m_grainsize_event), task); + } else { + // single thread + task(tbb::blocked_range(0, numEvent, m_grainsize_event)); + } + + event_index_start += m_events_per_chunk; + } + // copy the data out into the correct spectrum + auto &y_values = spectrum.dataY(); + std::copy(y_temp.cbegin(), y_temp.cend(), y_values.begin()); + + std::cout << bankName << " stop " << timer << std::endl; + } + m_progress->report(); + } + } + +private: + H5::H5File m_h5file; + const std::vector m_bankEntries; + mutable NexusLoader m_loader; + MatrixWorkspace_sptr m_wksp; + const std::map m_calibration; // detid: 1/difc + const std::set m_masked; + const double m_binWidth; + const bool m_linearBins; + /// number of events to read from disk at one time + const size_t m_events_per_chunk; + /// number of events to histogram in a single thread + const size_t m_grainsize_event; + std::shared_ptr m_progress; +}; + +} // anonymous namespace + +//---------------------------------------------------------------------------------------------- +/** Initialize the algorithm's properties. + */ +void AlignAndFocusPowderSlim::init() { + const std::vector exts{".nxs.h5", ".nxs", "_event.nxs"}; + // docs copied/modified from LoadEventNexus + declareProperty(std::make_unique(PropertyNames::FILENAME, "", FileProperty::Load, exts), + "The name of the Event NeXus file to read, including its full or relative path. " + "The file name is typically of the form INST_####_event.nxs."); + declareProperty( + std::make_unique>(PropertyNames::FILTER_TIMESTART, EMPTY_DBL(), + Direction::Input), + "To only include events after the provided start time, in seconds (relative to the start of the run)."); + + declareProperty( + std::make_unique>(PropertyNames::FILTER_TIMESTOP, EMPTY_DBL(), + Direction::Input), + "To only include events before the provided stop time, in seconds (relative to the start of the run)."); + const std::vector cal_exts{".h5", ".hd5", ".hdf", ".cal"}; + declareProperty(std::make_unique(PropertyNames::CAL_FILE, "", FileProperty::OptionalLoad, cal_exts), + "The .cal file containing the position correction factors. Either this or OffsetsWorkspace needs to " + "be specified."); + auto positiveDblValidator = std::make_shared>(); + positiveDblValidator->setLower(0); + declareProperty(std::make_unique>(PropertyNames::X_MIN, 10, positiveDblValidator, + Direction::Input), + "Minimum x-value for the output binning"); + declareProperty(std::make_unique>(PropertyNames::X_DELTA, 0.0016, + positiveDblValidator, Direction::Input), + "Bin size for output data"); + declareProperty(std::make_unique>(PropertyNames::X_MAX, 16667, positiveDblValidator, + Direction::Input), + "Minimum x-value for the output binning"); + declareProperty(std::make_unique>(PropertyNames::BINMODE), + "Specify binning behavior ('Logorithmic')"); + declareProperty( + std::make_unique>(PropertyNames::OUTPUT_WKSP, "", Direction::Output), + "An output workspace."); + + // parameters for chunking options - consider removing these later + const std::string CHUNKING_PARAM_GROUP("Chunking-temporary"); + auto positiveIntValidator = std::make_shared>(); + positiveIntValidator->setLower(1); + declareProperty( + std::make_unique>(PropertyNames::READ_BANKS_IN_THREAD, 1, positiveIntValidator), + "Number of banks to read in a single thread. 
Lower means more parallelization."); + setPropertyGroup(PropertyNames::READ_BANKS_IN_THREAD, CHUNKING_PARAM_GROUP); + declareProperty(std::make_unique>(PropertyNames::READ_SIZE_FROM_DISK, 2000 * 50000, + positiveIntValidator), + "Number of elements of time-of-flight or detector-id to read at a time. This is a maximum"); + setPropertyGroup(PropertyNames::READ_SIZE_FROM_DISK, CHUNKING_PARAM_GROUP); + declareProperty( + std::make_unique>(PropertyNames::EVENTS_PER_THREAD, 2000, positiveIntValidator), + "Number of events to read in a single thread. Higher means less threads are created."); + setPropertyGroup(PropertyNames::EVENTS_PER_THREAD, CHUNKING_PARAM_GROUP); +} + +std::map AlignAndFocusPowderSlim::validateInputs() { + std::map errors; + + // make sure that data is read in larger chunks than the events processed in a single thread + const int disk_chunk = getProperty(PropertyNames::READ_SIZE_FROM_DISK); + const int grainsize_events = getProperty(PropertyNames::EVENTS_PER_THREAD); + if (disk_chunk < grainsize_events) { + const std::string msg(PropertyNames::READ_SIZE_FROM_DISK + " must be larger than " + + PropertyNames::EVENTS_PER_THREAD); + errors[PropertyNames::READ_SIZE_FROM_DISK] = msg; + errors[PropertyNames::EVENTS_PER_THREAD] = msg; + } + + return errors; +} + +//---------------------------------------------------------------------------------------------- +/** Execute the algorithm. + */ +void AlignAndFocusPowderSlim::exec() { + // create a histogram workspace + constexpr size_t numHist{6}; // TODO make this determined from grouping + + // These give the limits in each file as to which events we actually load (when filtering by time). + loadStart.resize(1, 0); + loadSize.resize(1, 0); + + // set up the output workspace binning + this->progress(.0, "Create output workspace"); + const BINMODE binmode = getPropertyValue(PropertyNames::BINMODE); + const bool linearBins = bool(binmode == BinningMode::LINEAR); // this is needed later + const double x_delta = getProperty(PropertyNames::X_DELTA); // this is needed later + MatrixWorkspace_sptr wksp = createOutputWorkspace(numHist, linearBins, x_delta); + + const std::string filename = getPropertyValue(PropertyNames::FILENAME); + { // TODO TEMPORARY - this algorithm is hard coded for VULCAN + // it needs to be made more generic + if (filename.find("VULCAN") == std::string::npos) { + throw std::runtime_error("File does not appear to be for VULCAN"); + } + } + const Kernel::NexusDescriptor descriptor(filename); + + // instrument is needed for lots of things + const std::string ENTRY_TOP_LEVEL("entry"); + LoadEventNexus::loadInstrument(filename, wksp, ENTRY_TOP_LEVEL, this, &descriptor); + + // TODO parameters should be input information + const double l1{43.755}; + const std::vector polars{90, 90, 120, 150, 157, 65.5}; // two-theta + const std::vector azimuthals{180, 0, 0, 0, 0, 0}; // angle from positive x-axis + const std::vector l2s{2.296, 2.296, 2.070, 2.070, 2.070, 2.530}; + const std::vector specids; + const auto difc_focused = calculate_difc_focused(l1, l2s, polars); + + // create values for focusing time-of-flight + this->progress(.05, "Creating calibration constants"); + const std::string cal_filename = getPropertyValue(PropertyNames::CAL_FILE); + if (!cal_filename.empty()) { + this->loadCalFile(wksp, cal_filename, difc_focused); + } else { + this->initCalibrationConstants(wksp, difc_focused); + } + + /* TODO create grouping information + // create IndexInfo + // prog->doReport("Creating IndexInfo"); TODO add progress bar stuff + 
const std::vector range; + LoadEventNexusIndexSetup indexSetup(WS, EMPTY_INT(), EMPTY_INT(), range); + auto indexInfo = indexSetup.makeIndexInfo(); + const size_t numHist = indexInfo.size(); + */ + + // load the events + H5::H5File h5file(filename, H5F_ACC_RDONLY, NeXus::H5Util::defaultFileAcc()); + + // filter by time + double filter_time_start_sec = getProperty(PropertyNames::FILTER_TIMESTART); + double filter_time_stop_sec = getProperty(PropertyNames::FILTER_TIMESTOP); + if (filter_time_start_sec != EMPTY_DBL() || filter_time_stop_sec != EMPTY_DBL()) { + this->progress(.15, "Creating time filtering"); + is_time_filtered = true; + g_log.information() << "Filtering pulses from " << filter_time_start_sec << " to " << filter_time_stop_sec << "s\n"; + std::unique_ptr> pulse_times = std::make_unique>(); + auto entry = h5file.openGroup(ENTRY_TOP_LEVEL); + NexusLoader::loadPulseTimes(entry, pulse_times); + g_log.information() << "Pulse times from " << pulse_times->front() << " to " << pulse_times->back() + << " with length " << pulse_times->size() << '\n'; + if (!std::is_sorted(pulse_times->cbegin(), pulse_times->cend())) { + g_log.warning() << "Pulse times are not sorted, pulse time filtering will not be accurate\n"; + } + + if (filter_time_start_sec != EMPTY_DBL()) { + const double filter_time_start = pulse_times->front() + filter_time_start_sec; + const auto itStart = std::lower_bound(pulse_times->cbegin(), pulse_times->cend(), filter_time_start); + if (itStart == pulse_times->cend()) + throw std::invalid_argument("Invalid pulse time filtering, start time will filter all pulses"); + + pulse_start_index = std::distance(pulse_times->cbegin(), itStart); + } + + if (filter_time_stop_sec != EMPTY_DBL()) { + const double filter_time_stop = pulse_times->front() + filter_time_stop_sec; + const auto itStop = std::upper_bound(pulse_times->cbegin(), pulse_times->cend(), filter_time_stop); + if (itStop == pulse_times->cend()) + pulse_stop_index = std::numeric_limits::max(); + else + pulse_stop_index = std::distance(pulse_times->cbegin(), itStop); + } + + if (pulse_start_index >= pulse_stop_index) + throw std::invalid_argument("Invalid pulse time filtering"); + + g_log.information() << "Filtering pulses from " << pulse_start_index << " to " << pulse_stop_index << '\n'; + } + + // Now we want to go through all the bankN_event entries + const std::map> &allEntries = descriptor.getAllEntries(); + auto itClassEntries = allEntries.find("NXevent_data"); + + // temporary "map" for detid -> calibration constant + + if (itClassEntries != allEntries.end()) { + this->progress(.17, "Reading events"); + const std::set &classEntries = itClassEntries->second; + + // filter out the diagnostic entries + std::vector bankEntryNames; + { + const std::regex classRegex("(/entry/)([^/]*)"); + std::smatch groups; + + for (const std::string &classEntry : classEntries) { + if (std::regex_match(classEntry, groups, classRegex)) { + const std::string entry_name(groups[2].str()); + if (classEntry.ends_with("bank_error_events")) { + // do nothing + } else if (classEntry.ends_with("bank_unmapped_events")) { + // do nothing + } else { + bankEntryNames.push_back(entry_name); + } + } + } + } + + // each NXevent_data is a step + const auto num_banks_to_read = bankEntryNames.size(); + + // threaded processing of the banks + const int GRAINSIZE_BANK = getProperty(PropertyNames::READ_BANKS_IN_THREAD); + const int DISK_CHUNK = getProperty(PropertyNames::READ_SIZE_FROM_DISK); + const int GRAINSIZE_EVENTS = 
getProperty(PropertyNames::EVENTS_PER_THREAD); + auto progress = std::make_shared(this, .17, .9, num_banks_to_read); + ProcessBankTask task(bankEntryNames, h5file, is_time_filtered, pulse_start_index, pulse_stop_index, wksp, + m_calibration, m_masked, x_delta, linearBins, static_cast(DISK_CHUNK), + static_cast(GRAINSIZE_EVENTS), progress); + // generate threads only if appropriate + if (static_cast(GRAINSIZE_BANK) < num_banks_to_read) { + tbb::parallel_for(tbb::blocked_range(0, num_banks_to_read, static_cast(GRAINSIZE_BANK)), task); + } else { + task(tbb::blocked_range(0, num_banks_to_read, static_cast(GRAINSIZE_BANK))); + } + } + + // close the file so child algorithms can do their thing + h5file.close(); + + // set the instrument + this->progress(.9, "Set instrument geometry"); + wksp = this->editInstrumentGeometry(wksp, l1, polars, specids, l2s, azimuthals); + + // load run metadata + this->progress(.91, "Loading metadata"); + // prog->doReport("Loading metadata"); TODO add progress bar stuff + try { + LoadEventNexus::loadEntryMetadata(filename, wksp, ENTRY_TOP_LEVEL, descriptor); + } catch (std::exception &e) { + g_log.warning() << "Error while loading meta data: " << e.what() << '\n'; + } + + // load logs + this->progress(.92, "Loading logs"); + auto periodLog = std::make_unique>("period_log"); // not used + int nPeriods{1}; + LoadEventNexus::runLoadNexusLogs(filename, wksp, *this, false, nPeriods, periodLog); + + // set output units to be the same as coming from AlignAndFocusPowderFromFiles + wksp->setYUnit("Counts"); + wksp->getAxis(0)->setUnit("TOF"); + setProperty(PropertyNames::OUTPUT_WKSP, std::move(wksp)); +} + +MatrixWorkspace_sptr AlignAndFocusPowderSlim::createOutputWorkspace(const size_t numHist, const bool linearBins, + const double x_delta) { + const double x_min = getProperty(PropertyNames::X_MIN); + const double x_max = getProperty(PropertyNames::X_MAX); + + constexpr bool resize_xnew{true}; + constexpr bool full_bins_only{false}; + + HistogramData::BinEdges XValues_new(0); + if (linearBins) { + const std::vector params{x_min, x_delta, x_max}; + UNUSED_ARG(Kernel::VectorHelper::createAxisFromRebinParams(params, XValues_new.mutableRawData(), resize_xnew, + full_bins_only)); + } else { + const std::vector params{x_min, -1. 
* x_delta, x_max}; + UNUSED_ARG(Kernel::VectorHelper::createAxisFromRebinParams(params, XValues_new.mutableRawData(), resize_xnew, + full_bins_only)); + } + MatrixWorkspace_sptr wksp = Mantid::DataObjects::create(numHist, XValues_new); + return wksp; +} + +void AlignAndFocusPowderSlim::initCalibrationConstants(API::MatrixWorkspace_sptr &wksp, + const std::vector &difc_focus) { + const auto detInfo = wksp->detectorInfo(); + + for (auto iter = detInfo.cbegin(); iter != detInfo.cend(); ++iter) { + if (!iter->isMonitor()) { + const auto difc_focussed = getFocussedPostion(static_cast(iter->detid()), difc_focus); + m_calibration.emplace(iter->detid(), difc_focussed / detInfo.difcUncalibrated(iter->index())); + } + } +} + +void AlignAndFocusPowderSlim::loadCalFile(const Mantid::API::Workspace_sptr &inputWS, const std::string &filename, + const std::vector &difc_focus) { + auto alg = createChildAlgorithm("LoadDiffCal"); + alg->setProperty("InputWorkspace", inputWS); + alg->setPropertyValue("Filename", filename); + alg->setProperty("MakeCalWorkspace", true); + alg->setProperty("MakeGroupingWorkspace", false); + alg->setProperty("MakeMaskWorkspace", true); + alg->setPropertyValue("WorkspaceName", "temp"); + alg->executeAsChildAlg(); + + const ITableWorkspace_sptr calibrationWS = alg->getProperty("OutputCalWorkspace"); + for (size_t row = 0; row < calibrationWS->rowCount(); ++row) { + const detid_t detid = calibrationWS->cell(row, 0); + const double detc = calibrationWS->cell(row, 1); + const auto difc_focussed = getFocussedPostion(detid, difc_focus); + m_calibration.emplace(detid, difc_focussed / detc); + } + + const MaskWorkspace_sptr maskWS = alg->getProperty("OutputMaskWorkspace"); + m_masked = maskWS->getMaskedDetectors(); + g_log.debug() << "Masked detectors: " << m_masked.size() << '\n'; +} + +API::MatrixWorkspace_sptr AlignAndFocusPowderSlim::editInstrumentGeometry( + API::MatrixWorkspace_sptr &wksp, const double l1, const std::vector &polars, + const std::vector &specids, const std::vector &l2s, const std::vector &azimuthals) { + API::IAlgorithm_sptr editAlg = createChildAlgorithm("EditInstrumentGeometry"); + editAlg->setProperty("Workspace", wksp); + if (l1 > 0.) + editAlg->setProperty("PrimaryFlightPath", l1); + if (!polars.empty()) + editAlg->setProperty("Polar", polars); + if (!specids.empty()) + editAlg->setProperty("SpectrumIDs", specids); + if (!l2s.empty()) + editAlg->setProperty("L2", l2s); + if (!azimuthals.empty()) + editAlg->setProperty("Azimuthal", azimuthals); + editAlg->executeAsChildAlg(); + + wksp = editAlg->getProperty("Workspace"); + + return wksp; +} + +// ------------------------ BankCalibration object +AlignAndFocusPowderSlim::BankCalibration::BankCalibration(const detid_t idmin, const detid_t idmax, + const std::map &calibration_map) + : m_detid_offset(idmin) { + // error check the id-range + if (idmax < idmin) + throw std::runtime_error("BAD!"); // TODO better message + + // allocate memory and set the default value to 1 + m_calibration.assign(static_cast(idmax - idmin + 1), 1.); + + // copy over values that matter + auto iter = calibration_map.find(idmin); + if (iter == calibration_map.end()) + throw std::runtime_error("ALSO BAD!"); + auto iter_end = calibration_map.find(idmax); + if (iter_end != calibration_map.end()) + ++iter_end; + for (; iter != iter_end; ++iter) { + const auto index = static_cast(iter->first - m_detid_offset); + m_calibration[index] = iter->second; + } +} + +/* + * This assumes that everything is in range. 
Values that weren't in the calibration map get set to 1. + */ +const double &AlignAndFocusPowderSlim::BankCalibration::value(const detid_t detid) const { + return m_calibration[detid - m_detid_offset]; +} + +const detid_t &AlignAndFocusPowderSlim::BankCalibration::idmin() const { return m_detid_offset; } +detid_t AlignAndFocusPowderSlim::BankCalibration::idmax() const { + return m_detid_offset + static_cast(m_calibration.size()); +} + +} // namespace Mantid::DataHandling diff --git a/Framework/DataHandling/test/AlignAndFocusPowderSlimTest.h b/Framework/DataHandling/test/AlignAndFocusPowderSlimTest.h new file mode 100644 index 000000000000..047175865228 --- /dev/null +++ b/Framework/DataHandling/test/AlignAndFocusPowderSlimTest.h @@ -0,0 +1,128 @@ +// Mantid Repository : https://github.com/mantidproject/mantid +// +// Copyright © 2024 ISIS Rutherford Appleton Laboratory UKRI, +// NScD Oak Ridge National Laboratory, European Spallation Source, +// Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS +// SPDX - License - Identifier: GPL - 3.0 + +#pragma once + +#include + +#include "MantidAPI/Axis.h" +#include "MantidAPI/MatrixWorkspace.h" +#include "MantidDataHandling/AlignAndFocusPowderSlim.h" +#include "MantidKernel/Timer.h" + +using Mantid::API::MatrixWorkspace_sptr; +using Mantid::DataHandling::AlignAndFocusPowderSlim; + +class AlignAndFocusPowderSlimTest : public CxxTest::TestSuite { +public: + // This pair of boilerplate methods prevent the suite being created statically + // This means the constructor isn't called when running other tests + static AlignAndFocusPowderSlimTest *createSuite() { return new AlignAndFocusPowderSlimTest(); } + static void destroySuite(AlignAndFocusPowderSlimTest *suite) { delete suite; } + + void test_Init() { + AlignAndFocusPowderSlim alg; + TS_ASSERT_THROWS_NOTHING(alg.initialize()) + TS_ASSERT(alg.isInitialized()) + } + + // run the algorithm do some common checks and return output workspace name + MatrixWorkspace_sptr run_algorithm(const std::string &filename, const double xmin = -1., const double xmax = -1.) { + const std::string wksp_name("VULCAN"); + + std::cout << "==================> " << filename << '\n'; + Mantid::Kernel::Timer timer; + AlignAndFocusPowderSlim alg; + // Don't put output in ADS by default + alg.setChild(true); + TS_ASSERT_THROWS_NOTHING(alg.initialize()) + TS_ASSERT(alg.isInitialized()) + TS_ASSERT_THROWS_NOTHING(alg.setProperty("Filename", filename)); + TS_ASSERT_THROWS_NOTHING(alg.setPropertyValue("OutputWorkspace", wksp_name)); + if (xmin >= 0.) + TS_ASSERT_THROWS_NOTHING(alg.setProperty("XMin", xmin)); + if (xmax >= 0.) 
+ TS_ASSERT_THROWS_NOTHING(alg.setProperty("XMax", xmax)); + TS_ASSERT_THROWS_NOTHING(alg.execute();); + TS_ASSERT(alg.isExecuted()); + std::cout << "==================> " << timer << '\n'; + + MatrixWorkspace_sptr outputWS = alg.getProperty("OutputWorkspace"); + TS_ASSERT(outputWS); + return outputWS; + } + + void test_defaults() { + const std::string filename("VULCAN_218062.nxs.h5"); + MatrixWorkspace_sptr outputWS = run_algorithm(filename); + + constexpr size_t NUM_Y{4641}; // observed value + + // verify the output + TS_ASSERT_EQUALS(outputWS->getNumberHistograms(), 6); + TS_ASSERT_EQUALS(outputWS->blocksize(), NUM_Y); + TS_ASSERT_EQUALS(outputWS->getAxis(0)->unit()->unitID(), "TOF"); + // default values in algorithm + TS_ASSERT_EQUALS(outputWS->readX(0).front(), 10.); + TS_ASSERT_EQUALS(outputWS->readX(0).back(), 16667.); + // observed values from running + const auto y_values = outputWS->readY(0); + TS_ASSERT_EQUALS(y_values.size(), NUM_Y); + TS_ASSERT_EQUALS(y_values[0], 0.); + TS_ASSERT_EQUALS(y_values[NUM_Y / 2], 0.); + TS_ASSERT_EQUALS(y_values[NUM_Y - 1], 20719); + + // do not need to cleanup because workspace did not go into the ADS + } + + void test_common_x() { + constexpr double xmin{13000}; + constexpr double xmax{36000}; + const std::string filename("VULCAN_218062.nxs.h5"); + MatrixWorkspace_sptr outputWS = run_algorithm(filename, xmin, xmax); + + constexpr size_t NUM_Y{637}; // observed value + + // verify the output + TS_ASSERT_EQUALS(outputWS->getNumberHistograms(), 6); + TS_ASSERT_EQUALS(outputWS->blocksize(), NUM_Y); + TS_ASSERT_EQUALS(outputWS->getAxis(0)->unit()->unitID(), "TOF"); + // default values in algorithm + TS_ASSERT_EQUALS(outputWS->readX(0).front(), xmin); + TS_ASSERT_EQUALS(outputWS->readX(0).back(), xmax); + // observed values from running + const auto y_values = outputWS->readY(0); + TS_ASSERT_EQUALS(y_values.size(), NUM_Y); + TS_ASSERT_EQUALS(y_values[0], 0.); + TS_ASSERT_EQUALS(y_values[NUM_Y / 2], 55374.); // observed + TS_ASSERT_EQUALS(y_values[NUM_Y - 1], 0.); + + // do not need to cleanup because workspace did not go into the ADS + } + + // ================================== + // TODO things below this point are for benchmarking and will be removed later + // ================================== + + void run_test(const std::string &filename) { + MatrixWorkspace_sptr outputWS = run_algorithm(filename); + + // LoadEventNexus 4 seconds + // tof 6463->39950 + + // verify the output + TS_ASSERT_EQUALS(outputWS->getNumberHistograms(), 6); + TS_ASSERT_EQUALS(outputWS->blocksize(), 3349); // observed value + + // do not need to cleanup because workspace did not go into the ADS + } + + void xtest_exec1GB() { run_test("/home/pf9/build/mantid/vulcanperf/VULCAN_218075.nxs.h5"); } + + void xtest_exec10GB() { run_test("/home/pf9/build/mantid/vulcanperf/VULCAN_218092.nxs.h5"); } + + void xtest_exec18GB() { run_test("/home/pf9/build/mantid/vulcanperf/VULCAN_217967.nxs.h5"); } +}; diff --git a/Framework/DataObjects/inc/MantidDataObjects/EventList.h b/Framework/DataObjects/inc/MantidDataObjects/EventList.h index 20c9c9a68273..c13c33f5352c 100644 --- a/Framework/DataObjects/inc/MantidDataObjects/EventList.h +++ b/Framework/DataObjects/inc/MantidDataObjects/EventList.h @@ -368,7 +368,7 @@ class MANTID_DATAOBJECTS_DLL EventList : public Mantid::API::IEventList { const double offset, const bool findExact = true); private: - static std::optional findExactBin(const MantidVec &X, const double tof, size_t n_bin); + static size_t findExactBin(const MantidVec &X, const double 
tof, const size_t n_bin);
 
   void generateCountsHistogramPulseTime(const MantidVec &X, MantidVec &Y) const;
 
diff --git a/Framework/DataObjects/src/EventList.cpp b/Framework/DataObjects/src/EventList.cpp
index 229e3732618d..88b440478e5c 100644
--- a/Framework/DataObjects/src/EventList.cpp
+++ b/Framework/DataObjects/src/EventList.cpp
@@ -2669,13 +2669,19 @@ std::optional<size_t> EventList::findLogBin(const MantidVec &X, const double tof
  * @param tof :: TOF of the event we are trying to bin
  * @param n_bin :: starting estiamted bin number
  */
-std::optional<size_t> EventList::findExactBin(const MantidVec &X, const double tof, size_t n_bin) {
-  if (tof < X[n_bin])
-    n_bin--;
-  else if (tof >= X[n_bin + 1])
-    n_bin++;
-
-  return n_bin;
+size_t EventList::findExactBin(const MantidVec &X, const double tof, const size_t n_bin) {
+  // is tof below the lower boundary of the suggested bin?
+  auto tof_of_bin = X.cbegin() + n_bin; // suggested lower boundary
+  if (tof < *tof_of_bin)
+    return n_bin - 1;
+
+  // is tof at or above the upper boundary of the suggested bin?
+  ++tof_of_bin; // move to the next boundary
+  if (tof >= *tof_of_bin)
+    return n_bin + 1;
+
+  // tof is in the suggested bin
+  return n_bin;
 }
 
 // --------------------------------------------------------------------------
diff --git a/Framework/Geometry/inc/MantidGeometry/Instrument/DetectorInfo.h b/Framework/Geometry/inc/MantidGeometry/Instrument/DetectorInfo.h
index 8395f83e0a19..458ed6f1cd1c 100644
--- a/Framework/Geometry/inc/MantidGeometry/Instrument/DetectorInfo.h
+++ b/Framework/Geometry/inc/MantidGeometry/Instrument/DetectorInfo.h
@@ -106,6 +106,7 @@ class MANTID_GEOMETRY_DLL DetectorInfo {
   /// Returns the index of the detector with the given detector ID.
   /// This will throw an out of range exception if the detector does not exist.
   size_t indexOf(const detid_t id) const;
+  detid_t detid(const size_t index) const;
 
   size_t scanCount() const;
   const std::vector<std::pair<Types::Core::DateAndTime, Types::Core::DateAndTime>> scanIntervals() const;
diff --git a/Framework/Geometry/inc/MantidGeometry/Instrument/DetectorInfoItem.h b/Framework/Geometry/inc/MantidGeometry/Instrument/DetectorInfoItem.h
index 8fd4b4b407a0..85a8b3f26a23 100644
--- a/Framework/Geometry/inc/MantidGeometry/Instrument/DetectorInfoItem.h
+++ b/Framework/Geometry/inc/MantidGeometry/Instrument/DetectorInfoItem.h
@@ -50,8 +50,12 @@ template <typename T> class DetectorInfoItem {
 
   double l2() const { return m_detectorInfo->l2(m_index); }
 
+  double difcUncalibrated() const { return m_detectorInfo->difcUncalibrated(m_index); }
+
   size_t index() const { return m_index; }
 
+  size_t detid() const { return m_detectorInfo->detid(m_index); }
+
   DetectorInfoItem(T &detectorInfo, const size_t index) : m_detectorInfo(&detectorInfo), m_index(index) {}
 
   // Non-owning pointer. A reference makes the class unable to define an
diff --git a/Framework/Geometry/src/Instrument/DetectorInfo.cpp b/Framework/Geometry/src/Instrument/DetectorInfo.cpp
index c7ad8fe9ed36..775150e7806c 100644
--- a/Framework/Geometry/src/Instrument/DetectorInfo.cpp
+++ b/Framework/Geometry/src/Instrument/DetectorInfo.cpp
@@ -414,6 +414,8 @@ std::size_t DetectorInfo::indexOf(const detid_t id) const {
   }
 }
 
+detid_t DetectorInfo::detid(const size_t index) const { return getDetector(index).getID(); }
+
 /// Returns the scan count of the detector with given detector index.
 size_t DetectorInfo::scanCount() const { return m_detectorInfo->scanCount(); }
 
diff --git a/Testing/Data/UnitTest/VULCAN_218062.nxs.h5.md5 b/Testing/Data/UnitTest/VULCAN_218062.nxs.h5.md5
new file mode 100644
index 000000000000..27d53c696958
--- /dev/null
+++ b/Testing/Data/UnitTest/VULCAN_218062.nxs.h5.md5
@@ -0,0 +1 @@
+b67e5f4f1ac400c15a387037696a1794
diff --git a/Testing/SystemTests/tests/framework/LoadLotsOfFiles.py b/Testing/SystemTests/tests/framework/LoadLotsOfFiles.py
index 75877f7e17d7..ec5c88625ff0 100644
--- a/Testing/SystemTests/tests/framework/LoadLotsOfFiles.py
+++ b/Testing/SystemTests/tests/framework/LoadLotsOfFiles.py
@@ -128,6 +128,7 @@
     "BioSANS_exp61_scan0004_0001_Iq.txt",
     "test_data_Iq.txt",
     "BioSANS_test_data_Iq.txt",
+    "VULCAN_218062.nxs.h5",  # 1GB file for AlignAndFocusPowderSlim
 ]
 
 EXPECTED_EXT = ".expected"
diff --git a/docs/source/algorithms/AlignAndFocusPowderSlim-v1.rst b/docs/source/algorithms/AlignAndFocusPowderSlim-v1.rst
new file mode 100644
index 000000000000..14bcd7702e29
--- /dev/null
+++ b/docs/source/algorithms/AlignAndFocusPowderSlim-v1.rst
@@ -0,0 +1,43 @@
+
+.. algorithm::
+
+.. warning::
+
+   This algorithm is currently intended for VULCAN instrument testing purposes only.
+
+
+.. summary::
+
+.. relatedalgorithms::
+
+.. properties::
+
+Description
+-----------
+
+This is a simplified version of :ref:`algm-AlignAndFocusPowderFromFiles` that uses very few child algorithms.
+The main feature is that it reads the events, filters them and adjusts their time-of-flight, then increments the correct bin in the output workspace.
+As a result, memory usage is significantly smaller and processing is significantly faster.
+
+Current limitations compared to ``AlignAndFocusPowderFromFiles``:
+
+- only supports the VULCAN instrument
+- hard-coded for 6 particular groups
+- common binning across all output spectra
+- binning can only be specified in time-of-flight
+- does not support event filtering
+- does not support cropping data
+- does not support removing the prompt pulse
+- does not support removing bad pulses
+
+Child algorithms used are:
+
+- :ref:`algm-LoadDiffCal`
+- :ref:`algm-LoadIDFFromNexus-v1`
+- :ref:`algm-EditInstrumentGeometry`
+- :ref:`algm-LoadNexusLogs`
+
+
+.. categories::
+
+.. sourcelink::
diff --git a/docs/source/release/v6.13.0/Diffraction/Powder/New_features/.gitkeep b/docs/source/release/v6.13.0/Diffraction/Powder/New_features/.gitkeep
deleted file mode 100644
index e69de29bb2d1..000000000000
diff --git a/docs/source/release/v6.13.0/Diffraction/Powder/New_features/38279.rst b/docs/source/release/v6.13.0/Diffraction/Powder/New_features/38279.rst
new file mode 100644
index 000000000000..37c05baa3990
--- /dev/null
+++ b/docs/source/release/v6.13.0/Diffraction/Powder/New_features/38279.rst
@@ -0,0 +1 @@
+- New algorithm :ref:`AlignAndFocusPowderSlim <algm-AlignAndFocusPowderSlim>` (VULCAN only), which is meant to replicate the functionality of :ref:`AlignAndFocusPowderFromFiles <algm-AlignAndFocusPowderFromFiles>` but performs all the work on the events directly from the file.
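The new documentation page stops short of a usage example. As an editor's sketch only (not part of the patch), the snippet below shows how the algorithm could be driven from Python once it is registered, using only the properties declared in ``init()`` above (``Filename``, ``XMin``, ``XMax``, ``XDelta``, ``BinningMode``, ``OutputWorkspace``). It assumes the auto-generated ``mantid.simpleapi`` binding that Mantid creates for registered algorithms and that the ``VULCAN_218062.nxs.h5`` test file added above is on the data search path; the binning values mirror the ``test_common_x`` unit test, and the workspace name is illustrative.

.. code-block:: python

    # Sketch: focus the VULCAN test file into the 6 hard-coded groups.
    # Assumes a Mantid installation and that VULCAN_218062.nxs.h5 can be found.
    from mantid.simpleapi import AlignAndFocusPowderSlim

    ws = AlignAndFocusPowderSlim(
        Filename="VULCAN_218062.nxs.h5",
        XMin=13000,
        XMax=36000,
        XDelta=0.0016,
        BinningMode="Logarithmic",
        OutputWorkspace="VULCAN_focused",
    )
    print(ws.getNumberHistograms())  # expected to be 6, one spectrum per focused bank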