[ntuple] Remove dependency on TError.h #18411

Open · wants to merge 2 commits into master
10 changes: 10 additions & 0 deletions core/foundation/inc/ROOT/RError.hxx
@@ -27,6 +27,16 @@
#include <utility>
#include <vector>

+/*! Checks condition `e` and throws an exception if it's false.
+ * \warning This check is NOT stripped in release mode, so it should not be used in hot paths.
+ * For those cases, prefer a regular `assert()`.
+ */
+#define R7__ASSERT(e) \
+   do { \
+      if (R__unlikely(!(e))) \
+         throw ROOT::RException(R__FAIL(_QUOTE_(e))); \
+   } while (false)

namespace ROOT {

// clang-format off
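
Note: a minimal sketch of what the new macro does, assuming RError.hxx also provides `R__FAIL`, `_QUOTE_`, and `ROOT::RException` as in current ROOT. Unlike `R__ASSERT`, a failed check surfaces as a catchable exception instead of terminating the process, and it stays active in release builds:

```cpp
#include <ROOT/RError.hxx>

#include <cstdio>

// Hypothetical caller, for illustration only.
static int Divide(int num, int den)
{
   R7__ASSERT(den != 0); // throws ROOT::RException instead of aborting
   return num / den;
}

int main()
{
   try {
      Divide(1, 0);
   } catch (const ROOT::RException &e) {
      std::printf("caught: %s\n", e.what()); // the report names the failed condition
      return 0;
   }
   return 1;
}
```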
7 changes: 3 additions & 4 deletions tree/ntuple/inc/ROOT/RColumn.hxx
@@ -19,8 +19,7 @@
#include <ROOT/RNTupleUtil.hxx>
#include <ROOT/RPage.hxx>
#include <ROOT/RPageStorage.hxx>

-#include <TError.h>
+#include <ROOT/RError.hxx>

#include <cstring> // for memcpy
#include <memory>
@@ -325,8 +324,8 @@ public:
void Flush();
void CommitSuppressed();

-void MapPage(ROOT::NTupleSize_t globalIndex) { R__ASSERT(TryMapPage(globalIndex)); }
-void MapPage(RNTupleLocalIndex localIndex) { R__ASSERT(TryMapPage(localIndex)); }
+void MapPage(ROOT::NTupleSize_t globalIndex) { R7__ASSERT(TryMapPage(globalIndex)); }
+void MapPage(RNTupleLocalIndex localIndex) { R7__ASSERT(TryMapPage(localIndex)); }
bool TryMapPage(ROOT::NTupleSize_t globalIndex);
bool TryMapPage(RNTupleLocalIndex localIndex);

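
Note: the visible behavior change in this hunk is that MapPage() no longer aborts on a failed TryMapPage(); with R7__ASSERT it throws ROOT::RException, also in release builds. A sketch of a caller that can now handle the failure (the helper is hypothetical and assumes a column already initialized through the usual page-storage machinery):

```cpp
#include <ROOT/RColumn.hxx>
#include <ROOT/RError.hxx>

#include <cstdio>

// Hypothetical helper, for illustration only.
void LoadPageOrReport(ROOT::Internal::RColumn &column, ROOT::NTupleSize_t globalIndex)
{
   try {
      column.MapPage(globalIndex); // R7__ASSERT(TryMapPage(globalIndex)) may throw
   } catch (const ROOT::RException &e) {
      // Before this change, R__ASSERT would have terminated the process here;
      // now the failure can be logged or recovered from.
      std::fprintf(stderr, "MapPage failed: %s\n", e.what());
   }
}
```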
3 changes: 1 addition & 2 deletions tree/ntuple/inc/ROOT/RColumnElementBase.hxx
@@ -20,7 +20,6 @@
#include <ROOT/RNTupleUtil.hxx>

#include <Byteswap.h>
-#include <TError.h>

#include <cstring> // for memcpy
#include <cstddef> // for std::byte
@@ -95,7 +94,7 @@ public:
/// Derived, typed classes tell whether the on-storage layout is bitwise identical to the memory layout
virtual bool IsMappable() const
{
-R__ASSERT(false);
+R7__ASSERT(false);
return false;
}

3 changes: 0 additions & 3 deletions tree/ntuple/inc/ROOT/REntry.hxx
@@ -18,9 +18,6 @@
#include <ROOT/RField.hxx>
#include <ROOT/RFieldToken.hxx>
#include <string_view>

-#include <TError.h>

#include <algorithm>
#include <iterator>
#include <memory>
8 changes: 4 additions & 4 deletions tree/ntuple/inc/ROOT/RField/RFieldRecord.hxx
@@ -163,8 +163,8 @@ public:
static std::string TypeName() { return "std::pair<" + RField<T1>::TypeName() + "," + RField<T2>::TypeName() + ">"; }
explicit RField(std::string_view name) : RPairField(name, BuildItemFields(), BuildItemOffsets())
{
-R__ASSERT(fMaxAlignment >= std::max(alignof(T1), alignof(T2)));
-R__ASSERT(fSize >= sizeof(ContainerT));
+R7__ASSERT(fMaxAlignment >= std::max(alignof(T1), alignof(T2)));
+R7__ASSERT(fSize >= sizeof(ContainerT));
}
RField(RField &&other) = default;
RField &operator=(RField &&other) = default;
@@ -239,8 +239,8 @@ public:
static std::string TypeName() { return "std::tuple<" + BuildItemTypes<ItemTs...>() + ">"; }
explicit RField(std::string_view name) : RTupleField(name, BuildItemFields(), BuildItemOffsets())
{
-R__ASSERT(fMaxAlignment >= std::max({alignof(ItemTs)...}));
-R__ASSERT(fSize >= sizeof(ContainerT));
+R7__ASSERT(fMaxAlignment >= std::max({alignof(ItemTs)...}));
+R7__ASSERT(fSize >= sizeof(ContainerT));
}
RField(RField &&other) = default;
RField &operator=(RField &&other) = default;
2 changes: 1 addition & 1 deletion tree/ntuple/inc/ROOT/RField/RFieldSequenceContainer.hxx
@@ -329,7 +329,7 @@ private:
protected:
std::unique_ptr<RFieldBase> CloneImpl(std::string_view newName) const final;

-void GenerateColumns() final { R__ASSERT(false && "RArrayAsRVec fields must only be used for reading"); }
+void GenerateColumns() final { R7__ASSERT(false && "RArrayAsRVec fields must only be used for reading"); }
using RFieldBase::GenerateColumns;

void ConstructValue(void *where) const final;
10 changes: 5 additions & 5 deletions tree/ntuple/inc/ROOT/RFieldBase.hxx
@@ -327,7 +327,7 @@ protected:
fAuxiliaryColumn = column.get();
} else {
// We currently have no fields with more than 2 columns in its column representation
-R__ASSERT(representationIndex > 0);
+R7__ASSERT(representationIndex > 0);
}

if constexpr (sizeof...(TailTs))
@@ -867,13 +867,13 @@ namespace Internal {
struct RFieldRepresentationModifier {
static void SetPrimaryColumnRepresentation(RFieldBase &field, std::uint16_t newRepresentationIdx)
{
-R__ASSERT(newRepresentationIdx < field.fColumnRepresentatives.size());
+R7__ASSERT(newRepresentationIdx < field.fColumnRepresentatives.size());
const auto N = field.fColumnRepresentatives[0].get().size();
-R__ASSERT(N >= 1 && N <= 2);
-R__ASSERT(field.fPrincipalColumn);
+R7__ASSERT(N >= 1 && N <= 2);
+R7__ASSERT(field.fPrincipalColumn);
field.fPrincipalColumn = field.fAvailableColumns[newRepresentationIdx * N].get();
if (field.fAuxiliaryColumn) {
-R__ASSERT(N == 2);
+R7__ASSERT(N == 2);
field.fAuxiliaryColumn = field.fAvailableColumns[newRepresentationIdx * N + 1].get();
}
}
4 changes: 1 addition & 3 deletions tree/ntuple/inc/ROOT/RNTupleDescriptor.hxx
@@ -21,8 +21,6 @@
#include <ROOT/RNTupleUtil.hxx>
#include <ROOT/RSpan.hxx>

-#include <TError.h>

#include <algorithm>
#include <chrono>
#include <cmath>
@@ -1231,7 +1229,7 @@ public:
}
RColumnDescriptorBuilder &SetSuppressedDeferred()
{
-R__ASSERT(fColumn.fFirstElementIndex != 0);
+R7__ASSERT(fColumn.fFirstElementIndex != 0);
if (fColumn.fFirstElementIndex > 0)
fColumn.fFirstElementIndex = -fColumn.fFirstElementIndex;
return *this;
9 changes: 4 additions & 5 deletions tree/ntuple/inc/ROOT/RNTupleMetrics.hxx
@@ -17,10 +17,9 @@
#define ROOT_RNTupleMetrics

#include <ROOT/RConfig.hxx>
-#include <string_view>

-#include <TError.h>
+#include <ROOT/RError.hxx>

+#include <string_view>
#include <atomic>
#include <chrono>
#include <cstdint>
@@ -214,7 +213,7 @@ public:
RNTupleTickCounter(const std::string &name, const std::string &unit, const std::string &desc)
: BaseCounterT(name, unit, desc)
{
-R__ASSERT(unit == "ns");
+R7__ASSERT(unit == "ns");
}

std::int64_t GetValueAsInt() const final {
@@ -308,7 +307,7 @@ public:
template <typename CounterPtrT, class... Args>
CounterPtrT MakeCounter(const std::string &name, Args&&... args)
{
-R__ASSERT(!Contains(name));
+R7__ASSERT(!Contains(name));
auto counter = std::make_unique<std::remove_pointer_t<CounterPtrT>>(name, std::forward<Args>(args)...);
auto ptrCounter = counter.get();
fCounters.emplace_back(std::move(counter));
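
Note: for orientation, a sketch of typical MakeCounter usage. The method signature is visible in the hunk, but the metrics class constructor, the counter type, and the namespace are assumptions (they have moved between ROOT versions). Registering the same counter name twice now throws via R7__ASSERT instead of aborting:

```cpp
#include <ROOT/RNTupleMetrics.hxx>

// Namespace and type names are assumptions; adjust to the ROOT version at hand.
using ROOT::Experimental::Detail::RNTupleAtomicCounter;
using ROOT::Experimental::Detail::RNTupleMetrics;

int main()
{
   RNTupleMetrics metrics("pageSource");
   auto *nRead = metrics.MakeCounter<RNTupleAtomicCounter *>("nRead", "", "number of read calls");
   nRead->Inc();
   // A second MakeCounter<...>("nRead", ...) would now throw ROOT::RException.
   return 0;
}
```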
2 changes: 1 addition & 1 deletion tree/ntuple/inc/ROOT/RNTupleModel.hxx
@@ -447,7 +447,7 @@ public:
auto fieldZero = fOpenChangeset.fModel.fFieldZero.get();
auto it =
std::find_if(fieldZero->begin(), fieldZero->end(), [&](const auto &f) { return f.GetFieldName() == name; });
-R__ASSERT(it != fieldZero->end());
+R7__ASSERT(it != fieldZero->end());
fOpenChangeset.fAddedFields.emplace_back(&(*it));
return objPtr;
}
28 changes: 14 additions & 14 deletions tree/ntuple/inc/ROOT/RNTupleZip.hxx
@@ -14,8 +14,8 @@
#ifndef ROOT_RNTupleZip
#define ROOT_RNTupleZip

+#include <ROOT/RError.hxx>
#include <RZip.h>
-#include <TError.h>

#include <algorithm>
#include <array>
@@ -45,8 +45,8 @@ public:
/// Returns the size of the compressed data, written into the provided output buffer.
static std::size_t Zip(const void *from, std::size_t nbytes, int compression, void *to)
{
-R__ASSERT(from != nullptr);
-R__ASSERT(to != nullptr);
+R7__ASSERT(from != nullptr);
+R7__ASSERT(to != nullptr);
auto cxLevel = compression % 100;
if (cxLevel == 0) {
memcpy(to, from, nbytes);
@@ -64,7 +64,7 @@ public:
for (unsigned int i = 0; i < nZipBlocks; ++i) {
int szSource = std::min(static_cast<int>(kMAXZIPBUF), szRemaining);
R__zipMultipleAlgorithm(cxLevel, &szSource, source, &szTarget, target, &szOutBlock, cxAlgorithm);
-R__ASSERT(szOutBlock >= 0);
+R7__ASSERT(szOutBlock >= 0);
if ((szOutBlock == 0) || (szOutBlock >= szSource)) {
// Uncompressible block, we have to store the entire input data stream uncompressed
memcpy(to, from, nbytes);
@@ -76,8 +76,8 @@ public:
target += szOutBlock;
szRemaining -= szSource;
}
-R__ASSERT(szRemaining == 0);
-R__ASSERT(szZipData < nbytes);
+R7__ASSERT(szRemaining == 0);
+R7__ASSERT(szZipData < nbytes);
return szZipData;
}
};
@@ -107,7 +107,7 @@ public:
memcpy(to, from, nbytes);
return;
}
-R__ASSERT(dataLen > nbytes);
+R7__ASSERT(dataLen > nbytes);

unsigned char *source = const_cast<unsigned char *>(static_cast<const unsigned char *>(from));
unsigned char *target = static_cast<unsigned char *>(to);
@@ -116,21 +116,21 @@ public:
int szSource;
int szTarget;
int retval = R__unzip_header(&szSource, source, &szTarget);
-R__ASSERT(retval == 0);
-R__ASSERT(szSource > 0);
-R__ASSERT(szTarget > szSource);
-R__ASSERT(static_cast<unsigned int>(szSource) <= nbytes);
-R__ASSERT(static_cast<unsigned int>(szTarget) <= dataLen);
+R7__ASSERT(retval == 0);
+R7__ASSERT(szSource > 0);
+R7__ASSERT(szTarget > szSource);
+R7__ASSERT(static_cast<unsigned int>(szSource) <= nbytes);
+R7__ASSERT(static_cast<unsigned int>(szTarget) <= dataLen);

int unzipBytes = 0;
R__unzip(&szSource, source, &szTarget, target, &unzipBytes);
-R__ASSERT(unzipBytes == szTarget);
+R7__ASSERT(unzipBytes == szTarget);

target += szTarget;
source += szSource;
szRemaining -= unzipBytes;
} while (szRemaining > 0);
-R__ASSERT(szRemaining == 0);
+R7__ASSERT(szRemaining == 0);
}
};

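
Note: a round-trip sketch of the two classes touched above. The Zip signature is visible in the hunk; the class names (RNTupleCompressor/RNTupleDecompressor), their namespace, and the Unzip parameter order (from, nbytes, dataLen, to) are assumptions. 505 follows ROOT's algorithm*100+level convention (ZSTD, level 5), and the caller must keep the uncompressed size around for Unzip:

```cpp
#include <ROOT/RNTupleZip.hxx>

#include <cstring>
#include <vector>

int main()
{
   std::vector<char> data(64 * 1024, 'x'); // highly compressible input
   std::vector<char> zipped(data.size());  // Zip never produces more than nbytes
   std::vector<char> restored(data.size());

   const auto szZipped =
      ROOT::Internal::RNTupleCompressor::Zip(data.data(), data.size(), 505, zipped.data());

   ROOT::Internal::RNTupleDecompressor::Unzip(zipped.data(), szZipped, data.size(), restored.data());

   return std::memcmp(data.data(), restored.data(), data.size()) == 0 ? 0 : 1;
}
```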
4 changes: 1 addition & 3 deletions tree/ntuple/src/RCluster.cxx
@@ -15,8 +15,6 @@

#include <ROOT/RCluster.hxx>

-#include <TError.h>

#include <iterator>
#include <utility>

@@ -47,7 +45,7 @@ void ROOT::Experimental::Internal::RCluster::Adopt(std::unique_ptr<ROnDiskPageMa

void ROOT::Experimental::Internal::RCluster::Adopt(RCluster &&other)
{
-R__ASSERT(fClusterId == other.fClusterId);
+R7__ASSERT(fClusterId == other.fClusterId);

auto &pages = other.fOnDiskPages;
fOnDiskPages.insert(std::make_move_iterator(pages.begin()), std::make_move_iterator(pages.end()));
18 changes: 8 additions & 10 deletions tree/ntuple/src/RClusterPool.cxx
@@ -17,8 +17,6 @@
#include <ROOT/RNTupleDescriptor.hxx>
#include <ROOT/RPageStorage.hxx>

-#include <TError.h>

#include <algorithm>
#include <chrono>
#include <future>
@@ -55,7 +53,7 @@ ROOT::Experimental::Internal::RClusterPool::RClusterPool(ROOT::Internal::RPageSo
fPool(2 * clusterBunchSize),
fThreadIo(&RClusterPool::ExecReadClusters, this)
{
-R__ASSERT(clusterBunchSize > 0);
+R7__ASSERT(clusterBunchSize > 0);
}

ROOT::Experimental::Internal::RClusterPool::~RClusterPool()
@@ -87,7 +85,7 @@ void ROOT::Experimental::Internal::RClusterPool::ExecReadClusters()
// `kInvalidDescriptorId` is used as a marker for thread cancellation. Such item causes the
// thread to terminate; thus, it must appear last in the queue.
if (R__unlikely(item.fClusterKey.fClusterId == ROOT::kInvalidDescriptorId)) {
-R__ASSERT(i == (readItems.size() - 1));
+R7__ASSERT(i == (readItems.size() - 1));
return;
}
if ((bunchId >= 0) && (item.fBunchId != bunchId))
@@ -123,7 +121,7 @@ size_t ROOT::Experimental::Internal::RClusterPool::FindFreeSlot() const
return i;
}

-R__ASSERT(false);
+R7__ASSERT(false);
return N;
}

@@ -247,7 +245,7 @@ ROOT::Experimental::Internal::RClusterPool::GetCluster(ROOT::DescriptorId_t clus
std::lock_guard<std::mutex> lockGuard(fLockWorkQueue);

for (auto itr = fInFlightClusters.begin(); itr != fInFlightClusters.end(); ) {
-R__ASSERT(itr->fFuture.valid());
+R7__ASSERT(itr->fFuture.valid());
if (itr->fFuture.wait_for(std::chrono::seconds(0)) != std::future_status::ready) {
// Remove the set of columns that are already scheduled for being loaded
provide.Erase(itr->fClusterKey.fClusterId, itr->fClusterKey.fPhysicalColumnSet);
@@ -256,7 +254,7 @@ ROOT::Experimental::Internal::RClusterPool::GetCluster(ROOT::DescriptorId_t clus
}

auto cptr = itr->fFuture.get();
-R__ASSERT(cptr);
+R7__ASSERT(cptr);

const bool isExpired =
!provide.Contains(itr->fClusterKey.fClusterId) && (keep.count(itr->fClusterKey.fClusterId) == 0);
@@ -305,7 +303,7 @@ ROOT::Experimental::Internal::RClusterPool::GetCluster(ROOT::DescriptorId_t clus
// case but it's not ensured by the code
if (!skipPrefetch) {
for (const auto &kv : provide) {
-R__ASSERT(!kv.second.fPhysicalColumnSet.empty());
+R7__ASSERT(!kv.second.fPhysicalColumnSet.empty());

RReadItem readItem;
readItem.fClusterKey.fClusterId = kv.first;
@@ -357,7 +355,7 @@ ROOT::Experimental::Internal::RClusterPool::WaitFor(ROOT::DescriptorId_t cluster
if (itr->fClusterKey.fClusterId == clusterId)
break;
}
-R__ASSERT(itr != fInFlightClusters.end());
+R7__ASSERT(itr != fInFlightClusters.end());
// Note that the fInFlightClusters is accessed concurrently only by the I/O thread. The I/O thread
// never changes the structure of the in-flight clusters array (it does not add, remove, or swap elements).
// Therefore, it is safe to access the element pointed to by itr here even after fLockWorkQueue
@@ -366,7 +364,7 @@ ROOT::Experimental::Internal::RClusterPool::WaitFor(ROOT::DescriptorId_t cluster

auto cptr = itr->fFuture.get();
// We were blocked waiting for the cluster, so assume that nobody discarded it.
-R__ASSERT(cptr != nullptr);
+R7__ASSERT(cptr != nullptr);

// Noop unless the page source has a task scheduler
fPageSource.UnzipCluster(cptr.get());
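
Note: the kInvalidDescriptorId item handled in ExecReadClusters() is a poison-pill shutdown: the sentinel is queued last, so the consumer thread drains all real work first and exits when it dequeues the marker. A generic, self-contained sketch of the pattern (illustrative names only, not the pool's actual types):

```cpp
#include <condition_variable>
#include <cstdint>
#include <deque>
#include <mutex>

constexpr std::uint64_t kInvalidId = std::uint64_t(-1); // sentinel, must be queued last

class WorkQueue {
public:
   void Push(std::uint64_t id)
   {
      {
         std::lock_guard<std::mutex> guard(fMutex);
         fItems.push_back(id);
      }
      fCv.notify_one();
   }

   // Runs on the consumer thread; returns once the sentinel is dequeued.
   void Consume()
   {
      for (;;) {
         std::unique_lock<std::mutex> lock(fMutex);
         fCv.wait(lock, [this] { return !fItems.empty(); });
         const auto id = fItems.front();
         fItems.pop_front();
         lock.unlock();
         if (id == kInvalidId)
            return; // analogous to the R7__ASSERT that the marker appears last
         // ... process item `id` ...
      }
   }

private:
   std::mutex fMutex;
   std::condition_variable fCv;
   std::deque<std::uint64_t> fItems;
};
```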
4 changes: 1 addition & 3 deletions tree/ntuple/src/RColumn.cxx
@@ -15,8 +15,6 @@
#include <ROOT/RNTupleDescriptor.hxx>
#include <ROOT/RPageStorage.hxx>

-#include <TError.h>

#include <algorithm>
#include <cassert>
#include <utility>
@@ -73,7 +71,7 @@ void ROOT::Internal::RColumn::Flush()

fPageSink->CommitPage(fHandleSink, fWritePage);
fWritePage = fPageSink->ReservePage(fHandleSink, fInitialNElements);
-R__ASSERT(!fWritePage.IsNull());
+R7__ASSERT(!fWritePage.IsNull());
fWritePage.Reset(fNElements);
}

2 changes: 1 addition & 1 deletion tree/ntuple/src/RColumnElement.cxx
@@ -188,7 +188,7 @@
} else if (inMemoryType == std::type_index(typeid(RTestFutureColumn))) {
return GenerateColumnElementInternal<RTestFutureColumn>(onDiskType);
} else {
R__ASSERT(!"Invalid memory type in GenerateColumnElement");
R7__ASSERT(!"Invalid memory type in GenerateColumnElement");
}
// never here
return nullptr;
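
Note on the `!"..."` condition kept in this last hunk: a string literal converts to a non-null const char*, so its negation is always false, while the message survives in the expression that _QUOTE_ stringifies into the exception report. It is equivalent to the `false && "..."` form used elsewhere in this PR:

```cpp
// Always-false assert conditions that carry a human-readable message:
R7__ASSERT(!"Invalid memory type in GenerateColumnElement");
R7__ASSERT(false && "Invalid memory type in GenerateColumnElement"); // same effect
```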