diff --git a/benchmark/GroupByHashMapBenchmark.cpp b/benchmark/GroupByHashMapBenchmark.cpp index 346c9da931..780785e9bc 100644 --- a/benchmark/GroupByHashMapBenchmark.cpp +++ b/benchmark/GroupByHashMapBenchmark.cpp @@ -364,7 +364,7 @@ class GroupByHashMapBenchmark : public BenchmarkInterface { } else { firstColumn = generateRandomGroupVec(numInputRows, numGroups); } - std::ranges::transform( + ql::ranges::transform( firstColumn.begin(), firstColumn.end(), groupValues.begin(), [](size_t value) { return ValueId::makeFromInt(static_cast(value)); @@ -375,7 +375,7 @@ class GroupByHashMapBenchmark : public BenchmarkInterface { auto localVocab = LocalVocab{}; if (valueTypes != ValueIdType::Strings) { auto secondColumn = generateRandomDoubleVec(numInputRows); - std::ranges::transform( + ql::ranges::transform( secondColumn.begin(), secondColumn.end(), otherValues.begin(), [&](double value) { if (valueTypes == ValueIdType::OnlyDouble) @@ -396,10 +396,10 @@ class GroupByHashMapBenchmark : public BenchmarkInterface { numInputRows, randomStringLength); localVocab = std::move(newLocalVocab); - std::ranges::transform(indices.begin(), indices.end(), - otherValues.begin(), [&](LocalVocabIndex idx) { - return ValueId::makeFromLocalVocabIndex(idx); - }); + ql::ranges::transform(indices.begin(), indices.end(), otherValues.begin(), + [&](LocalVocabIndex idx) { + return ValueId::makeFromLocalVocabIndex(idx); + }); } std::vector> variables = {Variable{"?a"}, diff --git a/benchmark/JoinAlgorithmBenchmark.cpp b/benchmark/JoinAlgorithmBenchmark.cpp index b06202dbf4..553785609f 100644 --- a/benchmark/JoinAlgorithmBenchmark.cpp +++ b/benchmark/JoinAlgorithmBenchmark.cpp @@ -115,7 +115,7 @@ struct SetOfIdTableColumnElements { */ explicit SetOfIdTableColumnElements( const std::span& idTableColumnRef) { - std::ranges::for_each(idTableColumnRef, [this](const ValueId& id) { + ql::ranges::for_each(idTableColumnRef, [this](const ValueId& id) { if (auto numOccurrencesIterator = 
numOccurrences_.find(id); numOccurrencesIterator != numOccurrences_.end()) { (numOccurrencesIterator->second)++; @@ -190,7 +190,7 @@ static size_t createOverlapRandomly(IdTableAndJoinColumn* const smallerTable, // Create the overlap. ad_utility::HashMap> smallerTableElementToNewElement{}; - std::ranges::for_each( + ql::ranges::for_each( smallerTableJoinColumnRef, [&randomDouble, &probabilityToCreateOverlap, &smallerTableElementToNewElement, &randomBiggerTableElement, @@ -295,7 +295,7 @@ static size_t createOverlapRandomly(IdTableAndJoinColumn* const smallerTable, size_t newOverlapMatches{0}; ad_utility::HashMap> smallerTableElementToNewElement{}; - std::ranges::for_each( + ql::ranges::for_each( smallerTableJoinColumnSet.uniqueElements_, [&randomBiggerTableElement, &wantedNumNewOverlapMatches, &newOverlapMatches, &smallerTableElementToNewElement, @@ -326,7 +326,7 @@ static size_t createOverlapRandomly(IdTableAndJoinColumn* const smallerTable, }); // Overwrite the designated values in the smaller table. - std::ranges::for_each( + ql::ranges::for_each( smallerTableJoinColumnRef, [&smallerTableElementToNewElement](auto& id) { if (auto newValueIterator = smallerTableElementToNewElement.find(id); newValueIterator != smallerTableElementToNewElement.end()) { @@ -465,17 +465,17 @@ static std::vector mergeSortedVectors( std::vector mergedVector{}; // Merge. 
- std::ranges::for_each(intervals, [&mergedVector](std::vector elements) { + ql::ranges::for_each(intervals, [&mergedVector](std::vector elements) { if (mergedVector.empty() || elements.empty()) { - std::ranges::copy(elements, std::back_inserter(mergedVector)); + ql::ranges::copy(elements, std::back_inserter(mergedVector)); return; } const size_t idxOldLastElem = mergedVector.size() - 1; - std::ranges::copy(elements, std::back_inserter(mergedVector)); + ql::ranges::copy(elements, std::back_inserter(mergedVector)); if (mergedVector.at(idxOldLastElem) > mergedVector.at(idxOldLastElem + 1)) { - std::ranges::inplace_merge( + ql::ranges::inplace_merge( mergedVector, - std::ranges::next(mergedVector.begin(), idxOldLastElem + 1)); + ql::ranges::next(mergedVector.begin(), idxOldLastElem + 1)); } }); @@ -935,7 +935,7 @@ class GeneralInterfaceImplementation : public BenchmarkInterface { "' must be bigger than, or equal to, 0.")}; config.addValidator( [](const benchmarkSampleSizeRatiosValueType& vec) { - return std::ranges::all_of( + return ql::ranges::all_of( vec, [](const benchmarkSampleSizeRatiosValueType::value_type ratio) { return ratio >= 0.f; @@ -961,7 +961,7 @@ class GeneralInterfaceImplementation : public BenchmarkInterface { ".")}; config.addValidator( [](const benchmarkSampleSizeRatiosValueType& vec) { - return std::ranges::max(vec) <= + return ql::ranges::max(vec) <= getMaxValue() - 1.f; }, @@ -1056,9 +1056,9 @@ class GeneralInterfaceImplementation : public BenchmarkInterface { }, descriptor, descriptor, option); }; - std::ranges::for_each(std::vector{minBiggerTableRows, maxBiggerTableRows, - minSmallerTableRows}, - addCastableValidator); + ql::ranges::for_each(std::vector{minBiggerTableRows, maxBiggerTableRows, + minSmallerTableRows}, + addCastableValidator); } /* @@ -1303,7 +1303,7 @@ class GeneralInterfaceImplementation : public BenchmarkInterface { ColumnNumWithType{toUnderlying(TimeForMergeGallopingJoin)}); // Calculate, how much of a speedup the hash join 
algorithm has in - // comparison to the merge/galloping join algrithm. + // comparison to the merge/galloping join algorithm. calculateSpeedupOfColumn( table, {toUnderlying(JoinAlgorithmSpeedup)}, {toUnderlying(TimeForHashJoin)}, @@ -1684,7 +1684,7 @@ class BmOnlyBiggerTableSizeChanges final static_cast(getConfigVariables().minBiggerTableRows_) / static_cast(smallerTableNumRows)))}; auto growthFunction = createDefaultGrowthLambda( - 10.f, std::ranges::max(minRatio, 10.f), + 10.f, ql::ranges::max(minRatio, 10.f), generateNaturalNumberSequenceInterval(minRatio, 9.f)); ResultTable& table = makeGrowingBenchmarkTable( &results, tableName, "Row ratio", alwaysFalse, @@ -1742,8 +1742,8 @@ class BmOnlySmallerTableSizeChanges final for (const float ratioRows : mergeSortedVectors( {generateNaturalNumberSequenceInterval( getConfigVariables().minRatioRows_, - std::ranges::min(getConfigVariables().maxRatioRows_, - 10.f)), + ql::ranges::min(getConfigVariables().maxRatioRows_, + 10.f)), generateExponentInterval( 10.f, getConfigVariables().minRatioRows_, getConfigVariables().maxRatioRows_)})) { @@ -1755,7 +1755,7 @@ class BmOnlySmallerTableSizeChanges final // Returns the amount of rows in the smaller `IdTable`, used for the // measurements in a given row. auto growthFunction = createDefaultGrowthLambda( - 10UL, std::ranges::max( + 10UL, ql::ranges::max( static_cast( static_cast( getConfigVariables().minBiggerTableRows_) / @@ -1867,7 +1867,7 @@ class BmSampleSizeRatio final : public GeneralInterfaceImplementation { BenchmarkResults runAllBenchmarks() override { BenchmarkResults results{}; const auto& ratios{getConfigVariables().benchmarkSampleSizeRatios_}; - const float maxSampleSizeRatio{std::ranges::max(ratios)}; + const float maxSampleSizeRatio{ql::ranges::max(ratios)}; /* We work with the biggest possible smaller and bigger table. 
That should make @@ -2097,17 +2097,17 @@ class BmSmallerTableGrowsBiggerTableRemainsSameSize final static_cast(biggerTableNumRows) / static_cast(getConfigVariables().minSmallerTableRows_))}; std::vector smallerTableRows; - std::ranges::transform( + ql::ranges::transform( mergeSortedVectors( {generateNaturalNumberSequenceInterval( - 1.f, std::ranges::min(10.f, biggestRowRatio)), + 1.f, ql::ranges::min(10.f, biggestRowRatio)), generateExponentInterval(10.f, 10.f, biggestRowRatio)}), std::back_inserter(smallerTableRows), [&biggerTableNumRows](const float ratio) { return static_cast( static_cast(biggerTableNumRows) / ratio); }); - std::ranges::reverse(smallerTableRows); + ql::ranges::reverse(smallerTableRows); const size_t lastSmallerTableRow{smallerTableRows.back()}; auto growthFunction = createDefaultGrowthLambda( 10UL, lastSmallerTableRow + 1UL, std::move(smallerTableRows)); diff --git a/benchmark/ParallelMergeBenchmark.cpp b/benchmark/ParallelMergeBenchmark.cpp index 1f33b996ba..98535bc29b 100644 --- a/benchmark/ParallelMergeBenchmark.cpp +++ b/benchmark/ParallelMergeBenchmark.cpp @@ -28,12 +28,12 @@ class IdTableCompressedWriterBenchmark : public BenchmarkInterface { ad_utility::integerRange(numInputRows)) { res.push_back(gen()); } - std::ranges::sort(res); + ql::ranges::sort(res); return res; }; std::vector> inputs; inputs.resize(numInputs); - std::ranges::generate(inputs, generateRandomVec); + ql::ranges::generate(inputs, generateRandomVec); auto run = [&inputs]() { auto merger = ad_utility::parallelMultiwayMerge( diff --git a/benchmark/infrastructure/BenchmarkMain.cpp b/benchmark/infrastructure/BenchmarkMain.cpp index b605ed7ac0..8a720ff526 100644 --- a/benchmark/infrastructure/BenchmarkMain.cpp +++ b/benchmark/infrastructure/BenchmarkMain.cpp @@ -90,7 +90,7 @@ static void writeBenchmarkClassAndBenchmarkResultsToJsonFile( Print the configuration documentation of all registered benchmarks. 
*/ static __attribute__((noreturn)) void printConfigurationOptionsAndExit() { - std::ranges::for_each( + ql::ranges::for_each( BenchmarkRegister::getAllRegisteredBenchmarks(), [](const BenchmarkInterface* bench) { std::cerr << createCategoryTitle( @@ -211,13 +211,13 @@ int main(int argc, char** argv) { // Actually processing the arguments. if (vm.count("print")) { // Print the results and metadata. - std::ranges::for_each(benchmarkClassAndResults, - [](const auto& pair) { - std::cout << benchmarkResultsToString(pair.first, - pair.second) - << "\n\n"; - }, - {}); + ql::ranges::for_each(benchmarkClassAndResults, + [](const auto& pair) { + std::cout << benchmarkResultsToString(pair.first, + pair.second) + << "\n\n"; + }, + {}); } if (vm.count("write")) { diff --git a/benchmark/infrastructure/BenchmarkMeasurementContainer.cpp b/benchmark/infrastructure/BenchmarkMeasurementContainer.cpp index 04bd28f41a..29f54e543d 100644 --- a/benchmark/infrastructure/BenchmarkMeasurementContainer.cpp +++ b/benchmark/infrastructure/BenchmarkMeasurementContainer.cpp @@ -129,7 +129,7 @@ void ResultTable::init(const std::string& descriptor, descriptorForLog_ = std::move(descriptorForLog); columnNames_ = columnNames; entries_.resize(rowNames.size()); - std::ranges::fill(entries_, std::vector(columnNames.size())); + ql::ranges::fill(entries_, std::vector(columnNames.size())); // Setting the row names. for (size_t row = 0; row < rowNames.size(); row++) { @@ -287,10 +287,10 @@ ResultTable::operator std::string() const { }); // Which of the entries is the longest? - columnMaxStringWidth.at(column) = std::ranges::max(stringWidthOfRow); + columnMaxStringWidth.at(column) = ql::ranges::max(stringWidthOfRow); // Is the name of the column bigger? 
- columnMaxStringWidth.at(column) = std::ranges::max( + columnMaxStringWidth.at(column) = ql::ranges::max( columnMaxStringWidth.at(column), columnNames_.at(column).length()); } @@ -384,7 +384,7 @@ void ResultGroup::deleteEntryImpl(T& entry) { }(); // Delete `entry`. - auto entryIterator{std::ranges::find( + auto entryIterator{ql::ranges::find( vec, &entry, [](const ad_utility::CopyableUniquePtr& pointer) { return pointer.get(); })}; diff --git a/benchmark/infrastructure/BenchmarkToJson.cpp b/benchmark/infrastructure/BenchmarkToJson.cpp index c00b478c4c..b5430304bf 100644 --- a/benchmark/infrastructure/BenchmarkToJson.cpp +++ b/benchmark/infrastructure/BenchmarkToJson.cpp @@ -46,8 +46,8 @@ static nlohmann::json transformIntoJsonArray( */ nlohmann::ordered_json jsonArray = nlohmann::ordered_json::array(); - std::ranges::transform(vec, std::back_inserter(jsonArray), - translationFunction); + ql::ranges::transform(vec, std::back_inserter(jsonArray), + translationFunction); return jsonArray; } diff --git a/benchmark/util/ResultTableColumnOperations.h b/benchmark/util/ResultTableColumnOperations.h index bf0ea96f6d..46883c6208 100644 --- a/benchmark/util/ResultTableColumnOperations.h +++ b/benchmark/util/ResultTableColumnOperations.h @@ -37,8 +37,8 @@ requires(sizeof...(ColumnInputTypes) > 0) void generateColumnWithColumnInput( // Using a column more than once is the sign of an error. std::array allColumnNums{ {inputColumns.columnNum_...}}; - std::ranges::sort(allColumnNums); - AD_CONTRACT_CHECK(std::ranges::adjacent_find(allColumnNums) == + ql::ranges::sort(allColumnNums); + AD_CONTRACT_CHECK(ql::ranges::adjacent_find(allColumnNums) == allColumnNums.end()); // Fill the result column. 
diff --git a/src/engine/AddCombinedRowToTable.h b/src/engine/AddCombinedRowToTable.h index 45f51fb527..6aa8186631 100644 --- a/src/engine/AddCombinedRowToTable.h +++ b/src/engine/AddCombinedRowToTable.h @@ -356,7 +356,7 @@ class AddCombinedRowToIdTable { // Only merge non-null vocabs.
auto range = currentVocabs_ | ql::views::filter(toBool) | ql::views::transform(dereference); - mergedVocab_.mergeWith(std::ranges::ref_view{range}); + mergedVocab_.mergeWith(ql::ranges::ref_view{range}); } } const IdTableView<0>& inputLeft() const { diff --git a/src/engine/CartesianProductJoin.cpp b/src/engine/CartesianProductJoin.cpp index cedb832648..3a4e6651d1 100644 --- a/src/engine/CartesianProductJoin.cpp +++ b/src/engine/CartesianProductJoin.cpp @@ -182,9 +182,9 @@ VariableToColumnMap CartesianProductJoin::computeVariableToColumnMap() const { } // _____________________________________________________________________________ -IdTable CartesianProductJoin::writeAllColumns( - std::ranges::random_access_range auto idTables, size_t offset, size_t limit, - size_t lastTableOffset) const { +CPP_template_def(typename R)(requires ql::ranges::random_access_range) + IdTable CartesianProductJoin::writeAllColumns( + R idTables, size_t offset, size_t limit, size_t lastTableOffset) const { AD_CORRECTNESS_CHECK(offset >= lastTableOffset); IdTable result{getResultWidth(), getExecutionContext()->getAllocator()}; // TODO Find a solution to cheaply handle the case, that only a @@ -302,12 +302,14 @@ CartesianProductJoin::calculateSubResults(bool requestLaziness) { } // _____________________________________________________________________________ -Result::Generator CartesianProductJoin::produceTablesLazily( - LocalVocab mergedVocab, std::ranges::range auto idTables, size_t offset, - size_t limit, size_t lastTableOffset) const { +CPP_template_def(typename R)(requires ql::ranges::range) Result::Generator + CartesianProductJoin::produceTablesLazily(LocalVocab mergedVocab, + R idTables, size_t offset, + size_t limit, + size_t lastTableOffset) const { while (limit > 0) { uint64_t limitWithChunkSize = std::min(limit, chunkSize_); - IdTable idTable = writeAllColumns(std::ranges::ref_view(idTables), offset, + IdTable idTable = writeAllColumns(ql::ranges::ref_view(idTables), offset, 
limitWithChunkSize, lastTableOffset); size_t tableSize = idTable.size(); AD_CORRECTNESS_CHECK(tableSize <= limit); diff --git a/src/engine/CartesianProductJoin.h b/src/engine/CartesianProductJoin.h index 8c0a071c98..9988cb72e6 100644 --- a/src/engine/CartesianProductJoin.h +++ b/src/engine/CartesianProductJoin.h @@ -94,9 +94,9 @@ class CartesianProductJoin : public Operation { // rows to write at most. `lastTableOffset` is the offset of the last table, // to account for cases where the last table does not cover the whole result // and so index 0 of a table does not correspond to row 0 of the result. - IdTable writeAllColumns(std::ranges::random_access_range auto idTables, - size_t offset, size_t limit, - size_t lastTableOffset = 0) const; + CPP_template(typename R)(requires ql::ranges::random_access_range) IdTable + writeAllColumns(R idTables, size_t offset, size_t limit, + size_t lastTableOffset = 0) const; // Calculate the subresults of the children and store them into a vector. If // the rightmost child can produce a lazy result, it will be stored outside of @@ -114,10 +114,9 @@ class CartesianProductJoin : public Operation { // `lastTableOffset` is the offset of the last table in the range. This is // used to handle `IdTable`s yielded by generators where the range of indices // they represent do not cover the whole result. - Result::Generator produceTablesLazily(LocalVocab mergedVocab, - std::ranges::range auto idTables, - size_t offset, size_t limit, - size_t lastTableOffset = 0) const; + CPP_template(typename R)(requires ql::ranges::range) Result::Generator + produceTablesLazily(LocalVocab mergedVocab, R idTables, size_t offset, + size_t limit, size_t lastTableOffset = 0) const; // Similar to `produceTablesLazily` but can handle a single lazy result. 
Result::Generator createLazyConsumer( diff --git a/src/engine/Describe.cpp b/src/engine/Describe.cpp index 5fb01dbe6c..a0c43222d2 100644 --- a/src/engine/Describe.cpp +++ b/src/engine/Describe.cpp @@ -53,10 +53,9 @@ string Describe::getCacheKeyImpl() const { const auto& defaultGraphs = describe_.datasetClauses_.defaultGraphs_; if (defaultGraphs.has_value()) { std::vector graphIdVec; - std::ranges::transform(defaultGraphs.value(), - std::back_inserter(graphIdVec), - &TripleComponent::toRdfLiteral); - std::ranges::sort(graphIdVec); + ql::ranges::transform(defaultGraphs.value(), std::back_inserter(graphIdVec), + &TripleComponent::toRdfLiteral); + ql::ranges::sort(graphIdVec); absl::StrAppend(&result, "\nFiltered by Graphs:", absl::StrJoin(graphIdVec, " ")); } @@ -218,7 +217,7 @@ IdTable Describe::getIdsToDescribe(const Result& result, // Copy the `Id`s from the hash set to an `IdTable`. IdTable idsAsTable{1, allocator()}; idsAsTable.resize(idsToDescribe.size()); - std::ranges::copy(idsToDescribe, idsAsTable.getColumn(0).begin()); + ql::ranges::copy(idsToDescribe, idsAsTable.getColumn(0).begin()); return idsAsTable; } diff --git a/src/engine/Distinct.cpp b/src/engine/Distinct.cpp index 2938bc30e6..244e5d5c7c 100644 --- a/src/engine/Distinct.cpp +++ b/src/engine/Distinct.cpp @@ -97,7 +97,7 @@ IdTable Distinct::distinct( LOG(DEBUG) << "Distinct on " << dynInput.size() << " elements.\n"; IdTableStatic result = std::move(dynInput).toStatic(); - // Variant of `std::ranges::unique` that allows to skip the begin rows of + // Variant of `ql::ranges::unique` that allows to skip the begin rows of // elements found in the previous table. auto begin = ql::ranges::find_if(result, [this, &previousRow](const auto& row) { diff --git a/src/engine/LocalVocab.h b/src/engine/LocalVocab.h index e8bd3be550..4dea0e31cc 100644 --- a/src/engine/LocalVocab.h +++ b/src/engine/LocalVocab.h @@ -113,8 +113,8 @@ class LocalVocab { // to this local vocab. 
The purpose is to keep all the contained // `LocalVocabEntry`s alive as long as this `LocalVocab` is alive. The // primary set of this `LocalVocab` remains unchanged. - template - void mergeWith(const R& vocabs) { + CPP_template(typename R)(requires ql::ranges::range) void mergeWith( + const R& vocabs) { using ql::views::filter; auto addWordSet = [this](const std::shared_ptr& set) { bool added = otherWordSets_.insert(set).second; diff --git a/src/engine/MultiColumnJoin.cpp b/src/engine/MultiColumnJoin.cpp index bb3e4e5995..75852fb69a 100644 --- a/src/engine/MultiColumnJoin.cpp +++ b/src/engine/MultiColumnJoin.cpp @@ -247,7 +247,7 @@ void MultiColumnJoin::computeMultiColumnJoin( // this case we can use a much cheaper algorithm. // TODO There are many other cases where a cheaper implementation can // be chosen, but we leave those for another PR, this is the most common case. - namespace stdr = std::ranges; + namespace stdr = ql::ranges; bool isCheap = stdr::none_of(joinColumns, [&](const auto& jcs) { auto [leftCol, rightCol] = jcs; return (stdr::any_of(right.getColumn(rightCol), &Id::isUndefined)) || diff --git a/src/engine/QueryPlanner.cpp b/src/engine/QueryPlanner.cpp index 9dd6b5599c..b7ede410be 100644 --- a/src/engine/QueryPlanner.cpp +++ b/src/engine/QueryPlanner.cpp @@ -1342,8 +1342,12 @@ size_t QueryPlanner::countSubgraphs( // Remove duplicate plans from `graph`. auto getId = [](const SubtreePlan* v) { return v->_idsOfIncludedNodes; }; ql::ranges::sort(graph, ql::ranges::less{}, getId); - graph.erase(std::ranges::unique(graph, ql::ranges::equal_to{}, getId).begin(), - graph.end()); + auto uniqueIter = ql::ranges::unique(graph, ql::ranges::equal_to{}, getId); +#ifdef QLEVER_CPP_17 + graph.erase(uniqueIter, graph.end()); +#else + graph.erase(uniqueIter.begin(), graph.end()); +#endif // Qlever currently limits the number of triples etc. per group to be <= 64 // anyway, so we can simply assert here. 
diff --git a/src/engine/Result.h b/src/engine/Result.h index 10b7364a3e..c372cf7102 100644 --- a/src/engine/Result.h +++ b/src/engine/Result.h @@ -173,8 +173,8 @@ class Result { // the name of the function called with the local vocab as argument): // // ExportQueryExecutionTrees::idTableToQLeverJSONArray (idToStringAndType) - // ExportQueryExecutionTrees::selectQueryResultToSparqlJSON (dito) - // ExportQueryExecutionTrees::selectQueryResultToStream (dito) + // ExportQueryExecutionTrees::selectQueryResultToSparqlJSON (ditto) + // ExportQueryExecutionTrees::selectQueryResultToStream (ditto) // Filter::computeFilterImpl (evaluationContext) // Variable::evaluate (idToStringAndType) // @@ -197,9 +197,11 @@ class Result { const Result& result2); // Overload for more than two `Results` - template - requires std::convertible_to, const Result&> - static SharedLocalVocabWrapper getMergedLocalVocab(R&& subResults) { + CPP_template(typename R)( + requires ql::ranges::forward_range CPP_and + std::convertible_to, + const Result&>) static SharedLocalVocabWrapper + getMergedLocalVocab(R&& subResults) { std::vector vocabs; for (const Result& table : subResults) { vocabs.push_back(&table.localVocab()); diff --git a/src/engine/Service.cpp b/src/engine/Service.cpp index 21338be71b..a2791055c0 100644 --- a/src/engine/Service.cpp +++ b/src/engine/Service.cpp @@ -565,7 +565,7 @@ void Service::precomputeSiblingResult(std::shared_ptr left, auto partialResultGenerator = [](std::vector pairs, Result::LazyResult prevGenerator, - std::ranges::iterator_t it) -> Result::Generator { + ql::ranges::iterator_t it) -> Result::Generator { for (auto& pair : pairs) { co_yield pair; } diff --git a/src/engine/TextIndexScanForEntity.cpp b/src/engine/TextIndexScanForEntity.cpp index 6dbce07ef5..276bd4af75 100644 --- a/src/engine/TextIndexScanForEntity.cpp +++ b/src/engine/TextIndexScanForEntity.cpp @@ -20,10 +20,14 @@ ProtoResult TextIndexScanForEntity::computeResult( word_, 
getExecutionContext()->getAllocator()); if (hasFixedEntity()) { - auto beginErase = std::ranges::remove_if(idTable, [this](const auto& row) { + auto beginErase = ql::ranges::remove_if(idTable, [this](const auto& row) { return row[1].getVocabIndex() != getVocabIndexOfFixedEntity(); }); +#ifdef QLEVER_CPP_17 + idTable.erase(beginErase, idTable.end()); +#else idTable.erase(beginErase.begin(), idTable.end()); +#endif idTable.setColumnSubset(std::vector{0, 2}); } diff --git a/src/engine/TransitivePathBase.h b/src/engine/TransitivePathBase.h index a223e06d95..71607eed5e 100644 --- a/src/engine/TransitivePathBase.h +++ b/src/engine/TransitivePathBase.h @@ -52,7 +52,7 @@ struct TransitivePathSide { auto [tree, col] = treeAndCol_.value(); const std::vector& sortedOn = tree->getRootOperation()->getResultSortedOn(); - // TODO use std::ranges::starts_with + // TODO use ql::ranges::starts_with return (!sortedOn.empty() && sortedOn[0] == col); } }; diff --git a/src/engine/TransitivePathImpl.h b/src/engine/TransitivePathImpl.h index 3e15141114..6898468595 100644 --- a/src/engine/TransitivePathImpl.h +++ b/src/engine/TransitivePathImpl.h @@ -248,9 +248,9 @@ class TransitivePathImpl : public TransitivePathBase { * LocalVocab is a no-op). 
* @return Map Maps each Id to its connected Ids in the transitive hull */ - NodeGenerator transitiveHull(const T& edges, LocalVocab edgesVocab, - std::ranges::range auto startNodes, - std::optional target, bool yieldOnce) const { + CPP_template(typename Node)(requires ql::ranges::range) NodeGenerator + transitiveHull(const T& edges, LocalVocab edgesVocab, Node startNodes, + std::optional target, bool yieldOnce) const { ad_utility::Timer timer{ad_utility::Timer::Stopped}; for (auto&& tableColumn : startNodes) { timer.cont(); diff --git a/src/engine/idTable/CompressedExternalIdTable.h b/src/engine/idTable/CompressedExternalIdTable.h index 12bc406e16..970777c52d 100644 --- a/src/engine/idTable/CompressedExternalIdTable.h +++ b/src/engine/idTable/CompressedExternalIdTable.h @@ -714,20 +714,20 @@ class CompressedExternalIdTableSorter for (auto& gen : rowGenerators) { pq.emplace_back(gen.begin(), gen.end()); } - std::ranges::make_heap(pq, comp); + ql::ranges::make_heap(pq, comp); IdTableStatic result(this->writer_.numColumns(), this->writer_.allocator()); result.reserve(blockSizeOutput); size_t numPopped = 0; while (!pq.empty()) { - std::ranges::pop_heap(pq, comp); + ql::ranges::pop_heap(pq, comp); auto& min = pq.back(); result.push_back(*min.first); ++(min.first); if (min.first == min.second) { pq.pop_back(); } else { - std::ranges::push_heap(pq, comp); + ql::ranges::push_heap(pq, comp); } if (result.size() >= blockSizeOutput) { numPopped += result.numRows(); diff --git a/src/engine/idTable/IdTable.h b/src/engine/idTable/IdTable.h index 9e57073602..5a046b80f6 100644 --- a/src/engine/idTable/IdTable.h +++ b/src/engine/idTable/IdTable.h @@ -13,6 +13,7 @@ #include #include +#include "backports/algorithm.h" #include "engine/idTable/IdTableRow.h" #include "engine/idTable/VectorWithElementwiseMove.h" #include "global/Id.h" @@ -203,8 +204,8 @@ class IdTable { // fails. Additional columns (if `columns.size() > numColumns`) are deleted. 
// This behavior is useful for unit tests Where we can just generically pass // in more columns than are needed in any test. - IdTable(size_t numColumns, std::ranges::forward_range auto columns) - requires(!isView) + CPP_template(typename ColT)(requires ql::ranges::forward_range) + IdTable(size_t numColumns, ColT columns) requires(!isView) : data_{std::make_move_iterator(columns.begin()), std::make_move_iterator(columns.end())}, numColumns_{numColumns} { @@ -364,7 +365,7 @@ class IdTable { } // The usual `front` and `back` functions to make the interface similar to - // `std::vector` aand other containers. + // `std::vector` and other containers. // TODO Remove the duplicates via explicit object parameters // ("deducing this"). row_reference_restricted front() requires(!isView) { return (*this)[0]; } @@ -430,9 +431,11 @@ class IdTable { // otherwise the behavior is undefined (in Release mode) or an assertion will // fail (in Debug mode). The `newRow` can be any random access range that // stores the right type and has the right size. 
- template - requires std::same_as, T> - void push_back(const RowLike& newRow) requires(!isView) { + CPP_template(typename RowLike)( + requires ql::ranges::random_access_range CPP_and + std::same_as, + T>) void push_back(const RowLike& newRow) + requires(!isView) { AD_EXPENSIVE_CHECK(newRow.size() == numColumns()); ++numRows_; ql::ranges::for_each(ad_utility::integerRange(numColumns()), @@ -442,7 +445,7 @@ class IdTable { } void push_back(const std::initializer_list& newRow) requires(!isView) { - push_back(std::ranges::ref_view{newRow}); + push_back(ql::ranges::ref_view{newRow}); } // True iff we can make a copy (via the `clone` function below), because the diff --git a/src/engine/sparqlExpressions/AggregateExpression.h b/src/engine/sparqlExpressions/AggregateExpression.h index c5f77b67a1..71a0a6b36f 100644 --- a/src/engine/sparqlExpressions/AggregateExpression.h +++ b/src/engine/sparqlExpressions/AggregateExpression.h @@ -28,9 +28,9 @@ inline auto getUniqueElements = []( const EvaluationContext* context, size_t inputSize, OperandGenerator operandGenerator) - -> cppcoro::generator> { + -> cppcoro::generator> { ad_utility::HashSetWithMemoryLimit< - std::ranges::range_value_t> + ql::ranges::range_value_t> uniqueHashSet(inputSize, context->_allocator); for (auto& operand : operandGenerator) { if (uniqueHashSet.insert(operand).second) { diff --git a/src/engine/sparqlExpressions/PrefilterExpressionIndex.cpp b/src/engine/sparqlExpressions/PrefilterExpressionIndex.cpp index 616d432429..f32097547a 100644 --- a/src/engine/sparqlExpressions/PrefilterExpressionIndex.cpp +++ b/src/engine/sparqlExpressions/PrefilterExpressionIndex.cpp @@ -124,9 +124,9 @@ static auto getSetUnion(const std::vector& blocks1, return b1.blockIndex_ < b2.blockIndex_; }; // Given that we have vectors with sorted (BlockMedata) values, we can - // use std::ranges::set_union. Thus the complexity is O(n + m). 
- std::ranges::set_union(blocks1, blocks2, std::back_inserter(mergedVectors), - blockLessThanBlock); + // use ql::ranges::set_union. Thus the complexity is O(n + m). + ql::ranges::set_union(blocks1, blocks2, std::back_inserter(mergedVectors), + blockLessThanBlock); mergedVectors.shrink_to_fit(); return mergedVectors; } diff --git a/src/engine/sparqlExpressions/RelationalExpressions.cpp b/src/engine/sparqlExpressions/RelationalExpressions.cpp index 603e334c69..a23ce3da95 100644 --- a/src/engine/sparqlExpressions/RelationalExpressions.cpp +++ b/src/engine/sparqlExpressions/RelationalExpressions.cpp @@ -150,7 +150,7 @@ requires AreComparable ExpressionResult evaluateRelationalExpression( auto impl = [&](const auto& value2) -> std::optional { auto columnIndex = context->getColumnIndexForVariable(value1); auto valueId = makeValueId(value2, context); - // TODO Use `std::ranges::starts_with`. + // TODO Use `ql::ranges::starts_with`. if (const auto& cols = context->_columnsByWhichResultIsSorted; !cols.empty() && cols[0] == columnIndex) { constexpr static bool value2IsString = diff --git a/src/engine/sparqlExpressions/SparqlExpressionGenerators.h b/src/engine/sparqlExpressions/SparqlExpressionGenerators.h index 33b46914d0..d7209cacab 100644 --- a/src/engine/sparqlExpressions/SparqlExpressionGenerators.h +++ b/src/engine/sparqlExpressions/SparqlExpressionGenerators.h @@ -50,9 +50,9 @@ cppcoro::generator>> } } -template -requires(std::ranges::input_range) -auto resultGenerator(T&& vector, size_t numItems, Transformation transformation = {}) { +CPP_template(typename T, typename Transformation = std::identity)( + requires ql::ranges::input_range) auto resultGenerator(T&& vector, size_t numItems, + Transformation transformation = {}) { AD_CONTRACT_CHECK(numItems == vector.size()); return ad_utility::allView(AD_FWD(vector)) | ql::views::transform(std::move(transformation)); } @@ -110,7 +110,7 @@ inline auto valueGetterGenerator = []( Function&& function, size_t numItems, 
Generators... generators) -> cppcoro::generator< - std::invoke_result_t...>> { + std::invoke_result_t...>> { // A tuple holding one iterator to each of the generators. std::tuple iterators{generators.begin()...}; diff --git a/src/global/ValueIdComparators.h b/src/global/ValueIdComparators.h index bfbf7bb551..62338ef4ed 100644 --- a/src/global/ValueIdComparators.h +++ b/src/global/ValueIdComparators.h @@ -361,7 +361,7 @@ inline auto simplifyRanges = return input; } // Merge directly adjacent ranges. - // TODO use `std::ranges` + // TODO use `ql::ranges` decltype(input) result; result.push_back(input.front()); for (auto it = input.begin() + 1; it != input.end(); ++it) { diff --git a/src/index/CompressedRelation.cpp b/src/index/CompressedRelation.cpp index fc306e892f..bac482b72c 100644 --- a/src/index/CompressedRelation.cpp +++ b/src/index/CompressedRelation.cpp @@ -178,9 +178,13 @@ bool CompressedRelationReader::FilterDuplicatesAndGraphs:: }; }; if (needsFilteringByGraph) { - auto [beginOfRemoved, _] = std::ranges::remove_if( + auto removedRange = ql::ranges::remove_if( block, std::not_fn(isDesiredGraphId()), graphIdFromRow); - block.erase(beginOfRemoved, block.end()); +#ifdef QLEVER_CPP_17 + block.erase(removedRange, block.end()); +#else + block.erase(removedRange.begin(), block.end()); +#endif } else { AD_EXPENSIVE_CHECK( !desiredGraphs_.has_value() || @@ -194,10 +198,10 @@ bool CompressedRelationReader::FilterDuplicatesAndGraphs:: filterDuplicatesIfNecessary(IdTable& block, const CompressedBlockMetadata& blockMetadata) { if (!blockMetadata.containsDuplicatesWithDifferentGraphs_) { - AD_EXPENSIVE_CHECK(std::ranges::unique(block).begin() == block.end()); + AD_EXPENSIVE_CHECK(std::unique(block.begin(), block.end()) == block.end()); return false; } - auto [endUnique, _] = std::ranges::unique(block); + auto endUnique = std::unique(block.begin(), block.end()); block.erase(endUnique, block.end()); return true; } @@ -400,7 +404,7 @@ std::vector 
CompressedRelationReader::getBlocksForJoin( }; // `blockLessThanBlock` (a dummy) and `std::less` are only needed to - // fulfill a concept for the `std::ranges` algorithms. + // fulfill a concept for the `ql::ranges` algorithms. auto blockLessThanBlock = [](const CompressedBlockMetadata&, const CompressedBlockMetadata&) @@ -422,7 +426,8 @@ std::vector CompressedRelationReader::getBlocksForJoin( ql::ranges::copy(relevantBlocks | ql::views::filter(blockIsNeeded), std::back_inserter(result)); // The following check is cheap as there are only few blocks. - AD_CORRECTNESS_CHECK(std::ranges::unique(result).empty()); + AD_CORRECTNESS_CHECK(std::unique(result.begin(), result.end()) == + result.end()); return result; } @@ -482,7 +487,8 @@ CompressedRelationReader::getBlocksForJoin( } } // The following check isn't expensive as there are only few blocks. - AD_CORRECTNESS_CHECK(std::ranges::unique(result).begin() == result.end()); + AD_CORRECTNESS_CHECK(std::unique(result.begin(), result.end()) == + result.end()); return result; }; @@ -948,7 +954,7 @@ static std::pair>> getGraphInfo( ql::ranges::copy(block->getColumn(ADDITIONAL_COLUMN_GRAPH_ID), std::back_inserter(graphColumn)); ql::ranges::sort(graphColumn); - auto [endOfUnique, _] = std::ranges::unique(graphColumn); + auto endOfUnique = std::unique(graphColumn.begin(), graphColumn.end()); size_t numGraphs = endOfUnique - graphColumn.begin(); if (numGraphs > MAX_NUM_GRAPHS_STORED_IN_BLOCK_METADATA) { return std::nullopt; @@ -1522,9 +1528,10 @@ CompressedRelationReader::getMetadataForSmallRelation( } // The `col1` is sorted, so we compute the multiplicity using - // `std::ranges::unique`. - auto endOfUnique = std::ranges::unique(block.getColumn(0)); - size_t numDistinct = endOfUnique.begin() - block.getColumn(0).begin(); + // `std::unique`. 
+ const auto& blockCol = block.getColumn(0); + auto endOfUnique = std::unique(blockCol.begin(), blockCol.end()); + size_t numDistinct = endOfUnique - blockCol.begin(); metadata.numRows_ = block.size(); metadata.multiplicityCol1_ = CompressedRelationWriter::computeMultiplicity(block.size(), numDistinct); diff --git a/src/index/DeltaTriples.cpp b/src/index/DeltaTriples.cpp index 8b69c8d363..3460babfd7 100644 --- a/src/index/DeltaTriples.cpp +++ b/src/index/DeltaTriples.cpp @@ -146,8 +146,8 @@ void DeltaTriples::modifyTriplesImpl(CancellationHandle cancellationHandle, TriplesToHandlesMap& inverseMap) { rewriteLocalVocabEntriesAndBlankNodes(triples); ql::ranges::sort(triples); - auto [first, last] = std::ranges::unique(triples); - triples.erase(first, last); + auto first = std::unique(triples.begin(), triples.end()); + triples.erase(first, triples.end()); std::erase_if(triples, [&targetMap](const IdTriple<0>& triple) { return targetMap.contains(triple); }); diff --git a/src/index/IndexImpl.Text.cpp b/src/index/IndexImpl.Text.cpp index d1e6a70777..81c135fdea 100644 --- a/src/index/IndexImpl.Text.cpp +++ b/src/index/IndexImpl.Text.cpp @@ -747,7 +747,7 @@ IdTable IndexImpl::readWordCl( idTable.getColumn(1).begin(), [](WordIndex id) { return Id::makeFromWordVocabIndex(WordVocabIndex::make(id)); }); - std::ranges::transform( + ql::ranges::transform( readFreqComprList(tbmd._cl._nofElements, tbmd._cl._startScorelist, static_cast(tbmd._cl._lastByte + 1 - tbmd._cl._startScorelist)), diff --git a/src/index/IndexImpl.cpp b/src/index/IndexImpl.cpp index 1f7279cd3c..e718658b9b 100644 --- a/src/index/IndexImpl.cpp +++ b/src/index/IndexImpl.cpp @@ -684,10 +684,11 @@ auto IndexImpl::convertPartialToGlobalIds( for (Buffer::row_reference triple : *triples) { transformTriple(triple, *idMap); } - auto [beginInternal, endInternal] = std::ranges::partition( - *triples, [&isQLeverInternalTriple](const auto& row) { - return !isQLeverInternalTriple(row); - }); + auto beginInternal = + 
std::partition(triples->begin(), triples->end(), + [&isQLeverInternalTriple](const auto& row) { + return !isQLeverInternalTriple(row); + }); IdTableStatic internalTriples( triples->getAllocator()); // TODO We could leave the partitioned complete block as is, @@ -695,7 +696,7 @@ auto IndexImpl::convertPartialToGlobalIds( // push only a part of a block. We then would safe the copy of the // internal triples here, but I am not sure whether this is worth it. internalTriples.insertAtEnd(*triples, beginInternal - triples->begin(), - endInternal - triples->begin()); + triples->end() - triples->begin()); triples->resize(beginInternal - triples->begin()); Buffers buffers{std::move(*triples), std::move(internalTriples)}; diff --git a/src/index/StringSortComparator.h b/src/index/StringSortComparator.h index da0324ff5e..81829f226e 100644 --- a/src/index/StringSortComparator.h +++ b/src/index/StringSortComparator.h @@ -313,7 +313,7 @@ class LocaleManager { * different steps in icu. */ std::unique_ptr _collator[6]; UColAttributeValue _ignorePunctuationStatus = - UCOL_NON_IGNORABLE; // how to sort punctuations etc. + UCOL_NON_IGNORABLE; // how to sort punctuation etc. 
const icu::Normalizer2* _normalizer = nullptr; // actually locale-independent but useful to be placed here diff --git a/src/index/vocabulary/VocabularyInMemoryBinSearch.cpp b/src/index/vocabulary/VocabularyInMemoryBinSearch.cpp index 268cc33721..95d082ce5a 100644 --- a/src/index/vocabulary/VocabularyInMemoryBinSearch.cpp +++ b/src/index/vocabulary/VocabularyInMemoryBinSearch.cpp @@ -33,7 +33,7 @@ std::optional VocabularyInMemoryBinSearch::operator[]( // _____________________________________________________________________________ WordAndIndex VocabularyInMemoryBinSearch::iteratorToWordAndIndex( - std::ranges::iterator_t it) const { + ql::ranges::iterator_t it) const { if (it == words_.end()) { return WordAndIndex::end(); } diff --git a/src/index/vocabulary/VocabularyInMemoryBinSearch.h b/src/index/vocabulary/VocabularyInMemoryBinSearch.h index 494caeaf66..8367c1e965 100644 --- a/src/index/vocabulary/VocabularyInMemoryBinSearch.h +++ b/src/index/vocabulary/VocabularyInMemoryBinSearch.h @@ -60,7 +60,7 @@ class VocabularyInMemoryBinSearch std::optional operator[](uint64_t index) const; // Convert an iterator to a `WordAndIndex`. Required for the mixin. - WordAndIndex iteratorToWordAndIndex(std::ranges::iterator_t it) const; + WordAndIndex iteratorToWordAndIndex(ql::ranges::iterator_t it) const; // A helper type that can be used to directly write a vocabulary to disk // word-by-word, without having to materialize it in RAM first. diff --git a/src/index/vocabulary/VocabularyInternalExternal.h b/src/index/vocabulary/VocabularyInternalExternal.h index b73066a5f1..f9024369bd 100644 --- a/src/index/vocabulary/VocabularyInternalExternal.h +++ b/src/index/vocabulary/VocabularyInternalExternal.h @@ -123,11 +123,11 @@ class VocabularyInternalExternal { // Convert an iterator (which can be an iterator to the external or internal // vocabulary) into the corresponding index by (logically) subtracting // `begin()`. 
- uint64_t iteratorToIndex(std::ranges::iterator_t it) const { + uint64_t iteratorToIndex(ql::ranges::iterator_t it) const { return it - externalVocab_.begin(); } uint64_t iteratorToIndex( - std::ranges::iterator_t it) const { + ql::ranges::iterator_t it) const { return internalVocab_.indices().at(it - internalVocab_.begin()); } diff --git a/src/parser/sparqlParser/SparqlQleverVisitor.cpp b/src/parser/sparqlParser/SparqlQleverVisitor.cpp index f23530f820..e22454cfd7 100644 --- a/src/parser/sparqlParser/SparqlQleverVisitor.cpp +++ b/src/parser/sparqlParser/SparqlQleverVisitor.cpp @@ -311,8 +311,8 @@ ParsedQuery Visitor::visit(Parser::DescribeQueryContext* ctx) { if (describedResources.empty()) { const auto& visibleVariables = parsedQuery_.selectClause().getVisibleVariables(); - std::ranges::copy(visibleVariables, - std::back_inserter(describeClause.resources_)); + ql::ranges::copy(visibleVariables, + std::back_inserter(describeClause.resources_)); describedVariables = visibleVariables; } auto& selectClause = parsedQuery_.selectClause(); diff --git a/src/util/Algorithm.h b/src/util/Algorithm.h index c2229e0e82..50e23f8244 100644 --- a/src/util/Algorithm.h +++ b/src/util/Algorithm.h @@ -129,11 +129,12 @@ std::vector flatten(std::vector>&& input) { // used to keep track of which values we have already seen. One of these // copies could be avoided, but our current uses of this function are // currently not at all performance-critical (small `input` and small `T`). 
-template -auto removeDuplicates(const Range& input) -> std::vector< - typename std::iterator_traits>::value_type> { +CPP_template(typename Range)(requires ql::ranges::forward_range< + Range>) auto removeDuplicates(const Range& input) + -> std::vector>::value_type> { using T = - typename std::iterator_traits>::value_type; + typename std::iterator_traits>::value_type; std::vector result; ad_utility::HashSet distinctElements; for (const T& element : input) { diff --git a/src/util/AsioHelpers.h b/src/util/AsioHelpers.h index 2e75cd2048..6ae990a44d 100644 --- a/src/util/AsioHelpers.h +++ b/src/util/AsioHelpers.h @@ -133,7 +133,7 @@ inline net::awaitable interruptible( running->clear(); net::dispatch(strand, [timer = std::move(timer)]() { timer->cancel(); }); }; - // Provide callback to outer world in order to cancel the timer pre-emptively. + // Provide callback to outer world in order to cancel the timer preemptively. cancelCallback.set_value(cancelTimer); auto timerLoop = [](std::shared_ptr timer, diff --git a/src/util/BlankNodeManager.h b/src/util/BlankNodeManager.h index 3ff7613768..75e4427c2e 100644 --- a/src/util/BlankNodeManager.h +++ b/src/util/BlankNodeManager.h @@ -90,8 +90,8 @@ class BlankNodeManager { // Merge passed `LocalBlankNodeManager`s to keep alive their reserved // BlankNodeIndex blocks. - template - void mergeWith(const R& localBlankNodeManagers) { + CPP_template(typename R)(requires ql::ranges::range) void mergeWith( + const R& localBlankNodeManagers) { auto inserter = std::back_inserter(otherBlocks_); for (const auto& l : localBlankNodeManagers) { if (l == nullptr) { diff --git a/src/util/ChunkedForLoop.h b/src/util/ChunkedForLoop.h index 3e2dbdc4e8..97d2efff93 100644 --- a/src/util/ChunkedForLoop.h +++ b/src/util/ChunkedForLoop.h @@ -68,22 +68,22 @@ inline void chunkedForLoop(std::size_t start, std::size_t end, // Helper concept that combines the sized range and input range concepts. 
template concept SizedInputRange = - std::ranges::sized_range && std::ranges::input_range; + ql::ranges::sized_range && ql::ranges::input_range; // Similar to `ql::ranges::copy`, but invokes `chunkOperation` every // `chunkSize` elements. (Round up to the next chunk size if the range size is // not a multiple of `chunkSize`.) template inline void chunkedCopy(R&& inputRange, O result, - std::ranges::range_difference_t chunkSize, + ql::ranges::range_difference_t chunkSize, const std::invocable auto& chunkOperation) - requires std::indirectly_copyable, O> { + requires std::indirectly_copyable, O> { auto begin = ql::ranges::begin(inputRange); auto end = ql::ranges::end(inputRange); auto target = result; while (ql::ranges::distance(begin, end) >= chunkSize) { auto start = begin; - std::ranges::advance(begin, chunkSize); + ql::ranges::advance(begin, chunkSize); target = ql::ranges::copy(start, begin, target).out; chunkOperation(); } @@ -94,20 +94,20 @@ inline void chunkedCopy(R&& inputRange, O result, // Helper concept that combines the sized range and output range concepts. template concept SizedOutputRange = - std::ranges::sized_range && std::ranges::output_range; + ql::ranges::sized_range && ql::ranges::output_range; // Similar to `ql::ranges::fill`, but invokes `chunkOperation` every // `chunkSize` elements. (Round up to the next chunk size if the range size is // not a multiple of `chunkSize`.) 
template R> inline void chunkedFill(R&& outputRange, const T& value, - std::ranges::range_difference_t chunkSize, + ql::ranges::range_difference_t chunkSize, const std::invocable auto& chunkOperation) { auto begin = ql::ranges::begin(outputRange); auto end = ql::ranges::end(outputRange); while (ql::ranges::distance(begin, end) >= chunkSize) { auto start = begin; - std::ranges::advance(begin, chunkSize); + ql::ranges::advance(begin, chunkSize); ql::ranges::fill(start, begin, value); chunkOperation(); } diff --git a/src/util/Generators.h b/src/util/Generators.h index db232d0188..6e9d9becd2 100644 --- a/src/util/Generators.h +++ b/src/util/Generators.h @@ -20,7 +20,7 @@ namespace ad_utility { // discarded. If the cached value is still present once the generator is fully // consumed, `onFullyCached` is called with the cached value. template > + typename T = ql::ranges::range_value_t> cppcoro::generator wrapGeneratorWithCache( InputRange generator, InvocableWithExactReturnType&, const T&> auto diff --git a/src/util/JoinAlgorithms/JoinAlgorithms.h b/src/util/JoinAlgorithms/JoinAlgorithms.h index 89e1b6611f..9b6e8bcdbb 100644 --- a/src/util/JoinAlgorithms/JoinAlgorithms.h +++ b/src/util/JoinAlgorithms/JoinAlgorithms.h @@ -26,14 +26,14 @@ namespace ad_utility { // single argument of the `Range`'s iterator type (NOT value type). template concept UnaryIteratorFunction = - std::invocable>; + std::invocable>; // A function `F` fulfills `BinaryIteratorFunction` if it can be called with // two arguments of the `Range`'s iterator type (NOT value type). template concept BinaryIteratorFunction = - std::invocable, - std::ranges::iterator_t>; + std::invocable, + ql::ranges::iterator_t>; // Note: In the following functions, two rows of IDs are called `compatible` if // for each position they are equal, or at least one of them is UNDEF. 
This is @@ -81,19 +81,20 @@ concept BinaryIteratorFunction = * described cases leads to two sorted ranges in the output, this can possibly * be exploited to fix the result in a cheaper way than a full sort. */ -template -[[nodiscard]] auto zipperJoinWithUndef( - const Range1& left, const Range2& right, const LessThan& lessThan, - const auto& compatibleRowAction, - const FindSmallerUndefRangesLeft& findSmallerUndefRangesLeft, - const FindSmallerUndefRangesRight& findSmallerUndefRangesRight, - ElFromFirstNotFoundAction elFromFirstNotFoundAction = {}, - CheckCancellation checkCancellation = {}) { +CPP_template(typename Range1, typename Range2, typename LessThan, + typename FindSmallerUndefRangesLeft, + typename FindSmallerUndefRangesRight, + typename ElFromFirstNotFoundAction = decltype(noop), + typename CheckCancellation = decltype(noop))( + requires ql::ranges::random_access_range CPP_and + ql::ranges::random_access_range) + [[nodiscard]] auto zipperJoinWithUndef( + const Range1& left, const Range2& right, const LessThan& lessThan, + const auto& compatibleRowAction, + const FindSmallerUndefRangesLeft& findSmallerUndefRangesLeft, + const FindSmallerUndefRangesRight& findSmallerUndefRangesRight, + ElFromFirstNotFoundAction elFromFirstNotFoundAction = {}, + CheckCancellation checkCancellation = {}) { // If this is not an OPTIONAL join or a MINUS we can apply several // optimizations, so we store this information. 
static constexpr bool hasNotFoundAction = @@ -320,15 +321,20 @@ template -void gallopingJoin( - const RangeSmaller& smaller, const RangeLarger& larger, - auto const& lessThan, auto const& action, - ElementFromSmallerNotFoundAction elementFromSmallerNotFoundAction = {}, - CheckCancellation checkCancellation = {}) { +CPP_template(typename RangeSmaller, typename RangeLarger, + typename ElementFromSmallerNotFoundAction = Noop, + typename CheckCancellation = Noop)( + requires ql::ranges::random_access_range CPP_and + ql::ranges::random_access_range< + RangeLarger>) void gallopingJoin(const RangeSmaller& smaller, + const RangeLarger& larger, + auto const& lessThan, + auto const& action, + ElementFromSmallerNotFoundAction + elementFromSmallerNotFoundAction = + {}, + CheckCancellation + checkCancellation = {}) { auto itSmall = std::begin(smaller); auto endSmall = std::end(smaller); auto itLarge = std::begin(larger); @@ -669,7 +675,7 @@ struct JoinSide { CurrentBlocks undefBlocks_{}; // Type aliases for a single element from a block from the left/right input. - using value_type = std::ranges::range_value_t>; + using value_type = ql::ranges::range_value_t>; // Type alias for the result of the projection. using ProjectedEl = std::decay_t>; diff --git a/src/util/ParallelMultiwayMerge.h b/src/util/ParallelMultiwayMerge.h index 27db2e4e69..fa17cd57a8 100644 --- a/src/util/ParallelMultiwayMerge.h +++ b/src/util/ParallelMultiwayMerge.h @@ -35,15 +35,15 @@ constexpr auto pushSingleElement = // This concept is fulfilled if `Range` is a range that stores values of type // `T`. template -concept RangeWithValue = std::ranges::range && - std::same_as, T>; +concept RangeWithValue = ql::ranges::range && + std::same_as, T>; // Fulfilled if `Range` is a random access range the elements of which are // ranges of elements of type `T`, e.g. `std::vector>`. 
template concept RandomAccessRangeOfRanges = - std::ranges::random_access_range && - RangeWithValue, T>; + ql::ranges::random_access_range && + RangeWithValue, T>; // Merge the elements from the presorted ranges `range1` and `range2` according // to the `comparator`. The result of the merging will be yielded in blocks of diff --git a/src/util/StringUtils.h b/src/util/StringUtils.h index 493e7759cc..9a629cc72d 100644 --- a/src/util/StringUtils.h +++ b/src/util/StringUtils.h @@ -105,17 +105,18 @@ streams. @param separator Will be put between each of the string representations of the range elements. */ -template -requires ad_utility::Streamable< - std::iter_reference_t>> -void lazyStrJoin(std::ostream* stream, Range&& r, std::string_view separator); +CPP_template(typename Range)( + requires ql::ranges::input_range CPP_and + ad_utility::Streamable>>) void lazyStrJoin(std::ostream* stream, Range&& r, + std::string_view separator); // Similar to the overload of `lazyStrJoin` above, but the result is returned as // a string. 
-template -requires ad_utility::Streamable< - std::iter_reference_t>> -std::string lazyStrJoin(Range&& r, std::string_view separator); +CPP_template(typename Range)( + requires ql::ranges::input_range CPP_and ad_utility::Streamable< + std::iter_reference_t>>) std::string + lazyStrJoin(Range&& r, std::string_view separator); /* @brief Adds indentation before the given string and directly after new line @@ -190,10 +191,11 @@ constexpr bool constantTimeEquals(std::string_view view1, } // _________________________________________________________________________ -template -requires ad_utility::Streamable< - std::iter_reference_t>> -void lazyStrJoin(std::ostream* stream, Range&& r, std::string_view separator) { +CPP_template(typename Range)( + requires ql::ranges::input_range CPP_and + ad_utility::Streamable>>) void lazyStrJoin(std::ostream* stream, Range&& r, + std::string_view separator) { auto begin = std::begin(r); auto end = std::end(r); @@ -215,10 +217,10 @@ void lazyStrJoin(std::ostream* stream, Range&& r, std::string_view separator) { } // _________________________________________________________________________ -template -requires ad_utility::Streamable< - std::iter_reference_t>> -std::string lazyStrJoin(Range&& r, std::string_view separator) { +CPP_template(typename Range)( + requires ql::ranges::input_range CPP_and ad_utility::Streamable< + std::iter_reference_t>>) std::string + lazyStrJoin(Range&& r, std::string_view separator) { std::ostringstream stream; lazyStrJoin(&stream, AD_FWD(r), separator); return std::move(stream).str(); diff --git a/test/OperationTest.cpp b/test/OperationTest.cpp index ec9ab35c7f..2afbe38a83 100644 --- a/test/OperationTest.cpp +++ b/test/OperationTest.cpp @@ -24,7 +24,7 @@ using Status = RuntimeInformation::Status; namespace { // Helper function to perform actions at various stages of a generator -template > +template > auto expectAtEachStageOfGenerator( Range generator, std::vector> functions, ad_utility::source_location l = 
ad_utility::source_location::current()) {