Faster hash modes test (#7973)
Summary:
AggregationTest.hashmodes covers the required cases with less data in about half the time. Fewer timeouts. Checks that the end state is kHash.

Pull Request resolved: #7973

Reviewed By: kevinwilfong

Differential Revision: D52055200

Pulled By: oerling

fbshipit-source-id: b1af9ff0e21ceb76d09f67560034bdc382420aff
Orri Erling authored and facebook-github-bot committed Dec 12, 2023
1 parent 2de6243 commit bcb03cd
Showing 2 changed files with 10 additions and 2 deletions.
1 change: 1 addition & 0 deletions velox/exec/HashTable.cpp
@@ -1231,6 +1231,7 @@ void HashTable<ignoreNullKeys>::rehash(bool initNormalizedKeys) {
 template <bool ignoreNullKeys>
 void HashTable<ignoreNullKeys>::setHashMode(HashMode mode, int32_t numNew) {
   VELOX_CHECK_NE(hashMode_, HashMode::kHash);
+  TestValue::adjust("facebook::velox::exec::HashTable::setHashMode", &mode);
   if (mode == HashMode::kArray) {
     const auto bytes = capacity_ * tableSlotSize();
     const auto numPages = memory::AllocationTraits::numPages(bytes);
11 changes: 9 additions & 2 deletions velox/exec/tests/AggregationTest.cpp
@@ -697,21 +697,28 @@ TEST_F(AggregationTest, hashmodes) {
   makeModeTestKeys(rowType, 20000, 2, 2, 2, 4, 4, 4, batches);
   // 20K rows with all at slightly higher cardinality, still in array range.
   makeModeTestKeys(rowType, 20000, 2, 2, 2, 4, 16, 4, batches);
-  // 100K rows with cardinality outside of array range. We transit to
+  // 25K rows with cardinality outside of array range. We transit to
   // generic hash table from normalized keys when running out of quota
   // for distinct string storage for the sixth key.
-  makeModeTestKeys(rowType, 100000, 1000000, 2, 2, 4, 4, 1000000, batches);
+  makeModeTestKeys(rowType, 25000, 1000000, 2, 2, 4, 4, 1000000, batches);
   createDuckDbTable(batches);
   auto op =
       PlanBuilder()
           .values(batches)
           .singleAggregation({"c0", "c1", "c2", "c3", "c4", "c5"}, {"sum(1)"})
           .planNode();

+  std::atomic<BaseHashTable::HashMode> mode{BaseHashTable::HashMode::kArray};
+  SCOPED_TESTVALUE_SET(
+      "facebook::velox::exec::HashTable::setHashMode",
+      std::function<void(void*)>([&](void* newMode) {
+        mode = *reinterpret_cast<BaseHashTable::HashMode*>(newMode);
+      }));
   assertQuery(
       op,
       "SELECT c0, c1, C2, C3, C4, C5, sum(1) FROM tmp "
       " GROUP BY c0, c1, c2, c3, c4, c5");
+  EXPECT_EQ(mode, BaseHashTable::HashMode::kHash);
 }

 TEST_F(AggregationTest, rangeToDistinct) {
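For context, the two hunks above use Velox's test-value hooks: the production code publishes the chosen hash mode at a named point via TestValue::adjust, and the test installs a callback with SCOPED_TESTVALUE_SET that records whatever value is passed there. The sketch below illustrates the same pattern in isolation. It is a minimal, hypothetical example: chooseMode, the "example::chooseMode" test point, and the TestValue.h include path are assumptions for illustration; only the TestValue::adjust / SCOPED_TESTVALUE_SET calls mirror the diff.

#include <atomic>
#include <functional>

#include <gtest/gtest.h>

#include "velox/common/testutil/TestValue.h" // assumed header path

using facebook::velox::common::testutil::TestValue;

// Hypothetical production-side function: it publishes its decision at a
// named point so that tests can observe (or even rewrite) it.
int chooseMode(int requested) {
  TestValue::adjust("example::chooseMode", &requested);
  return requested;
}

TEST(TestValueExampleTest, observesChosenMode) {
  // std::atomic because in a real pipeline the hook can fire on a driver
  // thread rather than on the test thread.
  std::atomic<int> observed{-1};
  SCOPED_TESTVALUE_SET(
      "example::chooseMode",
      std::function<void(void*)>([&](void* value) {
        observed = *reinterpret_cast<int*>(value);
      }));
  // Note: TestValue hooks are compiled in for debug builds and typically
  // need to be enabled in the test's main() (e.g. TestValue::enable());
  // otherwise adjust() is a no-op and the hook never fires.
  chooseMode(7);
  EXPECT_EQ(observed, 7);
}

In the diff, the observed mode is likewise held in a std::atomic, presumably because the aggregation runs on driver threads, so the callback and the final EXPECT_EQ may execute on different threads.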
