
MINOR: Change display of Window and Aggregate Functions #8353


Closed · wants to merge 12 commits
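As the diff below shows, each physical `AggregateExpr` implementation gains a `fn func_name(&self) -> &str` returning the canonical function name ("COUNT", "AVG", "APPROX_DISTINCT", ...), and the expected plan strings in the tests switch from the verbose `count: Ok(Field { ... })` debug form to compact displays such as `COUNT(non_nullable_col@1)`. A minimal sketch of the pattern (the trait shape and the display helper below are assumptions for illustration, not the PR's actual code):

```rust
/// Simplified stand-in for DataFusion's `AggregateExpr` trait; the shape is
/// assumed from the diff, and the real trait has many more methods.
trait AggregateExpr {
    /// Display name of this particular expression instance.
    fn name(&self) -> &str;
    /// Canonical SQL function name, e.g. "COUNT" or "APPROX_DISTINCT".
    fn func_name(&self) -> &str;
}

/// Toy COUNT expression carrying only what the example needs.
struct Count {
    name: String,
    arg: String, // e.g. "non_nullable_col@1"
}

impl AggregateExpr for Count {
    fn name(&self) -> &str {
        &self.name
    }

    fn func_name(&self) -> &str {
        "COUNT"
    }
}

/// Hypothetical display helper: compose the compact form seen in the new
/// expected plan strings, e.g. "COUNT(non_nullable_col@1)".
fn display_agg(expr: &dyn AggregateExpr, arg: &str) -> String {
    format!("{}({})", expr.func_name(), arg)
}

fn main() {
    let count = Count {
        name: "count".to_string(),
        arg: "non_nullable_col@1".to_string(),
    };
    // Old display: count: Ok(Field { name: "count", data_type: Int64, ... })
    // New display: COUNT(non_nullable_col@1)
    println!("{}", display_agg(&count, &count.arg));
}
```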
@@ -360,7 +360,7 @@ mod tests {
);
// should not combine the Partial/Final AggregateExecs
let expected = &[
"AggregateExec: mode=Final, gby=[], aggr=[COUNT(2)]",
"AggregateExec: mode=Final, gby=[], aggr=[COUNT(1)]",
"AggregateExec: mode=Partial, gby=[], aggr=[COUNT(1)]",
"ParquetExec: file_groups={1 group: [[x]]}, projection=[a, b, c]",
];
@@ -424,7 +424,7 @@ mod tests {
let plan = final_aggregate_exec(partial_agg, final_group_by, aggr_expr);
// should combine the Partial/Final AggregateExecs to the Single AggregateExec
let expected = &[
"AggregateExec: mode=Single, gby=[c@2 as c], aggr=[Sum(b)]",
"AggregateExec: mode=Single, gby=[c@2 as c], aggr=[SUM(b@1)]",
"ParquetExec: file_groups={1 group: [[x]]}, projection=[a, b, c]",
];

34 changes: 17 additions & 17 deletions datafusion/core/src/physical_optimizer/enforce_sorting.rs
@@ -906,17 +906,17 @@ mod tests {

let physical_plan = bounded_window_exec("non_nullable_col", sort_exprs, filter);

let expected_input = ["BoundedWindowAggExec: wdw=[count: Ok(Field { name: \"count\", data_type: Int64, nullable: true, dict_id: 0, dict_is_ordered: false, metadata: {} }), frame: WindowFrame { units: Range, start_bound: Preceding(NULL), end_bound: CurrentRow }], mode=[Sorted]",
let expected_input = ["BoundedWindowAggExec: wdw=[COUNT(non_nullable_col@1) ORDER BY [non_nullable_col@1 ASC NULLS LAST], frame: WindowFrame { units: Range, start_bound: Preceding(NULL), end_bound: CurrentRow }], mode=[Sorted]",

Contributor: that is certainly much nicer 👍

" FilterExec: NOT non_nullable_col@1",
" SortExec: expr=[non_nullable_col@1 ASC NULLS LAST]",
" BoundedWindowAggExec: wdw=[count: Ok(Field { name: \"count\", data_type: Int64, nullable: true, dict_id: 0, dict_is_ordered: false, metadata: {} }), frame: WindowFrame { units: Range, start_bound: Preceding(NULL), end_bound: CurrentRow }], mode=[Sorted]",
" BoundedWindowAggExec: wdw=[COUNT(non_nullable_col@1) ORDER BY [non_nullable_col@1 DESC], frame: WindowFrame { units: Range, start_bound: Preceding(NULL), end_bound: CurrentRow }], mode=[Sorted]",
" CoalesceBatchesExec: target_batch_size=128",
" SortExec: expr=[non_nullable_col@1 DESC]",
" MemoryExec: partitions=1, partition_sizes=[0]"];

let expected_optimized = ["WindowAggExec: wdw=[count: Ok(Field { name: \"count\", data_type: Int64, nullable: true, dict_id: 0, dict_is_ordered: false, metadata: {} }), frame: WindowFrame { units: Range, start_bound: CurrentRow, end_bound: Following(NULL) }]",
let expected_optimized = ["WindowAggExec: wdw=[COUNT(non_nullable_col@1) ORDER BY [non_nullable_col@1 DESC], frame: WindowFrame { units: Range, start_bound: CurrentRow, end_bound: Following(NULL) }]",
" FilterExec: NOT non_nullable_col@1",
" BoundedWindowAggExec: wdw=[count: Ok(Field { name: \"count\", data_type: Int64, nullable: true, dict_id: 0, dict_is_ordered: false, metadata: {} }), frame: WindowFrame { units: Range, start_bound: Preceding(NULL), end_bound: CurrentRow }], mode=[Sorted]",
" BoundedWindowAggExec: wdw=[COUNT(non_nullable_col@1) ORDER BY [non_nullable_col@1 DESC], frame: WindowFrame { units: Range, start_bound: Preceding(NULL), end_bound: CurrentRow }], mode=[Sorted]",
" CoalesceBatchesExec: target_batch_size=128",
" SortExec: expr=[non_nullable_col@1 DESC]",
" MemoryExec: partitions=1, partition_sizes=[0]"];
@@ -1639,15 +1639,15 @@ mod tests {
// corresponding SortExecs together. Also, the inputs of these `SortExec`s
// are not necessarily the same to be able to remove them.
let expected_input = [
"BoundedWindowAggExec: wdw=[count: Ok(Field { name: \"count\", data_type: Int64, nullable: true, dict_id: 0, dict_is_ordered: false, metadata: {} }), frame: WindowFrame { units: Range, start_bound: Preceding(NULL), end_bound: CurrentRow }], mode=[Sorted]",
"BoundedWindowAggExec: wdw=[COUNT(nullable_col@0) ORDER BY [nullable_col@0 DESC NULLS LAST], frame: WindowFrame { units: Range, start_bound: Preceding(NULL), end_bound: CurrentRow }], mode=[Sorted]",
" SortPreservingMergeExec: [nullable_col@0 DESC NULLS LAST]",
" UnionExec",
" SortExec: expr=[nullable_col@0 DESC NULLS LAST]",
" ParquetExec: file_groups={1 group: [[x]]}, projection=[nullable_col, non_nullable_col], output_ordering=[nullable_col@0 ASC, non_nullable_col@1 ASC]",
" SortExec: expr=[nullable_col@0 DESC NULLS LAST]",
" ParquetExec: file_groups={1 group: [[x]]}, projection=[nullable_col, non_nullable_col], output_ordering=[nullable_col@0 ASC]"];
let expected_optimized = [
"WindowAggExec: wdw=[count: Ok(Field { name: \"count\", data_type: Int64, nullable: true, dict_id: 0, dict_is_ordered: false, metadata: {} }), frame: WindowFrame { units: Range, start_bound: CurrentRow, end_bound: Following(NULL) }]",
"WindowAggExec: wdw=[COUNT(nullable_col@0) ORDER BY [nullable_col@0 ASC], frame: WindowFrame { units: Range, start_bound: CurrentRow, end_bound: Following(NULL) }]",
" SortPreservingMergeExec: [nullable_col@0 ASC]",
" UnionExec",
" ParquetExec: file_groups={1 group: [[x]]}, projection=[nullable_col, non_nullable_col], output_ordering=[nullable_col@0 ASC, non_nullable_col@1 ASC]",
@@ -1676,14 +1676,14 @@ mod tests {

// The `WindowAggExec` can get its required sorting from the leaf nodes directly.
// The unnecessary SortExecs should be removed
let expected_input = ["BoundedWindowAggExec: wdw=[count: Ok(Field { name: \"count\", data_type: Int64, nullable: true, dict_id: 0, dict_is_ordered: false, metadata: {} }), frame: WindowFrame { units: Range, start_bound: Preceding(NULL), end_bound: CurrentRow }], mode=[Sorted]",
let expected_input = ["BoundedWindowAggExec: wdw=[COUNT(nullable_col@0) ORDER BY [nullable_col@0 ASC], frame: WindowFrame { units: Range, start_bound: Preceding(NULL), end_bound: CurrentRow }], mode=[Sorted]",
" SortPreservingMergeExec: [nullable_col@0 ASC,non_nullable_col@1 ASC]",
" UnionExec",
" SortExec: expr=[nullable_col@0 ASC,non_nullable_col@1 ASC]",
" ParquetExec: file_groups={1 group: [[x]]}, projection=[nullable_col, non_nullable_col], output_ordering=[nullable_col@0 ASC]",
" SortExec: expr=[nullable_col@0 ASC,non_nullable_col@1 ASC]",
" ParquetExec: file_groups={1 group: [[x]]}, projection=[nullable_col, non_nullable_col], output_ordering=[nullable_col@0 ASC]"];
let expected_optimized = ["BoundedWindowAggExec: wdw=[count: Ok(Field { name: \"count\", data_type: Int64, nullable: true, dict_id: 0, dict_is_ordered: false, metadata: {} }), frame: WindowFrame { units: Range, start_bound: Preceding(NULL), end_bound: CurrentRow }], mode=[Sorted]",
let expected_optimized = ["BoundedWindowAggExec: wdw=[COUNT(nullable_col@0) ORDER BY [nullable_col@0 ASC], frame: WindowFrame { units: Range, start_bound: Preceding(NULL), end_bound: CurrentRow }], mode=[Sorted]",
" SortPreservingMergeExec: [nullable_col@0 ASC]",
" UnionExec",
" ParquetExec: file_groups={1 group: [[x]]}, projection=[nullable_col, non_nullable_col], output_ordering=[nullable_col@0 ASC]",
@@ -1971,15 +1971,15 @@ mod tests {
let physical_plan =
bounded_window_exec("non_nullable_col", sort_exprs1, window_agg2);

let expected_input = ["BoundedWindowAggExec: wdw=[count: Ok(Field { name: \"count\", data_type: Int64, nullable: true, dict_id: 0, dict_is_ordered: false, metadata: {} }), frame: WindowFrame { units: Range, start_bound: Preceding(NULL), end_bound: CurrentRow }], mode=[Sorted]",
" BoundedWindowAggExec: wdw=[count: Ok(Field { name: \"count\", data_type: Int64, nullable: true, dict_id: 0, dict_is_ordered: false, metadata: {} }), frame: WindowFrame { units: Range, start_bound: Preceding(NULL), end_bound: CurrentRow }], mode=[Sorted]",
" BoundedWindowAggExec: wdw=[count: Ok(Field { name: \"count\", data_type: Int64, nullable: true, dict_id: 0, dict_is_ordered: false, metadata: {} }), frame: WindowFrame { units: Range, start_bound: Preceding(NULL), end_bound: CurrentRow }], mode=[Sorted]",
let expected_input = ["BoundedWindowAggExec: wdw=[COUNT(non_nullable_col@1) ORDER BY [nullable_col@0 ASC], frame: WindowFrame { units: Range, start_bound: Preceding(NULL), end_bound: CurrentRow }], mode=[Sorted]",
" BoundedWindowAggExec: wdw=[COUNT(non_nullable_col@1) ORDER BY [nullable_col@0 ASC, non_nullable_col@1 ASC], frame: WindowFrame { units: Range, start_bound: Preceding(NULL), end_bound: CurrentRow }], mode=[Sorted]",
" BoundedWindowAggExec: wdw=[COUNT(non_nullable_col@1) ORDER BY [nullable_col@0 ASC], frame: WindowFrame { units: Range, start_bound: Preceding(NULL), end_bound: CurrentRow }], mode=[Sorted]",
" SortExec: expr=[nullable_col@0 ASC]",
" MemoryExec: partitions=1, partition_sizes=[0]"];

let expected_optimized = ["BoundedWindowAggExec: wdw=[count: Ok(Field { name: \"count\", data_type: Int64, nullable: true, dict_id: 0, dict_is_ordered: false, metadata: {} }), frame: WindowFrame { units: Range, start_bound: Preceding(NULL), end_bound: CurrentRow }], mode=[Sorted]",
" BoundedWindowAggExec: wdw=[count: Ok(Field { name: \"count\", data_type: Int64, nullable: true, dict_id: 0, dict_is_ordered: false, metadata: {} }), frame: WindowFrame { units: Range, start_bound: Preceding(NULL), end_bound: CurrentRow }], mode=[Sorted]",
" BoundedWindowAggExec: wdw=[count: Ok(Field { name: \"count\", data_type: Int64, nullable: true, dict_id: 0, dict_is_ordered: false, metadata: {} }), frame: WindowFrame { units: Range, start_bound: Preceding(NULL), end_bound: CurrentRow }], mode=[Sorted]",
let expected_optimized = ["BoundedWindowAggExec: wdw=[COUNT(non_nullable_col@1) ORDER BY [nullable_col@0 ASC], frame: WindowFrame { units: Range, start_bound: Preceding(NULL), end_bound: CurrentRow }], mode=[Sorted]",
" BoundedWindowAggExec: wdw=[COUNT(non_nullable_col@1) ORDER BY [nullable_col@0 ASC, non_nullable_col@1 ASC], frame: WindowFrame { units: Range, start_bound: Preceding(NULL), end_bound: CurrentRow }], mode=[Sorted]",
" BoundedWindowAggExec: wdw=[COUNT(non_nullable_col@1) ORDER BY [nullable_col@0 ASC], frame: WindowFrame { units: Range, start_bound: Preceding(NULL), end_bound: CurrentRow }], mode=[Sorted]",
" SortExec: expr=[nullable_col@0 ASC,non_nullable_col@1 ASC]",
" MemoryExec: partitions=1, partition_sizes=[0]"];
assert_optimized!(expected_input, expected_optimized, physical_plan, true);
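In the window tests above, the new `wdw=` entries read as the aggregate's function name applied to its argument, followed by the window's ORDER BY and frame. A hypothetical formatter under that assumption (types and names here are purely illustrative, not DataFusion's actual display code):

```rust
/// Illustrative-only structure for the pieces visible in the expected strings.
struct WindowExprDisplay<'a> {
    func_name: &'a str,      // e.g. "COUNT"
    arg: &'a str,            // e.g. "non_nullable_col@1"
    order_by: &'a [&'a str], // e.g. ["non_nullable_col@1 ASC NULLS LAST"]
    frame: &'a str,          // e.g. "WindowFrame { units: Range, ... }"
}

/// Compose "COUNT(col@1) ORDER BY [col@1 ASC NULLS LAST], frame: ...".
fn format_window_expr(w: &WindowExprDisplay<'_>) -> String {
    format!(
        "{}({}) ORDER BY [{}], frame: {}",
        w.func_name,
        w.arg,
        w.order_by.join(", "),
        w.frame
    )
}

fn main() {
    let w = WindowExprDisplay {
        func_name: "COUNT",
        arg: "non_nullable_col@1",
        order_by: &["non_nullable_col@1 ASC NULLS LAST"],
        frame: "WindowFrame { units: Range, start_bound: Preceding(NULL), end_bound: CurrentRow }",
    };
    // Prints a line shaped like the new expected_input entries above.
    println!("BoundedWindowAggExec: wdw=[{}]", format_window_expr(&w));
}
```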
@@ -2043,7 +2043,7 @@ mod tests {
let expected_input = vec![
"SortExec: expr=[nullable_col@0 ASC]",
" RepartitionExec: partitioning=RoundRobinBatch(10), input_partitions=1",
" BoundedWindowAggExec: wdw=[count: Ok(Field { name: \"count\", data_type: Int64, nullable: true, dict_id: 0, dict_is_ordered: false, metadata: {} }), frame: WindowFrame { units: Range, start_bound: Preceding(NULL), end_bound: CurrentRow }], mode=[Sorted]",
" BoundedWindowAggExec: wdw=[COUNT(nullable_col@0) ORDER BY [nullable_col@0 ASC], frame: WindowFrame { units: Range, start_bound: Preceding(NULL), end_bound: CurrentRow }], mode=[Sorted]",
" MemoryExec: partitions=1, partition_sizes=[0]",
];
assert_eq!(
@@ -2260,15 +2260,15 @@ mod tests {
let physical_plan = bounded_window_exec("a", sort_exprs, spm);

let expected_input = [
"BoundedWindowAggExec: wdw=[count: Ok(Field { name: \"count\", data_type: Int64, nullable: true, dict_id: 0, dict_is_ordered: false, metadata: {} }), frame: WindowFrame { units: Range, start_bound: Preceding(NULL), end_bound: CurrentRow }], mode=[Sorted]",
"BoundedWindowAggExec: wdw=[COUNT(a@0) ORDER BY [a@0 ASC, b@1 ASC], frame: WindowFrame { units: Range, start_bound: Preceding(NULL), end_bound: CurrentRow }], mode=[Sorted]",
" SortPreservingMergeExec: [a@0 ASC,b@1 ASC]",
" SortPreservingRepartitionExec: partitioning=RoundRobinBatch(10), input_partitions=10, sort_exprs=a@0 ASC,b@1 ASC",
" RepartitionExec: partitioning=RoundRobinBatch(10), input_partitions=1",
" SortExec: expr=[a@0 ASC,b@1 ASC]",
" CsvExec: file_groups={1 group: [[x]]}, projection=[a, b, c, d, e], has_header=false",
];
let expected_optimized = [
"BoundedWindowAggExec: wdw=[count: Ok(Field { name: \"count\", data_type: Int64, nullable: true, dict_id: 0, dict_is_ordered: false, metadata: {} }), frame: WindowFrame { units: Range, start_bound: Preceding(NULL), end_bound: CurrentRow }], mode=[Sorted]",
"BoundedWindowAggExec: wdw=[COUNT(a@0) ORDER BY [a@0 ASC, b@1 ASC], frame: WindowFrame { units: Range, start_bound: Preceding(NULL), end_bound: CurrentRow }], mode=[Sorted]",
" SortExec: expr=[a@0 ASC,b@1 ASC]",
" CoalescePartitionsExec",
" RepartitionExec: partitioning=RoundRobinBatch(10), input_partitions=10",
@@ -537,7 +537,7 @@ mod tests {
// expected not to push the limit to the AggregateExec
let expected = [
"LocalLimitExec: fetch=10",
"AggregateExec: mode=Single, gby=[a@0 as a], aggr=[COUNT(*)]",
"AggregateExec: mode=Single, gby=[a@0 as a], aggr=[COUNT(1)]",
"MemoryExec: partitions=1, partition_sizes=[1]",
];
let plan: Arc<dyn ExecutionPlan> = Arc::new(limit_exec);
6 changes: 3 additions & 3 deletions datafusion/core/tests/sql/explain_analyze.rs
@@ -614,10 +614,10 @@ async fn test_physical_plan_display_indent() {
" SortPreservingMergeExec: [the_min@2 DESC], fetch=10",
" SortExec: TopK(fetch=10), expr=[the_min@2 DESC]",
" ProjectionExec: expr=[c1@0 as c1, MAX(aggregate_test_100.c12)@1 as MAX(aggregate_test_100.c12), MIN(aggregate_test_100.c12)@2 as the_min]",
" AggregateExec: mode=FinalPartitioned, gby=[c1@0 as c1], aggr=[MAX(aggregate_test_100.c12), MIN(aggregate_test_100.c12)]",
" AggregateExec: mode=FinalPartitioned, gby=[c1@0 as c1], aggr=[MAX(c12@1), MIN(c12@1)]",
" CoalesceBatchesExec: target_batch_size=4096",
" RepartitionExec: partitioning=Hash([c1@0], 9000), input_partitions=9000",
" AggregateExec: mode=Partial, gby=[c1@0 as c1], aggr=[MAX(aggregate_test_100.c12), MIN(aggregate_test_100.c12)]",
" AggregateExec: mode=Partial, gby=[c1@0 as c1], aggr=[MAX(c12@1), MIN(c12@1)]",
" CoalesceBatchesExec: target_batch_size=4096",
" FilterExec: c12@1 < 10",
" RepartitionExec: partitioning=RoundRobinBatch(9000), input_partitions=1",
@@ -699,7 +699,7 @@ async fn csv_explain_analyze() {
// Only test basic plumbing and try to avoid having to change too
// many things. explain_analyze_baseline_metrics covers the values
// in greater depth
let needle = "AggregateExec: mode=FinalPartitioned, gby=[c1@0 as c1], aggr=[COUNT(*)], metrics=[output_rows=5";
let needle = "AggregateExec: mode=FinalPartitioned, gby=[c1@0 as c1], aggr=[COUNT(1)], metrics=[output_rows=5";
assert_contains!(&formatted, needle);

let verbose_needle = "Output Rows";
6 changes: 4 additions & 2 deletions datafusion/core/tests/sql/group_by.rs
@@ -83,8 +83,10 @@ async fn group_by_limit() -> Result<()> {
let mut expected_physical_plan = r#"
GlobalLimitExec: skip=0, fetch=4
SortExec: TopK(fetch=4), expr=[MAX(traces.ts)@1 DESC]
AggregateExec: mode=Single, gby=[trace_id@0 as trace_id], aggr=[MAX(traces.ts)], lim=[4]
"#.trim().to_string();
AggregateExec: mode=Single, gby=[trace_id@0 as trace_id], aggr=[MAX(ts@1)], lim=[4]
"#
.trim()
.to_string();
let actual_phys_plan =
format_plan(physical_plan.clone(), &mut expected_physical_plan);
assert_eq!(actual_phys_plan, expected_physical_plan);
4 changes: 4 additions & 0 deletions datafusion/physical-expr/src/aggregate/approx_distinct.rs
@@ -114,6 +114,10 @@ impl AggregateExpr for ApproxDistinct {
fn name(&self) -> &str {
&self.name
}

fn func_name(&self) -> &str {
"APPROX_DISTINCT"
}
}

impl PartialEq<dyn Any> for ApproxDistinct {
4 changes: 4 additions & 0 deletions datafusion/physical-expr/src/aggregate/approx_median.rs
@@ -82,6 +82,10 @@ impl AggregateExpr for ApproxMedian {
fn name(&self) -> &str {
&self.name
}

fn func_name(&self) -> &str {
"APPROX_MEDIAN"
}
}

impl PartialEq<dyn Any> for ApproxMedian {
@@ -247,6 +247,10 @@ impl AggregateExpr for ApproxPercentileCont {
fn name(&self) -> &str {
&self.name
}

fn func_name(&self) -> &str {
"APPROX_PERCENTILE_CONT"
}
}

impl PartialEq<dyn Any> for ApproxPercentileCont {
@@ -99,6 +99,10 @@ impl AggregateExpr for ApproxPercentileContWithWeight {
fn name(&self) -> &str {
self.approx_percentile_cont.name()
}

fn func_name(&self) -> &str {
"APPROX_PERCENTILE_CONT"
}
}

impl PartialEq<dyn Any> for ApproxPercentileContWithWeight {
4 changes: 4 additions & 0 deletions datafusion/physical-expr/src/aggregate/array_agg.rs
@@ -96,6 +96,10 @@ impl AggregateExpr for ArrayAgg {
fn name(&self) -> &str {
&self.name
}

fn func_name(&self) -> &str {
"ARRAY_AGG"
}
}

impl PartialEq<dyn Any> for ArrayAgg {
4 changes: 4 additions & 0 deletions datafusion/physical-expr/src/aggregate/array_agg_distinct.rs
@@ -98,6 +98,10 @@ impl AggregateExpr for DistinctArrayAgg {
fn name(&self) -> &str {
&self.name
}

fn func_name(&self) -> &str {
"ARRAY_AGG"
}
}

impl PartialEq<dyn Any> for DistinctArrayAgg {
4 changes: 4 additions & 0 deletions datafusion/physical-expr/src/aggregate/array_agg_ordered.rs
@@ -135,6 +135,10 @@ impl AggregateExpr for OrderSensitiveArrayAgg {
fn name(&self) -> &str {
&self.name
}

fn func_name(&self) -> &str {
"ARRAY_AGG"
}
}

impl PartialEq<dyn Any> for OrderSensitiveArrayAgg {
4 changes: 4 additions & 0 deletions datafusion/physical-expr/src/aggregate/average.rs
@@ -142,6 +142,10 @@ impl AggregateExpr for Avg {
&self.name
}

fn func_name(&self) -> &str {
"AVG"
}

fn reverse_expr(&self) -> Option<Arc<dyn AggregateExpr>> {
Some(Arc::new(self.clone()))
}
16 changes: 16 additions & 0 deletions datafusion/physical-expr/src/aggregate/bit_and_or_xor.rs
@@ -109,6 +109,10 @@ impl AggregateExpr for BitAnd {
&self.name
}

fn func_name(&self) -> &str {
"BIT_AND"
}

fn groups_accumulator_supported(&self) -> bool {
true
}
@@ -274,6 +278,10 @@ impl AggregateExpr for BitOr {
&self.name
}

fn func_name(&self) -> &str {
"BIT_OR"
}

fn groups_accumulator_supported(&self) -> bool {
true
}
@@ -435,6 +443,10 @@ impl AggregateExpr for BitXor {
&self.name
}

fn func_name(&self) -> &str {
"BIT_XOR"
}

fn groups_accumulator_supported(&self) -> bool {
true
}
@@ -596,6 +608,10 @@ impl AggregateExpr for DistinctBitXor {
fn name(&self) -> &str {
&self.name
}

fn func_name(&self) -> &str {
"BIT_XOR"
}
}

impl PartialEq<dyn Any> for DistinctBitXor {
8 changes: 8 additions & 0 deletions datafusion/physical-expr/src/aggregate/bool_and_or.rs
@@ -130,6 +130,10 @@ impl AggregateExpr for BoolAnd {
&self.name
}

fn func_name(&self) -> &str {
"BOOL_AND"
}

fn groups_accumulator_supported(&self) -> bool {
true
}
@@ -263,6 +267,10 @@ impl AggregateExpr for BoolOr {
&self.name
}

fn func_name(&self) -> &str {
"BOOL_OR"
}

fn groups_accumulator_supported(&self) -> bool {
true
}
4 changes: 4 additions & 0 deletions datafusion/physical-expr/src/aggregate/correlation.rs
@@ -116,6 +116,10 @@ impl AggregateExpr for Correlation {
fn name(&self) -> &str {
&self.name
}

fn func_name(&self) -> &str {
"CORR"
}
}

impl PartialEq<dyn Any> for Correlation {
4 changes: 4 additions & 0 deletions datafusion/physical-expr/src/aggregate/count.rs
@@ -241,6 +241,10 @@ impl AggregateExpr for Count {
&self.name
}

fn func_name(&self) -> &str {
"COUNT"
}

fn groups_accumulator_supported(&self) -> bool {
// groups accumulator only supports `COUNT(c1)`, not
// `COUNT(c1, c2)`, etc
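The comment in the `Count` hunk above notes that the groups accumulator only supports single-argument `COUNT(c1)`, not `COUNT(c1, c2)`. A tiny sketch of that gating rule (hypothetical; the real implementation inspects the expression's argument list):

```rust
/// Hypothetical sketch of the single-argument gate described in the comment:
/// the specialized groups accumulator applies only to `COUNT(c1)`, while
/// `COUNT(c1, c2)` falls back to the generic accumulator path.
fn groups_accumulator_supported(num_args: usize) -> bool {
    num_args == 1
}

fn main() {
    assert!(groups_accumulator_supported(1)); // COUNT(c1)
    assert!(!groups_accumulator_supported(2)); // COUNT(c1, c2)
    println!("single-argument COUNT uses the groups accumulator fast path");
}
```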