diff --git a/src/query/service/src/physical_plans/physical_aggregate_partial.rs b/src/query/service/src/physical_plans/physical_aggregate_partial.rs
index c2d39d7430af3..1684ed9975b5f 100644
--- a/src/query/service/src/physical_plans/physical_aggregate_partial.rs
+++ b/src/query/service/src/physical_plans/physical_aggregate_partial.rs
@@ -238,7 +238,7 @@ impl IPhysicalPlan for AggregatePartial {
         };
         let shared_partition_streams = SharedPartitionStream::new(
             builder.main_pipeline.output_len(),
-            max_block_rows,
+            0,
             max_block_bytes,
             bucket_num,
         );
diff --git a/src/query/service/src/pipelines/processors/transforms/aggregator/new_aggregate/new_transform_final_aggregate.rs b/src/query/service/src/pipelines/processors/transforms/aggregator/new_aggregate/new_transform_final_aggregate.rs
index 1a4944a1b1165..57eb1a0cfd59a 100644
--- a/src/query/service/src/pipelines/processors/transforms/aggregator/new_aggregate/new_transform_final_aggregate.rs
+++ b/src/query/service/src/pipelines/processors/transforms/aggregator/new_aggregate/new_transform_final_aggregate.rs
@@ -47,7 +47,7 @@ use crate::pipelines::processors::transforms::aggregator::statistics::Aggregatio
 use crate::pipelines::processors::transforms::aggregator::transform_aggregate_partial::HashTable;
 use crate::sessions::QueryContext;
 
-const SPILL_BUCKET_NUM: usize = 2;
+const SPILL_BUCKET_NUM: usize = 4;
 const SPILL_BUCKET_BITS: u64 = SPILL_BUCKET_NUM.trailing_zeros() as u64;
 
 enum Stage {
@@ -113,11 +113,7 @@ impl NewTransformFinalAggregate {
             ctx.clone(),
             SPILL_BUCKET_NUM,
             params.spill_schema(),
-            LocalPartitionStream::new(
-                params.max_block_rows,
-                params.max_block_bytes,
-                SPILL_BUCKET_NUM,
-            ),
+            LocalPartitionStream::new(0, params.max_block_bytes, SPILL_BUCKET_NUM),
         )?;
 
         Ok(Box::new(NewTransformFinalAggregate {
diff --git a/src/query/service/src/spillers/async_buffer.rs b/src/query/service/src/spillers/async_buffer.rs
index e84b461b5fa71..24f6b52bf30e2 100644
--- a/src/query/service/src/spillers/async_buffer.rs
+++ b/src/query/service/src/spillers/async_buffer.rs
@@ -520,6 +520,7 @@ impl SpillsDataWriter {
             .set_compression(Compression::LZ4_RAW)
             .set_statistics_enabled(EnabledStatistics::None)
             .set_bloom_filter_enabled(false)
+            .set_dictionary_enabled(false)
             .build();
 
         let arrow_schema = Arc::new(Schema::from(table_schema.as_ref()));
diff --git a/src/query/settings/src/settings_default.rs b/src/query/settings/src/settings_default.rs
index 07e30f424803b..f7772eab0f954 100644
--- a/src/query/settings/src/settings_default.rs
+++ b/src/query/settings/src/settings_default.rs
@@ -1626,8 +1626,8 @@ impl DefaultSettings {
                 range: Some(SettingRange::Numeric(0..=1)),
             }),
             ("max_aggregate_spill_level", DefaultSettingValue {
-                value: UserSettingValue::UInt64(1),
-                desc: "Maximum recursion depth for the aggregate spill. Each recursion level repartition data into `num_cpu` smaller parts to ensure it fits in memory.",
+                value: UserSettingValue::UInt64(3),
+                desc: "Maximum recursion depth for the aggregate spill. Each recursion level repartition data into 4 smaller parts to ensure it fits in memory.",
                 mode: SettingMode::Both,
                 scope: SettingScope::Both,
                 range: Some(SettingRange::Numeric(0..=16)),