@@ -84,7 +84,7 @@ async fn group_by_none() {
     TestCase::new()
         .with_query("select median(request_bytes) from t")
         .with_expected_errors(vec![
-            "Resources exhausted: Additional allocation failed with top memory consumers (across reservations) as: AggregateStream"
+            "Resources exhausted: Additional allocation failed with top memory consumers (across reservations) as:\n AggregateStream"
         ])
         .with_memory_limit(2_000)
         .run()
@@ -96,7 +96,7 @@ async fn group_by_row_hash() {
     TestCase::new()
         .with_query("select count(*) from t GROUP BY response_bytes")
         .with_expected_errors(vec![
-            "Resources exhausted: Additional allocation failed with top memory consumers (across reservations) as: GroupedHashAggregateStream"
+            "Resources exhausted: Additional allocation failed with top memory consumers (across reservations) as:\n GroupedHashAggregateStream"
         ])
         .with_memory_limit(2_000)
         .run()
@@ -109,7 +109,7 @@ async fn group_by_hash() {
         // group by dict column
         .with_query("select count(*) from t GROUP BY service, host, pod, container")
         .with_expected_errors(vec![
-            "Resources exhausted: Additional allocation failed with top memory consumers (across reservations) as: GroupedHashAggregateStream"
+            "Resources exhausted: Additional allocation failed with top memory consumers (across reservations) as:\n GroupedHashAggregateStream"
         ])
         .with_memory_limit(1_000)
         .run()
@@ -122,7 +122,7 @@ async fn join_by_key_multiple_partitions() {
     TestCase::new()
         .with_query("select t1.* from t t1 JOIN t t2 ON t1.service = t2.service")
         .with_expected_errors(vec![
-            "Resources exhausted: Additional allocation failed with top memory consumers (across reservations) as: HashJoinInput",
+            "Resources exhausted: Additional allocation failed with top memory consumers (across reservations) as:\n HashJoinInput",
         ])
         .with_memory_limit(1_000)
         .with_config(config)
@@ -136,7 +136,7 @@ async fn join_by_key_single_partition() {
     TestCase::new()
         .with_query("select t1.* from t t1 JOIN t t2 ON t1.service = t2.service")
         .with_expected_errors(vec![
-            "Resources exhausted: Additional allocation failed with top memory consumers (across reservations) as: HashJoinInput",
+            "Resources exhausted: Additional allocation failed with top memory consumers (across reservations) as:\n HashJoinInput",
         ])
         .with_memory_limit(1_000)
         .with_config(config)
@@ -149,7 +149,7 @@ async fn join_by_expression() {
     TestCase::new()
         .with_query("select t1.* from t t1 JOIN t t2 ON t1.service != t2.service")
         .with_expected_errors(vec![
-            "Resources exhausted: Additional allocation failed with top memory consumers (across reservations) as: NestedLoopJoinLoad[0]",
+            "Resources exhausted: Additional allocation failed with top memory consumers (across reservations) as:\n NestedLoopJoinLoad[0]",
         ])
         .with_memory_limit(1_000)
         .run()
@@ -161,7 +161,7 @@ async fn cross_join() {
     TestCase::new()
         .with_query("select t1.*, t2.* from t t1 CROSS JOIN t t2")
         .with_expected_errors(vec![
-            "Resources exhausted: Additional allocation failed with top memory consumers (across reservations) as: CrossJoinExec",
+            "Resources exhausted: Additional allocation failed with top memory consumers (across reservations) as:\n CrossJoinExec",
         ])
         .with_memory_limit(1_000)
         .run()
@@ -217,7 +217,7 @@ async fn symmetric_hash_join() {
             "select t1.* from t t1 JOIN t t2 ON t1.pod = t2.pod AND t1.time = t2.time",
         )
         .with_expected_errors(vec![
-            "Resources exhausted: Additional allocation failed with top memory consumers (across reservations) as: SymmetricHashJoinStream",
+            "Resources exhausted: Additional allocation failed with top memory consumers (across reservations) as:\n SymmetricHashJoinStream",
         ])
         .with_memory_limit(1_000)
         .with_scenario(Scenario::AccessLogStreaming)
@@ -235,7 +235,7 @@ async fn sort_preserving_merge() {
         // so only a merge is needed
         .with_query("select * from t ORDER BY a ASC NULLS LAST, b ASC NULLS LAST LIMIT 10")
         .with_expected_errors(vec![
-            "Resources exhausted: Additional allocation failed with top memory consumers (across reservations) as: SortPreservingMergeExec",
+            "Resources exhausted: Additional allocation failed with top memory consumers (across reservations) as:\n SortPreservingMergeExec",
         ])
         // provide insufficient memory to merge
         .with_memory_limit(partition_size / 2)
@@ -315,7 +315,7 @@ async fn sort_spill_reservation() {
     test.clone()
         .with_expected_errors(vec![
             "Resources exhausted: Additional allocation failed with top memory consumers (across reservations) as:",
-            "bytes for ExternalSorterMerge",
+            "B for ExternalSorterMerge",
         ])
         .with_config(config)
         .run()
@@ -344,7 +344,7 @@ async fn oom_recursive_cte() {
             SELECT * FROM nodes;",
         )
         .with_expected_errors(vec![
-            "Resources exhausted: Additional allocation failed with top memory consumers (across reservations) as: RecursiveQuery",
+            "Resources exhausted: Additional allocation failed with top memory consumers (across reservations) as:\n RecursiveQuery",
         ])
         .with_memory_limit(2_000)
         .run()
@@ -396,7 +396,7 @@ async fn oom_with_tracked_consumer_pool() {
         .with_expected_errors(vec![
             "Failed to allocate additional",
             "for ParquetSink(ArrowColumnWriter)",
-            "Additional allocation failed with top memory consumers (across reservations) as: ParquetSink(ArrowColumnWriter)"
+            "Additional allocation failed with top memory consumers (across reservations) as:\n ParquetSink(ArrowColumnWriter)"
         ])
         .with_memory_pool(Arc::new(
             TrackConsumersPool::new(