@@ -80,6 +80,7 @@ func TestBlockCleaner_KeyPermissionDenied(t *testing.T) {
 		DeletionDelay:      deletionDelay,
 		CleanupInterval:    time.Minute,
 		CleanupConcurrency: 1,
+		BlockRanges:        (&tsdb.DurationList{2 * time.Hour, 12 * time.Hour, 24 * time.Hour}).ToMilliseconds(),
 	}
 
 	logger := log.NewNopLogger()
@@ -182,6 +183,8 @@ func testBlocksCleanerWithOptions(t *testing.T, options testBlocksCleanerOptions
 
 	// Create Parquet marker
 	block13 := createTSDBBlock(t, bucketClient, "user-6", 30, 50, nil)
+	// This block should be converted to Parquet format, so it is counted as remaining.
+	block14 := createTSDBBlock(t, bucketClient, "user-6", 30, 50, nil)
 	createParquetMarker(t, bucketClient, "user-6", block13)
 
 	// The fixtures have been created. If the bucket client wasn't wrapped to write
@@ -196,6 +199,7 @@ func testBlocksCleanerWithOptions(t *testing.T, options testBlocksCleanerOptions
 		CleanupConcurrency:                 options.concurrency,
 		BlockDeletionMarksMigrationEnabled: options.markersMigrationEnabled,
 		TenantCleanupDelay:                 options.tenantDeletionDelay,
+		BlockRanges:                        (&tsdb.DurationList{2 * time.Hour}).ToMilliseconds(),
 	}
 
 	reg := prometheus.NewPedanticRegistry()
@@ -251,6 +255,7 @@ func testBlocksCleanerWithOptions(t *testing.T, options testBlocksCleanerOptions
 		{path: path.Join("user-3", block10.String(), parquet.ConverterMarkerFileName), expectedExists: false},
 		{path: path.Join("user-4", block.DebugMetas, "meta.json"), expectedExists: options.user4FilesExist},
 		{path: path.Join("user-6", block13.String(), parquet.ConverterMarkerFileName), expectedExists: true},
+		{path: path.Join("user-6", block14.String(), parquet.ConverterMarkerFileName), expectedExists: false},
 	} {
 		exists, err := bucketClient.Exists(ctx, tc.path)
 		require.NoError(t, err)
@@ -296,6 +301,11 @@ func testBlocksCleanerWithOptions(t *testing.T, options testBlocksCleanerOptions
 	}, {
 		userID:        "user-3",
 		expectedIndex: false,
+	}, {
+		userID:         "user-6",
+		expectedIndex:  true,
+		expectedBlocks: []ulid.ULID{block13, block14},
+		expectedMarks:  []ulid.ULID{},
 	},
 	} {
 		idx, err := bucketindex.ReadIndex(ctx, bucketClient, tc.userID, nil, logger)
@@ -318,7 +328,7 @@ func testBlocksCleanerWithOptions(t *testing.T, options testBlocksCleanerOptions
 		cortex_bucket_blocks_count{user="user-1"} 2
 		cortex_bucket_blocks_count{user="user-2"} 1
 		cortex_bucket_blocks_count{user="user-5"} 2
-		cortex_bucket_blocks_count{user="user-6"} 1
+		cortex_bucket_blocks_count{user="user-6"} 2
 		# HELP cortex_bucket_blocks_marked_for_deletion_count Total number of blocks marked for deletion in the bucket.
 		# TYPE cortex_bucket_blocks_marked_for_deletion_count gauge
 		cortex_bucket_blocks_marked_for_deletion_count{user="user-1"} 1
@@ -341,9 +351,14 @@ func testBlocksCleanerWithOptions(t *testing.T, options testBlocksCleanerOptions
 		# TYPE cortex_bucket_parquet_blocks_count gauge
 		cortex_bucket_parquet_blocks_count{user="user-5"} 0
 		cortex_bucket_parquet_blocks_count{user="user-6"} 1
+		# HELP cortex_bucket_parquet_unconverted_blocks_count Total number of unconverted parquet blocks in the bucket. Blocks marked for deletion are included.
+		# TYPE cortex_bucket_parquet_unconverted_blocks_count gauge
+		cortex_bucket_parquet_unconverted_blocks_count{user="user-5"} 0
+		cortex_bucket_parquet_unconverted_blocks_count{user="user-6"} 0
 	`),
 		"cortex_bucket_blocks_count",
 		"cortex_bucket_parquet_blocks_count",
+		"cortex_bucket_parquet_unconverted_blocks_count",
 		"cortex_bucket_blocks_marked_for_deletion_count",
 		"cortex_bucket_blocks_marked_for_no_compaction_count",
 		"cortex_bucket_blocks_partials_count",
@@ -378,6 +393,7 @@ func TestBlocksCleaner_ShouldContinueOnBlockDeletionFailure(t *testing.T) {
 		DeletionDelay:      deletionDelay,
 		CleanupInterval:    time.Minute,
 		CleanupConcurrency: 1,
+		BlockRanges:        (&tsdb.DurationList{2 * time.Hour, 12 * time.Hour, 24 * time.Hour}).ToMilliseconds(),
 	}
 
 	logger := log.NewNopLogger()
@@ -447,6 +463,7 @@ func TestBlocksCleaner_ShouldRebuildBucketIndexOnCorruptedOne(t *testing.T) {
 		DeletionDelay:      deletionDelay,
 		CleanupInterval:    time.Minute,
 		CleanupConcurrency: 1,
+		BlockRanges:        (&tsdb.DurationList{2 * time.Hour, 12 * time.Hour, 24 * time.Hour}).ToMilliseconds(),
 	}
 
 	logger := log.NewNopLogger()
@@ -508,6 +525,7 @@ func TestBlocksCleaner_ShouldRemoveMetricsForTenantsNotBelongingAnymoreToTheShar
 		DeletionDelay:      time.Hour,
 		CleanupInterval:    time.Minute,
 		CleanupConcurrency: 1,
+		BlockRanges:        (&tsdb.DurationList{2 * time.Hour, 12 * time.Hour, 24 * time.Hour}).ToMilliseconds(),
 	}
 
 	ctx := context.Background()
@@ -657,6 +675,7 @@ func TestBlocksCleaner_ShouldRemoveBlocksOutsideRetentionPeriod(t *testing.T) {
 		DeletionDelay:      time.Hour,
 		CleanupInterval:    time.Minute,
 		CleanupConcurrency: 1,
+		BlockRanges:        (&tsdb.DurationList{2 * time.Hour, 12 * time.Hour, 24 * time.Hour}).ToMilliseconds(),
 	}
 
 	ctx := context.Background()
@@ -889,6 +908,7 @@ func TestBlocksCleaner_CleanPartitionedGroupInfo(t *testing.T) {
 		CleanupConcurrency: 1,
 		ShardingStrategy:   util.ShardingStrategyShuffle,
 		CompactionStrategy: util.CompactionStrategyPartitioning,
+		BlockRanges:        (&tsdb.DurationList{2 * time.Hour, 12 * time.Hour, 24 * time.Hour}).ToMilliseconds(),
 	}
 
 	ctx := context.Background()
@@ -964,6 +984,7 @@ func TestBlocksCleaner_DeleteEmptyBucketIndex(t *testing.T) {
 		CleanupConcurrency: 1,
 		ShardingStrategy:   util.ShardingStrategyShuffle,
 		CompactionStrategy: util.CompactionStrategyPartitioning,
+		BlockRanges:        (&tsdb.DurationList{2 * time.Hour, 12 * time.Hour, 24 * time.Hour}).ToMilliseconds(),
 	}
 
 	ctx := context.Background()
@@ -1021,6 +1042,91 @@ func TestBlocksCleaner_DeleteEmptyBucketIndex(t *testing.T) {
 	require.True(t, userBucket.IsObjNotFoundErr(err))
 }
 
+func TestBlocksCleaner_ParquetMetrics(t *testing.T) {
+	// Create metrics
+	reg := prometheus.NewPedanticRegistry()
+	blocksMarkedForDeletion := promauto.With(reg).NewCounterVec(
+		prometheus.CounterOpts{
+			Name: "cortex_compactor_blocks_marked_for_deletion_total",
+			Help: "Total number of blocks marked for deletion in compactor.",
+		},
+		[]string{"user", "reason"},
+	)
+	remainingPlannedCompactions := promauto.With(reg).NewGaugeVec(
+		prometheus.GaugeOpts{
+			Name: "cortex_compactor_remaining_planned_compactions",
+			Help: "Total number of remaining planned compactions.",
+		},
+		[]string{"user"},
+	)
+
+	// Create the blocks cleaner
+	cleaner := NewBlocksCleaner(
+		BlocksCleanerConfig{
+			BlockRanges: (&tsdb.DurationList{
+				2 * time.Hour,
+				12 * time.Hour,
+			}).ToMilliseconds(),
+		},
+		nil, // bucket not needed
+		nil, // usersScanner not needed
+		0,
+		&mockConfigProvider{
+			parquetConverterEnabled: map[string]bool{
+				"user1": true,
+			},
+		},
+		log.NewNopLogger(),
+		"test",
+		reg,
+		0,
+		0,
+		blocksMarkedForDeletion,
+		remainingPlannedCompactions,
+	)
+
+	// Create test blocks in the index
+	now := time.Now()
+	idx := &bucketindex.Index{
+		Blocks: bucketindex.Blocks{
+			{
+				ID:      ulid.MustNew(ulid.Now(), rand.Reader),
+				MinTime: now.Add(-3 * time.Hour).UnixMilli(),
+				MaxTime: now.UnixMilli(),
+				Parquet: &parquet.ConverterMarkMeta{},
+			},
+			{
+				ID:      ulid.MustNew(ulid.Now(), rand.Reader),
+				MinTime: now.Add(-3 * time.Hour).UnixMilli(),
+				MaxTime: now.UnixMilli(),
+				Parquet: nil,
+			},
+			{
+				ID:      ulid.MustNew(ulid.Now(), rand.Reader),
+				MinTime: now.Add(-5 * time.Hour).UnixMilli(),
+				MaxTime: now.UnixMilli(),
+				Parquet: nil,
+			},
+		},
+	}
+
+	// Update metrics
+	cleaner.updateBucketMetrics("user1", true, idx, 0, 0)
+
+	// Verify metrics
+	require.NoError(t, prom_testutil.CollectAndCompare(cleaner.tenantParquetBlocks, strings.NewReader(`
+		# HELP cortex_bucket_parquet_blocks_count Total number of parquet blocks in the bucket. Blocks marked for deletion are included.
+		# TYPE cortex_bucket_parquet_blocks_count gauge
+		cortex_bucket_parquet_blocks_count{user="user1"} 1
+	`)))
+
+	require.NoError(t, prom_testutil.CollectAndCompare(cleaner.tenantParquetUnConvertedBlocks, strings.NewReader(`
+		# HELP cortex_bucket_parquet_unconverted_blocks_count Total number of unconverted parquet blocks in the bucket. Blocks marked for deletion are included.
+		# TYPE cortex_bucket_parquet_unconverted_blocks_count gauge
+		cortex_bucket_parquet_unconverted_blocks_count{user="user1"} 2
+	`)))
+}
+
 type mockConfigProvider struct {
 	userRetentionPeriods    map[string]time.Duration
 	parquetConverterEnabled map[string]bool