         marker::PhantomData,
         sync::{
             atomic::{AtomicU64, Ordering::Relaxed},
-            Arc, Mutex, Weak,
+            Arc, Mutex, OnceLock, Weak,
         },
         thread::{self, JoinHandle},
     },
@@ -48,6 +48,7 @@ type AtomicSchedulerId = AtomicU64;
 #[derive(Debug)]
 pub struct SchedulerPool<S: SpawnableScheduler<TH>, TH: TaskHandler> {
     scheduler_inners: Mutex<Vec<S::Inner>>,
+    handler_count: usize,
     handler_context: HandlerContext,
     // weak_self could be elided by changing InstalledScheduler::take_scheduler()'s receiver to
     // Arc<Self> from &Self, because SchedulerPool is used as in the form of Arc<SchedulerPool>
@@ -83,13 +84,20 @@ where
     // Some internal impl and test code want an actual concrete type, NOT the
     // `dyn InstalledSchedulerPool`. So don't merge this into `Self::new_dyn()`.
     fn new(
+        handler_count: Option<usize>,
         log_messages_bytes_limit: Option<usize>,
         transaction_status_sender: Option<TransactionStatusSender>,
         replay_vote_sender: Option<ReplayVoteSender>,
         prioritization_fee_cache: Arc<PrioritizationFeeCache>,
     ) -> Arc<Self> {
+        let handler_count = handler_count.unwrap_or(Self::default_handler_count());
+        // we're hard-coding the number of handler threads to 1, meaning this impl is currently
+        // single-threaded still.
+        assert_eq!(handler_count, 1); // replace this with assert!(handler_count >= 1) later
+
         Arc::new_cyclic(|weak_self| Self {
             scheduler_inners: Mutex::default(),
+            handler_count,
             handler_context: HandlerContext {
                 log_messages_bytes_limit,
                 transaction_status_sender,
@@ -105,12 +113,14 @@ where
     // This apparently-meaningless wrapper is handy, because some callers explicitly want
     // `dyn InstalledSchedulerPool` to be returned for type inference convenience.
     pub fn new_dyn(
+        handler_count: Option<usize>,
         log_messages_bytes_limit: Option<usize>,
         transaction_status_sender: Option<TransactionStatusSender>,
         replay_vote_sender: Option<ReplayVoteSender>,
         prioritization_fee_cache: Arc<PrioritizationFeeCache>,
     ) -> InstalledSchedulerPoolArc {
         Self::new(
+            handler_count,
             log_messages_bytes_limit,
             transaction_status_sender,
             replay_vote_sender,
@@ -145,6 +155,37 @@ where
             S::spawn(self.self_arc(), context)
         }
     }
+
+    pub fn default_handler_count() -> usize {
+        Self::calculate_default_handler_count(
+            thread::available_parallelism()
+                .ok()
+                .map(|non_zero| non_zero.get()),
+        )
+    }
+
+    pub fn calculate_default_handler_count(detected_cpu_core_count: Option<usize>) -> usize {
+        // Divide by 4 so as not to consume all available CPUs with handler threads alone,
+        // sparing capacity for other active forks and other subsystems.
+        // Also, if available_parallelism fails (which should be very rare), use 4 threads,
+        // as a relatively conservative assumption about modern multi-core systems ranging
+        // from engineers' laptops to production servers.
+        detected_cpu_core_count
+            .map(|core_count| (core_count / 4).max(1))
+            .unwrap_or(4)
+    }
+
+    pub fn cli_message() -> &'static str {
+        static MESSAGE: OnceLock<String> = OnceLock::new();
+
+        MESSAGE.get_or_init(|| {
+            format!(
+                "Change the number of the unified scheduler's transaction execution threads \
+                 dedicated to each block, otherwise calculated as cpu_cores/4 [default: {}]",
+                Self::default_handler_count()
+            )
+        })
+    }
 }
 
 impl<S, TH> InstalledSchedulerPool for SchedulerPool<S, TH>
@@ -372,7 +413,6 @@ pub struct PooledSchedulerInner<S: SpawnableScheduler<TH>, TH: TaskHandler> {
 struct ThreadManager<S: SpawnableScheduler<TH>, TH: TaskHandler> {
     scheduler_id: SchedulerId,
     pool: Arc<SchedulerPool<S, TH>>,
-    handler_count: usize,
     new_task_sender: Sender<NewTaskPayload>,
     new_task_receiver: Receiver<NewTaskPayload>,
     session_result_sender: Sender<Option<ResultWithTimings>>,
@@ -384,28 +424,24 @@ struct ThreadManager<S: SpawnableScheduler<TH>, TH: TaskHandler> {
 
 impl<TH: TaskHandler> PooledScheduler<TH> {
     fn do_spawn(pool: Arc<SchedulerPool<Self, TH>>, initial_context: SchedulingContext) -> Self {
-        // we're hard-coding the number of handler thread to 1, meaning this impl is currently
-        // single-threaded still.
-        let handler_count = 1;
-
         Self::from_inner(
             PooledSchedulerInner::<Self, TH> {
-                thread_manager: ThreadManager::new(pool, handler_count),
+                thread_manager: ThreadManager::new(pool),
             },
             initial_context,
         )
     }
 }
 
 impl<S: SpawnableScheduler<TH>, TH: TaskHandler> ThreadManager<S, TH> {
-    fn new(pool: Arc<SchedulerPool<S, TH>>, handler_count: usize) -> Self {
+    fn new(pool: Arc<SchedulerPool<S, TH>>) -> Self {
         let (new_task_sender, new_task_receiver) = unbounded();
         let (session_result_sender, session_result_receiver) = unbounded();
+        let handler_count = pool.handler_count;
 
         Self {
             scheduler_id: pool.new_scheduler_id(),
             pool,
-            handler_count,
             new_task_sender,
             new_task_receiver,
             session_result_sender,
@@ -477,7 +513,7 @@ impl<S: SpawnableScheduler<TH>, TH: TaskHandler> ThreadManager<S, TH> {
         // 5. the handler thread reply back to the scheduler thread as an executed task.
         // 6. the scheduler thread post-processes the executed task.
         let scheduler_main_loop = || {
-            let handler_count = self.handler_count;
+            let handler_count = self.pool.handler_count;
             let session_result_sender = self.session_result_sender.clone();
             let new_task_receiver = self.new_task_receiver.clone();
@@ -613,7 +649,7 @@ impl<S: SpawnableScheduler<TH>, TH: TaskHandler> ThreadManager<S, TH> {
                 .unwrap(),
         );
 
-        self.handler_threads = (0..self.handler_count)
+        self.handler_threads = (0..self.pool.handler_count)
             .map({
                 |thx| {
                     thread::Builder::new()
@@ -760,7 +796,7 @@ mod tests {
 
         let ignored_prioritization_fee_cache = Arc::new(PrioritizationFeeCache::new(0u64));
         let pool =
-            DefaultSchedulerPool::new_dyn(None, None, None, ignored_prioritization_fee_cache);
+            DefaultSchedulerPool::new_dyn(None, None, None, None, ignored_prioritization_fee_cache);
 
         // this indirectly proves that there should be circular link because there's only one Arc
         // at this moment now
@@ -775,7 +811,7 @@ mod tests {
 
         let ignored_prioritization_fee_cache = Arc::new(PrioritizationFeeCache::new(0u64));
         let pool =
-            DefaultSchedulerPool::new_dyn(None, None, None, ignored_prioritization_fee_cache);
+            DefaultSchedulerPool::new_dyn(None, None, None, None, ignored_prioritization_fee_cache);
         let bank = Arc::new(Bank::default_for_tests());
         let context = SchedulingContext::new(bank);
         let scheduler = pool.take_scheduler(context);
@@ -789,7 +825,8 @@ mod tests {
         solana_logger::setup();
 
         let ignored_prioritization_fee_cache = Arc::new(PrioritizationFeeCache::new(0u64));
-        let pool = DefaultSchedulerPool::new(None, None, None, ignored_prioritization_fee_cache);
+        let pool =
+            DefaultSchedulerPool::new(None, None, None, None, ignored_prioritization_fee_cache);
         let bank = Arc::new(Bank::default_for_tests());
         let context = &SchedulingContext::new(bank);
 
@@ -817,7 +854,8 @@ mod tests {
         solana_logger::setup();
 
         let ignored_prioritization_fee_cache = Arc::new(PrioritizationFeeCache::new(0u64));
-        let pool = DefaultSchedulerPool::new(None, None, None, ignored_prioritization_fee_cache);
+        let pool =
+            DefaultSchedulerPool::new(None, None, None, None, ignored_prioritization_fee_cache);
         let bank = Arc::new(Bank::default_for_tests());
         let context = &SchedulingContext::new(bank);
         let mut scheduler = pool.do_take_scheduler(context.clone());
@@ -835,7 +873,8 @@ mod tests {
         solana_logger::setup();
 
         let ignored_prioritization_fee_cache = Arc::new(PrioritizationFeeCache::new(0u64));
-        let pool = DefaultSchedulerPool::new(None, None, None, ignored_prioritization_fee_cache);
+        let pool =
+            DefaultSchedulerPool::new(None, None, None, None, ignored_prioritization_fee_cache);
         let old_bank = &Arc::new(Bank::default_for_tests());
         let new_bank = &Arc::new(Bank::default_for_tests());
         assert!(!Arc::ptr_eq(old_bank, new_bank));
@@ -861,7 +900,7 @@ mod tests {
         let mut bank_forks = bank_forks.write().unwrap();
         let ignored_prioritization_fee_cache = Arc::new(PrioritizationFeeCache::new(0u64));
         let pool =
-            DefaultSchedulerPool::new_dyn(None, None, None, ignored_prioritization_fee_cache);
+            DefaultSchedulerPool::new_dyn(None, None, None, None, ignored_prioritization_fee_cache);
         bank_forks.install_scheduler_pool(pool);
     }
 
@@ -875,7 +914,7 @@ mod tests {
 
         let ignored_prioritization_fee_cache = Arc::new(PrioritizationFeeCache::new(0u64));
         let pool =
-            DefaultSchedulerPool::new_dyn(None, None, None, ignored_prioritization_fee_cache);
+            DefaultSchedulerPool::new_dyn(None, None, None, None, ignored_prioritization_fee_cache);
 
         let bank = Bank::default_for_tests();
         let bank_forks = BankForks::new_rw_arc(bank);
@@ -928,7 +967,7 @@ mod tests {
         let bank = setup_dummy_fork_graph(bank);
         let ignored_prioritization_fee_cache = Arc::new(PrioritizationFeeCache::new(0u64));
         let pool =
-            DefaultSchedulerPool::new_dyn(None, None, None, ignored_prioritization_fee_cache);
+            DefaultSchedulerPool::new_dyn(None, None, None, None, ignored_prioritization_fee_cache);
         let context = SchedulingContext::new(bank.clone());
 
         assert_eq!(bank.transaction_count(), 0);
@@ -953,7 +992,7 @@ mod tests {
 
         let ignored_prioritization_fee_cache = Arc::new(PrioritizationFeeCache::new(0u64));
         let pool =
-            DefaultSchedulerPool::new_dyn(None, None, None, ignored_prioritization_fee_cache);
+            DefaultSchedulerPool::new_dyn(None, None, None, None, ignored_prioritization_fee_cache);
         let context = SchedulingContext::new(bank.clone());
         let mut scheduler = pool.take_scheduler(context);
 
@@ -1159,6 +1198,7 @@ mod tests {
             None,
             None,
             None,
+            None,
             ignored_prioritization_fee_cache,
         );
         let scheduler = pool.take_scheduler(context);
@@ -1193,4 +1233,18 @@ mod tests {
     fn test_scheduler_schedule_execution_recent_blockhash_edge_case_without_race() {
         do_test_scheduler_schedule_execution_recent_blockhash_edge_case::<false>();
     }
+
+    #[test]
+    fn test_default_handler_count() {
+        for (detected, expected) in [(32, 8), (4, 1), (2, 1)] {
+            assert_eq!(
+                DefaultSchedulerPool::calculate_default_handler_count(Some(detected)),
+                expected
+            );
+        }
+        assert_eq!(
+            DefaultSchedulerPool::calculate_default_handler_count(None),
+            4
+        );
+    }
 }
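
Caller-side usage, as a minimal sketch grounded in the test changes above: `handler_count` is now the first argument to `new_dyn`/`new`, and passing `None` falls back to `default_handler_count()`, i.e. cpu_cores / 4 with a minimum of 1, or 4 when `available_parallelism()` cannot be detected. The surrounding setup mirrors the tests in this diff rather than any particular production call site.

    // Sketch only: the `None`s after handler_count are log_messages_bytes_limit,
    // transaction_status_sender, and replay_vote_sender, in that order, and the
    // fee cache is constructed exactly as in the tests above.
    let ignored_prioritization_fee_cache = Arc::new(PrioritizationFeeCache::new(0u64));
    let pool = DefaultSchedulerPool::new_dyn(
        None, // handler_count: use SchedulerPool::default_handler_count()
        None,
        None,
        None,
        ignored_prioritization_fee_cache,
    );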