@@ -3364,4 +3364,129 @@ mod test {
        pending_responses.remove(&peers[1]);
        assert_eq!(pending_responses.len(), 0);
    }
+
+    /// The test demonstrates https://github.com/paritytech/polkadot-sdk/issues/2094.
+    /// TODO: convert it into a desired-behavior test once the issue is fixed (see inline comments).
+    /// The issue: we currently rely on block numbers instead of block hashes
+    /// to download blocks from peers. As a result, we can end up with blocks
+    /// from different forks as shown by the test.
+    #[test]
+    #[should_panic]
+    fn request_across_forks() {
+        sp_tracing::try_init_simple();
+
+        let (_chain_sync_network_provider, chain_sync_network_handle) =
+            NetworkServiceProvider::new();
+        let mut client = Arc::new(TestClientBuilder::new().build());
+        let blocks = (0..100).map(|_| build_block(&mut client, None, false)).collect::<Vec<_>>();
+
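+        // Build two competing forks on top of the common 100-block chain: fork A
+        // extends it with 10 more plain blocks, fork B with 10 forked blocks (the
+        // last `build_block` argument), so the two chains diverge after block 100.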
+        let fork_a_blocks = {
+            let mut client = Arc::new(TestClientBuilder::new().build());
+            let mut fork_blocks = blocks[..]
+                .into_iter()
+                .inspect(|b| {
+                    assert!(matches!(client.block(*b.header.parent_hash()), Ok(Some(_))));
+                    block_on(client.import(BlockOrigin::Own, (*b).clone())).unwrap()
+                })
+                .cloned()
+                .collect::<Vec<_>>();
+            for _ in 0..10 {
+                fork_blocks.push(build_block(&mut client, None, false));
+            }
+            fork_blocks
+        };
+
+        let fork_b_blocks = {
+            let mut client = Arc::new(TestClientBuilder::new().build());
+            let mut fork_blocks = blocks[..]
+                .into_iter()
+                .inspect(|b| {
+                    assert!(matches!(client.block(*b.header.parent_hash()), Ok(Some(_))));
+                    block_on(client.import(BlockOrigin::Own, (*b).clone())).unwrap()
+                })
+                .cloned()
+                .collect::<Vec<_>>();
+            for _ in 0..10 {
+                fork_blocks.push(build_block(&mut client, None, true));
+            }
+            fork_blocks
+        };
+
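+        // Create the `ChainSync` under test in full sync mode, backed by the client
+        // that so far only knows the common 100-block chain.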
+        let mut sync = ChainSync::new(
+            SyncMode::Full,
+            client.clone(),
+            ProtocolName::from("test-block-announce-protocol"),
+            5,
+            64,
+            None,
+            chain_sync_network_handle,
+        )
+        .unwrap();
+
+        // Add the peers, all at the common ancestor 100.
+        let common_block = blocks.last().unwrap();
+        let peer_id1 = PeerId::random();
+        sync.new_peer(peer_id1, common_block.hash(), *common_block.header().number())
+            .unwrap();
+        let peer_id2 = PeerId::random();
+        sync.new_peer(peer_id2, common_block.hash(), *common_block.header().number())
+            .unwrap();
+
+        // Peer 1 announces 107 from fork 1, 100-107 get downloaded.
+        {
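+            // `fork_a_blocks[106]` is block number 107 (the vector is zero-indexed).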
+            let block = (&fork_a_blocks[106]).clone();
+            let peer = peer_id1;
+            log::trace!(target: LOG_TARGET, "<1> {peer} announces from fork 1");
+            send_block_announce(block.header().clone(), peer, &mut sync);
+            let request = get_block_request(&mut sync, FromBlock::Hash(block.hash()), 7, &peer);
+            let mut resp_blocks = fork_a_blocks[100_usize..107_usize].to_vec();
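+            // The response is expected to list blocks from the requested block going
+            // backwards, so reverse the ascending slice before building it.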
+            resp_blocks.reverse();
+            let response = create_block_response(resp_blocks.clone());
+            let res = sync.on_block_data(&peer, Some(request), response).unwrap();
+            assert!(matches!(
+                res,
+                OnBlockData::Import(ImportBlocksAction { origin: _, blocks }) if blocks.len() == 7_usize
+            ),);
+            assert_eq!(sync.best_queued_number, 107);
+            assert_eq!(sync.best_queued_hash, block.hash());
+            assert!(sync.is_known(&block.header.parent_hash()));
+        }
+
+        // Peer 2 also announces 107 from fork 1.
+        {
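+            // Re-announcing blocks that are already queued must not trigger new
+            // requests or change the best queued block.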
+            let prev_best_number = sync.best_queued_number;
+            let prev_best_hash = sync.best_queued_hash;
+            let peer = peer_id2;
+            log::trace!(target: LOG_TARGET, "<2> {peer} announces from fork 1");
+            for i in 100..107 {
+                let block = (&fork_a_blocks[i]).clone();
+                send_block_announce(block.header().clone(), peer, &mut sync);
+                assert!(sync.block_requests().is_empty());
+            }
+            assert_eq!(sync.best_queued_number, prev_best_number);
+            assert_eq!(sync.best_queued_hash, prev_best_hash);
+        }
+
+        // Peer 2 undergoes a reorg and announces 108 from fork 2, which gets downloaded
+        // even though we don't have its parent from fork 2.
+        {
+            let block = (&fork_b_blocks[107]).clone();
+            let peer = peer_id2;
+            log::trace!(target: LOG_TARGET, "<3> {peer} announces from fork 2");
+            send_block_announce(block.header().clone(), peer, &mut sync);
+            // TODO: when the issue is fixed, this test can be changed to test the
+            // expected behavior instead. The needed changes would be:
+            // 1. Remove the `#[should_panic]` directive.
+            // 2. Change these checks to verify that `sync.block_requests().is_empty()`
+            //    after the block is announced.
+            let request = get_block_request(&mut sync, FromBlock::Hash(block.hash()), 1, &peer);
+            let response = create_block_response(vec![block.clone()]);
+            let res = sync.on_block_data(&peer, Some(request), response).unwrap();
+            assert!(matches!(
+                res,
+                OnBlockData::Import(ImportBlocksAction { origin: _, blocks }) if blocks.len() == 1_usize
+            ),);
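+            // The parent of this block (107 on fork 2) is unknown to our client, so the
+            // final assertion is expected to fail; that is the panic `#[should_panic]`
+            // relies on until the issue is fixed.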
+            assert!(sync.is_known(&block.header.parent_hash()));
+        }
+    }
}