@@ -509,19 +509,13 @@ describe('test resume up', function () {
             putExtra.partSize = partSize;
         }
 
-        let upHosts = [];
+        let recordPersistPath = '';
         const filePath = path.join(os.tmpdir(), key);
         const result = createRandomFile(filePath, fileSizeMB * (1 << 20))
             // mock file
             .then(() => {
                 // add to auto clean file
                 filepathListToDelete.push(filePath);
-                filepathListToDelete.push(putExtra.resumeRecordFile);
-
-                // upload and abort
-                putExtra.progressCallback = (_uploaded, _total) => {
-                    throw new Error('mocked error');
-                };
             })
             // get up hosts for generating resume key later
             .then(() => resumeUploader.config.getRegionsProvider({
@@ -530,14 +524,49 @@ describe('test resume up', function () {
             }))
             .then(regionsProvider => regionsProvider.getRegions())
             .then(regions => {
-                const serviceName = resumeUploader.config.accelerateUploading
-                    ? SERVICE_NAME.UP_ACC
-                    : SERVICE_NAME.UP;
-                upHosts = regions[0].services[serviceName].map(e => e.host);
+                /** @type {string[]} */
+                const upAccEndpoints = regions[0].services[SERVICE_NAME.UP_ACC] || [];
+                const upEndpoints = regions[0].services[SERVICE_NAME.UP] || [];
+                const upHosts = upAccEndpoints.concat(upEndpoints).map(e => e.host);
+                return Promise.resolve(upHosts);
             })
             // get up hosts end
+            // get record file path
+            .then(upHosts => {
+                if (resumeRecordFile) {
+                    recordPersistPath = putExtra.resumeRecordFile;
+                } else if (resumeRecorderOption) {
+                    if (resumeRecorderOption.resumeKey) {
+                        recordPersistPath = path.join(
+                            resumeRecorderOption.baseDirPath,
+                            resumeRecorderOption.resumeKey
+                        );
+                    } else if (putExtra.resumeRecorder) {
+                        const expectResumeKey = putExtra.resumeRecorder.generateKeySync({
+                            hosts: upHosts,
+                            accessKey,
+                            bucketName,
+                            key,
+                            filePath,
+                            version: version || 'v1',
+                            partSize: partSize || qiniu.conf.BLOCK_SIZE
+                        });
+                        recordPersistPath = path.join(
+                            resumeRecorderOption.baseDirPath,
+                            expectResumeKey
+                        );
+                    }
+                }
+                if (recordPersistPath) {
+                    filepathListToDelete.push(recordPersistPath);
+                }
+            })
             // mock upload failed
             .then(() => {
+                // upload and abort
+                putExtra.progressCallback = (_uploaded, _total) => {
+                    throw new Error('mocked error');
+                };
                 return resumeUploader.putFile(
                     uploadToken,
                     key,
@@ -550,19 +579,29 @@ describe('test resume up', function () {
                     }
                 });
             })
+            // check record file
+            .then(() => {
+                if (putExtra.resumeRecordFile || putExtra.resumeRecorder) {
+                    should.exists(recordPersistPath);
+                    should.ok(fs.existsSync(recordPersistPath), 'record file should exists');
+                }
+            })
             // try to upload from resume point
             .then(() => {
                 const couldResume = Boolean(putExtra.resumeRecordFile || putExtra.resumeRecorder);
-                let isFirstPart = true;
+                let isFirstPart = true; // whether this is the first part upload to succeed; when resuming, counting starts from the first part uploaded successfully after the resume point
                 putExtra.progressCallback = (uploaded, _total) => {
                     if (!isFirstPart) {
                         return;
                     }
+                    const partNumber = partSize
+                        ? uploaded / partSize
+                        : uploaded / (4 * 1024 * 1024);
                     isFirstPart = false;
-                    if (couldResume && uploaded / partSize <= 1) {
+                    if (couldResume && partNumber <= 1) {
                         throw new Error('should resume');
                     }
-                    if (!couldResume && uploaded / partSize > 1) {
+                    if (!couldResume && partNumber > 1) {
                         throw new Error('should not resume');
                     }
                 };
@@ -579,34 +618,9 @@ describe('test resume up', function () {
 
         const checkFunc = ({ data }) => {
             data.should.have.keys('key', 'hash');
-            if (resumeRecordFile) {
-                should.ok(!fs.existsSync(putExtra.resumeRecordFile));
-            } else if (resumeRecorderOption) {
-                if (resumeRecorderOption.resumeKey) {
-                    should.ok(!fs.existsSync(
-                        path.join(
-                            resumeRecorderOption.baseDirPath,
-                            resumeRecorderOption.resumeKey
-                        )
-                    ));
-                } else {
-                    should.exist(putExtra.resumeRecorder);
-                    const expectResumeKey = putExtra.resumeRecorder.generateKeySync({
-                        hosts: upHosts,
-                        accessKey,
-                        bucketName,
-                        key,
-                        filePath,
-                        version: version || 'v1',
-                        partSize: partSize || qiniu.conf.BLOCK_SIZE
-                    });
-                    should.ok(!fs.existsSync(
-                        path.join(
-                            resumeRecorderOption.baseDirPath,
-                            expectResumeKey
-                        )
-                    ));
-                }
+            if (putExtra.resumeRecordFile || putExtra.resumeRecorder) {
+                should.exists(recordPersistPath);
+                should.ok(!fs.existsSync(recordPersistPath));
             }
         };
 