@@ -17,6 +17,7 @@ const rpc = require('../rpc');
const { SERVICE_NAME } = require('../httpc/region');
const { ResponseWrapper } = require('../httpc/responseWrapper');
const { Endpoint } = require('../httpc/endpoint');
+const { StaticRegionsProvider } = require('../httpc/regionsProvider');
const { EndpointsRetryPolicy } = require('../httpc/endpointsRetryPolicy');
const { RegionsRetryPolicy } = require('../httpc/regionsRetryPolicy');
const { Retrier } = require('../retry');
@@ -86,6 +87,7 @@ function PutExtra (
    this.fname = fname || '';
    this.params = params || {};
    this.mimeType = mimeType || null;
+    // @deprecated use resumeRecorder and resumeKey instead
    this.resumeRecordFile = resumeRecordFile || null;
    this.progressCallback = progressCallback || null;
    this.partSize = partSize || conf.BLOCK_SIZE;
@@ -100,56 +102,83 @@ function PutExtra (
 * @param {Object} options
 * @param {string} options.accessKey
 * @param {string} options.bucketName
- * @param {boolean} [options.retryable]
- * @param {'v1' | 'v2' | string} [options.uploadApiVersion]
- * @param {JsonFileRecorder} [options.resumeRecorder]
- * @param {string} [options.resumeKey]
+ * @param {string} [options.key]
+ * @param {string} [options.filePath]
+ * @param {PutExtra} options.putExtra
+ *
+ * @returns Retrier
 */
function _getRegionsRetrier (options) {
    const {
-        bucketName,
        accessKey,
-        retryable = true,
+        bucketName,
+        key,
+        filePath,

-        uploadApiVersion,
-        resumeRecorder,
-        resumeKey
+        putExtra
    } = options;

    const preferredScheme = this.config.useHttpsDomain ? 'https' : 'http';
-    let preferredEndpoints;
-    const isResumeAvailable = Boolean(resumeRecorder && resumeKey);
-    if (isResumeAvailable) {
-        const resumeInfo = resumeRecorder.getSync(resumeKey);
-        if (resumeInfo && Array.isArray(resumeInfo.upDomains)) {
-            preferredEndpoints = resumeInfo.upDomains.map(d =>
-                new Endpoint(d, { defaultScheme: preferredScheme }));
-        }
+
+    let regionsProviderPromise = this.config.getRegionsProvider({
+        accessKey,
+        bucketName
+    });
+
+    // generate the resume key if there is a recorder but no resume key
+    if (putExtra.resumeRecorder && !putExtra.resumeKey) {
+        regionsProviderPromise = regionsProviderPromise
+            .then(regionsProvider => regionsProvider.getRegions())
+            .then(regions => {
+                if (!regions || !regions.length) {
+                    return Promise.reject(new Error(`no region available for the bucket "${bucketName}"`));
+                }
+                const upAccEndpoints = regions[0].services[SERVICE_NAME.UP_ACC] || [];
+                const upEndpoints = regions[0].services[SERVICE_NAME.UP] || [];
+                const upHosts = upAccEndpoints.concat(upEndpoints).map(e => e.host);
+                putExtra.resumeKey = putExtra.resumeRecorder.generateKeySync({
+                    hosts: upHosts,
+                    accessKey: accessKey,
+                    bucketName: bucketName,
+                    key: key,
+                    filePath: filePath,
+                    version: putExtra.version,
+                    partSize: putExtra.partSize
+                });
+                return new StaticRegionsProvider(regions);
+            });
    }

-    return this.config.getRegionsProvider({
-        bucketName,
-        accessKey
-    })
+    return regionsProviderPromise
        .then(regionsProvider => {
+            // handle preferred endpoints
+            let preferredEndpoints;
+            if (putExtra.resumeRecorder && putExtra.resumeKey) {
+                const resumeInfo = putExtra.resumeRecorder.getSync(putExtra.resumeKey);
+                if (resumeInfo && Array.isArray(resumeInfo.upDomains)) {
+                    preferredEndpoints = resumeInfo.upDomains.map(d =>
+                        new Endpoint(d, { defaultScheme: preferredScheme }));
+                }
+            }
+
            const serviceNames = this.config.accelerateUploading
                ? [SERVICE_NAME.UP_ACC, SERVICE_NAME.UP]
                : [SERVICE_NAME.UP];
            const retryPolicies = [
                new AccUnavailableRetryPolicy(),
                new TokenExpiredRetryPolicy({
-                    uploadApiVersion,
+                    uploadApiVersion: putExtra.version,
                    recordExistsHandler: () => {
-                        if (!isResumeAvailable) {
+                        if (!putExtra.resumeRecorder || !putExtra.resumeKey) {
                            return;
                        }
-                        resumeRecorder.hasSync(resumeKey);
+                        putExtra.resumeRecorder.hasSync(putExtra.resumeKey);
                    },
                    recordDeleteHandler: () => {
-                        if (!isResumeAvailable) {
+                        if (!putExtra.resumeRecorder || !putExtra.resumeKey) {
                            return;
                        }
-                        resumeRecorder.deleteSync(resumeKey);
+                        putExtra.resumeRecorder.deleteSync(putExtra.resumeKey);
                    }
                }),
                new EndpointsRetryPolicy({
@@ -159,10 +188,10 @@ function _getRegionsRetrier (options) {
                    regionsProvider,
                    serviceNames,
                    onChangedRegion: () => {
-                        if (!isResumeAvailable) {
+                        if (!putExtra.resumeRecorder || !putExtra.resumeKey) {
                            return;
                        }
-                        resumeRecorder.deleteSync(resumeKey);
+                        putExtra.resumeRecorder.deleteSync(putExtra.resumeKey);
                    },
                    preferredEndpoints
                })
@@ -175,12 +204,12 @@ function _getRegionsRetrier (options) {
                    if (context.error.noNeedRetry) {
                        return false;
                    }
-                    return retryable;
+                    return true;
                }
                if (policy instanceof AccUnavailableRetryPolicy) {
                    return true;
                }
-                return retryable && context.result && context.result.needRetry();
+                return context.result && context.result.needRetry();
            }
        });
    });
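
After this change the helper takes the whole putExtra and resolves to a Retrier: when a recorder is present but no resume key was supplied, the key is generated up front from the first region's UP/UP_ACC hosts and the fetched regions are reused through a StaticRegionsProvider. A minimal consumption sketch, mirroring the putFile call pattern later in this diff (uploader, localFile and the trailing putReq arguments are stand-ins, not part of the patch):

// Sketch only: drive the reworked helper the same way putFile does below.
_getRegionsRetrier.call(uploader, {
    accessKey,
    bucketName,
    key,
    filePath: localFile,
    putExtra
})
    .then(retrier => Promise.all([retrier, retrier.initContext()]))
    .then(([retrier, context]) => retrier.retry({
        func: ctx => putReq(ctx.endpoint /* , ...remaining upload arguments */),
        context
    }));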
@@ -219,33 +248,39 @@ ResumeUploader.prototype.putStream = function (
        }
    );

-    // Why need retrier even if retryable is false?
-    // Because the retrier is used to get the endpoints,
-    // which will be initialed by region policy.
-    const result = _getRegionsRetrier.call(this, {
-        bucketName: util.getBucketFromUptoken(uploadToken),
-        accessKey: util.getAKFromUptoken(uploadToken),
-        retryable: false
+    const bucketName = util.getBucketFromUptoken(uploadToken);
+    const accessKey = util.getAKFromUptoken(uploadToken);

-        // useless by not retryable
-        // uploadApiVersion: putExtra.version,
+    const result = this.config.getRegionsProvider({
+        bucketName,
+        accessKey
    })
-        .then(retrier => Promise.all([
-            retrier,
-            retrier.initContext()
-        ]))
-        .then(([retrier, context]) => retrier.retry({
-            func: context => putReq(
-                context.endpoint,
+        .then(regionsProvider => regionsProvider.getRegions())
+        .then(regions => {
+            if (!regions || !regions.length) {
+                return Promise.reject(new Error(`no region available for the bucket "${bucketName}"`));
+            }
+            const preferService = this.config.accelerateUploading
+                ? SERVICE_NAME.UP_ACC
+                : SERVICE_NAME.UP;
+            if (
+                !regions[0].services ||
+                !regions[0].services[preferService] ||
+                !regions[0].services[preferService].length
+            ) {
+                return Promise.reject(new Error(`no endpoint available for the bucket "${bucketName}"`));
+            }
+            const endpoint = regions[0].services[preferService][0];
+            return putReq(
+                endpoint,
                preferredScheme,
                uploadToken,
                key,
                rsStream,
                rsStreamLen,
                putExtra
-            ),
-            context
-        }));
+            );
+        });

    handleReqCallback(result, callbackFunc);

@@ -838,38 +873,36 @@ ResumeUploader.prototype.putFile = function (
        putExtra.fname = path.basename(localFile);
    }

-    const akFromToken = util.getAKFromUptoken(uploadToken);
-    const bucketFromToken = util.getBucketFromUptoken(uploadToken);
+    const accessKey = util.getAKFromUptoken(uploadToken);
+    const bucketName = util.getBucketFromUptoken(uploadToken);
+
    putExtra = getDefaultPutExtra(
        putExtra,
        {
-            accessKey: akFromToken,
-            bucketName: bucketFromToken,
-            key,
-            filePath: localFile
+            key
        }
    );

    const result = _getRegionsRetrier.call(this, {
-        accessKey: akFromToken,
-        bucketName: bucketFromToken,
+        accessKey,
+        bucketName,
+        key,
+        filePath: localFile,

-        uploadApiVersion: putExtra.version,
-        resumeRecorder: putExtra.resumeRecorder,
-        resumeKey: putExtra.resumeKey
+        putExtra
    })
        .then(retrier => Promise.all([
            retrier,
            retrier.initContext()
        ]))
        .then(([retrier, context]) => retrier.retry({
-            func: context => {
+            func: ctx => {
                const rsStream = fs.createReadStream(localFile, {
                    highWaterMark: conf.BLOCK_SIZE
                });
                const rsStreamLen = fs.statSync(localFile).size;
                const p = putReq(
-                    context.endpoint,
+                    ctx.endpoint,
                    preferredScheme,
                    uploadToken,
                    key,
@@ -914,10 +947,7 @@ ResumeUploader.prototype.putFileWithoutKey = function (
/**
 * @param {PutExtra} putExtra
 * @param {Object} options
- * @param {string} [options.accessKey]
- * @param {string} [options.bucketName]
 * @param {string | null} [options.key]
- * @param {string} [options.filePath]
 * @returns {PutExtra}
 */
function getDefaultPutExtra (putExtra, options) {
@@ -943,30 +973,6 @@ function getDefaultPutExtra (putExtra, options) {
        putExtra.resumeKey = parsedPath.name;
    }

-    // generate `resumeKey` if not exists
-    if (
-        putExtra.resumeRecorder &&
-        !putExtra.resumeKey &&
-        options.filePath &&
-        options.accessKey &&
-        options.bucketName
-    ) {
-        let fileLastModify;
-        try {
-            fileLastModify = options.filePath && fs.statSync(options.filePath).mtimeMs.toString();
-        } catch (_err) {
-            fileLastModify = '';
-        }
-        const recordValuesToHash = [
-            putExtra.version,
-            options.accessKey,
-            `${options.bucketName}:${options.key}`,
-            options.filePath,
-            fileLastModify
-        ];
-        putExtra.resumeKey = putExtra.resumeRecorder.generateKey(recordValuesToHash);
-    }
-
    return putExtra;
}

@@ -1001,9 +1007,9 @@ JsonFileRecorder.prototype.setSync = function (key, data) {
 * @returns {undefined | Object.<string, any>}
 */
JsonFileRecorder.prototype.getSync = function (key) {
-    const filePath = path.join(this.baseDirPath, key);
    let result;
    try {
+        const filePath = path.join(this.baseDirPath, key);
        const recordContent = fs.readFileSync(
            filePath,
            {
@@ -1018,24 +1024,71 @@ JsonFileRecorder.prototype.getSync = function (key) {
};

JsonFileRecorder.prototype.hasSync = function (key) {
-    const filePath = path.join(this.baseDirPath, key);
    try {
+        const filePath = path.join(this.baseDirPath, key);
        return fs.existsSync(filePath);
    } catch (_err) {
        return false;
    }
};

JsonFileRecorder.prototype.deleteSync = function (key) {
-    const filePath = path.join(this.baseDirPath, key);
    try {
+        const filePath = path.join(this.baseDirPath, key);
        fs.unlinkSync(filePath);
    } catch (_err) {
        // pass
    }
};

-JsonFileRecorder.prototype.generateKey = function (fields) {
+/**
+ * @param {Object} options
+ * @param {string[]} options.hosts
+ * @param {string} options.accessKey
+ * @param {string} options.bucketName
+ * @param {string} options.key
+ * @param {string} options.filePath
+ * @param {string} options.version
+ * @param {string} options.partSize
+ * @returns {string | undefined}
+ */
+JsonFileRecorder.prototype.generateKeySync = function (options) {
+    // if some options are missing, we can't generate a valid key
+    if (
+        [
+            Array.isArray(options.hosts),
+            options.accessKey,
+            options.bucketName,
+            options.key,
+            options.filePath,
+            options.version,
+            options.partSize
+        ].some(v => !v)
+    ) {
+        return;
+    }
+
+    let fileStats;
+    try {
+        fileStats = options.filePath && fs.statSync(options.filePath);
+    } catch (_err) {
+        return;
+    }
+
+    const fields = [
+        options.hosts.join(''),
+        options.accessKey,
+        options.bucketName,
+        options.key || '',
+        options.filePath,
+        fileStats ? fileStats.mtimeMs.toString() : '',
+        fileStats ? fileStats.size.toString() : '',
+        options.version, // the upload version
+        options.version === 'v1'
+            ? conf.BLOCK_SIZE.toString()
+            : options.partSize.toString(),
+        'json.v1' // the record file format version
+    ];
    const h = crypto.createHash('sha1');
    fields.forEach(v => {
        h.update(v);
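
For illustration, a hedged sketch of how the new generateKeySync could be driven directly by a caller; the JsonFileRecorder constructor argument (a base directory) is an assumption implied by this.baseDirPath above, the field names come from the JSDoc in this diff, and the method returns undefined when any field is missing or the file cannot be stat'ed:

const os = require('os');

// Assumption: JsonFileRecorder is constructed with the directory used as this.baseDirPath.
const recorder = new JsonFileRecorder(os.tmpdir());

const resumeKey = recorder.generateKeySync({
    hosts: ['upload.example.com'], // up hosts of the chosen region
    accessKey: '<accessKey>',
    bucketName: '<bucketName>',
    key: '<objectKey>',
    filePath: '/path/to/local/file', // must exist; its mtime and size go into the hash
    version: 'v2',
    partSize: 4 * 1024 * 1024
});

if (resumeKey) {
    putExtra.resumeRecorder = recorder;
    putExtra.resumeKey = resumeKey;
}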