From 4a980d0959dc04b4d17ab443891b71d4a5a0b28b Mon Sep 17 00:00:00 2001 From: lihsai0 Date: Fri, 17 May 2024 15:20:55 +0800 Subject: [PATCH 01/18] chore: adjust hosts for querying regions --- qiniu/conf.js | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/qiniu/conf.js b/qiniu/conf.js index 11f9c6d..66c3ca9 100644 --- a/qiniu/conf.js +++ b/qiniu/conf.js @@ -27,8 +27,8 @@ exports.FormMimeRaw = 'application/octet-stream'; exports.RS_HOST = 'rs.qiniu.com'; exports.RPC_TIMEOUT = 600000; // 600s let QUERY_REGION_BACKUP_HOSTS = [ - 'uc.qbox.me', - 'api.qiniu.com' + 'kodo-config.qiniuapi.com', + 'uc.qbox.me' ]; Object.defineProperty(exports, 'QUERY_REGION_BACKUP_HOSTS', { get: () => QUERY_REGION_BACKUP_HOSTS, @@ -36,7 +36,7 @@ Object.defineProperty(exports, 'QUERY_REGION_BACKUP_HOSTS', { QUERY_REGION_BACKUP_HOSTS = v; } }); -let QUERY_REGION_HOST = 'kodo-config.qiniuapi.com'; +let QUERY_REGION_HOST = 'uc.qiniuapi.com'; Object.defineProperty(exports, 'QUERY_REGION_HOST', { get: () => QUERY_REGION_HOST, set: v => { From 8fdbf9e81a36960563e0b5af55ad4bb450568309 Mon Sep 17 00:00:00 2001 From: lihsai0 Date: Thu, 16 May 2024 19:02:23 +0800 Subject: [PATCH 02/18] feat: add Qiniu auth verify callback --- index.d.ts | 16 +++++++++++++++- qiniu/util.js | 45 ++++++++++++++++++++++++++++++++++++++------- test/util.test.js | 32 ++++++++++++++++++++++++++++++++ 3 files changed, 85 insertions(+), 8 deletions(-) diff --git a/index.d.ts b/index.d.ts index 908edfd..9d5a08b 100644 --- a/index.d.ts +++ b/index.d.ts @@ -514,8 +514,22 @@ export declare namespace util { * @param requestURI 回调的URL中的requestURI * @param reqBody 回调的URL中的requestURI 请求Body,仅当请求的ContentType为application/x-www-form-urlencoded时才需要传入该参数 * @param callbackAuth 回调时请求的Authorization头部值 + * @param extra 当回调为 Qiniu 签名时需要传入 + * @param extra.reqMethod 请求方法,例如 GET,POST + * @param extra.reqContentType 请求类型,例如 application/json 或者 application/x-www-form-urlencoded + * @param extra.reqHeaders 请求头部 */ - function 
isQiniuCallback(mac: auth.digest.Mac, requestURI: string, reqBody: string | null, callbackAuth: string): boolean; + function isQiniuCallback( + mac: auth.digest.Mac, + requestURI: string, + reqBody: string | null, + callbackAuth: string, + extra?: { + reqMethod: string, + reqContentType?: string, + reqHeaders?: Record + } + ): boolean; } export declare namespace httpc { diff --git a/qiniu/util.js b/qiniu/util.js index b3d3f92..c1678b8 100644 --- a/qiniu/util.js +++ b/qiniu/util.js @@ -115,16 +115,16 @@ exports.getMd5 = function (data) { // @param reqBody 请求Body,仅当请求的 ContentType 为 // application/x-www-form-urlencoded时才需要传入该参数 exports.generateAccessToken = function (mac, requestURI, reqBody) { - var u = new url.URL(requestURI); - var path = u.pathname + u.search; - var access = path + '\n'; + const u = new url.URL(requestURI); + const path = u.pathname + u.search; + let access = path + '\n'; if (reqBody) { access += reqBody; } - var digest = exports.hmacSha1(access, mac.secretKey); - var safeDigest = exports.base64ToUrlSafe(digest); + const digest = exports.hmacSha1(access, mac.secretKey); + const safeDigest = exports.base64ToUrlSafe(digest); return 'QBox ' + mac.accessKey + ':' + safeDigest; }; @@ -311,8 +311,39 @@ exports.generateAccessTokenV2 = function (mac, requestURI, reqMethod, reqContent // @param reqBody 请求Body,仅当请求的ContentType为 // application/x-www-form-urlencoded时才需要传入该参数 // @param callbackAuth 回调时请求的Authorization头部值 -exports.isQiniuCallback = function (mac, requestURI, reqBody, callbackAuth) { - var auth = exports.generateAccessToken(mac, requestURI, reqBody); + +/** + * @param {Mac} mac AK&SK对象 + * @param {string} requestURI 请求URL + * @param {string | Buffer} reqBody 请求Body,仅当请求的 ContentType 为 application/json 或 application/x-www-form-urlencoded 时才需要传入该参数 + * @param {string} callbackAuth 回调时请求的 Authorization 头部值 + * @param {Object} [extra] 当回调为 Qiniu 签名时需要传入 + * @param {string} extra.reqMethod 请求方法,例如 GET,POST + * @param {string} [extra.reqContentType] 
请求类型,例如 application/json 或者 application/x-www-form-urlencoded + * @param {Object.} [extra.reqHeaders] 请求头部 + * @return {boolean} + */ +exports.isQiniuCallback = function ( + mac, + requestURI, + reqBody, + callbackAuth, + extra +) { + let auth; + if (callbackAuth.startsWith('Qiniu')) { + auth = exports.generateAccessTokenV2( + mac, + requestURI, + extra.reqMethod, + extra.reqContentType, + reqBody, + extra.reqHeaders + ); + } else { + auth = exports.generateAccessToken(mac, requestURI, reqBody); + } + return auth === callbackAuth; }; diff --git a/test/util.test.js b/test/util.test.js index 75a3d22..7000dd8 100644 --- a/test/util.test.js +++ b/test/util.test.js @@ -488,4 +488,36 @@ describe('test util functions', function () { }); }); }); + + describe('test isQiniuCallback', function () { + const mac = new qiniu.auth.digest.Mac( + 'abcdefghklmnopq', + '1234567890' + ); + it('test Qbox verification', () => { + const ok = qiniu.util.isQiniuCallback( + mac, + 'https://test.qiniu.com/callback', + 'name=sunflower.jpg&hash=Fn6qeQi4VDLQ347NiRm-RlQx_4O2&location=Shanghai&price=1500.00&uid=123', + 'QBox abcdefghklmnopq:T7F-SjxX7X2zI4Fc1vANiNt1AUE=' + ); + should.ok(ok); + }); + it('test Qiniu verification', () => { + const ok = qiniu.util.isQiniuCallback( + mac, + 'https://test.qiniu.com/callback', + 'name=sunflower.jpg&hash=Fn6qeQi4VDLQ347NiRm-RlQx_4O2&location=Shanghai&price=1500.00&uid=123', + 'Qiniu abcdefghklmnopq:ZqS7EZuAKrhZaEIxqNGxDJi41IQ=', + { + reqMethod: 'GET', + reqContentType: 'application/x-www-form-urlencoded', + reqHeaders: { + 'X-Qiniu-Bbb': 'BBB' + } + } + ); + should.ok(ok); + }); + }); }); From 8538e5473c88414f4f4bb6e4674ba3726c757ede Mon Sep 17 00:00:00 2001 From: lihsai0 Date: Fri, 17 May 2024 18:01:45 +0800 Subject: [PATCH 03/18] feat: add creatBucket and deleteBucket --- index.d.ts | 25 +++++++++++++++++++++++++ qiniu/storage/rs.js | 45 ++++++++++++++++++++++++++++++++++++++++++++- test/rs.test.js | 45 +++++++++++++++++++++++++++++++++++++++++++++ 3 
files changed, 114 insertions(+), 1 deletion(-) diff --git a/index.d.ts b/index.d.ts index 9d5a08b..9b7af46 100644 --- a/index.d.ts +++ b/index.d.ts @@ -1303,6 +1303,31 @@ export declare namespace rs { */ listBucket(callbackFunc?: callback): Promise> + /** + * 创建空间 + * @param bucket 空间名 + * @param options 选项 + * @param options.regionId 区域 ID + * @param callbackFunc 回调函数 + */ + createBucket( + bucket: string, + options: { + regionId: string + }, + callbackFunc?: callback + ): Promise> + + /** + * 删除空间 + * @param bucket 空间名 + * @param callbackFunc 回调函数 + */ + deleteBucket( + bucket: string, + callbackFunc?: callback + ): Promise> + /** * 获取空间详情 * @param bucket - 空间名 diff --git a/qiniu/storage/rs.js b/qiniu/storage/rs.js index 71ae2da..63c33a6 100644 --- a/qiniu/storage/rs.js +++ b/qiniu/storage/rs.js @@ -979,7 +979,50 @@ BucketManager.prototype.listBucket = function (callbackFunc) { }; /** - * 获取bucket信息 + * 创建 bucket + * @param {string} bucket 空间名 + * @param {Object} options 选项 + * @param {string} options.regionId 区域 ID + * @param {BucketOperationCallback} [callbackFunc] 回调函数 + * @returns {Promise} + */ +BucketManager.prototype.createBucket = function (bucket, options, callbackFunc) { + const createBucketOp = `/mkbucketv3/${bucket}/region/${options.regionId}`; + return _tryReq.call(this, { + serviceName: SERVICE_NAME.UC, + func: context => { + const requestURL = _getEndpointVal.call(this, context.endpoint) + createBucketOp; + return this._httpClient.post({ + url: requestURL, + callback: wrapTryCallback(callbackFunc) + }); + } + }); +}; + +/** + * 删除 bucket + * @param {string} bucket 空间名 + * @param {BucketOperationCallback} [callbackFunc] 回调函数 + * @returns {Promise} + */ +BucketManager.prototype.deleteBucket = function (bucket, callbackFunc) { + const deleteBucketOp = `/drop/${bucket}`; + + return _tryReq.call(this, { + serviceName: SERVICE_NAME.UC, + func: context => { + const requestURL = _getEndpointVal.call(this, context) + deleteBucketOp; + return 
this._httpClient.post({ + url: requestURL, + callback: wrapTryCallback(callbackFunc) + }); + } + }); +}; + +/** + * 获取 bucket 信息 * @param {string} bucket 空间名 * @param {BucketOperationCallback} [callbackFunc] 回调函数 * @returns {Promise} diff --git a/test/rs.test.js b/test/rs.test.js index 2a035ff..05657c9 100644 --- a/test/rs.test.js +++ b/test/rs.test.js @@ -462,6 +462,51 @@ describe('test start bucket manager', function () { }); }); + describe('test createBucket and deleteBucket', function () { + const targetBucketName = bucketName + 'creating' + Math.floor(Math.random() * 100000); + it('test createBucket', function () { + const promises = doAndWrapResultPromises(callback => + bucketManager.createBucket( + targetBucketName, + callback + ) + ); + + const checkFunc = ({ + _, + resp + }) => { + should.equal(resp.statusCode, 200, JSON.stringify(resp)); + }; + + return promises.callback + .then(checkFunc) + .then(() => promises.native) + .then(checkFunc); + }); + + it('test deleteBucket', function () { + const promises = doAndWrapResultPromises(callback => + bucketManager.deleteBucket( + targetBucketName, + callback + ) + ); + + const checkFunc = ({ + _, + resp + }) => { + should.equal(resp.statusCode, 200, JSON.stringify(resp)); + }; + + return promises.callback + .then(checkFunc) + .then(() => promises.native) + .then(checkFunc); + }); + }); + describe('test listPrefix', function () { it('test listPrefix', function () { const promises = doAndWrapResultPromises(callback => From 4e4fd6c270008369a1fbfb1ecaa047b757d8f480 Mon Sep 17 00:00:00 2001 From: lihsai0 Date: Mon, 20 May 2024 19:14:08 +0800 Subject: [PATCH 04/18] feat: listBucket support shared and tagCondition --- qiniu/storage/rs.js | 40 +++++++++++++++++++++++++++++-- test/rs.test.js | 58 +++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 96 insertions(+), 2 deletions(-) diff --git a/qiniu/storage/rs.js b/qiniu/storage/rs.js index 63c33a6..b85df44 100644 --- a/qiniu/storage/rs.js +++ 
b/qiniu/storage/rs.js @@ -960,11 +960,47 @@ BucketManager.prototype.updateObjectStatus = function ( /** * 列举 bucket * @link https://developer.qiniu.com/kodo/3926/get-service + * @param {Object.} [options] + * @param {string} [options.shared] 可传入 `'rd'` 列举出读权限的空间 + * @param {Object.} [options.tagCondition] 过滤空间的标签或标签值条件,指定多个标签或标签值时同时满足条件的空间才会返回 * @param {BucketOperationCallback} [callbackFunc] 回调函数 * @returns {Promise} */ -BucketManager.prototype.listBucket = function (callbackFunc) { - const listBucketOp = '/buckets'; +BucketManager.prototype.listBucket = function (options, callbackFunc) { + let shared; + let tagCondition; + if (typeof options === 'function') { + callbackFunc = options; + } else { + options = options || {}; + shared = options.shared; + tagCondition = options.tagCondition; + } + + const reqParams = {}; + if (shared) { + reqParams.shared = shared; + } + if (tagCondition) { + reqParams.tagCondition = util.urlsafeBase64Encode( + // use Object.entries when min version of Node.js update to ≥ v7.5.0 + // the `querystring.stringify` will convert unsafe characters to percent encode, + // so stringify with below + Object.keys(tagCondition) + .map(k => { + let cond = 'key=' + k; + if (tagCondition[k]) { + cond += '&value=' + tagCondition[k]; + } + return cond; + }) + .join(';') + ); + } + const reqSpec = Object.keys(reqParams).length > 0 + ? '?' 
+ querystring.stringify(reqParams) + : ''; + const listBucketOp = '/buckets' + reqSpec; return _tryReq.call(this, { serviceName: SERVICE_NAME.UC, diff --git a/test/rs.test.js b/test/rs.test.js index 05657c9..1322c89 100644 --- a/test/rs.test.js +++ b/test/rs.test.js @@ -443,6 +443,64 @@ describe('test start bucket manager', function () { .then(() => promises.native) .then(checkFunc); }); + + it('test listBucket shared', function () { + const promises = doAndWrapResultPromises(callback => + bucketManager.listBucket( + { + shared: 'rd' + }, + callback + ) + ); + + const checkFunc = ({ data, resp }) => { + should.equal(resp.statusCode, 200, JSON.stringify(resp)); + data.should.containEql(bucketName); + }; + + return promises.callback + .then(checkFunc) + .then(() => promises.native) + .then(checkFunc); + }); + + const testParams4TagCondition = [ + { + sdk: 'nodejs' + }, + { + sdk: '', + lang: null + }, + { + sdk: 'nodejs', + lang: 'javascript' + } + ]; + + testParams4TagCondition.forEach(cond => { + it(`test listBucket tagCondition(${JSON.stringify(cond)})`, function () { + const promises = doAndWrapResultPromises(callback => + bucketManager.listBucket( + { + tagCondition: cond + }, + callback + ) + ); + + const checkFunc = ({ data, resp }) => { + should.equal(resp.statusCode, 200, JSON.stringify(resp)); + data.should.containEql(bucketName); + }; + + return promises.callback + .then(checkFunc) + .then(() => promises.native) + .then(checkFunc); + }); + }); }); describe('test bucketInfo', function () { From 0acacc2aba5e8af18faf71c18e5f3ff4550b4acd Mon Sep 17 00:00:00 2001 From: lihsai0 Date: Tue, 21 May 2024 01:17:38 +0800 Subject: [PATCH 05/18] fix: delete bucket and improve bucket event rule test cases --- qiniu/storage/rs.js | 2 +- test/rs.test.js | 26 +++++++++++++++++--------- 2 files changed, 18 insertions(+), 10 deletions(-) diff --git a/qiniu/storage/rs.js b/qiniu/storage/rs.js index b85df44..4f4505f 100644 --- a/qiniu/storage/rs.js +++ b/qiniu/storage/rs.js @@ 
-1048,7 +1048,7 @@ BucketManager.prototype.deleteBucket = function (bucket, callbackFunc) { return _tryReq.call(this, { serviceName: SERVICE_NAME.UC, func: context => { - const requestURL = _getEndpointVal.call(this, context) + deleteBucketOp; + const requestURL = _getEndpointVal.call(this, context.endpoint) + deleteBucketOp; return this._httpClient.post({ url: requestURL, callback: wrapTryCallback(callbackFunc) diff --git a/test/rs.test.js b/test/rs.test.js index 1322c89..87ec6c6 100644 --- a/test/rs.test.js +++ b/test/rs.test.js @@ -839,14 +839,22 @@ describe('test start bucket manager', function () { }); describe('test events', function () { + const bucketName4Test = 'event-test-bucket' + Math.floor(Math.random() * 100000); const eventName = 'event_test' + Math.floor(Math.random() * 100000); before(function () { - return bucketManager.deleteBucketEvent( - bucketName, - eventName - ) - .catch(() => {}); + return bucketManager.createBucket( + bucketName4Test, + { + regionId: 'z0' + } + ); + }); + + after(function () { + return bucketManager.deleteBucket( + bucketName4Test + ); }); it('test addEvents', function () { @@ -858,7 +866,7 @@ describe('test start bucket manager', function () { const promises = doAndWrapResultPromises(callback => bucketManager.putBucketEvent( - bucketName, + bucketName4Test, options, callback ) @@ -883,7 +891,7 @@ describe('test start bucket manager', function () { const promises = doAndWrapResultPromises(callback => bucketManager.updateBucketEvent( - bucketName, + bucketName4Test, options, callback ) @@ -902,7 +910,7 @@ describe('test start bucket manager', function () { it('test getEvents', function () { const promises = doAndWrapResultPromises(callback => bucketManager.getBucketEvent( - bucketName, + bucketName4Test, callback ) ); @@ -920,7 +928,7 @@ describe('test start bucket manager', function () { it('test deleteEvents', function () { const promises = doAndWrapResultPromises(callback => bucketManager.deleteBucketEvent( - bucketName, 
+ bucketName4Test, eventName, callback ) From 0bb8c6f0b22a65a4f6f25d1db828498a51ee0922 Mon Sep 17 00:00:00 2001 From: lihsai0 Date: Tue, 4 Jun 2024 11:26:34 +0800 Subject: [PATCH 06/18] feat: add support for accelerate uploading and tune retry add accelerate uploading query and retry policy add region merge and clone method add endpoint clone method add disabling file cache for the result of regions querying by set regionsQueryResultCachePath to null fix calling callback multiple time when retry using callback style fix retry when meet urllib inner error --- index.d.ts | 17 ++ qiniu/conf.js | 24 ++- qiniu/httpc/endpoint.js | 6 + qiniu/httpc/region.js | 46 +++++ qiniu/httpc/regionsProvider.js | 1 + qiniu/httpc/regionsRetryPolicy.js | 123 ++++++++++--- qiniu/retry/retrier.js | 8 +- qiniu/storage/form.js | 91 +++++----- qiniu/storage/internal.js | 86 +++++++++- qiniu/storage/resume.js | 51 +++--- qiniu/storage/rs.js | 275 ++++++++++++++++-------------- test/conf.test.js | 29 ++++ test/form_up.test.js | 101 +++++++++++ test/httpc.test.js | 184 ++++++++++++++++++-- test/resume_up.test.js | 132 ++++++++++++++ test/rs.test.js | 5 +- test/storage_internal.test.js | 152 +++++++++++++++++ 17 files changed, 1091 insertions(+), 240 deletions(-) diff --git a/index.d.ts b/index.d.ts index 9b7af46..3cd586e 100644 --- a/index.d.ts +++ b/index.d.ts @@ -148,6 +148,14 @@ export declare namespace conf { useHttpsDomain?: boolean; /** + * 在使用前需要提前开通加速域名 + * 详见:https://developer.qiniu.com/kodo/12656/transfer-acceleration + * @default false + */ + accelerateUploading?: boolean; + + /** + * @deprecated 实际已无加速上传能力,使用 accelerateUploading 代替 * @default true */ useCdnDomain?: boolean; @@ -185,6 +193,7 @@ export declare namespace conf { ucEndpointsProvider?: httpc.EndpointsProvider | null; queryRegionsEndpointsProvider?: httpc.EndpointsProvider | null; regionsProvider?: httpc.RegionsProvider | null; + regionsQueryResultCachePath?: string | null; zone?: Zone | null; zoneExpire?: number; @@ 
-702,12 +711,15 @@ export declare namespace httpc { getValue(options?: {scheme?: string}): string; getEndpoints(): Promise; + + clone(): Endpoint; } // region.js enum SERVICE_NAME { UC = 'uc', UP = 'up', + UP_ACC = 'up_acc', IO = 'io', RS = 'rs', RSF = 'rsf', @@ -778,6 +790,7 @@ export declare namespace httpc { class Region implements RegionsProvider { static fromZone(zone: conf.Zone, options?: RegionFromZoneOptions): Region; static fromRegionId(regionId: string, options?: RegionFromRegionIdOptions): Region; + static merge(...r: Region[]): Region; // non-unique regionId?: string; @@ -791,6 +804,10 @@ export declare namespace httpc { getRegions(): Promise; + clone(): Region; + + merge(...r: Region[]): Region; + get isLive(): boolean; } diff --git a/qiniu/conf.js b/qiniu/conf.js index 66c3ca9..cfd775e 100644 --- a/qiniu/conf.js +++ b/qiniu/conf.js @@ -64,27 +64,37 @@ const Config = (function () { * @constructor * @param {Object} [options] * @param {boolean} [options.useHttpsDomain] - * @param {boolean} [options.useCdnDomain] + * @param {boolean} [options.accelerateUploading] should active the domains before using * @param {EndpointsProvider} [options.ucEndpointsProvider] * @param {EndpointsProvider} [options.queryRegionsEndpointsProvider] * @param {RegionsProvider} [options.regionsProvider] - * @param {Zone} [options.zone] - * @param {number} [options.zoneExpire] + * @param {string} [options.regionsQueryResultCachePath] + * + * @param {boolean} [options.useCdnDomain] DEPRECATED: use accelerateUploading instead + * @param {Zone} [options.zone] DEPRECATED: use RegionsProvider instead + * @param {number} [options.zoneExpire] DEPRECATED */ function Config (options) { options = options || {}; // use http or https protocol this.useHttpsDomain = !!(options.useHttpsDomain || false); - // use cdn accelerated domains, this is not work with auto query region - this.useCdnDomain = !!(options.useCdnDomain && true); + + // use accelerate upload domains + this.accelerateUploading = 
!!(options.accelerateUploading || false); + // custom uc endpoints this.ucEndpointsProvider = options.ucEndpointsProvider || null; // custom query region endpoints this.queryRegionsEndpointsProvider = options.queryRegionsEndpointsProvider || null; // custom regions this.regionsProvider = options.regionsProvider || null; + // custom cache persisting path for regions query result + // only worked with default CachedRegionsProvider + this.regionsQueryResultCachePath = options.regionsQueryResultCachePath; // deprecated + // use cdn accelerated domains, this is not work with auto query region + this.useCdnDomain = !!(options.useCdnDomain && true); // zone of the bucket this.zone = options.zone || null; this.zoneExpire = options.zoneExpire || -1; @@ -216,9 +226,11 @@ const Config = (function () { const cacheKey = [ endpointsMd5, accessKey, - bucketName + bucketName, + this.accelerateUploading.toString() ].join(':'); return new CachedRegionsProvider({ + persistPath: this.regionsQueryResultCachePath, cacheKey, baseRegionsProvider: new QueryRegionsProvider({ accessKey: accessKey, diff --git a/qiniu/httpc/endpoint.js b/qiniu/httpc/endpoint.js index 333ced5..8c9b118 100644 --- a/qiniu/httpc/endpoint.js +++ b/qiniu/httpc/endpoint.js @@ -57,4 +57,10 @@ Endpoint.prototype.getEndpoints = function () { return Promise.resolve([this]); }; +Endpoint.prototype.clone = function () { + return new Endpoint(this.host, { + defaultScheme: this.defaultScheme + }); +}; + exports.Endpoint = Endpoint; diff --git a/qiniu/httpc/region.js b/qiniu/httpc/region.js index b40c641..ce0dc4d 100644 --- a/qiniu/httpc/region.js +++ b/qiniu/httpc/region.js @@ -31,6 +31,7 @@ const { Endpoint } = require('./endpoint'); const SERVICE_NAME = { UC: 'uc', UP: 'up', + UP_ACC: 'up_acc', IO: 'io', RS: 'rs', RSF: 'rsf', @@ -274,6 +275,29 @@ Region.fromRegionId = function (regionId, options) { }); }; +/** + * @param {Region} regions + * @returns {Region} + */ +Region.merge = function (...regions) { + const [source, 
...rest] = regions; + const target = source.clone(); + rest.forEach(s => { + Object.keys(s.services).forEach(n => { + if (!target.services[n]) { + target.services[n] = s.services[n].map(endpoint => endpoint.clone()); + return; + } + s.services[n].forEach(endpoint => { + if (!target.services[n].some(existsEndpoint => existsEndpoint.getValue() === endpoint.getValue())) { + target.services[n].push(endpoint.clone()); + } + }); + }); + }); + return target; +}; + /** * @returns {Promise} */ @@ -281,6 +305,28 @@ Region.prototype.getRegions = function () { return Promise.resolve([this]); }; +Region.prototype.clone = function () { + const services = Object.keys(this.services).reduce((s, n) => { + s[n] = this.services[n].map(endpoint => endpoint.clone()); + return s; + }, {}); + return new Region({ + regionId: this.regionId, + s3RegionId: this.s3RegionId, + services: services, + ttl: this.ttl, + createTime: this.createTime + }); +}; + +/** + * @param {Region} regions + * @returns {Region} + */ +Region.prototype.merge = function (...regions) { + return Region.merge(this, ...regions); +}; + Object.defineProperty(Region.prototype, 'isLive', { /** * @returns {boolean} diff --git a/qiniu/httpc/regionsProvider.js b/qiniu/httpc/regionsProvider.js index 4f6aa17..0ef1586 100644 --- a/qiniu/httpc/regionsProvider.js +++ b/qiniu/httpc/regionsProvider.js @@ -752,6 +752,7 @@ const QueryRegionsProvider = (function () { let services = { [SERVICE_NAME.UC]: convertToEndpoints(data.uc.domains), [SERVICE_NAME.UP]: convertToEndpoints(data.up.domains), + [SERVICE_NAME.UP_ACC]: convertToEndpoints(data.up.acc_domains), [SERVICE_NAME.IO]: convertToEndpoints(data.io.domains), [SERVICE_NAME.RS]: convertToEndpoints(data.rs.domains), [SERVICE_NAME.RSF]: convertToEndpoints(data.rsf.domains), diff --git a/qiniu/httpc/regionsRetryPolicy.js b/qiniu/httpc/regionsRetryPolicy.js index 357aaaf..79dba6f 100644 --- a/qiniu/httpc/regionsRetryPolicy.js +++ b/qiniu/httpc/regionsRetryPolicy.js @@ -13,12 +13,15 @@ 
const { * @typedef {EndpointsRetryPolicyContext} RegionsRetryPolicyContext * @property {Region} region * @property {Region[]} alternativeRegions + * @property {SERVICE_NAME} serviceName + * @property {SERVICE_NAME[]} alternativeServiceNames */ /** * @class * @extends RetryPolicy - * @param {SERVICE_NAME} options.serviceName + * @param {SERVICE_NAME} options.serviceName DEPRECATE: use options.serviceNames instead + * @param {SERVICE_NAME[]} options.serviceNames * @param {Region[]} [options.regions] * @param {RegionsProvider} [options.regionsProvider] * @param {Endpoint[]} [options.preferredEndpoints] @@ -27,7 +30,19 @@ const { * @constructor */ function RegionsRetryPolicy (options) { - this.serviceName = options.serviceName; + /** + * @type {SERVICE_NAME[]} + */ + this.serviceNames = options.serviceNames || []; + if (!this.serviceNames.length) { + this.serviceNames = [options.serviceName]; + } + if (!this.serviceNames.length) { + throw new TypeError('Must provide one service name at least'); + } + // compatible, remove when make break changes + this.serviceName = this.serviceNames[0]; + this.regions = options.regions || []; this.regionsProvider = options.regionsProvider || new StaticRegionsProvider([]); this.preferredEndpoints = options.preferredEndpoints || []; @@ -66,7 +81,7 @@ RegionsRetryPolicy.prototype.initContext = function (context) { * @returns {boolean} */ RegionsRetryPolicy.prototype.shouldRetry = function (context) { - return context.alternativeRegions.length > 0; + return context.alternativeRegions.length > 0 || context.alternativeServiceNames.length > 0; }; /** @@ -74,10 +89,14 @@ RegionsRetryPolicy.prototype.shouldRetry = function (context) { * @returns {Promise} */ RegionsRetryPolicy.prototype.prepareRetry = function (context) { - context.region = context.alternativeRegions.shift(); - if (!context.region) { + if (context.alternativeServiceNames.length) { + context.serviceName = context.alternativeServiceNames.shift(); + } else if 
(context.alternativeRegions.length) { + context.region = context.alternativeRegions.shift(); + [context.serviceName, ...context.alternativeServiceNames] = this.serviceNames; + } else { return Promise.reject( - new Error('There isn\'t available region for next try') + new Error('There isn\'t available region or service for next try') ); } return this._prepareEndpoints(context) @@ -88,6 +107,44 @@ RegionsRetryPolicy.prototype.prepareRetry = function (context) { }); }; +/** + * @typedef GetPreferredRegionInfoResult + * @property {number} preferredServiceIndex + * @property {number} preferredRegionIndex + */ + +/** + * @param {Region[]} options.regions + * @param {Endpoint[]} options.preferredEndpoints + * @returns {GetPreferredRegionInfoResult} + * @protected + */ +RegionsRetryPolicy.prototype._getPreferredRegionInfo = function (options) { + const { + regions, + preferredEndpoints + } = options; + + const serviceNames = this.serviceNames.slice(); + + let preferredServiceIndex = -1; + const preferredRegionIndex = regions.findIndex(r => + serviceNames.some((s, si) => + r.services[s].some(e => { + const res = preferredEndpoints.some(pe => pe.host === e.host); + if (res) { + preferredServiceIndex = si; + } + return res; + }) + ) + ); + return { + preferredServiceIndex, + preferredRegionIndex + }; +}; + /** * @param {RegionsRetryPolicyContext} options.context * @param {Region[]} options.regions @@ -110,27 +167,40 @@ RegionsRetryPolicy.prototype._initRegions = function (options) { if (!preferredEndpoints.length) { [context.region, ...context.alternativeRegions] = regions; + [context.serviceName, ...context.alternativeServiceNames] = this.serviceNames; return Promise.resolve(); } - // find preferred region by preferred endpoints - const preferredRegionIndex = regions.findIndex(r => - r.services[this.serviceName].some(e => - preferredEndpoints.some(pe => pe.host === e.host) - ) - ); + // find preferred serviceName and region by preferred endpoints + const { + 
preferredRegionIndex, + preferredServiceIndex + } = this._getPreferredRegionInfo({ + regions, + preferredEndpoints + }); + + // initialize the order of serviceNames and regions if (preferredRegionIndex < 0) { // preferred endpoints is not a region, then make all regions alternative + [context.serviceName, ...context.alternativeServiceNames] = this.serviceNames; + // compatible, remove when make break changes + this.serviceName = context.serviceName; + context.region = new Region({ services: { - [this.serviceName]: preferredEndpoints + [context.serviceName]: preferredEndpoints } }); context.alternativeRegions = regions; } else { - // preferred endpoints is a region, then reorder the regions - [context.region] = regions.splice(preferredRegionIndex, 1); + // preferred endpoints is a region, then reorder the regions and services context.alternativeRegions = regions; + [context.region] = context.alternativeRegions.splice(preferredRegionIndex, 1); + context.alternativeServiceNames = this.serviceNames.slice(); + [context.serviceName] = context.alternativeServiceNames.splice(preferredServiceIndex, 1); + // compatible, remove when make break changes + this.serviceName = context.serviceName; } return Promise.resolve(); }; @@ -138,20 +208,29 @@ RegionsRetryPolicy.prototype._initRegions = function (options) { /** * @param {RegionsRetryPolicyContext} context * @returns {Promise} - * @private + * @protected */ RegionsRetryPolicy.prototype._prepareEndpoints = function (context) { - [context.endpoint, ...context.alternativeEndpoints] = context.region.services[this.serviceName] || []; + [context.endpoint, ...context.alternativeEndpoints] = context.region.services[context.serviceName] || []; while (!context.endpoint) { - if (!context.alternativeRegions.length) { + if (context.alternativeServiceNames.length) { + context.serviceName = context.alternativeServiceNames.shift(); + // compatible, remove when make break changes + this.serviceName = context.serviceName; + [context.endpoint, 
...context.alternativeEndpoints] = context.region.services[context.serviceName] || []; + } else if (context.alternativeRegions.length) { + context.region = context.alternativeRegions.shift(); + [context.serviceName, ...context.alternativeServiceNames] = this.serviceNames; + // compatible, remove when make break changes + this.serviceName = context.serviceName; + [context.endpoint, ...context.alternativeEndpoints] = context.region.services[context.serviceName] || []; + } else { return Promise.reject(new Error( 'There isn\'t available endpoint for ' + - this.serviceName + - ' service in any available regions' + this.serviceNames.join(', ') + + ' service(s) in any available regions' )); } - context.region = context.alternativeRegions.shift(); - [context.endpoint, ...context.alternativeEndpoints] = context.region.services[this.serviceName] || []; } return Promise.resolve(); }; diff --git a/qiniu/retry/retrier.js b/qiniu/retry/retrier.js index 6aa24a1..6d96c5c 100644 --- a/qiniu/retry/retrier.js +++ b/qiniu/retry/retrier.js @@ -63,16 +63,16 @@ Retrier.prototype.retry = function (options) { if (!retryPolicy) { retryPolicy = this.retryPolicies.find(p => p.shouldRetry(context)); } - const shouldRetryPromise = this.onBeforeRetry + const couldRetryPromise = this.onBeforeRetry ? 
this.onBeforeRetry(context, retryPolicy) : retryPolicy !== undefined; return Promise.all([ - shouldRetryPromise, + couldRetryPromise, retryPolicy ]); }) - .then(([shouldRetry, retryPolicy]) => { - if (!shouldRetry) { + .then(([couldRetry, retryPolicy]) => { + if (!couldRetry || !retryPolicy) { return; } context.error = null; diff --git a/qiniu/storage/form.js b/qiniu/storage/form.js index c349b07..972f187 100644 --- a/qiniu/storage/form.js +++ b/qiniu/storage/form.js @@ -14,7 +14,10 @@ const { ResponseWrapper } = require('../httpc/responseWrapper'); const { EndpointsRetryPolicy } = require('../httpc/endpointsRetryPolicy'); const { RegionsRetryPolicy } = require('../httpc/regionsRetryPolicy'); const { Retrier } = require('../retry'); -const { wrapTryCallback } = require('./internal'); +const { + AccUnavailableRetryPolicy, + handleReqCallback +} = require('./internal'); exports.FormUploader = FormUploader; exports.PutExtra = PutExtra; @@ -48,19 +51,41 @@ function _getRegionsRetrier (options) { accessKey }) .then(regionsProvider => { - const retryPolicies = [ - new EndpointsRetryPolicy({ - skipInitContext: true - }), - new RegionsRetryPolicy({ - regionsProvider, - serviceName: SERVICE_NAME.UP - }) - ]; + let retryPolicies; + if (this.config.accelerateUploading) { + retryPolicies = [ + new AccUnavailableRetryPolicy(), + new EndpointsRetryPolicy({ + skipInitContext: true + }), + new RegionsRetryPolicy({ + regionsProvider, + serviceNames: [SERVICE_NAME.UP_ACC, SERVICE_NAME.UP] + }) + ]; + } else { + retryPolicies = [ + new EndpointsRetryPolicy({ + skipInitContext: true + }), + new RegionsRetryPolicy({ + regionsProvider, + serviceNames: [SERVICE_NAME.UP] + }) + ]; + } return new Retrier({ retryPolicies, - onBeforeRetry: context => retryable && context.result.needRetry() + onBeforeRetry: (context, policy) => { + if (context.error) { + return retryable; + } + if (policy instanceof AccUnavailableRetryPolicy) { + return true; + } + return retryable && context.result && 
context.result.needRetry(); + } }); }); }; @@ -100,19 +125,13 @@ function PutExtra ( * @param { http.IncomingMessage } info */ -/** - * @typedef UploadResult - * @property {any} data - * @property {http.IncomingMessage} resp - */ - /** * @param {string} uploadToken * @param {string | null} key * @param {stream.Readable} fsStream * @param {PutExtra | null} putExtra * @param {reqCallback} callbackFunc - * @returns {Promise} + * @returns {Promise} */ FormUploader.prototype.putStream = function ( uploadToken, @@ -134,7 +153,7 @@ FormUploader.prototype.putStream = function ( // Why need retrier even if retryable is false? // Because the retrier is used to get the endpoints, // which will be initialed by region policy. - return _getRegionsRetrier.call(this, { + const result = _getRegionsRetrier.call(this, { bucketName: util.getBucketFromUptoken(uploadToken), accessKey: util.getAKFromUptoken(uploadToken), retryable: false @@ -149,11 +168,12 @@ FormUploader.prototype.putStream = function ( uploadToken, key, fsStream, - putExtra, - callbackFunc + putExtra ), context })); + handleReqCallback(result, callbackFunc); + return result; }; /** @@ -162,15 +182,14 @@ FormUploader.prototype.putStream = function ( * @param {string} key * @param {Readable} fsStream * @param {PutExtra} putExtra - * @param {reqCallback} callbackFunc + * @returns {Promise} */ function putReq ( upDomain, uploadToken, key, fsStream, - putExtra, - callbackFunc + putExtra ) { const postForm = createMultipartForm( uploadToken, @@ -178,7 +197,6 @@ function putReq ( fsStream, putExtra ); - const wrappedCallback = wrapTryCallback(callbackFunc); return new Promise((resolve, reject) => { rpc.postMultipart( upDomain, @@ -187,14 +205,12 @@ function putReq ( if (err) { err.resp = resp; reject(err); - wrappedCallback(err, data, resp); return; } resolve(new ResponseWrapper({ data, resp })); - wrappedCallback(err, data, resp); } ); }); @@ -226,12 +242,8 @@ FormUploader.prototype.put = function ( } ); - const fsStream = 
new Readable(); - fsStream.push(body); - fsStream.push(null); - // initial retrier and try upload - return _getRegionsRetrier.call(this, { + const result = _getRegionsRetrier.call(this, { bucketName: util.getBucketFromUptoken(uploadToken), accessKey: util.getAKFromUptoken(uploadToken) }) @@ -250,12 +262,13 @@ FormUploader.prototype.put = function ( uploadToken, key, fsStream, - putExtra, - callbackFunc + putExtra ); }, context })); + handleReqCallback(result, callbackFunc); + return result; }; /** @@ -338,7 +351,7 @@ function createMultipartForm (uploadToken, key, fsStream, putExtra) { * @param {string | null} key 目标文件名 * @param {string} localFile 本地文件路径 * @param {PutExtra | null} putExtra 额外选项 - * @param callbackFunc 回调函数 + * @param {reqCallback} [callbackFunc] 回调函数 * @returns {Promise} */ FormUploader.prototype.putFile = function ( @@ -368,7 +381,7 @@ FormUploader.prototype.putFile = function ( ); // initial retrier and try upload - return _getRegionsRetrier.call(this, { + const result = _getRegionsRetrier.call(this, { bucketName: util.getBucketFromUptoken(uploadToken), accessKey: util.getAKFromUptoken(uploadToken) }) @@ -379,18 +392,18 @@ FormUploader.prototype.putFile = function ( .then(([retrier, context]) => retrier.retry({ func: context => { const fsStream = fs.createReadStream(localFile); - return putReq( context.endpoint.getValue({ scheme: preferredScheme }), uploadToken, key, fsStream, - putExtra, - callbackFunc + putExtra ); }, context })); + handleReqCallback(result, callbackFunc); + return result; }; /** 上传本地文件 diff --git a/qiniu/storage/internal.js b/qiniu/storage/internal.js index 6b68870..9318a23 100644 --- a/qiniu/storage/internal.js +++ b/qiniu/storage/internal.js @@ -6,9 +6,6 @@ const fs = require('fs'); const { RetryPolicy } = require('../retry'); -exports.TokenExpiredRetryPolicy = TokenExpiredRetryPolicy; -exports.wrapTryCallback = wrapTryCallback; - // --- split to files --- // /** * @typedef {RetryPolicyContext} TokenExpiredRetryPolicyContext 
@@ -114,6 +111,69 @@ TokenExpiredRetryPolicy.prototype.prepareRetry = function (context) { }); }; +exports.TokenExpiredRetryPolicy = TokenExpiredRetryPolicy; + +/** + * @class + * @extends RetryPolicy + * @constructor + */ +function AccUnavailableRetryPolicy () { +} + +AccUnavailableRetryPolicy.prototype = Object.create(RetryPolicy.prototype); +AccUnavailableRetryPolicy.prototype.constructor = AccUnavailableRetryPolicy; + +AccUnavailableRetryPolicy.prototype.initContext = function (context) { + return Promise.resolve(); +}; + +AccUnavailableRetryPolicy.prototype.isAccNotAvailable = function (context) { + try { + return context.result.resp.statusCode === 400 && + context.result.resp.data.error.includes('transfer acceleration is not configured on this bucket'); + } catch (_err) { + return false; + } +}; + +AccUnavailableRetryPolicy.prototype.shouldRetry = function (context) { + if (!context.result) { + return false; + } + if (!context.alternativeServiceNames.length) { + return false; + } + const [nextServiceName] = context.alternativeServiceNames; + if ( + !context.region.services[nextServiceName] || + !context.region.services[nextServiceName].length + ) { + return false; + } + return this.isAccNotAvailable(context); +}; + +AccUnavailableRetryPolicy.prototype.prepareRetry = function (context) { + if (!context.alternativeServiceNames.length) { + return Promise.reject(new Error( + 'No alternative service available.' + )); + } + + context.serviceName = context.alternativeServiceNames.shift(); + [context.endpoint, ...context.alternativeEndpoints] = context.region.services[context.serviceName]; + if (!context.endpoint) { + return Promise.reject(new Error( + 'No alternative endpoint available.' 
+ )); + } + + return Promise.resolve(); +}; + +exports.AccUnavailableRetryPolicy = AccUnavailableRetryPolicy; + /** * @param {Error} err * @param {string} msg @@ -147,3 +207,23 @@ function wrapTryCallback (fn) { } }; } + +/** + * Compatible with callback style + * Could be removed when make break changes. + */ +function handleReqCallback (responseWrapperPromise, callbackFunc) { + if (typeof callbackFunc !== 'function') { + return; + } + const wrappedCallback = wrapTryCallback(callbackFunc); + responseWrapperPromise + .then(({ data, resp }) => { + wrappedCallback(null, data, resp); + }) + .catch(err => { + wrappedCallback(err, null, err.resp); + }); +} + +exports.handleReqCallback = handleReqCallback; diff --git a/qiniu/storage/resume.js b/qiniu/storage/resume.js index 4491d99..7fb01bf 100644 --- a/qiniu/storage/resume.js +++ b/qiniu/storage/resume.js @@ -18,9 +18,10 @@ const { RegionsRetryPolicy } = require('../httpc/regionsRetryPolicy'); const { Retrier } = require('../retry'); const { - wrapTryCallback, + AccUnavailableRetryPolicy, TokenExpiredRetryPolicy, - getNoNeedRetryError + getNoNeedRetryError, + handleReqCallback } = require('./internal'); exports.ResumeUploader = ResumeUploader; @@ -115,7 +116,11 @@ function _getRegionsRetrier (options) { accessKey }) .then(regionsProvider => { + const serviceNames = this.config.accelerateUploading + ? 
[SERVICE_NAME.UP_ACC, SERVICE_NAME.UP] + : [SERVICE_NAME.UP]; const retryPolicies = [ + new AccUnavailableRetryPolicy(), new TokenExpiredRetryPolicy({ uploadApiVersion, resumeRecordFilePath @@ -125,7 +130,7 @@ function _getRegionsRetrier (options) { }), new RegionsRetryPolicy({ regionsProvider, - serviceName: SERVICE_NAME.UP, + serviceNames, onChangedRegion: () => { try { fs.unlinkSync(resumeRecordFilePath); @@ -139,11 +144,17 @@ function _getRegionsRetrier (options) { return new Retrier({ retryPolicies, - onBeforeRetry: context => { - if (context.error && context.error.noNeedRetry) { - return false; + onBeforeRetry: (context, policy) => { + if (context.error) { + if (context.error.noNeedRetry) { + return false; + } + return retryable; + } + if (policy instanceof AccUnavailableRetryPolicy) { + return true; } - return retryable && context.result.needRetry(); + return retryable && context.result && context.result.needRetry(); } }); }); @@ -185,7 +196,7 @@ ResumeUploader.prototype.putStream = function ( // Why need retrier even if retryable is false? // Because the retrier is used to get the endpoints, // which will be initialed by region policy. 
- return _getRegionsRetrier.call(this, { + const result = _getRegionsRetrier.call(this, { bucketName: util.getBucketFromUptoken(uploadToken), accessKey: util.getAKFromUptoken(uploadToken), retryable: false, @@ -206,11 +217,14 @@ ResumeUploader.prototype.putStream = function ( key, rsStream, rsStreamLen, - putExtra, - callbackFunc + putExtra ), context })); + + handleReqCallback(result, callbackFunc); + + return result; }; /** @@ -240,7 +254,6 @@ function getResumeRecordInfo (resumeRecordFilePath) { * @param {Readable} rsStream * @param {number} rsStreamLen * @param {PutExtra} putExtra - * @param {reqCallback} callbackFunc */ function putReq ( upEndpoint, @@ -249,8 +262,7 @@ function putReq ( key, rsStream, rsStreamLen, - putExtra, - callbackFunc + putExtra ) { // make block stream const blkStream = rsStream.pipe(new BlockStream({ @@ -275,8 +287,6 @@ function putReq ( throw new Error('part upload version number error'); } - const wrappedCallback = wrapTryCallback(callbackFunc); - // upload parts return new Promise((resolve, reject) => { doPutReq( @@ -298,7 +308,6 @@ function putReq ( if (err) { err.resp = info; reject(err); - wrappedCallback(err, ret, info); return; } if (info.statusCode === 200 && putExtra.resumeRecordFile) { @@ -309,7 +318,6 @@ function putReq ( } } resolve(new ResponseWrapper({ data: ret, resp: info })); - wrappedCallback(err, ret, info); } ); }); @@ -838,7 +846,7 @@ ResumeUploader.prototype.putFile = function ( } ); - return _getRegionsRetrier.call(this, { + const result = _getRegionsRetrier.call(this, { bucketName: util.getBucketFromUptoken(uploadToken), accessKey: util.getAKFromUptoken(uploadToken), @@ -862,8 +870,7 @@ ResumeUploader.prototype.putFile = function ( key, rsStream, rsStreamLen, - putExtra, - callbackFunc + putExtra ); p .then(() => { @@ -877,6 +884,10 @@ ResumeUploader.prototype.putFile = function ( }, context })); + + handleReqCallback(result, callbackFunc); + + return result; }; /** diff --git a/qiniu/storage/rs.js 
b/qiniu/storage/rs.js index 4f4505f..8f58d79 100644 --- a/qiniu/storage/rs.js +++ b/qiniu/storage/rs.js @@ -11,7 +11,7 @@ const { RegionsRetryPolicy } = require('../httpc/regionsRetryPolicy'); const { Retrier } = require('../retry'); const pkg = require('../../package.json'); -const { wrapTryCallback } = require('./internal'); +const { handleReqCallback } = require('./internal'); exports.BucketManager = BucketManager; exports.PutPolicy = PutPolicy; @@ -75,7 +75,7 @@ function _getRetryPolicies (options) { }), new RegionsRetryPolicy({ regionsProvider, - serviceName + serviceNames: [serviceName] }) ]; } @@ -103,7 +103,12 @@ function _getRegionsRetrier (options) { }); return new Retrier({ retryPolicies, - onBeforeRetry: context => context.result && context.result.needRetry() + onBeforeRetry: context => { + if (context.error) { + return true; + } + return context.result && context.result.needRetry(); + } }); }); } @@ -135,8 +140,12 @@ function _getUcRetrier () { retryPolicies: _getUcRetryPolices.call(this, { ucProvider }), - onBeforeRetry: context => - context.result.needRetry() + onBeforeRetry: context => { + if (context.error) { + return true; + } + return context.result && context.result.needRetry(); + } }); } @@ -144,6 +153,7 @@ function _getUcRetrier () { * @param {string} [options.bucketName] * @param {SERVICE_NAME} options.serviceName * @param {function(RegionsRetryPolicyContext | EndpointsRetryPolicyContext): Promise} options.func + * @param {BucketOperationCallback} [options.callbackFunc] * @returns {Promise} * @private */ @@ -151,23 +161,26 @@ function _tryReq (options) { const { bucketName, serviceName, - func + func, + callbackFunc } = options; if (serviceName === SERVICE_NAME.UC) { const retrier = _getUcRetrier.call(this); - return retrier.initContext() + const result = retrier.initContext() .then(context => retrier.retry({ func, context })); + handleReqCallback(result, callbackFunc); + return result; } if (!bucketName) { return Promise.reject(new 
Error('Must provide bucket name for non-uc services')); } - return _getRegionsRetrier.call(this, { + const result = _getRegionsRetrier.call(this, { bucketName, serviceName }) @@ -179,6 +192,8 @@ function _tryReq (options) { func, context })); + handleReqCallback(result, callbackFunc); + return result; } /** @@ -197,10 +212,10 @@ BucketManager.prototype.stat = function (bucket, key, callbackFunc) { func: context => { const requestURL = _getEndpointVal.call(this, context.endpoint) + statOp; return this._httpClient.get({ - url: requestURL, - callback: wrapTryCallback(callbackFunc) + url: requestURL }); - } + }, + callbackFunc }); }; @@ -226,10 +241,10 @@ BucketManager.prototype.changeMime = function ( func: context => { const requestURL = _getEndpointVal.call(this, context.endpoint) + changeMimeOp; return this._httpClient.post({ - url: requestURL, - callback: wrapTryCallback(callbackFunc) + url: requestURL }); - } + }, + callbackFunc }); }; @@ -255,10 +270,10 @@ BucketManager.prototype.changeHeaders = function ( func: context => { const requestURL = _getEndpointVal.call(this, context.endpoint) + changeHeadersOp; return this._httpClient.post({ - url: requestURL, - callback: wrapTryCallback(callbackFunc) + url: requestURL }); - } + }, + callbackFunc }); }; @@ -289,10 +304,10 @@ BucketManager.prototype.move = function ( func: context => { const requestURL = _getEndpointVal.call(this, context.endpoint) + moveOp; return this._httpClient.post({ - url: requestURL, - callback: wrapTryCallback(callbackFunc) + url: requestURL }); - } + }, + callbackFunc }); }; @@ -323,10 +338,10 @@ BucketManager.prototype.copy = function ( func: context => { const requestURL = _getEndpointVal.call(this, context.endpoint) + copyOp; return this._httpClient.post({ - url: requestURL, - callback: wrapTryCallback(callbackFunc) + url: requestURL }); - } + }, + callbackFunc }); }; @@ -346,10 +361,10 @@ BucketManager.prototype.delete = function (bucket, key, callbackFunc) { func: context => { const 
requestURL = _getEndpointVal.call(this, context.endpoint) + deleteOp; return this._httpClient.post({ - url: requestURL, - callback: wrapTryCallback(callbackFunc) + url: requestURL }); - } + }, + callbackFunc }); }; @@ -375,10 +390,10 @@ BucketManager.prototype.deleteAfterDays = function ( func: context => { const requestURL = _getEndpointVal.call(this, context.endpoint) + deleteAfterDaysOp; return this._httpClient.post({ - url: requestURL, - callback: wrapTryCallback(callbackFunc) + url: requestURL }); - } + }, + callbackFunc }); }; @@ -413,10 +428,10 @@ BucketManager.prototype.setObjectLifeCycle = function ( func: context => { const requestURL = _getEndpointVal.call(this, context.endpoint) + setObjectLifecycleOp; return this._httpClient.post({ - url: requestURL, - callback: wrapTryCallback(callbackFunc) + url: requestURL }); - } + }, + callbackFunc }); }; @@ -440,10 +455,10 @@ BucketManager.prototype.fetch = function (resUrl, bucket, key, callbackFunc) { func: context => { const requestURL = _getEndpointVal.call(this, context.endpoint) + fetchOp; return this._httpClient.post({ - url: requestURL, - callback: wrapTryCallback(callbackFunc) + url: requestURL }); - } + }, + callbackFunc }); }; @@ -464,10 +479,10 @@ BucketManager.prototype.prefetch = function (bucket, key, callbackFunc) { func: context => { const requestURL = _getEndpointVal.call(this, context.endpoint) + prefetchOp; return this._httpClient.post({ - url: requestURL, - callback: wrapTryCallback(callbackFunc) + url: requestURL }); - } + }, + callbackFunc }); }; @@ -493,10 +508,10 @@ BucketManager.prototype.changeType = function ( func: context => { const requestURL = _getEndpointVal.call(this, context.endpoint) + changeTypeOp; return this._httpClient.post({ - url: requestURL, - callback: wrapTryCallback(callbackFunc) + url: requestURL }); - } + }, + callbackFunc }); }; @@ -527,10 +542,10 @@ BucketManager.prototype.image = function ( func: context => { const requestURL = _getEndpointVal.call(this, 
context.endpoint) + reqOp; return this._httpClient.post({ - url: requestURL, - callback: wrapTryCallback(callbackFunc) + url: requestURL }); - } + }, + callbackFunc }); }; @@ -548,10 +563,10 @@ BucketManager.prototype.unimage = function (bucket, callbackFunc) { func: context => { const requestURL = _getEndpointVal.call(this, context.endpoint) + reqOp; return this._httpClient.post({ - url: requestURL, - callback: wrapTryCallback(callbackFunc) + url: requestURL }); - } + }, + callbackFunc }); }; @@ -608,10 +623,10 @@ BucketManager.prototype.listPrefix = function (bucket, options, callbackFunc) { func: context => { const requestURL = _getEndpointVal.call(this, context.endpoint) + reqOp; return this._httpClient.post({ - url: requestURL, - callback: wrapTryCallback(callbackFunc) + url: requestURL }); - } + }, + callbackFunc }); }; @@ -670,14 +685,14 @@ BucketManager.prototype.listPrefixV2 = function (bucket, options, callbackFunc) const requestURL = _getEndpointVal.call(this, context.endpoint) + reqOp; return this._httpClient.post( { - url: requestURL, - callback: wrapTryCallback(callbackFunc) + url: requestURL }, { dataType: 'text' } ); - } + }, + callbackFunc }); }; @@ -726,10 +741,10 @@ BucketManager.prototype.batch = function (operations, callbackFunc) { const requestURL = _getEndpointVal.call(this, context.endpoint) + reqOp; return this._httpClient.post({ url: requestURL, - data: reqData, - callback: wrapTryCallback(callbackFunc) + data: reqData }); - } + }, + callbackFunc }); }; @@ -950,10 +965,10 @@ BucketManager.prototype.updateObjectStatus = function ( func: context => { const requestURL = _getEndpointVal.call(this, context.endpoint) + changeStatusOp; return this._httpClient.post({ - url: requestURL, - callback: wrapTryCallback(callbackFunc) + url: requestURL }); - } + }, + callbackFunc }); }; @@ -1007,32 +1022,36 @@ BucketManager.prototype.listBucket = function (options, callbackFunc) { func: context => { const requestURL = _getEndpointVal.call(this, 
context.endpoint) + listBucketOp; return this._httpClient.post({ - url: requestURL, - callback: wrapTryCallback(callbackFunc) + url: requestURL }); - } + }, + callbackFunc }); }; /** * 创建 bucket * @param {string} bucket 空间名 - * @param {Object} options 选项 - * @param {string} options.regionId 区域 ID + * @param {Object} [options] 选项 + * @param {string} [options.regionId] 区域 ID * @param {BucketOperationCallback} [callbackFunc] 回调函数 * @returns {Promise} */ BucketManager.prototype.createBucket = function (bucket, options, callbackFunc) { - const createBucketOp = `/mkbucketv3/${bucket}/region/${options.regionId}`; + options = options || {}; + let createBucketOp = `/mkbucketv3/${bucket}`; + if (options.regionId) { + createBucketOp += `/region/${options.regionId}`; + } return _tryReq.call(this, { serviceName: SERVICE_NAME.UC, func: context => { const requestURL = _getEndpointVal.call(this, context.endpoint) + createBucketOp; return this._httpClient.post({ - url: requestURL, - callback: wrapTryCallback(callbackFunc) + url: requestURL }); - } + }, + callbackFunc }); }; @@ -1050,10 +1069,10 @@ BucketManager.prototype.deleteBucket = function (bucket, callbackFunc) { func: context => { const requestURL = _getEndpointVal.call(this, context.endpoint) + deleteBucketOp; return this._httpClient.post({ - url: requestURL, - callback: wrapTryCallback(callbackFunc) + url: requestURL }); - } + }, + callbackFunc }); }; @@ -1070,10 +1089,10 @@ BucketManager.prototype.getBucketInfo = function (bucket, callbackFunc) { func: context => { const requestURL = _getEndpointVal.call(this, context.endpoint) + bucketInfoOp; return this._httpClient.post({ - url: requestURL, - callback: wrapTryCallback(callbackFunc) + url: requestURL }); - } + }, + callbackFunc }); }; @@ -1126,10 +1145,10 @@ BucketManager.prototype.putBucketLifecycleRule = function ( const requestURL = _getEndpointVal.call(this, context.endpoint) + reqOp; return this._httpClient.post({ url: requestURL, - data: reqSpec, - callback: 
wrapTryCallback(callbackFunc) + data: reqSpec }); - } + }, + callbackFunc }); }; @@ -1154,10 +1173,10 @@ BucketManager.prototype.deleteBucketLifecycleRule = function (bucket, name, call const requestURL = _getEndpointVal.call(this, context.endpoint) + reqOp; return this._httpClient.post({ url: requestURL, - data: reqSpec, - callback: wrapTryCallback(callbackFunc) + data: reqSpec }); - } + }, + callbackFunc }); }; @@ -1196,10 +1215,10 @@ BucketManager.prototype.updateBucketLifecycleRule = function (bucket, options, c const requestURL = _getEndpointVal.call(this, context.endpoint) + reqOp; return this._httpClient.post({ url: requestURL, - data: reqSpec, - callback: wrapTryCallback(callbackFunc) + data: reqSpec }); - } + }, + callbackFunc }); }; @@ -1217,10 +1236,10 @@ BucketManager.prototype.getBucketLifecycleRule = function (bucket, callbackFunc) func: context => { const requestURL = _getEndpointVal.call(this, context.endpoint) + reqOp; return this._httpClient.get({ - url: requestURL, - callback: wrapTryCallback(callbackFunc) + url: requestURL }); - } + }, + callbackFunc }); }; @@ -1267,10 +1286,10 @@ BucketManager.prototype.putBucketEvent = function (bucket, options, callbackFunc const requestURL = _getEndpointVal.call(this, context.endpoint) + reqOp; return this._httpClient.post({ url: requestURL, - data: reqSpec, - callback: wrapTryCallback(callbackFunc) + data: reqSpec }); - } + }, + callbackFunc }); }; @@ -1304,10 +1323,10 @@ BucketManager.prototype.updateBucketEvent = function (bucket, options, callbackF const requestURL = _getEndpointVal.call(this, context.endpoint) + reqOp; return this._httpClient.post({ url: requestURL, - data: reqSpec, - callback: wrapTryCallback(callbackFunc) + data: reqSpec }); - } + }, + callbackFunc }); }; @@ -1324,10 +1343,10 @@ BucketManager.prototype.getBucketEvent = function (bucket, callbackFunc) { func: context => { const requestURL = _getEndpointVal.call(this, context.endpoint) + reqOp; return this._httpClient.get({ - url: 
requestURL, - callback: wrapTryCallback(callbackFunc) + url: requestURL }); - } + }, + callbackFunc }); }; @@ -1352,10 +1371,10 @@ BucketManager.prototype.deleteBucketEvent = function (bucket, name, callbackFunc const requestURL = _getEndpointVal.call(this, context.endpoint) + reqOp; return this._httpClient.post({ url: requestURL, - data: reqSpec, - callback: wrapTryCallback(callbackFunc) + data: reqSpec }); - } + }, + callbackFunc }); }; @@ -1395,10 +1414,10 @@ BucketManager.prototype.putReferAntiLeech = function (bucket, options, callbackF func: context => { const requestURL = _getEndpointVal.call(this, context.endpoint) + reqOp; return this._httpClient.post({ - url: requestURL, - callback: wrapTryCallback(callbackFunc) + url: requestURL }); - } + }, + callbackFunc }); }; @@ -1428,10 +1447,10 @@ BucketManager.prototype.putCorsRules = function (bucket, body, callbackFunc) { const requestURL = _getEndpointVal.call(this, context.endpoint) + reqOp; return this._httpClient.post({ url: requestURL, - data: reqBody, - callback: wrapTryCallback(callbackFunc) + data: reqBody }); - } + }, + callbackFunc }); }; @@ -1448,10 +1467,10 @@ BucketManager.prototype.getCorsRules = function (bucket, callbackFunc) { func: context => { const requestURL = _getEndpointVal.call(this, context.endpoint) + reqOp; return this._httpClient.post({ - url: requestURL, - callback: wrapTryCallback(callbackFunc) + url: requestURL }); - } + }, + callbackFunc }); }; @@ -1469,10 +1488,10 @@ BucketManager.prototype.putBucketAccessStyleMode = function (bucket, mode, callb func: context => { const requestURL = _getEndpointVal.call(this, context.endpoint) + reqOp; return this._httpClient.post({ - url: requestURL, - callback: wrapTryCallback(callbackFunc) + url: requestURL }); - } + }, + callbackFunc }); }; @@ -1500,10 +1519,10 @@ BucketManager.prototype.putBucketMaxAge = function (bucket, options, callbackFun func: context => { const requestURL = _getEndpointVal.call(this, context.endpoint) + reqOp; return 
this._httpClient.post({ - url: requestURL, - callback: wrapTryCallback(callbackFunc) + url: requestURL }); - } + }, + callbackFunc }); }; @@ -1533,10 +1552,10 @@ BucketManager.prototype.putBucketAccessMode = function (bucket, options, callbac func: context => { const requestURL = _getEndpointVal.call(this, context.endpoint) + reqOp; return this._httpClient.post({ - url: requestURL, - callback: wrapTryCallback(callbackFunc) + url: requestURL }); - } + }, + callbackFunc }); }; @@ -1574,10 +1593,10 @@ BucketManager.prototype.putBucketQuota = function (bucket, options, callbackFunc func: context => { const requestURL = _getEndpointVal.call(this, context.endpoint) + reqOp; return this._httpClient.post({ - url: requestURL, - callback: wrapTryCallback(callbackFunc) + url: requestURL }); - } + }, + callbackFunc }); }; @@ -1594,10 +1613,10 @@ BucketManager.prototype.getBucketQuota = function (bucket, callbackFunc) { func: context => { const requestURL = _getEndpointVal.call(this, context.endpoint) + reqOp; return this._httpClient.post({ - url: requestURL, - callback: wrapTryCallback(callbackFunc) + url: requestURL }); - } + }, + callbackFunc }); }; @@ -1615,10 +1634,10 @@ BucketManager.prototype.listBucketDomains = function (bucket, callbackFunc) { func: context => { const requestURL = _getEndpointVal.call(this, context.endpoint) + reqOp; return this._httpClient.post({ - url: requestURL, - callback: wrapTryCallback(callbackFunc) + url: requestURL }); - } + }, + callbackFunc }); }; @@ -1639,10 +1658,10 @@ BucketManager.prototype.restoreAr = function (entry, freezeAfterDays, callbackFu func: context => { const requestURL = _getEndpointVal.call(this, context.endpoint) + restoreArOp; return this._httpClient.post({ - url: requestURL, - callback: wrapTryCallback(callbackFunc) + url: requestURL }); - } + }, + callbackFunc }); }; diff --git a/test/conf.test.js b/test/conf.test.js index 6d8eee6..805885b 100644 --- a/test/conf.test.js +++ b/test/conf.test.js @@ -1,6 +1,9 @@ const 
should = require('should'); const qiniu = require('../index'); +const path = require('path'); +const os = require('os'); +const fs = require('fs'); describe('test Config class', function () { const { @@ -191,4 +194,30 @@ describe('test Config class', function () { }); }); }); + + describe('test disable file cache', function () { + it('test disable file cache', function () { + const defaultPersistPath = path.join(os.tmpdir(), 'qn-regions-cache.jsonl'); + try { + fs.unlinkSync(defaultPersistPath); + } catch (e) { + if (e.code !== 'ENOENT') { + throw e; + } + } + + const config = new qiniu.conf.Config(); + config.regionsQueryResultCachePath = null; + + return config.getRegionsProvider({ + bucketName, + accessKey + }) + .then(regionsProvider => regionsProvider.getRegions()) + .then(regions => { + should.ok(regions.length > 0); + should.ok(!fs.existsSync(defaultPersistPath)); + }); + }); + }); }); diff --git a/test/form_up.test.js b/test/form_up.test.js index 7af1233..40b1bc4 100644 --- a/test/form_up.test.js +++ b/test/form_up.test.js @@ -6,6 +6,13 @@ const fs = require('fs'); const qiniu = require('../index.js'); +const { + Endpoint, + Region, + SERVICE_NAME, + StaticRegionsProvider +} = qiniu.httpc; + const { getEnvConfig, checkEnvConfigAndExit, @@ -438,4 +445,98 @@ describe('test form up', function () { }); }); }); + + describe('test form up#accelerateUploading', function () { + const accConfig = new qiniu.conf.Config(); + // accConfig.useHttpsDomain = true; + accConfig.accelerateUploading = true; + const accFormUploader = new qiniu.form_up.FormUploader(accConfig); + const bucketNameWithoutAcc = 'bucket-without-acc-' + Math.floor(Math.random() * 100000); + + before(function () { + return bucketManager.createBucket(bucketNameWithoutAcc); + }); + + it('upload acc normally', function () { + const key = 'storage_putFile_acc_test' + Math.floor(Math.random() * 100000); + + const promises = doAndWrapResultPromises(callback => + accFormUploader.putFile(uploadToken, key, 
testFilePath2, putExtra, callback) + ); + + const checkFunc = ({ data }) => { + data.should.have.keys('key', 'hash'); + }; + + return promises.callback + .then(checkFunc) + .then(() => promises.native) + .then(checkFunc) + .then(() => { + keysToDelete.push(key); + }); + }); + + it('upload acc unavailable fallback to src', function () { + const key = 'storage_putFile_acc_test' + Math.floor(Math.random() * 100000); + + const putPolicy = new qiniu.rs.PutPolicy({ + scope: bucketNameWithoutAcc + }); + const uploadToken = putPolicy.uploadToken(mac); + const r1 = Region.fromRegionId('z0'); + r1.services[SERVICE_NAME.UP_ACC] = [ + new Endpoint(`${bucketNameWithoutAcc}.kodo-accelerate.cn-east-1.qiniucs.com`), + new Endpoint('qn-up-acc.fake.qiniu.com') + ]; + accConfig.regionsProvider = r1; + + const promises = doAndWrapResultPromises(callback => + accFormUploader.putFile(uploadToken, key, testFilePath2, putExtra, callback) + ); + + const checkFunc = ({ data }) => { + data.should.have.keys('key', 'hash'); + }; + + return promises.callback + .then(checkFunc) + .then(() => promises.native) + .then(checkFunc) + .then(() => { + keysToDelete.push(key); + }); + }); + + it('upload acc network error fallback to src', function () { + const key = 'storage_putFile_acc_test' + Math.floor(Math.random() * 100000); + + const putPolicy = new qiniu.rs.PutPolicy({ + scope: bucketNameWithoutAcc + }); + const uploadToken = putPolicy.uploadToken(mac); + const r1 = Region.fromRegionId('z0'); + r1.services[SERVICE_NAME.UP_ACC] = [ + new Endpoint('qiniu-acc.fake.qiniu.com'), + new Endpoint('qn-up-acc.fake.qiniu.com') + ]; + accConfig.regionsProvider = r1; + + const promises = doAndWrapResultPromises(callback => + accFormUploader.putFile(uploadToken, key, testFilePath2, putExtra) + ); + + const checkFunc = ({ data }) => { + data.should.have.keys('key', 'hash'); + }; + + return promises.native + .then(checkFunc) + .then(() => promises.native) + .then(checkFunc) + .then(() => { + 
keysToDelete.push(key); + }); + }); + }); }); diff --git a/test/httpc.test.js b/test/httpc.test.js index 22a8ee5..f821a48 100644 --- a/test/httpc.test.js +++ b/test/httpc.test.js @@ -393,6 +393,7 @@ describe('test http module', function () { 'https://up.qiniup.com', 'https://up.qbox.me' ], + [SERVICE_NAME.UP_ACC]: [], [SERVICE_NAME.IO]: [ 'https://iovip.qiniuio.com', 'https://iovip.qbox.me' @@ -456,6 +457,7 @@ describe('test http module', function () { `${preferredScheme}://up-z1.qbox.me`, `${preferredScheme}://upload-z1.qiniup.com` ], + [SERVICE_NAME.UP_ACC]: [], [SERVICE_NAME.IO]: [ `${preferredScheme}://iovip-z1.qiniuio.com`, `${preferredScheme}://iovip-z1.qbox.me` @@ -484,6 +486,100 @@ describe('test http module', function () { should.equal(regionZ1.ttl, -1); should.equal(regionZ1.createTime.getTime(), 0); }); + + it('test clone', function () { + const r = Region.fromRegionId('z0'); + + const rCloned = r.clone(); + + rCloned.regionId = 'z1'; + rCloned.services[SERVICE_NAME.UP_ACC] = [ + new Endpoint('fake-endpoint.qiniu.com') + ]; + + r.regionId.should.eql('z0'); + r.services[SERVICE_NAME.UP_ACC].should.deepEqual([]); + }); + + it('test static merge', function () { + const r1 = Region.fromRegionId('z0'); + const r2 = new Region({ + regionId: 'faker2', + s3RegionId: 'z', + services: { + [SERVICE_NAME.UC]: [ + new Endpoint('fake-uc.qiniu.com') + ], + 'custom-service': [ + new Endpoint('custom-service.example.com') + ] + }, + createTime: new Date(Date.now() - 86400 * 1000), + ttl: 3600 + }); + const regions = [ + r1, + r2 + ]; + + const rMerged = Region.merge(...regions); + rMerged.regionId.should.eql(r1.regionId); + rMerged.s3RegionId.should.eql(r1.s3RegionId); + rMerged.createTime.should.eql(r1.createTime); + rMerged.ttl.should.eql(r1.ttl); + Object.keys(rMerged.services).forEach(serviceName => { + if (serviceName === SERVICE_NAME.UC) { + rMerged.services[serviceName].should.deepEqual([ + ...r1.services[serviceName], + ...r2.services[serviceName] + ]); + return; 
+ } + if (serviceName === 'custom-service') { + rMerged.services[serviceName].should.deepEqual(r2.services[serviceName]); + return; + } + rMerged.services[serviceName].should.deepEqual(r1.services[serviceName]); + }); + }); + + it('test merge', function () { + const r1 = Region.fromRegionId('z0'); + const r2 = new Region({ + regionId: 'faker2', + s3RegionId: 'z', + services: { + [SERVICE_NAME.UC]: [ + new Endpoint('fake-uc.qiniu.com') + ], + 'custom-service': [ + new Endpoint('custom-service.example.com') + ] + }, + createTime: new Date(Date.now() - 86400 * 1000), + ttl: 3600 + }); + + const rMerged = r1.merge(r2); + rMerged.regionId.should.eql(r1.regionId); + rMerged.s3RegionId.should.eql(r1.s3RegionId); + rMerged.createTime.should.eql(r1.createTime); + rMerged.ttl.should.eql(r1.ttl); + Object.keys(rMerged.services).forEach(serviceName => { + if (serviceName === SERVICE_NAME.UC) { + rMerged.services[serviceName].should.deepEqual([ + ...r1.services[serviceName], + ...r2.services[serviceName] + ]); + return; + } + if (serviceName === 'custom-service') { + rMerged.services[serviceName].should.deepEqual(r2.services[serviceName]); + return; + } + rMerged.services[serviceName].should.deepEqual(r1.services[serviceName]); + }); + }); }); describe('test endpoints provider', function () { @@ -1108,7 +1204,7 @@ describe('test http module', function () { }); }); - it('test shouldRetry', function () { + it('test shouldRetry(legacy)', function () { const regionsRetryPolicy = new RegionsRetryPolicy({ regionsProvider: regionsProvider, serviceName: SERVICE_NAME.UP @@ -1125,7 +1221,46 @@ describe('test http module', function () { }); }); - it('test prepareRetry', function () { + it('test shouldRetry(alternativeServiceNames)', function () { + const accRegion = Region.fromRegionId('z0'); + accRegion.services[SERVICE_NAME.UP_ACC] = [ + new Endpoint(`${bucketName}.kodo-accelerate.cn-east-1.qiniucs.com`) + ]; + const regionsRetryPolicy = new RegionsRetryPolicy({ + regionsProvider: 
accRegion, + serviceNames: [SERVICE_NAME.UP_ACC, SERVICE_NAME.UP] + }); + + const mockedContext = { + error: null, + retried: false + }; + + return regionsRetryPolicy.initContext(mockedContext) + .then(() => { + should.ok(regionsRetryPolicy.shouldRetry(mockedContext)); + }); + }); + + it('test not shouldRetry(alternativeServiceNames)', function () { + const accRegion = Region.fromRegionId('z0'); + const regionsRetryPolicy = new RegionsRetryPolicy({ + regionsProvider: accRegion, + serviceNames: [SERVICE_NAME.UP_ACC, SERVICE_NAME.UP] + }); + + const mockedContext = { + error: null, + retried: false + }; + + return regionsRetryPolicy.initContext(mockedContext) + .then(() => { + should.ok(!regionsRetryPolicy.shouldRetry(mockedContext)); + }); + }); + + it('test shouldRetry(alternativeRegions)', function () { const regionsRetryPolicy = new RegionsRetryPolicy({ regionsProvider: regionsProvider, serviceName: SERVICE_NAME.UP @@ -1138,21 +1273,36 @@ describe('test http module', function () { return regionsRetryPolicy.initContext(mockedContext) .then(() => { - return regions - .reduce((promise, expectedRegion, currentIndex) => { - return promise - .then(() => { - const isLastOne = currentIndex === regions.length - 1; - should.equal(mockedContext.region.regionId, expectedRegion.regionId); - should.deepEqual(mockedContext.endpoint, expectedRegion.services[SERVICE_NAME.UP][0]); - should.deepEqual(mockedContext.alternativeEndpoints, expectedRegion.services[SERVICE_NAME.UP].slice(1)); - if (isLastOne) { - return Promise.resolve(); - } - return regionsRetryPolicy.prepareRetry(mockedContext); - }); - }, Promise.resolve()); - }) + should.equal(regionsRetryPolicy.shouldRetry(mockedContext), true); + }); + }); + + it('test prepareRetry(legacy', function () { + const regionsRetryPolicy = new RegionsRetryPolicy({ + regionsProvider: regionsProvider, + serviceName: SERVICE_NAME.UP + }); + + const mockedContext = { + error: null, + retried: false + }; + + return 
regionsRetryPolicy.initContext(mockedContext) + .then(() => regions.reduce( + (promise, expectedRegion, currentIndex) => + promise.then(() => { + const isLastOne = currentIndex === regions.length - 1; + should.equal(mockedContext.region.regionId, expectedRegion.regionId); + should.deepEqual(mockedContext.endpoint, expectedRegion.services[SERVICE_NAME.UP][0]); + should.deepEqual(mockedContext.alternativeEndpoints, expectedRegion.services[SERVICE_NAME.UP].slice(1)); + if (isLastOne) { + return Promise.resolve(); + } + return regionsRetryPolicy.prepareRetry(mockedContext); + }), + Promise.resolve() + )) .then(() => { should.equal(regionsRetryPolicy.shouldRetry(mockedContext), false); }); diff --git a/test/resume_up.test.js b/test/resume_up.test.js index 026efe3..5af593d 100644 --- a/test/resume_up.test.js +++ b/test/resume_up.test.js @@ -7,6 +7,13 @@ const crypto = require('crypto'); const http = require('http'); const qiniu = require('../index.js'); + +const { + Endpoint, + Region, + SERVICE_NAME +} = qiniu.httpc; + const { getEnvConfig, checkEnvConfigAndExit, @@ -507,4 +514,129 @@ describe('test resume up', function () { }); }); }); + + describe('test resume up#accelerateUploading', function () { + const accConfig = new qiniu.conf.Config(); + // accConfig.useHttpsDomain = true; + accConfig.accelerateUploading = true; + const accResumeUploader = new qiniu.resume_up.ResumeUploader(accConfig); + const bucketNameWithoutAcc = 'bucket-without-acc-' + Math.floor(Math.random() * 100000); + + before(function () { + return bucketManager.createBucket(bucketNameWithoutAcc); + }); + + testParams.forEach(function (testParam) { + const { + version, + partSize, + mimeType + } = testParam; + const msg = `params(${JSON.stringify(testParam)})`; + + // default is v1. v1 not support setting part size, skipping. 
+ if ( + ( + version === undefined || + version === 'v1' + ) && + partSize !== undefined + ) { + return; + } + + const putExtra = new qiniu.resume_up.PutExtra(); + if (version !== undefined) { + putExtra.version = version; + } + if (partSize !== undefined) { + putExtra.partSize = partSize; + } + if (mimeType !== undefined) { + putExtra.mimeType = mimeType; + } + + it(`upload acc normally; ${msg}`, function () { + const key = 'storage_putFile_acc_test' + Math.floor(Math.random() * 100000); + + const promises = doAndWrapResultPromises(callback => + accResumeUploader.putFile(uploadToken, key, testFilePath, putExtra, callback) + ); + + const checkFunc = ({ data }) => { + data.should.have.keys('key', 'hash'); + }; + + return promises.callback + .then(checkFunc) + .then(() => promises.native) + .then(checkFunc) + .then(() => { + keysToDelete.push(key); + }); + }); + + it(`upload acc unavailable fallback to src; ${msg}`, function () { + const key = 'storage_putFile_acc_test' + Math.floor(Math.random() * 100000); + + const putPolicy = new qiniu.rs.PutPolicy({ + scope: bucketNameWithoutAcc + }); + const uploadToken = putPolicy.uploadToken(mac); + const r1 = Region.fromRegionId('z0'); + r1.services[SERVICE_NAME.UP_ACC] = [ + new Endpoint(`${bucketNameWithoutAcc}.kodo-accelerate.cn-east-1.qiniucs.com`), + new Endpoint('qn-up-acc.fake.qiniu.com') + ]; + accConfig.regionsProvider = r1; + + const promises = doAndWrapResultPromises(callback => + accResumeUploader.putFile(uploadToken, key, testFilePath, putExtra, callback) + ); + + const checkFunc = ({ data }) => { + data.should.have.keys('key', 'hash'); + }; + + return promises.callback + .then(checkFunc) + .then(() => promises.native) + .then(checkFunc) + .then(() => { + keysToDelete.push(key); + }); + }); + + it(`upload acc network error fallback to src; ${msg}`, function () { + const key = 'storage_putFile_acc_test' + Math.floor(Math.random() * 100000); + + const putPolicy = new qiniu.rs.PutPolicy({ + scope: 
bucketNameWithoutAcc + }); + const uploadToken = putPolicy.uploadToken(mac); + const r1 = Region.fromRegionId('z0'); + r1.services[SERVICE_NAME.UP_ACC] = [ + new Endpoint('qiniu-acc.fake.qiniu.com'), + new Endpoint('qn-up-acc.fake.qiniu.com') + ]; + accConfig.regionsProvider = r1; + + const promises = doAndWrapResultPromises(callback => + accResumeUploader.putFile(uploadToken, key, testFilePath, putExtra) + ); + + const checkFunc = ({ data }) => { + data.should.have.keys('key', 'hash'); + }; + + return promises.native + .then(checkFunc) + .then(() => promises.native) + .then(checkFunc) + .then(() => { + keysToDelete.push(key); + }); + }); + }); + }); }); diff --git a/test/rs.test.js b/test/rs.test.js index 87ec6c6..f3cf312 100644 --- a/test/rs.test.js +++ b/test/rs.test.js @@ -427,7 +427,7 @@ describe('test start bucket manager', function () { }); }); - describe('test listBucket', function () { + describe('test listBuckets', function () { it('test listBucket', function () { const promises = doAndWrapResultPromises(callback => bucketManager.listBucket(callback) @@ -526,6 +526,9 @@ describe('test start bucket manager', function () { const promises = doAndWrapResultPromises(callback => bucketManager.createBucket( targetBucketName, + { + regionId: 'z0' + }, callback ) ); diff --git a/test/storage_internal.test.js b/test/storage_internal.test.js index 12c61a6..67635f1 100644 --- a/test/storage_internal.test.js +++ b/test/storage_internal.test.js @@ -3,7 +3,16 @@ const should = require('should'); const fs = require('fs'); const path = require('path'); +const qiniu = require('../index'); + +const { + Endpoint, + Region, + SERVICE_NAME +} = qiniu.httpc; + const { + AccUnavailableRetryPolicy, TokenExpiredRetryPolicy } = require('../qiniu/storage/internal'); @@ -125,4 +134,147 @@ describe('test upload internal module', function () { }); }); }); + + describe('test AccUnavailableRetryPolicy', function () { + it('test AccUnavailableRetryPolicy should retry', function () { + 
const accUnavailableRetryPolicy = new AccUnavailableRetryPolicy(); + + const mockedContext = {}; + + return accUnavailableRetryPolicy.initContext(mockedContext) + .then(() => { + mockedContext.serviceName = SERVICE_NAME.UP_ACC; + mockedContext.alternativeServiceNames = [SERVICE_NAME.UP]; + mockedContext.region = Region.fromRegionId('z0'); + + mockedContext.result = { + data: null, + resp: { + statusCode: 400, + data: { + error: 'transfer acceleration is not configured on this bucket' + } + } + }; + + accUnavailableRetryPolicy.shouldRetry(mockedContext) + .should.true(); + }); + }); + + it('test AccUnavailableRetryPolicy should not retry by no alternative service', function () { + const accUnavailableRetryPolicy = new AccUnavailableRetryPolicy(); + + const mockedContext = {}; + + return accUnavailableRetryPolicy.initContext(mockedContext) + .then(() => { + mockedContext.serviceName = SERVICE_NAME.UP; + mockedContext.alternativeServiceNames = []; + mockedContext.region = Region.fromRegionId('z0'); + + mockedContext.result = { + data: null, + resp: { + statusCode: 400, + data: { + error: 'transfer acceleration is not configured on this bucket' + } + } + }; + + accUnavailableRetryPolicy.shouldRetry(mockedContext) + .should.false(); + }); + }); + + it('test AccUnavailableRetryPolicy should not retry by no alternative endpoint', function () { + const accUnavailableRetryPolicy = new AccUnavailableRetryPolicy(); + + const mockedContext = {}; + + return accUnavailableRetryPolicy.initContext(mockedContext) + .then(() => { + mockedContext.serviceName = SERVICE_NAME.UP_ACC; + mockedContext.alternativeServiceNames = [SERVICE_NAME.UP]; + mockedContext.region = Region.fromRegionId('z0'); + mockedContext.region.services[SERVICE_NAME.UP] = []; + + mockedContext.result = { + data: null, + resp: { + statusCode: 400, + data: { + error: 'transfer acceleration is not configured on this bucket' + } + } + }; + + accUnavailableRetryPolicy.shouldRetry(mockedContext) + .should.false(); + }); 
+ }); + + it('test AccUnavailableRetryPolicy should not retry by no other error', function () { + const accUnavailableRetryPolicy = new AccUnavailableRetryPolicy(); + + const mockedContext = {}; + + return accUnavailableRetryPolicy.initContext(mockedContext) + .then(() => { + mockedContext.serviceName = SERVICE_NAME.UP_ACC; + mockedContext.alternativeServiceNames = [SERVICE_NAME.UP]; + mockedContext.region = Region.fromRegionId('z0'); + + mockedContext.result = { + data: null, + resp: { + statusCode: 400, + data: 'Not Found' + } + }; + + accUnavailableRetryPolicy.shouldRetry(mockedContext) + .should.false(); + }); + }); + + it('test AccUnavailableRetryPolicy prepare retry', function () { + const accUnavailableRetryPolicy = new AccUnavailableRetryPolicy(); + const region = Region.fromRegionId('z0'); + + const mockedContext = {}; + + return accUnavailableRetryPolicy.initContext(mockedContext) + .then(() => { + mockedContext.region = region; + mockedContext.region[SERVICE_NAME.UP_ACC] = [ + new Endpoint('some.fake.qiniu.com'), + new Endpoint('others.fake.qiniu.com') + ]; + mockedContext.serviceName = SERVICE_NAME.UP_ACC; + mockedContext.alternativeServiceNames = [SERVICE_NAME.UP]; + + mockedContext.result = { + data: null, + resp: { + statusCode: 400, + data: { + error: 'transfer acceleration is not configured on this bucket' + } + } + }; + + accUnavailableRetryPolicy.shouldRetry(mockedContext) + .should.true(); + + return accUnavailableRetryPolicy.prepareRetry(mockedContext); + }) + .then(() => { + const [expectEndpoint, ...expectAlternativeEndpoints] = region.services[SERVICE_NAME.UP]; + should.deepEqual(mockedContext.endpoint, expectEndpoint); + should.deepEqual(mockedContext.alternativeEndpoints, expectAlternativeEndpoints); + }); + }); + }); }); From ab8fd3a65bad40a969955165aa795ac7eb2a6cf8 Mon Sep 17 00:00:00 2001 From: lihsai0 Date: Tue, 4 Jun 2024 21:16:35 +0800 Subject: [PATCH 07/18] fix: test cases for default endpoint provider and acc uploading --- 
test/conf.test.js | 4 ++-- test/form_up.test.js | 49 +++++++++++++++++++++++++++--------------- test/resume_up.test.js | 48 +++++++++++++++++++++++++---------------- 3 files changed, 64 insertions(+), 37 deletions(-) diff --git a/test/conf.test.js b/test/conf.test.js index 805885b..391e580 100644 --- a/test/conf.test.js +++ b/test/conf.test.js @@ -186,9 +186,9 @@ describe('test Config class', function () { .then(endpoints => { const endpointsValues = endpoints.map(e => e.getValue()); should.deepEqual(endpointsValues, [ + `${preferredScheme}://uc.qiniuapi.com`, `${preferredScheme}://kodo-config.qiniuapi.com`, - `${preferredScheme}://uc.qbox.me`, - `${preferredScheme}://api.qiniu.com` + `${preferredScheme}://uc.qbox.me` ]); }); }); diff --git a/test/form_up.test.js b/test/form_up.test.js index 40b1bc4..9eec275 100644 --- a/test/form_up.test.js +++ b/test/form_up.test.js @@ -9,8 +9,7 @@ const qiniu = require('../index.js'); const { Endpoint, Region, - SERVICE_NAME, - StaticRegionsProvider + SERVICE_NAME } = qiniu.httpc; const { @@ -67,6 +66,10 @@ describe('test form up', function () { // delete all the files uploaded const keysToDelete = []; after(function () { + if (!keysToDelete.length) { + return; + } + const deleteOps = keysToDelete.map(k => qiniu.rs.deleteOp(bucketName, k) ); @@ -448,15 +451,33 @@ describe('test form up', function () { describe('test form up#accelerateUploading', function () { const accConfig = new qiniu.conf.Config(); - // accConfig.useHttpsDomain = true; + accConfig.useHttpsDomain = true; accConfig.accelerateUploading = true; - const accFormUploader = new qiniu.form_up.FormUploader(accConfig); const bucketNameWithoutAcc = 'bucket-without-acc-' + Math.floor(Math.random() * 100000); + const accPutPolicy = new qiniu.rs.PutPolicy({ + scope: bucketNameWithoutAcc + }); + const accUploadToken = accPutPolicy.uploadToken(mac); + const accFormUploader = new qiniu.form_up.FormUploader(accConfig); + const accKeysToDelete = []; before(function () { return 
bucketManager.createBucket(bucketNameWithoutAcc); }); + after(function () { + if (!accKeysToDelete.length) { + return; + } + return bucketManager.batch(accKeysToDelete.map(k => qiniu.rs.deleteOp(bucketNameWithoutAcc, k))) + .then(({ data, resp }) => { + if (!Array.isArray(data)) { + console.log(resp); + } + return bucketManager.deleteBucket(bucketNameWithoutAcc); + }); + }); + it('upload acc normally', function () { const key = 'storage_putFile_acc_test' + Math.floor(Math.random() * 100000); @@ -464,7 +485,9 @@ describe('test form up', function () { accFormUploader.putFile(uploadToken, key, testFilePath2, putExtra, callback) ); - const checkFunc = ({ data }) => { + const checkFunc = ({ data, resp }) => { + const isAccelerateUploading = (resp.requestUrls || []).some(url => url.includes('kodo-accelerate')); + should.ok(isAccelerateUploading, `should using acc host, but requestUrls: ${JSON.stringify(resp.requestUrls)}`); data.should.have.keys('key', 'hash'); }; @@ -480,10 +503,6 @@ describe('test form up', function () { it('upload acc unavailable fallback to src', function () { const key = 'storage_putFile_acc_test' + Math.floor(Math.random() * 100000); - const putPolicy = new qiniu.rs.PutPolicy({ - scope: bucketNameWithoutAcc - }); - const uploadToken = putPolicy.uploadToken(mac); const r1 = Region.fromRegionId('z0'); r1.services[SERVICE_NAME.UP_ACC] = [ new Endpoint(`${bucketNameWithoutAcc}.kodo-accelerate.cn-east-1.qiniucs.com`), @@ -492,7 +511,7 @@ describe('test form up', function () { accConfig.regionsProvider = r1; const promises = doAndWrapResultPromises(callback => - accFormUploader.putFile(uploadToken, key, testFilePath2, putExtra, callback) + accFormUploader.putFile(accUploadToken, key, testFilePath2, putExtra, callback) ); const checkFunc = ({ data }) => { @@ -504,17 +523,13 @@ describe('test form up', function () { .then(() => promises.native) .then(checkFunc) .then(() => { - keysToDelete.push(key); + accKeysToDelete.push(key); }); }); it('upload acc 
network error fallback to src', function () { const key = 'storage_putFile_acc_test' + Math.floor(Math.random() * 100000); - const putPolicy = new qiniu.rs.PutPolicy({ - scope: bucketNameWithoutAcc - }); - const uploadToken = putPolicy.uploadToken(mac); const r1 = Region.fromRegionId('z0'); r1.services[SERVICE_NAME.UP_ACC] = [ new Endpoint('qiniu-acc.fake.qiniu.com'), @@ -523,7 +538,7 @@ describe('test form up', function () { accConfig.regionsProvider = r1; const promises = doAndWrapResultPromises(callback => - accFormUploader.putFile(uploadToken, key, testFilePath2, putExtra) + accFormUploader.putFile(accUploadToken, key, testFilePath2, putExtra) ); const checkFunc = ({ data }) => { @@ -535,7 +550,7 @@ describe('test form up', function () { .then(() => promises.native) .then(checkFunc) .then(() => { - keysToDelete.push(key); + accKeysToDelete.push(key); }); }); }); diff --git a/test/resume_up.test.js b/test/resume_up.test.js index 5af593d..4f68cbd 100644 --- a/test/resume_up.test.js +++ b/test/resume_up.test.js @@ -107,14 +107,14 @@ describe('test resume up', function () { const keysToDelete = []; after(function () { + if (!keysToDelete.length) { + return; + } + const deleteOps = keysToDelete.map(k => qiniu.rs.deleteOp(bucketName, k) ); - if (!deleteOps.length) { - return; - } - return bucketManager.batch(deleteOps) .then(({ data, resp }) => { if (!Array.isArray(data)) { @@ -517,15 +517,33 @@ describe('test resume up', function () { describe('test resume up#accelerateUploading', function () { const accConfig = new qiniu.conf.Config(); - // accConfig.useHttpsDomain = true; + accConfig.useHttpsDomain = true; accConfig.accelerateUploading = true; const accResumeUploader = new qiniu.resume_up.ResumeUploader(accConfig); const bucketNameWithoutAcc = 'bucket-without-acc-' + Math.floor(Math.random() * 100000); + const accKeysToDelete = []; + const accPutPolicy = new qiniu.rs.PutPolicy({ + scope: bucketNameWithoutAcc + }); + const accUploadToken = 
accPutPolicy.uploadToken(mac); before(function () { return bucketManager.createBucket(bucketNameWithoutAcc); }); + after(function () { + if (!accKeysToDelete.length) { + return; + } + return bucketManager.batch(accKeysToDelete.map(k => qiniu.rs.deleteOp(bucketNameWithoutAcc, k))) + .then(({ data, resp }) => { + if (!Array.isArray(data)) { + console.log(resp); + } + return bucketManager.deleteBucket(bucketNameWithoutAcc); + }); + }); + testParams.forEach(function (testParam) { const { version, @@ -563,7 +581,9 @@ describe('test resume up', function () { accResumeUploader.putFile(uploadToken, key, testFilePath, putExtra, callback) ); - const checkFunc = ({ data }) => { + const checkFunc = ({ data, resp }) => { + const isAccelerateUploading = (resp.requestUrls || []).some(url => url.includes('kodo-accelerate')); + should.ok(isAccelerateUploading, `should using acc host, but requestUrls: ${JSON.stringify(resp.requestUrls)}`); data.should.have.keys('key', 'hash'); }; @@ -579,10 +599,6 @@ describe('test resume up', function () { it(`upload acc unavailable fallback to src; ${msg}`, function () { const key = 'storage_putFile_acc_test' + Math.floor(Math.random() * 100000); - const putPolicy = new qiniu.rs.PutPolicy({ - scope: bucketNameWithoutAcc - }); - const uploadToken = putPolicy.uploadToken(mac); const r1 = Region.fromRegionId('z0'); r1.services[SERVICE_NAME.UP_ACC] = [ new Endpoint(`${bucketNameWithoutAcc}.kodo-accelerate.cn-east-1.qiniucs.com`), @@ -591,7 +607,7 @@ describe('test resume up', function () { accConfig.regionsProvider = r1; const promises = doAndWrapResultPromises(callback => - accResumeUploader.putFile(uploadToken, key, testFilePath, putExtra, callback) + accResumeUploader.putFile(accUploadToken, key, testFilePath, putExtra, callback) ); const checkFunc = ({ data }) => { @@ -603,17 +619,13 @@ describe('test resume up', function () { .then(() => promises.native) .then(checkFunc) .then(() => { - keysToDelete.push(key); + accKeysToDelete.push(key); }); }); 
it(`upload acc network error fallback to src; ${msg}`, function () { const key = 'storage_putFile_acc_test' + Math.floor(Math.random() * 100000); - const putPolicy = new qiniu.rs.PutPolicy({ - scope: bucketNameWithoutAcc - }); - const uploadToken = putPolicy.uploadToken(mac); const r1 = Region.fromRegionId('z0'); r1.services[SERVICE_NAME.UP_ACC] = [ new Endpoint('qiniu-acc.fake.qiniu.com'), @@ -622,7 +634,7 @@ describe('test resume up', function () { accConfig.regionsProvider = r1; const promises = doAndWrapResultPromises(callback => - accResumeUploader.putFile(uploadToken, key, testFilePath, putExtra) + accResumeUploader.putFile(accUploadToken, key, testFilePath, putExtra) ); const checkFunc = ({ data }) => { @@ -634,7 +646,7 @@ describe('test resume up', function () { .then(() => promises.native) .then(checkFunc) .then(() => { - keysToDelete.push(key); + accKeysToDelete.push(key); }); }); }); From fa92d96aa8f2fd2f4cbfaf6894205b6e2d454447 Mon Sep 17 00:00:00 2001 From: lihsai0 Date: Mon, 17 Jun 2024 16:10:29 +0800 Subject: [PATCH 08/18] feat: add ResumeRecorder --- index.d.ts | 40 +++++- package.json | 1 + qiniu/storage/internal.js | 35 ++--- qiniu/storage/resume.js | 285 ++++++++++++++++++++++++++++++-------- qiniu/util.js | 4 + test/resume_up.test.js | 103 ++++++++++++-- 6 files changed, 368 insertions(+), 100 deletions(-) diff --git a/index.d.ts b/index.d.ts index 3cd586e..14f22d8 100644 --- a/index.d.ts +++ b/index.d.ts @@ -421,13 +421,14 @@ export declare namespace resume_up { /** * @default null + * @deprecated 使用 `resumeRecorder` 与 `resumeKey` 代替 */ - resumeRecordFile?: string + resumeRecordFile?: string | null /** * @default null */ - progressCallback?: (uploadBytes: number, totalBytes: number) => void + progressCallback?: ((uploadBytes: number, totalBytes: number) => void) | null /** * @default v1 @@ -444,6 +445,18 @@ export declare namespace resume_up { */ metadata?: Record + /** + * 断点续传记录器,请通过 `createResumeRecorder` 或 `createResumeRecorderSync` 获取,优先级比 
`resumeRecordFile` 低 + * @default null + */ + resumeRecorder?: ResumeRecorder + + /** + * 断点续传记录文件的具体文件名,不设置时会由当次上传自动生成 + * @default null + */ + resumeKey?: string | null + /** * 上传可选参数 * @param fname 请求体中的文件的名称 @@ -454,11 +467,32 @@ export declare namespace resume_up { * @param partSize 分片上传v2必传字段 默认大小为4MB 分片大小范围为1 MB - 1 GB * @param version 分片上传版本 目前支持v1/v2版本 默认v1 * @param metadata 元数据设置,参数名称必须以 x-qn-meta-${name}: 开头 + * @param resumeRecorder 断点续传记录器,请通过 `createResumeRecorder` 或 `createResumeRecorderSync` 获取,优先级比 `resumeRecordFile` 低 + * @param resumeKey 断点续传记录文件的具体文件名,不设置时会由当次上传自动生成,推荐不设置 */ constructor(fname?: string, params?: Record, mimeType?: string, resumeRecordFile?: string, progressCallback?: (uploadBytes: number, totalBytes: number) => void, - partSize?:number, version?:string, metadata?: Record); + partSize?:number, version?:string, metadata?: Record, + resumeRecorder?: ResumeRecorder, resumeKey?: string); } + + /** + * 当前仅支持了同步调用这一不推荐的使用方式,暂不公开具体内部信息,仅供类型匹配使用 + */ + abstract class ResumeRecorder { + } + + /** + * + * @param baseDirPath 默认值为 `os.tmpdir()` + */ + function createResumeRecorder (baseDirPath?: string): Promise + + /** + * TODO: 是否公开这个不推荐的同步实现? + * `createResumeRecorder` 的同步版本 + */ + function createResumeRecorderSync (baseDirPath?: string): ResumeRecorder } export declare namespace util { diff --git a/package.json b/package.json index 9842204..c332815 100644 --- a/package.json +++ b/package.json @@ -58,6 +58,7 @@ "encodeurl": "^1.0.1", "formstream": "^1.1.0", "mime": "^2.4.4", + "mkdirp": "^0.5.5", "mockdate": "^3.0.5", "tunnel-agent": "^0.6.0", "typescript": "^4.9.4", diff --git a/qiniu/storage/internal.js b/qiniu/storage/internal.js index 9318a23..9374b13 100644 --- a/qiniu/storage/internal.js +++ b/qiniu/storage/internal.js @@ -2,8 +2,6 @@ // DO NOT use this file, unless you know what you're doing. // Because its API may make broken change for internal usage. 
-const fs = require('fs'); - const { RetryPolicy } = require('../retry'); // --- split to files --- // @@ -18,31 +16,22 @@ const { RetryPolicy } = require('../retry'); * @extends RetryPolicy * @param {Object} options * @param {string} options.uploadApiVersion - * @param {string} options.resumeRecordFilePath + * @param {function} options.recordDeleteHandler + * @param {function} options.recordExistsHandler * @param {number} [options.maxRetryTimes] * @constructor */ function TokenExpiredRetryPolicy (options) { this.id = Symbol(this.constructor.name); this.uploadApiVersion = options.uploadApiVersion; - this.resumeRecordFilePath = options.resumeRecordFilePath; + this.recordDeleteHandler = options.recordDeleteHandler; + this.recordExistsHandler = options.recordExistsHandler; this.maxRetryTimes = options.maxRetryTimes || 1; } TokenExpiredRetryPolicy.prototype = Object.create(RetryPolicy.prototype); TokenExpiredRetryPolicy.prototype.constructor = TokenExpiredRetryPolicy; -/** - * @param {string} resumeRecordFilePath - * @returns {boolean} - */ -TokenExpiredRetryPolicy.prototype.isResumedUpload = function (resumeRecordFilePath) { - if (!resumeRecordFilePath) { - return false; - } - return fs.existsSync(resumeRecordFilePath); -}; - /** * @param {Object} context * @returns {Promise} @@ -50,8 +39,7 @@ TokenExpiredRetryPolicy.prototype.isResumedUpload = function (resumeRecordFilePa TokenExpiredRetryPolicy.prototype.initContext = function (context) { context[this.id] = { retriedTimes: 0, - uploadApiVersion: this.uploadApiVersion, - resumeRecordFilePath: this.resumeRecordFilePath + uploadApiVersion: this.uploadApiVersion }; return Promise.resolve(); }; @@ -63,13 +51,12 @@ TokenExpiredRetryPolicy.prototype.initContext = function (context) { TokenExpiredRetryPolicy.prototype.shouldRetry = function (context) { const { retriedTimes, - uploadApiVersion, - resumeRecordFilePath + uploadApiVersion } = context[this.id]; if ( retriedTimes >= this.maxRetryTimes || - 
!this.isResumedUpload(resumeRecordFilePath)
+        !this.recordExistsHandler()
     ) {
         return false;
     }
@@ -99,15 +86,13 @@ TokenExpiredRetryPolicy.prototype.shouldRetry = function (context) {
  */
 TokenExpiredRetryPolicy.prototype.prepareRetry = function (context) {
     context[this.id].retriedTimes += 1;
-    const resumeRecordFilePath = context[this.id].resumeRecordFilePath;
     return new Promise(resolve => {
-        if (!resumeRecordFilePath) {
+        if (!this.recordExistsHandler()) {
             resolve();
             return;
         }
-        fs.unlink(resumeRecordFilePath, _err => {
-            resolve();
-        });
+        this.recordDeleteHandler();
+        resolve();
     });
 };
diff --git a/qiniu/storage/resume.js b/qiniu/storage/resume.js
index 7fb01bf..efa8003 100644
--- a/qiniu/storage/resume.js
+++ b/qiniu/storage/resume.js
@@ -1,10 +1,14 @@
+const os = require('os');
 const fs = require('fs');
 const path = require('path');
+const crypto = require('crypto');
 const mime = require('mime');
 const getCrc32 = require('crc32');
 const destroy = require('destroy');
 const BlockStream = require('block-stream2');
+// use the native option `recursive` when min version of Node.js update to ≥ v10.12.0
+const mkdirp = require('mkdirp');
 
 const conf = require('../conf');
 const util = require('../util');
@@ -26,6 +30,8 @@ const {
 
 exports.ResumeUploader = ResumeUploader;
 exports.PutExtra = PutExtra;
+exports.createResumeRecorder = createResumeRecorder;
+exports.createResumeRecorderSync = createResumeRecorderSync;
 
 /**
  * @param {conf.Config} [config]
@@ -57,11 +63,13 @@ function ResumeUploader (config) {
  * @param {string} [fname] 请求体中的文件的名称
  * @param {Object} [params] 额外参数设置,参数名称必须以x:开头
  * @param {string | null} [mimeType] 指定文件的mimeType
- * @param {string | null} [resumeRecordFile] 断点续传的已上传的部分信息记录文件路径
+ * @param {string | null} [resumeRecordFile] DEPRECATED: 使用 `resumeRecorder` 与 `resumeKey` 代替;断点续传的已上传的部分信息记录文件路径
  * @param {function(number, number):void} [progressCallback] 上传进度回调,回调参数为 (uploadBytes, totalBytes)
  * @param {number} [partSize] 分片上传v2必传字段 默认大小为4MB 分片大小范围为1 MB - 1 GB
 * @param {'v1' | 
'v2'} [version] 分片上传版本 目前支持v1/v2版本 默认v1 * @param {Object} [metadata] 元数据设置,参数名称必须以 x-qn-meta-${name}: 开头 + * @param {JsonFileRecorder} [resumeRecorder] 通过 `createResumeRecorder` 或 `createResumeRecorderSync` 获取,优先级比 `resumeRecordFile` 低 + * @param {string} [resumeKey] 断点续传记录文件的具体文件名,不设置时会由当次上传自动生成 */ function PutExtra ( fname, @@ -71,7 +79,9 @@ function PutExtra ( progressCallback, partSize, version, - metadata + metadata, + resumeRecorder, + resumeKey ) { this.fname = fname || ''; this.params = params || {}; @@ -81,6 +91,8 @@ function PutExtra ( this.partSize = partSize || conf.BLOCK_SIZE; this.version = version || 'v1'; this.metadata = metadata || {}; + this.resumeRecorder = resumeRecorder || null; + this.resumeKey = resumeKey || null; } /** @@ -89,8 +101,9 @@ function PutExtra ( * @param {string} options.accessKey * @param {string} options.bucketName * @param {boolean} [options.retryable] - * @param {'v1' | 'v2' | string} options.uploadApiVersion - * @param {string} [options.resumeRecordFilePath] + * @param {'v1' | 'v2' | string} [options.uploadApiVersion] + * @param {JsonFileRecorder} [options.resumeRecorder] + * @param {string} [options.resumeKey] */ function _getRegionsRetrier (options) { const { @@ -99,16 +112,19 @@ function _getRegionsRetrier (options) { retryable = true, uploadApiVersion, - resumeRecordFilePath + resumeRecorder, + resumeKey } = options; const preferredScheme = this.config.useHttpsDomain ? 
'https' : 'http';
-
-    const resumeInfo = getResumeRecordInfo(resumeRecordFilePath);
     let preferredEndpoints;
-    if (resumeInfo && Array.isArray(resumeInfo.upDomains)) {
-        preferredEndpoints = resumeInfo.upDomains.map(d =>
-            new Endpoint(d, { defaultScheme: preferredScheme }));
+    const isResumeAvailable = Boolean(resumeRecorder && resumeKey);
+    if (isResumeAvailable) {
+        const resumeInfo = resumeRecorder.getSync(resumeKey);
+        if (resumeInfo && Array.isArray(resumeInfo.upDomains)) {
+            preferredEndpoints = resumeInfo.upDomains.map(d =>
+                new Endpoint(d, { defaultScheme: preferredScheme }));
+        }
     }
 
     return this.config.getRegionsProvider({
@@ -123,7 +139,18 @@ function _getRegionsRetrier (options) {
             new AccUnavailableRetryPolicy(),
             new TokenExpiredRetryPolicy({
                 uploadApiVersion,
-                resumeRecordFilePath
+                recordExistsHandler: () => {
+                    if (!isResumeAvailable) {
+                        return;
+                    }
+                    return resumeRecorder.hasSync(resumeKey);
+                },
+                recordDeleteHandler: () => {
+                    if (!isResumeAvailable) {
+                        return;
+                    }
+                    resumeRecorder.deleteSync(resumeKey);
+                }
             }),
             new EndpointsRetryPolicy({
                 skipInitContext: true
@@ -132,11 +159,10 @@ function _getRegionsRetrier (options) {
                 regionsProvider,
                 serviceNames,
                 onChangedRegion: () => {
-                    try {
-                        fs.unlinkSync(resumeRecordFilePath);
-                    } catch (_e) {
-                        // ignore
+                    if (!isResumeAvailable) {
+                        return;
                     }
+                    resumeRecorder.deleteSync(resumeKey);
                 },
                 preferredEndpoints
             })
@@ -199,11 +225,10 @@ ResumeUploader.prototype.putStream = function (
     const result = _getRegionsRetrier.call(this, {
         bucketName: util.getBucketFromUptoken(uploadToken),
         accessKey: util.getAKFromUptoken(uploadToken),
-        retryable: false,
+        retryable: false
 
         // useless by not retryable
-        uploadApiVersion: putExtra.version,
-        resumeRecordFilePath: putExtra.resumeRecordFile
+        // uploadApiVersion: putExtra.version,
     })
         .then(retrier => Promise.all([
             retrier,
@@ -227,25 +252,6 @@ ResumeUploader.prototype.putStream = function (
 
     return result;
 };
-
-/**
- * @param {string} resumeRecordFilePath
- * @returns {undefined 
| Object.} - */ -function getResumeRecordInfo (resumeRecordFilePath) { - // get resume record info - let result; - // read resumeRecordFile - if (resumeRecordFilePath) { - try { - const resumeRecords = fs.readFileSync(resumeRecordFilePath).toString(); - result = JSON.parse(resumeRecords); - } catch (e) { - e.code !== 'ENOENT' && console.error(e); - } - } - return result; -} - /** * @param {Endpoint} upEndpoint * @param {string} uploadToken @@ -271,7 +277,7 @@ function putReq ( })); // get resume record info - const blkputRets = getResumeRecordInfo(putExtra.resumeRecordFile); + const blkputRets = putExtra.resumeRecorder && putExtra.resumeRecorder.getSync(putExtra.resumeKey); const totalBlockNum = Math.ceil(rsStreamLen / putExtra.partSize); // select upload version @@ -310,14 +316,13 @@ function putReq ( reject(err); return; } - if (info.statusCode === 200 && putExtra.resumeRecordFile) { - try { - fs.unlinkSync(putExtra.resumeRecordFile); - } catch (_e) { - // ignore - } + if (info.statusCode === 200 && putExtra.resumeRecorder && putExtra.resumeKey) { + putExtra.resumeRecorder.deleteSync(putExtra.resumeKey); } - resolve(new ResponseWrapper({ data: ret, resp: info })); + resolve(new ResponseWrapper({ + data: ret, + resp: info + })); } ); }); @@ -428,11 +433,8 @@ function putReqV1 (sourceOptions, uploadOptions, callbackFunc) { const blkputRet = respBody; finishedCtxList.push(blkputRet.ctx); finishedBlkPutRets.parts.push(blkputRet); - if (putExtra.resumeRecordFile) { - const contents = JSON.stringify(finishedBlkPutRets); - fs.writeFileSync(putExtra.resumeRecordFile, contents, { - encoding: 'utf-8' - }); + if (putExtra.resumeRecorder && putExtra.resumeKey) { + putExtra.resumeRecorder.setSync(putExtra.resumeKey, finishedBlkPutRets); } if (putExtra.progressCallback) { try { @@ -707,11 +709,8 @@ function resumeUploadV2 ( partNumber: partNumber }; finishedEtags.etags.push(blockStatus); - if (putExtra.resumeRecordFile) { - const contents = JSON.stringify(finishedEtags); - 
fs.writeFileSync(putExtra.resumeRecordFile, contents, { - encoding: 'utf-8' - }); + if (putExtra.resumeRecorder && putExtra.resumeKey) { + putExtra.resumeRecorder.setSync(putExtra.resumeKey, finishedEtags); } if (putExtra.progressCallback) { try { @@ -839,19 +838,25 @@ ResumeUploader.prototype.putFile = function ( putExtra.fname = path.basename(localFile); } + const akFromToken = util.getAKFromUptoken(uploadToken); + const bucketFromToken = util.getBucketFromUptoken(uploadToken); putExtra = getDefaultPutExtra( putExtra, { - key + accessKey: akFromToken, + bucketName: bucketFromToken, + key, + filePath: localFile } ); const result = _getRegionsRetrier.call(this, { - bucketName: util.getBucketFromUptoken(uploadToken), - accessKey: util.getAKFromUptoken(uploadToken), + accessKey: akFromToken, + bucketName: bucketFromToken, uploadApiVersion: putExtra.version, - resumeRecordFilePath: putExtra.resumeRecordFile + resumeRecorder: putExtra.resumeRecorder, + resumeKey: putExtra.resumeKey }) .then(retrier => Promise.all([ retrier, @@ -909,13 +914,17 @@ ResumeUploader.prototype.putFileWithoutKey = function ( /** * @param {PutExtra} putExtra * @param {Object} options + * @param {string} [options.accessKey] + * @param {string} [options.bucketName] * @param {string | null} [options.key] + * @param {string} [options.filePath] * @returns {PutExtra} */ function getDefaultPutExtra (putExtra, options) { options = options || {}; - putExtra = putExtra || new PutExtra(); + // assign to a new object to make the modification later + putExtra = Object.assign(new PutExtra(), putExtra); if (!putExtra.mimeType) { putExtra.mimeType = 'application/octet-stream'; } @@ -928,5 +937,161 @@ function getDefaultPutExtra (putExtra, options) { putExtra.version = 'v1'; } + if (putExtra.resumeRecordFile) { + const parsedPath = path.parse(path.resolve(putExtra.resumeRecordFile)); + putExtra.resumeRecorder = createResumeRecorderSync(parsedPath.dir); + putExtra.resumeKey = parsedPath.name; + } + + // generate 
`resumeKey` if not exists + if ( + putExtra.resumeRecorder && + !putExtra.resumeKey && + options.filePath && + options.accessKey && + options.bucketName + ) { + let fileLastModify; + try { + fileLastModify = options.filePath && fs.statSync(options.filePath).mtimeMs.toString(); + } catch (_err) { + fileLastModify = ''; + } + const recordValuesToHash = [ + putExtra.version, + options.accessKey, + `${options.bucketName}:${options.key}`, + options.filePath, + fileLastModify + ]; + putExtra.resumeKey = putExtra.resumeRecorder.generateKey(recordValuesToHash); + } + return putExtra; } + +/** + * @class + * @param {string} baseDirPath + * @constructor + */ +function JsonFileRecorder (baseDirPath) { + this.baseDirPath = baseDirPath; +} + +/** + * @param {string} key + * @param {Object.} data + */ +JsonFileRecorder.prototype.setSync = function (key, data) { + const filePath = path.join(this.baseDirPath, key); + const contents = JSON.stringify(data); + fs.writeFileSync( + filePath, + contents, + { + encoding: 'utf-8', + mode: 0o600 + } + ); +}; + +/** + * @param key + * @returns {undefined | Object.} + */ +JsonFileRecorder.prototype.getSync = function (key) { + const filePath = path.join(this.baseDirPath, key); + let result; + try { + const recordContent = fs.readFileSync( + filePath, + { + encoding: 'utf-8' + } + ).toString(); + result = JSON.parse(recordContent); + } catch (_err) { + // pass + } + return result; +}; + +JsonFileRecorder.prototype.hasSync = function (key) { + const filePath = path.join(this.baseDirPath, key); + try { + return fs.existsSync(filePath); + } catch (_err) { + return false; + } +}; + +JsonFileRecorder.prototype.deleteSync = function (key) { + const filePath = path.join(this.baseDirPath, key); + try { + fs.unlinkSync(filePath); + } catch (_err) { + // pass + } +}; + +JsonFileRecorder.prototype.generateKey = function (fields) { + const h = crypto.createHash('sha1'); + fields.forEach(v => { + h.update(v); + }); + return 
`qn-resume-${h.digest('hex')}.json`; +}; + +function createResumeRecorder (baseDirPath) { + if (baseDirPath) { + // make baseDirPath absolute + baseDirPath = path.resolve(baseDirPath); + } else { + // set default baseDirPath to os temp + baseDirPath = os.tmpdir(); + } + // with mkdirp on Windows the root-level ENOENT errors can lead to infinite regress + // remove the fs.access when instead mkdirp with the native option `recursive` + return new Promise((resolve, reject) => { + fs.access( + path.parse(baseDirPath).root, + fs.constants.R_OK | fs.constants.W_OK, + err => { + if (err) { + reject(err); + return; + } + resolve(); + } + ); + }) + .then(() => new Promise((resolve, reject) => { + mkdirp(baseDirPath, { mode: 0o700 }, err => { + if (err) { + reject(err); + return; + } + resolve(); + }); + })) + .then(() => new JsonFileRecorder(baseDirPath)); +} + +function createResumeRecorderSync (baseDirPath) { + if (baseDirPath) { + // make baseDirPath absolute + baseDirPath = path.resolve(baseDirPath); + } else { + // set default baseDirPath to os temp + baseDirPath = os.tmpdir(); + } + // with mkdirp on Windows the root-level ENOENT errors can lead to infinite regress + // remove the fs.access when instead mkdirp with the native option `recursive` + fs.accessSync( + path.parse(baseDirPath).root, + fs.constants.F_OK + ); + mkdirp.sync(baseDirPath, { mode: 0o700 }); + return new JsonFileRecorder(baseDirPath); +} diff --git a/qiniu/util.js b/qiniu/util.js index c1678b8..0509cee 100644 --- a/qiniu/util.js +++ b/qiniu/util.js @@ -380,3 +380,7 @@ exports.prepareZone = function (ctx, accessKey, bucket, callback) { }); } }; + +exports.writeOrCreateSync = function () { + +}; diff --git a/test/resume_up.test.js b/test/resume_up.test.js index 4f68cbd..144a7f5 100644 --- a/test/resume_up.test.js +++ b/test/resume_up.test.js @@ -22,6 +22,7 @@ const { doAndWrapResultPromises, parametrize } = require('./conftest'); +const { createResumeRecorderSync } = 
require('../qiniu/storage/resume'); const testFilePath = path.join(os.tmpdir(), 'nodejs-sdk-test.bin'); @@ -406,20 +407,43 @@ describe('test resume up', function () { { name: 'fileSizeMB', values: [2, 4, 6, 10] + }, + { + name: 'resumeRecorderOption', + values: [ + undefined, // resume record is disabled, default + { + baseDirPath: path.join(os.tmpdir(), 'SDKCustomDir'), + resumeKey: undefined + }, + { + baseDirPath: path.join(os.tmpdir(), 'SDKCustomDir'), + resumeKey: 'some-resume-key.json' + } + ] + }, + { + name: 'resumeRecordFile', + values: [ + undefined, + path.join(os.tmpdir(), 'some-resume-record-file.json') + ] } ); const filepathListToDelete = []; after(function () { return Promise.all( - filepathListToDelete.map(p => new Promise(resolve => { - fs.unlink(p, err => { - if (err && err.code !== 'ENOENT') { - console.log(`unlink ${p} failed`, err); - } - resolve(); - }); - })) + filepathListToDelete + .filter(p => p) + .map(p => new Promise(resolve => { + fs.unlink(p, err => { + if (err && err.code !== 'ENOENT') { + console.log(`unlink ${p} failed`, err); + } + resolve(); + }); + })) ); }); @@ -427,7 +451,9 @@ describe('test resume up', function () { const { version, partSize, - fileSizeMB + fileSizeMB, + resumeRecorderOption, + resumeRecordFile } = testParam; const msg = `params(${JSON.stringify(testParam)})`; @@ -446,7 +472,16 @@ describe('test resume up', function () { const key = 'storage_putStream_resume_test' + Math.floor(Math.random() * 100000); const putExtra = new qiniu.resume_up.PutExtra(); - putExtra.resumeRecordFile = path.join(os.tmpdir(), key + '.resume.json'); + + if (resumeRecordFile) { + putExtra.resumeRecordFile = resumeRecordFile; + } + + if (resumeRecorderOption) { + putExtra.resumeRecorder = createResumeRecorderSync(resumeRecorderOption.baseDirPath); + putExtra.resumeKey = resumeRecorderOption.resumeKey; + } + if (version !== undefined) { putExtra.version = version; } @@ -479,9 +514,19 @@ describe('test resume up', function () { }) 
.then(() => { // try to upload from resume point + const couldResume = Boolean(putExtra.resumeRecordFile || putExtra.resumeRecorder); + let isFirstPart = true; putExtra.progressCallback = (uploaded, _total) => { - if (uploaded / partSize <= 1) { - throw new Error('not resumed'); + if (!isFirstPart) { + return; + } + isFirstPart = false; + if (couldResume && uploaded / partSize <= 1) { + throw new Error('should resume'); + } + if (!couldResume && uploaded / partSize > 1) { + console.log('lihs debug:', { couldResume, uploaded }); + throw new Error('should not resume'); } }; return doAndWrapResultPromises(callback => @@ -497,6 +542,40 @@ describe('test resume up', function () { const checkFunc = ({ data }) => { data.should.have.keys('key', 'hash'); + if (resumeRecordFile) { + should.ok(!fs.existsSync(putExtra.resumeRecordFile)); + } else if (resumeRecorderOption) { + if (resumeRecorderOption.resumeKey) { + should.ok(!fs.existsSync( + path.join( + resumeRecorderOption.baseDirPath, + resumeRecorderOption.resumeKey + ) + )); + } else { + should.exist(putExtra.resumeRecorder); + let fileLastModify; + try { + fileLastModify = filepath && fs.statSync(filepath).mtimeMs.toString(); + } catch (_err) { + fileLastModify = ''; + } + const recordValuesToHash = [ + putExtra.version, + accessKey, + `${bucketName}:${key}`, + filepath, + fileLastModify + ]; + const expectResumeKey = putExtra.resumeRecorder.generateKey(recordValuesToHash); + should.ok(!fs.existsSync( + path.join( + resumeRecorderOption.baseDirPath, + expectResumeKey + ) + )); + } + } }; let promises = null; From 70a9677428532b13a5aa847974665a406e514db1 Mon Sep 17 00:00:00 2001 From: lihsai0 Date: Mon, 17 Jun 2024 16:21:36 +0800 Subject: [PATCH 09/18] test: reduce cases for resume uploading and fix cases --- test/resume_up.test.js | 58 ++++++++++++++++++++++++++--------- test/storage_internal.test.js | 26 +++++++++++++--- 2 files changed, 65 insertions(+), 19 deletions(-) diff --git a/test/resume_up.test.js 
b/test/resume_up.test.js index 144a7f5..0e4b53c 100644 --- a/test/resume_up.test.js +++ b/test/resume_up.test.js @@ -132,6 +132,7 @@ describe('test resume up', function () { scope: bucketName }; const putPolicy = new qiniu.rs.PutPolicy(options); + putPolicy.expires = 7200; putPolicy.returnBody = '{"key":$(key),"hash":$(etag),"fname":$(fname),"var_1":$(x:var_1),"var_2":$(x:var_2)}'; const uploadToken = putPolicy.uploadToken(mac); const resumeUploader = new qiniu.resume_up.ResumeUploader(config); @@ -388,7 +389,7 @@ describe('test resume up', function () { }); describe('test resume up#putFile resume', function () { - const testParams = parametrize( + const testResumeParams = parametrize( { name: 'version', values: [ @@ -411,7 +412,6 @@ describe('test resume up', function () { { name: 'resumeRecorderOption', values: [ - undefined, // resume record is disabled, default { baseDirPath: path.join(os.tmpdir(), 'SDKCustomDir'), resumeKey: undefined @@ -421,15 +421,35 @@ describe('test resume up', function () { resumeKey: 'some-resume-key.json' } ] - }, - { - name: 'resumeRecordFile', - values: [ - undefined, - path.join(os.tmpdir(), 'some-resume-record-file.json') - ] } - ); + ) + .concat(parametrize( + { + name: 'version', + values: [ + undefined, + 'v1', + 'v2' + ] + }, + { + name: 'partSize', + values: [ + undefined, + 6 * 1024 * 1024 + ] + }, + { + name: 'fileSizeMB', + values: [2, 4, 6, 10] + }, + { + name: 'resumeRecordFile', + values: [ + path.join(os.tmpdir(), 'some-resume-record-file.json') + ] + } + )); const filepathListToDelete = []; after(function () { @@ -447,7 +467,7 @@ describe('test resume up', function () { ); }); - testParams.forEach(testParam => { + testResumeParams.forEach(testParam => { const { version, partSize, @@ -595,14 +615,15 @@ describe('test resume up', function () { }); describe('test resume up#accelerateUploading', function () { - const accConfig = new qiniu.conf.Config(); + let accConfig = new qiniu.conf.Config(); accConfig.useHttpsDomain = 
true; accConfig.accelerateUploading = true; - const accResumeUploader = new qiniu.resume_up.ResumeUploader(accConfig); + let accResumeUploader = new qiniu.resume_up.ResumeUploader(accConfig); const bucketNameWithoutAcc = 'bucket-without-acc-' + Math.floor(Math.random() * 100000); const accKeysToDelete = []; const accPutPolicy = new qiniu.rs.PutPolicy({ - scope: bucketNameWithoutAcc + scope: bucketNameWithoutAcc, + expires: 7200 }); const accUploadToken = accPutPolicy.uploadToken(mac); @@ -610,9 +631,16 @@ describe('test resume up', function () { return bucketManager.createBucket(bucketNameWithoutAcc); }); + beforeEach(function () { + accConfig = new qiniu.conf.Config(); + accConfig.useHttpsDomain = true; + accConfig.accelerateUploading = true; + accResumeUploader = new qiniu.resume_up.ResumeUploader(accConfig); + }); + after(function () { if (!accKeysToDelete.length) { - return; + return bucketManager.deleteBucket(bucketNameWithoutAcc); } return bucketManager.batch(accKeysToDelete.map(k => qiniu.rs.deleteOp(bucketNameWithoutAcc, k))) .then(({ data, resp }) => { diff --git a/test/storage_internal.test.js b/test/storage_internal.test.js index 67635f1..2dd2208 100644 --- a/test/storage_internal.test.js +++ b/test/storage_internal.test.js @@ -19,6 +19,20 @@ const { describe('test upload internal module', function () { describe('test TokenExpiredRetryPolicy', function () { const resumeRecordFilePath = path.join(process.cwd(), 'fake-progress-record'); + const recordExistsHandler = () => { + try { + return fs.existsSync(resumeRecordFilePath); + } catch (_err) { + return false; + } + }; + const recordDeleteHandler = () => { + try { + fs.unlinkSync(resumeRecordFilePath); + } catch (_err) { + // pass; + } + }; beforeEach(function () { const fd = fs.openSync(resumeRecordFilePath, 'w'); @@ -36,7 +50,8 @@ describe('test upload internal module', function () { it('test TokenExpiredRetryPolicy should not retry', function () { const tokenExpiredRetryPolicy = new 
TokenExpiredRetryPolicy({ uploadApiVersion: 'v1', - resumeRecordFilePath + recordDeleteHandler, + recordExistsHandler }); const mockedContext = {}; @@ -57,7 +72,8 @@ describe('test upload internal module', function () { it('test TokenExpiredRetryPolicy should not by maxRetriedTimes', function () { const tokenExpiredRetryPolicy = new TokenExpiredRetryPolicy({ uploadApiVersion: 'v1', - resumeRecordFilePath, + recordDeleteHandler, + recordExistsHandler, maxRetryTimes: 2 }); @@ -89,7 +105,8 @@ describe('test upload internal module', function () { it('test TokenExpiredRetryPolicy should retry v1', function () { const tokenExpiredRetryPolicy = new TokenExpiredRetryPolicy({ uploadApiVersion: 'v1', - resumeRecordFilePath + recordDeleteHandler, + recordExistsHandler }); const mockedContext = {}; @@ -113,7 +130,8 @@ describe('test upload internal module', function () { it('test TokenExpiredRetryPolicy should retry v2', function () { const tokenExpiredRetryPolicy = new TokenExpiredRetryPolicy({ uploadApiVersion: 'v2', - resumeRecordFilePath + recordDeleteHandler, + recordExistsHandler }); const mockedContext = {}; From 11c7824e5ec1ed9c4fa4b84b92e6d88885fba871 Mon Sep 17 00:00:00 2001 From: lihsai0 Date: Thu, 20 Jun 2024 14:49:05 +0800 Subject: [PATCH 10/18] fix(type): HttpClient methods type and update some comments --- index.d.ts | 21 +++++++++++---------- qiniu/conf.js | 2 +- qiniu/httpc/region.js | 2 ++ qiniu/httpc/regionsRetryPolicy.js | 2 +- 4 files changed, 15 insertions(+), 12 deletions(-) diff --git a/index.d.ts b/index.d.ts index 14f22d8..6e2fcf6 100644 --- a/index.d.ts +++ b/index.d.ts @@ -477,20 +477,20 @@ export declare namespace resume_up { } /** - * 当前仅支持了同步调用这一不推荐的使用方式,暂不公开具体内部信息,仅供类型匹配使用 + * 历史原因其方法当前仅支持了同步调用这一不推荐的使用方式,暂不公开具体内部信息,仅供 TypeScript 类型检查使用。 + * 实际不存在这个类,未来会变更为 interface。 */ abstract class ResumeRecorder { } /** * - * @param baseDirPath 默认值为 `os.tmpdir()` + * @param baseDirPath 默认值为 `os.tmpdir()`,该方法若 baseDirPath 不存在将自动创建 */ function 
createResumeRecorder (baseDirPath?: string): Promise /** - * TODO: 是否公开这个不推荐的同步实现? - * `createResumeRecorder` 的同步版本 + * `createResumeRecorder` 的同步版本,不推荐使用 */ function createResumeRecorderSync (baseDirPath?: string): ResumeRecorder } @@ -700,17 +700,17 @@ export declare namespace httpc { middlewares?: middleware.Middleware[]; } - interface GetOptions extends ReqOpts { + interface GetOptions extends Omit, 'urllibOptions'> { params: Record; headers: Record; } - interface PostOptions extends ReqOpts { + interface PostOptions extends Omit, 'urllibOptions'> { data: string | Buffer | Readable; headers: Record; } - interface PutOptions extends ReqOpts { + interface PutOptions extends Omit, 'urllibOptions'> { data: string | Buffer | Readable; headers: Record } @@ -721,9 +721,9 @@ export declare namespace httpc { middlewares: middleware.Middleware[]; constructor(options: HttpClientOptions) sendRequest(requestOptions: ReqOpts): Promise - get(getOptions: GetOptions): Promise - post(postOptions: PostOptions): Promise - put(putOptions: PutOptions): Promise + get(getOptions: GetOptions, urllibOptions?: RequestOptions): Promise + post(postOptions: PostOptions, urllibOptions?: RequestOptions): Promise + put(putOptions: PutOptions, urllibOptions?: RequestOptions): Promise } // endpoint.js @@ -1353,6 +1353,7 @@ export declare namespace rs { * @param callbackFunc */ listBucket(callbackFunc?: callback): Promise> + listBucket(options: { shared: string, tagCondition: Record }, callbackFunc?: callback): Promise> /** * 创建空间 diff --git a/qiniu/conf.js b/qiniu/conf.js index cfd775e..976ae1a 100644 --- a/qiniu/conf.js +++ b/qiniu/conf.js @@ -64,7 +64,7 @@ const Config = (function () { * @constructor * @param {Object} [options] * @param {boolean} [options.useHttpsDomain] - * @param {boolean} [options.accelerateUploading] should active the domains before using + * @param {boolean} [options.accelerateUploading] enable accelerate uploading. 
should active the domains in portal before using * @param {EndpointsProvider} [options.ucEndpointsProvider] * @param {EndpointsProvider} [options.queryRegionsEndpointsProvider] * @param {RegionsProvider} [options.regionsProvider] diff --git a/qiniu/httpc/region.js b/qiniu/httpc/region.js index ce0dc4d..114a45f 100644 --- a/qiniu/httpc/region.js +++ b/qiniu/httpc/region.js @@ -283,6 +283,7 @@ Region.merge = function (...regions) { const [source, ...rest] = regions; const target = source.clone(); rest.forEach(s => { + // use Object.values when min version of Node.js update to ≥ v7.5.0 Object.keys(s.services).forEach(n => { if (!target.services[n]) { target.services[n] = s.services[n].map(endpoint => endpoint.clone()); @@ -306,6 +307,7 @@ Region.prototype.getRegions = function () { }; Region.prototype.clone = function () { + // use Object.entries when min version of Node.js update to ≥ v7.5.0 const services = Object.keys(this.services).reduce((s, n) => { s[n] = this.services[n].map(endpoint => endpoint.clone()); return s; diff --git a/qiniu/httpc/regionsRetryPolicy.js b/qiniu/httpc/regionsRetryPolicy.js index 79dba6f..45a8eb8 100644 --- a/qiniu/httpc/regionsRetryPolicy.js +++ b/qiniu/httpc/regionsRetryPolicy.js @@ -194,7 +194,7 @@ RegionsRetryPolicy.prototype._initRegions = function (options) { }); context.alternativeRegions = regions; } else { - // preferred endpoints is a region, then reorder the regions and services + // preferred endpoints in a known region, then reorder the regions and services context.alternativeRegions = regions; [context.region] = context.alternativeRegions.splice(preferredRegionIndex, 1); context.alternativeServiceNames = this.serviceNames.slice(); From 64ffaf9fc05993599626b5e979274673a3d9d507 Mon Sep 17 00:00:00 2001 From: lihsai0 Date: Thu, 20 Jun 2024 14:53:03 +0800 Subject: [PATCH 11/18] docs: update CHANGELOG.md and bump version to 7.13.0 --- CHANGELOG.md | 11 +++++++++++ package.json | 2 +- 2 files changed, 12 insertions(+), 1 
deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index c96f1cc..1d3ce41 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,16 @@ ## CHANGE LOG +## 7.13.0 +- 对象存储,新增空间级别上传加速开关 +- 对象存储,优化断点续传开启方式 +- 对象存储,优化回调签名验证函数,新增兼容 Qiniu 签名 +- 对象存储,修复上传失败无法完成自动重试 + - 在 Node.js 18 及以上触发 + - 7.12.0 引入 +- 对象存储,新增空间的创建、删除、按标签列举 +- 对象存储,调整查询区域主备域名 +- 修复内部 HttpClient 部分方法参数类型声明不正确 + ## 7.12.0 - 对象存储,新增支持 Promise 风格异步 - 对象存储,修复分片上传 v1 在特定条件可能无法从断点继续上传 diff --git a/package.json b/package.json index c332815..f3ae9a4 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "qiniu", - "version": "7.12.0", + "version": "7.13.0", "description": "Node wrapper for Qiniu Resource (Cloud) Storage API", "main": "index.js", "directories": { From d3be391ae86fb0171d719d7db000b44bc3f5870b Mon Sep 17 00:00:00 2001 From: lihsai0 Date: Tue, 2 Jul 2024 18:31:05 +0800 Subject: [PATCH 12/18] feat: add hosts to generate resume file name supplementary adjustment for fa92d96 --- qiniu/storage/resume.js | 240 ++++++++++++++++++++++++---------------- qiniu/util.js | 4 - test/httpc.test.js | 2 +- test/resume_up.test.js | 52 +++++---- 4 files changed, 180 insertions(+), 118 deletions(-) diff --git a/qiniu/storage/resume.js b/qiniu/storage/resume.js index efa8003..8de3aaa 100644 --- a/qiniu/storage/resume.js +++ b/qiniu/storage/resume.js @@ -17,6 +17,7 @@ const rpc = require('../rpc'); const { SERVICE_NAME } = require('../httpc/region'); const { ResponseWrapper } = require('../httpc/responseWrapper'); const { Endpoint } = require('../httpc/endpoint'); +const { StaticRegionsProvider } = require('../httpc/regionsProvider'); const { EndpointsRetryPolicy } = require('../httpc/endpointsRetryPolicy'); const { RegionsRetryPolicy } = require('../httpc/regionsRetryPolicy'); const { Retrier } = require('../retry'); @@ -86,6 +87,7 @@ function PutExtra ( this.fname = fname || ''; this.params = params || {}; this.mimeType = mimeType || null; + // @deprecated use resumeRecorder and resumeKey instead 
this.resumeRecordFile = resumeRecordFile || null; this.progressCallback = progressCallback || null; this.partSize = partSize || conf.BLOCK_SIZE; @@ -100,56 +102,83 @@ function PutExtra ( * @param {Object} options * @param {string} options.accessKey * @param {string} options.bucketName - * @param {boolean} [options.retryable] - * @param {'v1' | 'v2' | string} [options.uploadApiVersion] - * @param {JsonFileRecorder} [options.resumeRecorder] - * @param {string} [options.resumeKey] + * @param {string} [options.key] + * @param {string} [options.filePath] + * @param {PutExtra} options.putExtra + * + * @returns Retrier */ function _getRegionsRetrier (options) { const { - bucketName, accessKey, - retryable = true, + bucketName, + key, + filePath, - uploadApiVersion, - resumeRecorder, - resumeKey + putExtra } = options; const preferredScheme = this.config.useHttpsDomain ? 'https' : 'http'; - let preferredEndpoints; - const isResumeAvailable = Boolean(resumeRecorder && resumeKey); - if (isResumeAvailable) { - const resumeInfo = resumeRecorder.getSync(resumeKey); - if (resumeInfo && Array.isArray(resumeInfo.upDomains)) { - preferredEndpoints = resumeInfo.upDomains.map(d => - new Endpoint(d, { defaultScheme: preferredScheme })); - } + + let regionsProviderPromise = this.config.getRegionsProvider({ + accessKey, + bucketName + }); + + // generate resume key, if there is a recorder but not resume key + if (putExtra.resumeRecorder && !putExtra.resumeKey) { + regionsProviderPromise = regionsProviderPromise + .then(regionsProvider => regionsProvider.getRegions()) + .then(regions => { + if (!regions || !regions.length) { + return Promise.reject(new Error(`no region available for the bucket "${bucketName}"`)); + } + const upAccEndpoints = regions[0].services[SERVICE_NAME.UP_ACC] || []; + const upEndpoints = regions[0].services[SERVICE_NAME.UP] || []; + const upHosts = upAccEndpoints.concat(upEndpoints).map(e => e.host); + putExtra.resumeKey = putExtra.resumeRecorder.generateKeySync({ 
+ hosts: upHosts, + accessKey: accessKey, + bucketName: bucketName, + key: key, + filePath: filePath, + version: putExtra.version, + partSize: putExtra.partSize + }); + return new StaticRegionsProvider(regions); + }); } - return this.config.getRegionsProvider({ - bucketName, - accessKey - }) + return regionsProviderPromise .then(regionsProvider => { + // handle preferred endpoints + let preferredEndpoints; + if (putExtra.resumeRecorder && putExtra.resumeKey) { + const resumeInfo = putExtra.resumeRecorder.getSync(putExtra.resumeKey); + if (resumeInfo && Array.isArray(resumeInfo.upDomains)) { + preferredEndpoints = resumeInfo.upDomains.map(d => + new Endpoint(d, { defaultScheme: preferredScheme })); + } + } + const serviceNames = this.config.accelerateUploading ? [SERVICE_NAME.UP_ACC, SERVICE_NAME.UP] : [SERVICE_NAME.UP]; const retryPolicies = [ new AccUnavailableRetryPolicy(), new TokenExpiredRetryPolicy({ - uploadApiVersion, + uploadApiVersion: putExtra.version, recordExistsHandler: () => { - if (!isResumeAvailable) { + if (!putExtra.resumeRecorder || !putExtra.resumeKey) { return; } - resumeRecorder.hasSync(resumeKey); + putExtra.resumeRecorder.hasSync(putExtra.resumeKey); }, recordDeleteHandler: () => { - if (!isResumeAvailable) { + if (!putExtra.resumeRecorder || !putExtra.resumeKey) { return; } - resumeRecorder.deleteSync(resumeKey); + putExtra.resumeRecorder.deleteSync(putExtra.resumeKey); } }), new EndpointsRetryPolicy({ @@ -159,10 +188,10 @@ function _getRegionsRetrier (options) { regionsProvider, serviceNames, onChangedRegion: () => { - if (!isResumeAvailable) { + if (!putExtra.resumeRecorder || !putExtra.resumeKey) { return; } - resumeRecorder.deleteSync(resumeKey); + putExtra.resumeRecorder.deleteSync(putExtra.resumeKey); }, preferredEndpoints }) @@ -175,12 +204,12 @@ function _getRegionsRetrier (options) { if (context.error.noNeedRetry) { return false; } - return retryable; + return true; } if (policy instanceof AccUnavailableRetryPolicy) { return true; 
} - return retryable && context.result && context.result.needRetry(); + return context.result && context.result.needRetry(); } }); }); @@ -219,33 +248,39 @@ ResumeUploader.prototype.putStream = function ( } ); - // Why need retrier even if retryable is false? - // Because the retrier is used to get the endpoints, - // which will be initialed by region policy. - const result = _getRegionsRetrier.call(this, { - bucketName: util.getBucketFromUptoken(uploadToken), - accessKey: util.getAKFromUptoken(uploadToken), - retryable: false + const bucketName = util.getBucketFromUptoken(uploadToken); + const accessKey = util.getAKFromUptoken(uploadToken); - // useless by not retryable - // uploadApiVersion: putExtra.version, + const result = this.config.getRegionsProvider({ + bucketName, + accessKey }) - .then(retrier => Promise.all([ - retrier, - retrier.initContext() - ])) - .then(([retrier, context]) => retrier.retry({ - func: context => putReq( - context.endpoint, + .then(regionsProvider => regionsProvider.getRegions()) + .then(regions => { + if (!regions || !regions.length) { + return Promise.reject(new Error('no region available for the bucket', bucketName)); + } + const preferService = this.config.accelerateUploading + ? 
SERVICE_NAME.UP_ACC + : SERVICE_NAME.UP; + if ( + !regions[0].services || + !regions[0].services[preferService] || + !regions[0].services[preferService].length + ) { + return Promise.reject(new Error('no endpoint available for the bucket', bucketName)); + } + const endpoint = regions[0].services[preferService][0]; + return putReq( + endpoint, preferredScheme, uploadToken, key, rsStream, rsStreamLen, putExtra - ), - context - })); + ); + }); handleReqCallback(result, callbackFunc); @@ -838,38 +873,36 @@ ResumeUploader.prototype.putFile = function ( putExtra.fname = path.basename(localFile); } - const akFromToken = util.getAKFromUptoken(uploadToken); - const bucketFromToken = util.getBucketFromUptoken(uploadToken); + const accessKey = util.getAKFromUptoken(uploadToken); + const bucketName = util.getBucketFromUptoken(uploadToken); + putExtra = getDefaultPutExtra( putExtra, { - accessKey: akFromToken, - bucketName: bucketFromToken, - key, - filePath: localFile + key } ); const result = _getRegionsRetrier.call(this, { - accessKey: akFromToken, - bucketName: bucketFromToken, + accessKey, + bucketName, + key, + filePath: localFile, - uploadApiVersion: putExtra.version, - resumeRecorder: putExtra.resumeRecorder, - resumeKey: putExtra.resumeKey + putExtra }) .then(retrier => Promise.all([ retrier, retrier.initContext() ])) .then(([retrier, context]) => retrier.retry({ - func: context => { + func: ctx => { const rsStream = fs.createReadStream(localFile, { highWaterMark: conf.BLOCK_SIZE }); const rsStreamLen = fs.statSync(localFile).size; const p = putReq( - context.endpoint, + ctx.endpoint, preferredScheme, uploadToken, key, @@ -914,10 +947,7 @@ ResumeUploader.prototype.putFileWithoutKey = function ( /** * @param {PutExtra} putExtra * @param {Object} options - * @param {string} [options.accessKey] - * @param {string} [options.bucketName] * @param {string | null} [options.key] - * @param {string} [options.filePath] * @returns {PutExtra} */ function getDefaultPutExtra 
(putExtra, options) { @@ -943,30 +973,6 @@ function getDefaultPutExtra (putExtra, options) { putExtra.resumeKey = parsedPath.name; } - // generate `resumeKey` if not exists - if ( - putExtra.resumeRecorder && - !putExtra.resumeKey && - options.filePath && - options.accessKey && - options.bucketName - ) { - let fileLastModify; - try { - fileLastModify = options.filePath && fs.statSync(options.filePath).mtimeMs.toString(); - } catch (_err) { - fileLastModify = ''; - } - const recordValuesToHash = [ - putExtra.version, - options.accessKey, - `${options.bucketName}:${options.key}`, - options.filePath, - fileLastModify - ]; - putExtra.resumeKey = putExtra.resumeRecorder.generateKey(recordValuesToHash); - } - return putExtra; } @@ -1001,9 +1007,9 @@ JsonFileRecorder.prototype.setSync = function (key, data) { * @returns {undefined | Object.} */ JsonFileRecorder.prototype.getSync = function (key) { - const filePath = path.join(this.baseDirPath, key); let result; try { + const filePath = path.join(this.baseDirPath, key); const recordContent = fs.readFileSync( filePath, { @@ -1018,8 +1024,8 @@ JsonFileRecorder.prototype.getSync = function (key) { }; JsonFileRecorder.prototype.hasSync = function (key) { - const filePath = path.join(this.baseDirPath, key); try { + const filePath = path.join(this.baseDirPath, key); return fs.existsSync(filePath); } catch (_err) { return false; @@ -1027,15 +1033,63 @@ JsonFileRecorder.prototype.hasSync = function (key) { }; JsonFileRecorder.prototype.deleteSync = function (key) { - const filePath = path.join(this.baseDirPath, key); try { + const filePath = path.join(this.baseDirPath, key); fs.unlinkSync(filePath); } catch (_err) { // pass } }; -JsonFileRecorder.prototype.generateKey = function (fields) { +/** + * @param {Object} options + * @param {string[]} options.hosts + * @param {string} options.accessKey + * @param {string} options.bucketName + * @param {string} options.key + * @param {string} options.filePath + * @param {string} 
options.version + * @param {string} options.partSize + * @returns {string | undefined} + */ +JsonFileRecorder.prototype.generateKeySync = function (options) { + // if some options not pass in, can't generate a valid key + if ( + [ + Array.isArray(options.hosts), + options.accessKey, + options.bucketName, + options.key, + options.filePath, + options.version, + options.partSize + ].some(v => !v) + ) { + return; + } + + let fileStats; + try { + fileStats = options.filePath && fs.statSync(options.filePath); + } catch (_err) { + return; + } + + const fields = [ + options.hosts.join(''), + options.accessKey, + options.bucketName, + options.key || '', + options.filePath, + // use `stats.mtimeMs` when min version of Node.js update to ≥ v8.1.0 + fileStats ? fileStats.mtime.getTime().toString() : '', + fileStats ? fileStats.size.toString() : '', + options.version, // the upload version + options.version === 'v1' + ? conf.BLOCK_SIZE.toString() + : options.partSize.toString(), + 'json.v1' // the record file format version + ]; const h = crypto.createHash('sha1'); fields.forEach(v => { h.update(v); diff --git a/qiniu/util.js b/qiniu/util.js index 0509cee..c1678b8 100644 --- a/qiniu/util.js +++ b/qiniu/util.js @@ -380,7 +380,3 @@ exports.prepareZone = function (ctx, accessKey, bucket, callback) { }); } }; - -exports.writeOrCreateSync = function () { - -}; diff --git a/test/httpc.test.js b/test/httpc.test.js index f821a48..f19bf66 100644 --- a/test/httpc.test.js +++ b/test/httpc.test.js @@ -617,7 +617,7 @@ describe('test http module', function () { return Promise.all(endpointsProviders.map(e => e.getEndpoints())); }) .then(regionsEndpoints => { - // use `Array.prototype.flat` if migrate to node v11.15 + // use `Array.prototype.flat` when min version of Node.js update to ≥ v11.15 const regionsEndpointValues = regionsEndpoints.map( endpoints => endpoints.map(e => e.getValue()) diff --git a/test/resume_up.test.js b/test/resume_up.test.js index 0e4b53c..30f9b9d 100644 --- 
a/test/resume_up.test.js +++ b/test/resume_up.test.js @@ -509,21 +509,39 @@ describe('test resume up', function () { putExtra.partSize = partSize; } - const filepath = path.join(os.tmpdir(), key); - const result = createRandomFile(filepath, fileSizeMB * (1 << 20)) + let upHosts = []; + const filePath = path.join(os.tmpdir(), key); + const result = createRandomFile(filePath, fileSizeMB * (1 << 20)) + // mock file .then(() => { // add to auto clean file - filepathListToDelete.push(filepath); + filepathListToDelete.push(filePath); filepathListToDelete.push(putExtra.resumeRecordFile); // upload and abort putExtra.progressCallback = (_uploaded, _total) => { throw new Error('mocked error'); }; + }) + // get up hosts for generating resume key later + .then(() => resumeUploader.config.getRegionsProvider({ + accessKey: accessKey, + bucketName: bucketName + })) + .then(regionsProvider => regionsProvider.getRegions()) + .then(regions => { + const serviceName = resumeUploader.config.accelerateUploading + ? 
SERVICE_NAME.UP_ACC + : SERVICE_NAME.UP; + upHosts = regions[0].services[serviceName].map(e => e.host); + }) + // get up hosts end + // mock upload failed + .then(() => { return resumeUploader.putFile( uploadToken, key, - filepath, + filePath, putExtra ) .catch(err => { @@ -532,8 +550,8 @@ describe('test resume up', function () { } }); }) + // try to upload from resume point .then(() => { - // try to upload from resume point const couldResume = Boolean(putExtra.resumeRecordFile || putExtra.resumeRecorder); let isFirstPart = true; putExtra.progressCallback = (uploaded, _total) => { @@ -545,7 +563,6 @@ describe('test resume up', function () { throw new Error('should resume'); } if (!couldResume && uploaded / partSize > 1) { - console.log('lihs debug:', { couldResume, uploaded }); throw new Error('should not resume'); } }; @@ -553,7 +570,7 @@ describe('test resume up', function () { resumeUploader.putFile( uploadToken, key, - filepath, + filePath, putExtra, callback ) @@ -574,20 +591,15 @@ describe('test resume up', function () { )); } else { should.exist(putExtra.resumeRecorder); - let fileLastModify; - try { - fileLastModify = filepath && fs.statSync(filepath).mtimeMs.toString(); - } catch (_err) { - fileLastModify = ''; - } - const recordValuesToHash = [ - putExtra.version, + const expectResumeKey = putExtra.resumeRecorder.generateKeySync({ + hosts: upHosts, accessKey, - `${bucketName}:${key}`, - filepath, - fileLastModify - ]; - const expectResumeKey = putExtra.resumeRecorder.generateKey(recordValuesToHash); + bucketName, + key, + filePath, + version: version || 'v1', + partSize: partSize || qiniu.conf.BLOCK_SIZE + }); should.ok(!fs.existsSync( path.join( resumeRecorderOption.baseDirPath, From 3018775b6385c2784d63a09fc8f070e8ec8e98b6 Mon Sep 17 00:00:00 2001 From: lihsai0 Date: Fri, 19 Jul 2024 15:03:46 +0800 Subject: [PATCH 13/18] fix: uplaod resume record file name lost ext and improve resume test cases --- qiniu/storage/resume.js | 2 +- test/resume_up.test.js | 
98 +++++++++++++++++++++++------------------ 2 files changed, 57 insertions(+), 43 deletions(-) diff --git a/qiniu/storage/resume.js b/qiniu/storage/resume.js index 8de3aaa..e4c95e5 100644 --- a/qiniu/storage/resume.js +++ b/qiniu/storage/resume.js @@ -970,7 +970,7 @@ function getDefaultPutExtra (putExtra, options) { if (putExtra.resumeRecordFile) { const parsedPath = path.parse(path.resolve(putExtra.resumeRecordFile)); putExtra.resumeRecorder = createResumeRecorderSync(parsedPath.dir); - putExtra.resumeKey = parsedPath.name; + putExtra.resumeKey = parsedPath.base; } return putExtra; diff --git a/test/resume_up.test.js b/test/resume_up.test.js index 30f9b9d..dae78f2 100644 --- a/test/resume_up.test.js +++ b/test/resume_up.test.js @@ -509,19 +509,13 @@ describe('test resume up', function () { putExtra.partSize = partSize; } - let upHosts = []; + let recordPersistPath = ''; const filePath = path.join(os.tmpdir(), key); const result = createRandomFile(filePath, fileSizeMB * (1 << 20)) // mock file .then(() => { // add to auto clean file filepathListToDelete.push(filePath); - filepathListToDelete.push(putExtra.resumeRecordFile); - - // upload and abort - putExtra.progressCallback = (_uploaded, _total) => { - throw new Error('mocked error'); - }; }) // get up hosts for generating resume key later .then(() => resumeUploader.config.getRegionsProvider({ @@ -530,14 +524,49 @@ describe('test resume up', function () { })) .then(regionsProvider => regionsProvider.getRegions()) .then(regions => { - const serviceName = resumeUploader.config.accelerateUploading - ? 
SERVICE_NAME.UP_ACC - : SERVICE_NAME.UP; - upHosts = regions[0].services[serviceName].map(e => e.host); + /** @type {string[]} */ + const upAccEndpoints = regions[0].services[SERVICE_NAME.UP_ACC] || []; + const upEndpoints = regions[0].services[SERVICE_NAME.UP] || []; + const upHosts = upAccEndpoints.concat(upEndpoints).map(e => e.host); + return Promise.resolve(upHosts); }) // get up hosts end + // get record file path + .then(upHosts => { + if (resumeRecordFile) { + recordPersistPath = putExtra.resumeRecordFile; + } else if (resumeRecorderOption) { + if (resumeRecorderOption.resumeKey) { + recordPersistPath = path.join( + resumeRecorderOption.baseDirPath, + resumeRecorderOption.resumeKey + ); + } else if (putExtra.resumeRecorder) { + const expectResumeKey = putExtra.resumeRecorder.generateKeySync({ + hosts: upHosts, + accessKey, + bucketName, + key, + filePath, + version: version || 'v1', + partSize: partSize || qiniu.conf.BLOCK_SIZE + }); + recordPersistPath = path.join( + resumeRecorderOption.baseDirPath, + expectResumeKey + ); + } + } + if (recordPersistPath) { + filepathListToDelete.push(recordPersistPath); + } + }) // mock upload failed .then(() => { + // upload and abort + putExtra.progressCallback = (_uploaded, _total) => { + throw new Error('mocked error'); + }; return resumeUploader.putFile( uploadToken, key, @@ -550,19 +579,29 @@ describe('test resume up', function () { } }); }) + // check record file + .then(() => { + if (putExtra.resumeRecordFile || putExtra.resumeRecorder) { + should.exists(recordPersistPath); + should.ok(fs.existsSync(recordPersistPath), 'record file should exists'); + } + }) // try to upload from resume point .then(() => { const couldResume = Boolean(putExtra.resumeRecordFile || putExtra.resumeRecorder); - let isFirstPart = true; + let isFirstPart = true; // 是否首次片上传请求成功,断点续传时是从断点开始首次上传成功的片计算 putExtra.progressCallback = (uploaded, _total) => { if (!isFirstPart) { return; } + const partNumber = partSize + ? 
uploaded / partSize + : uploaded / (4 * 1024 * 1024); isFirstPart = false; - if (couldResume && uploaded / partSize <= 1) { + if (couldResume && partNumber <= 1) { throw new Error('should resume'); } - if (!couldResume && uploaded / partSize > 1) { + if (!couldResume && partNumber > 1) { throw new Error('should not resume'); } }; @@ -579,34 +618,9 @@ describe('test resume up', function () { const checkFunc = ({ data }) => { data.should.have.keys('key', 'hash'); - if (resumeRecordFile) { - should.ok(!fs.existsSync(putExtra.resumeRecordFile)); - } else if (resumeRecorderOption) { - if (resumeRecorderOption.resumeKey) { - should.ok(!fs.existsSync( - path.join( - resumeRecorderOption.baseDirPath, - resumeRecorderOption.resumeKey - ) - )); - } else { - should.exist(putExtra.resumeRecorder); - const expectResumeKey = putExtra.resumeRecorder.generateKeySync({ - hosts: upHosts, - accessKey, - bucketName, - key, - filePath, - version: version || 'v1', - partSize: partSize || qiniu.conf.BLOCK_SIZE - }); - should.ok(!fs.existsSync( - path.join( - resumeRecorderOption.baseDirPath, - expectResumeKey - ) - )); - } + if (putExtra.resumeRecordFile || putExtra.resumeRecorder) { + should.exists(recordPersistPath); + should.ok(!fs.existsSync(recordPersistPath)); } }; From 8a6cd9a7844f3e4c56c4fc0ab027634c3bee2b5c Mon Sep 17 00:00:00 2001 From: lihsai0 Date: Fri, 19 Jul 2024 15:14:29 +0800 Subject: [PATCH 14/18] feat: use uc hosts aligns with query region hosts by default --- qiniu/conf.js | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/qiniu/conf.js b/qiniu/conf.js index 976ae1a..bceb941 100644 --- a/qiniu/conf.js +++ b/qiniu/conf.js @@ -44,11 +44,13 @@ Object.defineProperty(exports, 'QUERY_REGION_HOST', { QUERY_REGION_BACKUP_HOSTS = []; } }); -let UC_HOST = 'uc.qbox.me'; +let UC_BACKUP_HOSTS = QUERY_REGION_BACKUP_HOSTS.slice(); +let UC_HOST = QUERY_REGION_HOST; Object.defineProperty(exports, 'UC_HOST', { get: () => UC_HOST, set: v => { UC_HOST = v; + 
UC_BACKUP_HOSTS = []; QUERY_REGION_HOST = v; QUERY_REGION_BACKUP_HOSTS = []; } @@ -108,11 +110,10 @@ const Config = (function () { return this.ucEndpointsProvider; } - return new Endpoint( - UC_HOST, - { + return new StaticEndpointsProvider( + [UC_HOST].concat(UC_BACKUP_HOSTS).map(h => new Endpoint(h, { defaultScheme: this.useHttpsDomain ? 'https' : 'http' - } + })) ); }; From 9c261b8e0c841c12e2f61d60fef6da70e349d308 Mon Sep 17 00:00:00 2001 From: lihsai0 Date: Fri, 19 Jul 2024 15:15:03 +0800 Subject: [PATCH 15/18] chore: improve query region provide error text --- qiniu/httpc/regionsProvider.js | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/qiniu/httpc/regionsProvider.js b/qiniu/httpc/regionsProvider.js index 0ef1586..8cf1b0d 100644 --- a/qiniu/httpc/regionsProvider.js +++ b/qiniu/httpc/regionsProvider.js @@ -687,9 +687,9 @@ const QueryRegionsProvider = (function () { if (!respWrapper.ok()) { return Promise.reject( new Error( - 'Query regions failed with' + + 'Query regions failed with ' + `HTTP Status Code ${respWrapper.resp.statusCode}, ` + - `Body ${respWrapper.data}` + `Body ${JSON.stringify(respWrapper.resp.data)}` ) ); } From 3466170310f40fdfd39255c976b6a63b8b017490 Mon Sep 17 00:00:00 2001 From: lihsai0 Date: Fri, 19 Jul 2024 16:47:09 +0800 Subject: [PATCH 16/18] fix(test): default uc provider check --- test/conf.test.js | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/test/conf.test.js b/test/conf.test.js index 391e580..fc899ac 100644 --- a/test/conf.test.js +++ b/test/conf.test.js @@ -7,7 +7,8 @@ const fs = require('fs'); describe('test Config class', function () { const { - Config + Config, + UC_HOST } = qiniu.conf; const { Endpoint, @@ -49,8 +50,8 @@ describe('test Config class', function () { return config.getUcEndpointsProvider() .getEndpoints() .then(endpoints => { - should.equal(endpoints.length, 1); - should.equal(endpoints[0].getValue(), `${scheme}://uc.qbox.me`); + 
endpoints.length.should.greaterThanOrEqual(1); + should.equal(endpoints[0].getValue(), `${scheme}://${UC_HOST}`); }); })); }); From 05886e4ab86d355ac8cca1342126d3be459503d9 Mon Sep 17 00:00:00 2001 From: lihsai0 Date: Mon, 29 Jul 2024 06:23:56 +0800 Subject: [PATCH 17/18] fix: lost uploading progress when retry by change service --- qiniu/httpc/regionsRetryPolicy.js | 18 +++++++++------- test/httpc.test.js | 35 ++++++++++++++++++++++++++++++- 2 files changed, 45 insertions(+), 8 deletions(-) diff --git a/qiniu/httpc/regionsRetryPolicy.js b/qiniu/httpc/regionsRetryPolicy.js index 45a8eb8..af3fd04 100644 --- a/qiniu/httpc/regionsRetryPolicy.js +++ b/qiniu/httpc/regionsRetryPolicy.js @@ -89,22 +89,22 @@ RegionsRetryPolicy.prototype.shouldRetry = function (context) { * @returns {Promise} */ RegionsRetryPolicy.prototype.prepareRetry = function (context) { + let handleChangedRegionPromise = Promise.resolve(); if (context.alternativeServiceNames.length) { context.serviceName = context.alternativeServiceNames.shift(); } else if (context.alternativeRegions.length) { context.region = context.alternativeRegions.shift(); [context.serviceName, ...context.alternativeServiceNames] = this.serviceNames; + if (typeof this.onChangedRegion === 'function') { + handleChangedRegionPromise = this.onChangedRegion(context); + } } else { return Promise.reject( new Error('There isn\'t available region or service for next try') ); } - return this._prepareEndpoints(context) - .then(() => { - if (typeof this.onChangedRegion === 'function') { - return this.onChangedRegion(context); - } - }); + return handleChangedRegionPromise + .then(() => this._prepareEndpoints(context)); }; /** @@ -211,6 +211,7 @@ RegionsRetryPolicy.prototype._initRegions = function (options) { * @protected */ RegionsRetryPolicy.prototype._prepareEndpoints = function (context) { + let handleChangedRegionsPromise = Promise.resolve(); [context.endpoint, ...context.alternativeEndpoints] = 
context.region.services[context.serviceName] || []; while (!context.endpoint) { if (context.alternativeServiceNames.length) { @@ -224,6 +225,9 @@ RegionsRetryPolicy.prototype._prepareEndpoints = function (context) { // compatible, remove when make break changes this.serviceName = context.serviceName; [context.endpoint, ...context.alternativeEndpoints] = context.region.services[context.serviceName] || []; + if (typeof this.onChangedRegion === 'function') { + handleChangedRegionsPromise = this.onChangedRegion(context); + } } else { return Promise.reject(new Error( 'There isn\'t available endpoint for ' + @@ -232,7 +236,7 @@ RegionsRetryPolicy.prototype._prepareEndpoints = function (context) { )); } } - return Promise.resolve(); + return handleChangedRegionsPromise; }; exports.RegionsRetryPolicy = RegionsRetryPolicy; diff --git a/test/httpc.test.js b/test/httpc.test.js index f19bf66..800707a 100644 --- a/test/httpc.test.js +++ b/test/httpc.test.js @@ -1308,7 +1308,7 @@ describe('test http module', function () { }); }); - it('test onChangedRegion', function () { + it('test onChangedRegion should call', function () { let regionChangedTimes = 0; const regionsRetryPolicy = new RegionsRetryPolicy({ regionsProvider: regionsProvider, @@ -1343,6 +1343,39 @@ describe('test http module', function () { }); }); + it('test onChangedRegion should not call', function () { + const regions = [ + Region.fromRegionId('z0'), + Region.fromRegionId('z1') + ]; + regions[0].services[SERVICE_NAME.UP_ACC] = [ + new Endpoint(`${bucketName}.kodo-accelerate.cn-east-1.qiniucs.com`) + ]; + const regionsProvider = new StaticRegionsProvider(regions); + let regionChangedTimes = 0; + const regionsRetryPolicy = new RegionsRetryPolicy({ + regionsProvider: regionsProvider, + serviceNames: [SERVICE_NAME.UP_ACC, SERVICE_NAME.UP], + onChangedRegion: () => { + regionChangedTimes++; + return Promise.resolve(); + } + }); + + const mockedContext = { + error: null, + retried: false + }; + + return 
regionsRetryPolicy.initContext(mockedContext) + .then(() => { + return regionsRetryPolicy.prepareRetry(mockedContext); + }) + .then(() => { + should.equal(regionChangedTimes, 0); + }); + }); + it('test init context with preferredEndpoints', function () { const preferredEndpoints = [ new Endpoint('https://preferred-endpoint.example.com'), From a63f89180bbb198a6f5110969d171332ed7501b9 Mon Sep 17 00:00:00 2001 From: lihsai0 Date: Mon, 29 Jul 2024 06:25:25 +0800 Subject: [PATCH 18/18] fix: type declare --- index.d.ts | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/index.d.ts b/index.d.ts index 6e2fcf6..2293930 100644 --- a/index.d.ts +++ b/index.d.ts @@ -189,6 +189,10 @@ export declare namespace conf { } class Config { useHttpsDomain: boolean; + accelerateUploading: boolean; + /** + * @deprecated 实际已无加速上传能力,使用 accelerateUploading 代替 + */ useCdnDomain: boolean; ucEndpointsProvider?: httpc.EndpointsProvider | null; queryRegionsEndpointsProvider?: httpc.EndpointsProvider | null; @@ -346,7 +350,7 @@ export declare namespace form_up { export declare namespace resume_up { type UploadResult = { data: any; - resp: IncomingMessage; + resp: Omit & { requestUrls: string[] }; } class ResumeUploader { @@ -588,7 +592,7 @@ export declare namespace httpc { // responseWrapper.js interface ResponseWrapperOptions { data: T; - resp: IncomingMessage; + resp: Omit & { requestUrls: string[] }; } interface ResponseError { @@ -598,7 +602,7 @@ export declare namespace httpc { class ResponseWrapper { data: T extends void ? undefined | ResponseError : T & ResponseError; - resp: IncomingMessage; + resp: Omit & { requestUrls: string[] }; constructor(options: ResponseWrapperOptions); ok(): boolean; needRetry(): boolean;