Commit 30a7280

Fix a bug where the chunk size of Node.js stream reads was not fixed, which caused errors like: Error: block size must be not greater than 4194304
1 parent 48f5ca6 commit 30a7280
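
The underlying issue: a Node readable stream's 'data' events are not guaranteed to deliver chunks of any particular size, so the old check that only cut a block when readLen was an exact multiple of conf.BLOCK_SIZE could end up concatenating more than one block's worth of data, which the upload endpoint rejects with the error above. The patch instead re-buffers incoming chunks and slices off exact BLOCK_SIZE blocks, carrying the overflow into the next block. Below is a minimal standalone sketch of that re-buffering idea, not the SDK code itself; BLOCK_SIZE, onBlock and the file path are illustrative assumptions.

var fs = require('fs');

var BLOCK_SIZE = 4 * 1024 * 1024; // 4194304 bytes, the limit in the error message
var remainder = Buffer.alloc(0);  // leftover bytes after the last full block

function onBlock(block) {
  // stand-in for the real block upload: every block passed here is exactly
  // BLOCK_SIZE bytes, except possibly the final one at end of file
  console.log('block of', block.length, 'bytes');
}

var rs = fs.createReadStream('./example.bin'); // hypothetical input file

rs.on('data', function(chunk) {
  // chunk.length varies; collect bytes and emit only exact BLOCK_SIZE slices
  remainder = Buffer.concat([remainder, chunk]);
  while (remainder.length >= BLOCK_SIZE) {
    onBlock(remainder.slice(0, BLOCK_SIZE));
    remainder = remainder.slice(BLOCK_SIZE);
  }
});

rs.on('end', function() {
  if (remainder.length > 0) {
    onBlock(remainder); // final, possibly shorter, block
  }
});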

3 files changed: +22 -8 lines changed

CHANGELOG.md

+3
@@ -1,5 +1,8 @@
 ## CHANGE LOG
 
+## v7.2.0
+- Fix the issue where the chunk size read from the Node stream was not a fixed size
+
 ## v7.1.9
 - Fix upload failures with resume up on newer Node versions caused by the file content being cached
 
package.json

+1 -1
@@ -1,6 +1,6 @@
 {
   "name": "qiniu",
-  "version": "7.1.9",
+  "version": "7.2.0",
   "description": "Node wrapper for Qiniu Resource (Cloud) Storage API",
   "main": "index.js",
   "directories": {

qiniu/storage/resume.js

+18 -7
@@ -119,6 +119,8 @@ function putReq(config, uploadToken, key, rsStream, rsStreamLen, putExtra,
   var finishedBlock = 0;
   var curBlock = 0;
   var readLen = 0;
+  var bufferLen = 0;
+  var remainedData = new Buffer(0);
   var readBuffers = [];
   var finishedCtxList = [];
   var finishedBlkPutRets = [];
@@ -151,19 +153,26 @@ function putReq(config, uploadToken, key, rsStream, rsStreamLen, putExtra,
   //check when to mkblk
   rsStream.on('data', function(chunk) {
     readLen += chunk.length;
+    bufferLen += chunk.length;
     readBuffers.push(chunk);
 
-    if (readLen % conf.BLOCK_SIZE == 0 || readLen == fileSize) {
-      //console.log(readLen);
-      var readData = Buffer.concat(readBuffers);
-      readBuffers = []; //reset read buffer
+    if (bufferLen >= conf.BLOCK_SIZE || readLen == fileSize) {
+      var readBuffersData = Buffer.concat(readBuffers);
+      var blockSize = conf.BLOCK_SIZE - remainedData.length;
+
+      var postData = Buffer.concat([remainedData, readBuffersData.slice(0, blockSize)]);
+      remainedData = new Buffer(readBuffersData.slice(blockSize, bufferLen));
+      bufferLen = bufferLen - conf.BLOCK_SIZE;
+      //reset buffer
+      readBuffers = [];
+
       curBlock += 1; //set current block
       if (curBlock > finishedBlock) {
         rsStream.pause();
-        mkblkReq(upDomain, uploadToken, readData, function(respErr,
+        mkblkReq(upDomain, uploadToken, postData, function(respErr,
           respBody,
           respInfo) {
-          var bodyCrc32 = parseInt("0x" + getCrc32(readData));
+          var bodyCrc32 = parseInt("0x" + getCrc32(postData));
           if (respInfo.statusCode != 200 || respBody.crc32 != bodyCrc32) {
             callbackFunc(respErr, respBody, respInfo);
             rsStream.close();
@@ -258,7 +267,9 @@ function mkfileReq(upDomain, uploadToken, fileSize, ctxList, key, putExtra,
 ResumeUploader.prototype.putFile = function(uploadToken, key, localFile,
   putExtra, callbackFunc) {
   putExtra = putExtra || new PutExtra();
-  var rsStream = fs.createReadStream(localFile);
+  var rsStream = fs.createReadStream(localFile, {
+    highWaterMark: conf.BLOCK_SIZE,
+  });
   var rsStreamLen = fs.statSync(localFile).size;
   if (!putExtra.mimeType) {
     putExtra.mimeType = mime.getType(localFile);
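
For context (not part of the commit message): the highWaterMark option only sets the target read size for fs.createReadStream; 'data' events deliver at most that many bytes and the final chunk is usually shorter, which is why the data handler above still assembles exact conf.BLOCK_SIZE blocks before calling mkblkReq. A small illustrative check, with a hypothetical file path:

var fs = require('fs');

var BLOCK_SIZE = 4 * 1024 * 1024;
var rs = fs.createReadStream('./example.bin', { highWaterMark: BLOCK_SIZE });

rs.on('data', function(chunk) {
  // at most BLOCK_SIZE bytes per chunk; the last chunk is typically smaller
  console.log('got', chunk.length, 'bytes');
});
rs.on('end', function() {
  console.log('done');
});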
