Skip to content
Open
1 change: 1 addition & 0 deletions cmd/mount.go
Original file line number Diff line number Diff line change
Expand Up @@ -102,6 +102,7 @@ be interacting with the file system.`)
EnableMonitoring: cfg.IsMetricsEnabled(&newConfig.Metrics),
LogSeverity: newConfig.Logging.Severity,
AppendThreshold: 1 << 21, // 2 MiB, a total guess.
ChunkRetryDeadlineSecs: newConfig.GcsRetries.ChunkRetryDeadlineSecs,
ChunkTransferTimeoutSecs: newConfig.GcsRetries.ChunkTransferTimeoutSecs,
TmpObjectPrefix: ".gcsfuse_tmp/",
FinalizeFileForRapid: newConfig.Write.FinalizeFileForRapid,
Expand Down
2 changes: 2 additions & 0 deletions internal/bufferedwrites/buffered_write_handler.go
Original file line number Diff line number Diff line change
Expand Up @@ -96,6 +96,7 @@ type CreateBWHandlerRequest struct {
BlockSize int64
MaxBlocksPerFile int64
GlobalMaxBlocksSem *semaphore.Weighted
ChunkRetryDeadlineSecs int64
ChunkTransferTimeoutSecs int64
}

Expand All @@ -120,6 +121,7 @@ func NewBWHandler(req *CreateBWHandlerRequest) (bwh BufferedWriteHandler, err er
BlockPool: bp,
MaxBlocksPerFile: req.MaxBlocksPerFile,
BlockSize: req.BlockSize,
ChunkRetryDeadlineSecs: req.ChunkRetryDeadlineSecs,
ChunkTransferTimeoutSecs: req.ChunkTransferTimeoutSecs,
}),
totalSize: size,
Expand Down
6 changes: 5 additions & 1 deletion internal/bufferedwrites/buffered_write_handler_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -32,7 +32,10 @@ import (
"golang.org/x/sync/semaphore"
)

const chunkTransferTimeoutSecs int64 = 10
const (
chunkRetryDeadlineSecs int64 = 120
chunkTransferTimeoutSecs int64 = 10
)

var errUploadFailure = errors.New("error while uploading object to GCS")

Expand Down Expand Up @@ -61,6 +64,7 @@ func (testSuite *BufferedWriteTest) setupTestWithBucketType(bucketType gcs.Bucke
BlockSize: blockSize,
MaxBlocksPerFile: 10,
GlobalMaxBlocksSem: testSuite.globalSemaphore,
ChunkRetryDeadlineSecs: chunkRetryDeadlineSecs,
ChunkTransferTimeoutSecs: chunkTransferTimeoutSecs,
})
require.Nil(testSuite.T(), err)
Expand Down
5 changes: 4 additions & 1 deletion internal/bufferedwrites/upload_handler.go
Original file line number Diff line number Diff line change
Expand Up @@ -55,6 +55,7 @@ type UploadHandler struct {
bucket gcs.Bucket
objectName string
obj *gcs.Object
chunkRetryDeadline int64
chunkTransferTimeout int64
blockSize int64
}
Expand All @@ -66,6 +67,7 @@ type CreateUploadHandlerRequest struct {
BlockPool *block.GenBlockPool[block.Block]
MaxBlocksPerFile int64
BlockSize int64
ChunkRetryDeadlineSecs int64
ChunkTransferTimeoutSecs int64
}

Expand All @@ -79,6 +81,7 @@ func newUploadHandler(req *CreateUploadHandlerRequest) *UploadHandler {
objectName: req.ObjectName,
obj: req.Object,
blockSize: req.BlockSize,
chunkRetryDeadline: req.ChunkRetryDeadlineSecs,
chunkTransferTimeout: req.ChunkTransferTimeoutSecs,
}
return uh
Expand All @@ -102,7 +105,7 @@ func (uh *UploadHandler) Upload(block block.Block) error {

// createObjectWriter creates a GCS object writer.
func (uh *UploadHandler) createObjectWriter() (err error) {
req := gcs.NewCreateObjectRequest(uh.obj, uh.objectName, nil, uh.chunkTransferTimeout)
req := gcs.NewCreateObjectRequest(uh.obj, uh.objectName, nil, uh.chunkRetryDeadline, uh.chunkTransferTimeout)
// We need a new context here, since the first writeFile() call will be complete
// (and context will be cancelled) by the time complete upload is done.
var ctx context.Context
Expand Down
2 changes: 2 additions & 0 deletions internal/bufferedwrites/upload_handler_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -63,6 +63,7 @@ func (t *UploadHandlerTest) SetupTest() {
BlockPool: t.blockPool,
MaxBlocksPerFile: maxBlocks,
BlockSize: blockSize,
ChunkRetryDeadlineSecs: chunkRetryDeadlineSecs,
ChunkTransferTimeoutSecs: chunkTransferTimeoutSecs,
})
}
Expand All @@ -83,6 +84,7 @@ func (t *UploadHandlerTest) createUploadHandlerWithObjectOfGivenSize(size uint64
BlockPool: t.blockPool,
MaxBlocksPerFile: maxBlocks,
BlockSize: blockSize,
ChunkRetryDeadlineSecs: chunkRetryDeadlineSecs,
ChunkTransferTimeoutSecs: chunkTransferTimeoutSecs,
})
}
Expand Down
2 changes: 2 additions & 0 deletions internal/fs/fs_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -368,6 +368,7 @@ func currentGid() uint32 {
type fakeBucketManager struct {
buckets map[string]gcs.Bucket
appendThreshold int64
chunkRetryDeadlineSecs int64
chunkTransferTimeoutSecs int64
tmpObjectPrefix string
}
Expand All @@ -381,6 +382,7 @@ func (bm *fakeBucketManager) SetUpBucket(
if ok {
sb = gcsx.NewSyncerBucket(
bm.appendThreshold,
bm.chunkRetryDeadlineSecs,
bm.chunkTransferTimeoutSecs,
bm.tmpObjectPrefix,
gcsx.NewContentTypeBucket(bucket),
Expand Down
2 changes: 1 addition & 1 deletion internal/fs/gcs_metrics_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -56,7 +56,7 @@ func (bm *fakeBucketManagerWithMetrics) SetUpBucket(

// Wrap bucket with monitor.NewMonitoringBucket to enable GCS metrics.
sb = gcsx.NewSyncerBucket(
0, 10, ".gcsfuse_tmp/",
0 /* appendThreshold */, 120 /* chunkRetryDeadlineSecs */, 10 /* chunkTransferTimeoutSecs */, ".gcsfuse_tmp/",
gcsx.NewContentTypeBucket(monitor.NewMonitoringBucket(bucket, mh)),
)
return sb, err
Expand Down
2 changes: 1 addition & 1 deletion internal/fs/handle/dir_handle_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -59,7 +59,7 @@ func init() { RegisterTestSuite(&DirHandleTest{}) }
func (t *DirHandleTest) SetUp(ti *TestInfo) {
t.ctx = ti.Ctx
t.bucket = gcsx.NewSyncerBucket(
1, 10, ".gcsfuse_tmp/", fake.NewFakeBucket(&t.clock, "some_bucket", gcs.BucketType{}))
1 /* appendThreshold */, 120 /* chunkRetryDeadlineSecs */, 10 /* chunkTransferTimeoutSecs */, ".gcsfuse_tmp/", fake.NewFakeBucket(&t.clock, "some_bucket", gcs.BucketType{}))
t.clock.SetTime(time.Date(2022, 8, 15, 22, 56, 0, 0, time.Local))
t.resetDirHandle()
}
Expand Down
10 changes: 5 additions & 5 deletions internal/fs/handle/file_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -72,7 +72,7 @@ func TestFileTestSuite(t *testing.T) {
func (t *fileTest) SetupTest() {
t.ctx = context.TODO()
t.clock.SetTime(time.Date(2015, 4, 5, 2, 15, 0, 0, time.Local))
t.bucket = gcsx.NewSyncerBucket(1, 10, ".gcsfuse_tmp/", fake.NewFakeBucket(&t.clock, "some_bucket", gcs.BucketType{}))
t.bucket = gcsx.NewSyncerBucket(1 /* appendThreshold */, 120 /* chunkRetryDeadlineSecs */, 10 /* chunkTransferTimeoutSecs */, ".gcsfuse_tmp/", fake.NewFakeBucket(&t.clock, "some_bucket", gcs.BucketType{}))
}

func (t *fileTest) TearDownTest() {
Expand Down Expand Up @@ -668,7 +668,7 @@ func (t *fileTest) Test_ReadWithMrdKernelReader_Success() {
// Mock CreateObject which is called by createFileInode.
mockBucket.On("CreateObject", mock.Anything, mock.Anything).Return(&gcs.Object{}, nil).Once()
// Create File Inode.
mockSyncerBucket := gcsx.NewSyncerBucket(1, 10, ".gcsfuse_tmp/", mockBucket)
mockSyncerBucket := gcsx.NewSyncerBucket(1 /* appendThreshold */, 120 /* chunkRetryDeadlineSecs */, 10 /* chunkTransferTimeoutSecs */, ".gcsfuse_tmp/", mockBucket)
parent := createDirInode(&mockSyncerBucket, &t.clock)
in := createFileInode(t.T(), &mockSyncerBucket, &t.clock, &cfg.Config{}, parent, objectName, expectedData, false)
// Create File Handle.
Expand Down Expand Up @@ -697,7 +697,7 @@ func (t *fileTest) Test_ReadWithMrdKernelReader_Success() {

func (t *fileTest) Test_ReadWithMrdKernelReader_NotAuthoritative() {
// 1. Setup
zonalBucket := gcsx.NewSyncerBucket(1, 10, ".gcsfuse_tmp/", fake.NewFakeBucket(&t.clock, "zonal_bucket", gcs.BucketType{Zonal: true}))
zonalBucket := gcsx.NewSyncerBucket(1 /* appendThreshold */, 120 /* chunkRetryDeadlineSecs */, 10 /* chunkTransferTimeoutSecs */, ".gcsfuse_tmp/", fake.NewFakeBucket(&t.clock, "zonal_bucket", gcs.BucketType{Zonal: true}))
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The comment style here looks odd. Please check the recommended Go convention for annotating positional arguments with their parameter names (e.g. the `/* paramName */` placement used elsewhere, or named local variables/constants instead of inline comments).

originalData := []byte("some data") // 9 bytes
parent := createDirInode(&zonalBucket, &t.clock)
in := createFileInode(t.T(), &zonalBucket, &t.clock, &cfg.Config{FileSystem: cfg.FileSystemConfig{EnableKernelReader: true}}, parent, "test_obj", originalData, false)
Expand Down Expand Up @@ -731,7 +731,7 @@ func (t *fileTest) Test_ReadWithMrdKernelReader_NotAuthoritative() {

func (t *fileTest) Test_ReadWithMrdKernelReader_NilReader() {
// 1. Setup with a non-zonal bucket.
nonZonalBucket := gcsx.NewSyncerBucket(1, 10, ".gcsfuse_tmp/", fake.NewFakeBucket(&t.clock, "non_zonal_bucket", gcs.BucketType{Zonal: true}))
nonZonalBucket := gcsx.NewSyncerBucket(1 /* appendThreshold */, 120 /* chunkRetryDeadlineSecs */, 10 /* chunkTransferTimeoutSecs */, ".gcsfuse_tmp/", fake.NewFakeBucket(&t.clock, "non_zonal_bucket", gcs.BucketType{Zonal: true}))
parent := createDirInode(&nonZonalBucket, &t.clock)
in := createFileInode(t.T(), &nonZonalBucket, &t.clock, &cfg.Config{}, parent, "test_obj", []byte("data"), false)
// Create file handle.
Expand Down Expand Up @@ -761,7 +761,7 @@ func (t *fileTest) Test_ReadWithMrdKernelReader_ReadAtError() {
mockBucket.On("BucketType").Return(gcs.BucketType{Zonal: true})
mockBucket.On("CreateObject", mock.Anything, mock.Anything).Return(&gcs.Object{}, nil).Once()
// Create file inode.
mockSyncerBucket := gcsx.NewSyncerBucket(1, 10, ".gcsfuse_tmp/", mockBucket)
mockSyncerBucket := gcsx.NewSyncerBucket(1 /* appendThreshold */, 120 /* chunkRetryDeadlineSecs */, 10 /* chunkTransferTimeoutSecs */, ".gcsfuse_tmp/", mockBucket)
parent := createDirInode(&mockSyncerBucket, &t.clock)
in := createFileInode(t.T(), &mockSyncerBucket, &t.clock, &cfg.Config{FileSystem: cfg.FileSystemConfig{EnableKernelReader: true}}, parent, objectName, expectedData, false)
// Create file handle.
Expand Down
15 changes: 10 additions & 5 deletions internal/fs/inode/base_dir_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,10 @@ import (
"github.com/jacobsa/timeutil"
)

const ChunkTransferTimeoutSecs = 10
const (
chunkRetryDeadlineSecs = 120
chunkTransferTimeoutSecs = 10
)

func TestBaseDir(t *testing.T) { RunTests(t) }

Expand Down Expand Up @@ -62,14 +65,16 @@ func (t *BaseDirTest) SetUp(ti *TestInfo) {
buckets: make(map[string]gcsx.SyncerBucket),
}
t.bm.buckets["bucketA"] = gcsx.NewSyncerBucket(
1, // Append threshold
ChunkTransferTimeoutSecs,
1, /* appendThreshold */
chunkRetryDeadlineSecs,
chunkTransferTimeoutSecs,
".gcsfuse_tmp/",
fake.NewFakeBucket(&t.clock, "bucketA", gcs.BucketType{}),
)
t.bm.buckets["bucketB"] = gcsx.NewSyncerBucket(
1, // Append threshold
ChunkTransferTimeoutSecs,
1, /* appendThreshold */
chunkRetryDeadlineSecs,
chunkTransferTimeoutSecs,
".gcsfuse_tmp/",
fake.NewFakeBucket(&t.clock, "bucketB", gcs.BucketType{}),
)
Expand Down
2 changes: 1 addition & 1 deletion internal/fs/inode/core_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -49,7 +49,7 @@ func init() { RegisterTestSuite(&CoreTest{}) }
func (t *CoreTest) SetUp(ti *TestInfo) {
t.ctx = ti.Ctx
t.bucket = gcsx.NewSyncerBucket(
1, 10, ".gcsfuse_tmp/", fake.NewFakeBucket(&t.clock, "some_bucket", gcs.BucketType{}))
1 /* appendThreshold */, 120 /* chunkRetryDeadlineSecs */, 10 /* chunkTransferTimeoutSecs */, ".gcsfuse_tmp/", fake.NewFakeBucket(&t.clock, "some_bucket", gcs.BucketType{}))
t.clock.SetTime(time.Date(2012, 8, 15, 22, 56, 0, 0, time.Local))
}

Expand Down
2 changes: 1 addition & 1 deletion internal/fs/inode/dir_prefetcher_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -51,7 +51,7 @@ func (t *DirPrefetchTest) setup(enablePrefetch bool, ttl time.Duration) (d *dirI
t.ctx = context.Background()
t.clock.SetTime(time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC))
t.fake = fake.NewFakeBucket(&t.clock, "some_bucket", gcs.BucketType{})
t.bucket = gcsx.NewSyncerBucket(1, 10, ".gcsfuse_tmp/", t.fake)
t.bucket = gcsx.NewSyncerBucket(1 /* appendThreshold */, 120 /* chunkRetryDeadlineSecs */, 10 /* chunkTransferTimeoutSecs */, ".gcsfuse_tmp/", t.fake)

t.config = &cfg.Config{
MetadataCache: cfg.MetadataCacheConfig{
Expand Down
9 changes: 5 additions & 4 deletions internal/fs/inode/dir_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -76,8 +76,9 @@ func (t *DirTest) SetupTest() {
t.clock.SetTime(time.Date(2015, 4, 5, 2, 15, 0, 0, time.Local))
bucket := fake.NewFakeBucket(&t.clock, "some_bucket", gcs.BucketType{})
t.bucket = gcsx.NewSyncerBucket(
1, // Append threshold
ChunkTransferTimeoutSecs,
1, /* appendThreshold */
chunkRetryDeadlineSecs,
chunkTransferTimeoutSecs,
".gcsfuse_tmp/",
bucket)
// Create the inode. No implicit dirs by default.
Expand Down Expand Up @@ -2169,7 +2170,7 @@ func (t *DirTest) Test_IsTypeCacheDeprecated_true() {
func (t *DirTest) TestLookUpChild_TypeCacheDeprecated_CacheMiss() {
mockBucket := new(storagemock.TestifyMockBucket)
mockBucket.On("BucketType").Return(gcs.BucketType{})
syncerBucket := gcsx.NewSyncerBucket(1, ChunkTransferTimeoutSecs, ".gcsfuse_tmp/", mockBucket)
syncerBucket := gcsx.NewSyncerBucket(1 /* appendThreshold */, chunkRetryDeadlineSecs, chunkTransferTimeoutSecs, ".gcsfuse_tmp/", mockBucket)
oldBucket := t.bucket
t.bucket = syncerBucket
defer func() { t.bucket = oldBucket }()
Expand Down Expand Up @@ -2214,7 +2215,7 @@ func (t *DirTest) TestLookUpChild_TypeCacheDeprecated_CacheMiss() {
func (t *DirTest) TestLookUpChild_TypeCacheDeprecated_CacheHit() {
mockBucket := new(storagemock.TestifyMockBucket)
mockBucket.On("BucketType").Return(gcs.BucketType{})
syncerBucket := gcsx.NewSyncerBucket(1, ChunkTransferTimeoutSecs, ".gcsfuse_tmp/", mockBucket)
syncerBucket := gcsx.NewSyncerBucket(1 /* appendThreshold */, chunkRetryDeadlineSecs, chunkTransferTimeoutSecs, ".gcsfuse_tmp/", mockBucket)
oldBucket := t.bucket
t.bucket = syncerBucket
defer func() { t.bucket = oldBucket }()
Expand Down
1 change: 1 addition & 0 deletions internal/fs/inode/file.go
Original file line number Diff line number Diff line change
Expand Up @@ -1145,6 +1145,7 @@ func (f *FileInode) InitBufferedWriteHandlerIfEligible(ctx context.Context, open
BlockSize: f.config.Write.BlockSizeMb * util.MiB,
MaxBlocksPerFile: f.config.Write.MaxBlocksPerFile,
GlobalMaxBlocksSem: f.globalMaxWriteBlocksSem,
ChunkRetryDeadlineSecs: f.config.GcsRetries.ChunkRetryDeadlineSecs,
ChunkTransferTimeoutSecs: f.config.GcsRetries.ChunkTransferTimeoutSecs,
})
if errors.Is(err, block.CantAllocateAnyBlockError) {
Expand Down
10 changes: 6 additions & 4 deletions internal/fs/inode/file_mock_bucket_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -78,8 +78,9 @@ func (t *FileMockBucketTest) createLockedInode(fileName string, fileType string)
fileName,
)
syncerBucket := gcsx.NewSyncerBucket(
1, // Append threshold
ChunkTransferTimeoutSecs,
1, /* appendThreshold */
chunkRetryDeadlineSecs,
chunkTransferTimeoutSecs,
".gcsfuse_tmp/",
t.bucket)

Expand Down Expand Up @@ -130,8 +131,9 @@ func (t *FileMockBucketTest) createLockedInode(fileName string, fileType string)
func (t *FileMockBucketTest) createGCSBackedFileInode(backingObj *gcs.MinObject) *FileInode {
t.T().Helper()
syncerBucket := gcsx.NewSyncerBucket(
1, // Append threshold
ChunkTransferTimeoutSecs,
1, /* appendThreshold */
chunkRetryDeadlineSecs,
chunkTransferTimeoutSecs,
".gcsfuse_tmp/",
t.bucket)

Expand Down
5 changes: 3 additions & 2 deletions internal/fs/inode/file_streaming_writes_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -113,8 +113,9 @@ func (t *FileStreamingWritesCommon) createInode(fileType string) {
fileName,
)
syncerBucket := gcsx.NewSyncerBucket(
1, // Append threshold
ChunkTransferTimeoutSecs,
1, /* appendThreshold */
chunkRetryDeadlineSecs,
chunkTransferTimeoutSecs,
".gcsfuse_tmp/",
t.bucket)

Expand Down
5 changes: 3 additions & 2 deletions internal/fs/inode/file_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -140,8 +140,9 @@ func (t *FileTest) createInodeWithLocalParam(fileName string, local bool) {
fileName,
)
syncerBucket := gcsx.NewSyncerBucket(
1, // Append threshold
ChunkTransferTimeoutSecs,
1, /* appendThreshold */
chunkRetryDeadlineSecs,
chunkTransferTimeoutSecs,
".gcsfuse_tmp/",
t.bucket)

Expand Down
5 changes: 3 additions & 2 deletions internal/fs/inode/hns_dir_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -74,8 +74,9 @@ func (t *hnsDirTest) setupTestSuite(hierarchical bool) {
t.mockBucket = new(storagemock.TestifyMockBucket)
t.mockBucket.On("BucketType").Return(gcs.BucketType{Hierarchical: hierarchical})
t.bucket = gcsx.NewSyncerBucket(
1,
ChunkTransferTimeoutSecs,
1, /* appendThreshold */
chunkRetryDeadlineSecs,
chunkTransferTimeoutSecs,
".gcsfuse_tmp/",
t.mockBucket)
t.resetDirInode(false, false, true)
Expand Down
2 changes: 1 addition & 1 deletion internal/fs/inode/recursive_cancellation_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,7 @@ type RecursiveCancellationTest struct {
func (t *RecursiveCancellationTest) SetupTest() {
t.clock.SetTime(time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC))
t.fake = fake.NewFakeBucket(&t.clock, "some_bucket", gcs.BucketType{})
t.bucket = gcsx.NewSyncerBucket(1, 10, ".gcsfuse_tmp/", t.fake)
t.bucket = gcsx.NewSyncerBucket(1 /* appendThreshold */, 120 /* chunkRetryDeadlineSecs */, 10 /* chunkTransferTimeoutSecs */, ".gcsfuse_tmp/", t.fake)
t.config = &cfg.Config{
MetadataCache: cfg.MetadataCacheConfig{
EnableMetadataPrefetch: true,
Expand Down
5 changes: 3 additions & 2 deletions internal/fs/inode/symlink_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -46,8 +46,9 @@ func init() { RegisterTestSuite(&SymlinkTest{}) }

func (t *SymlinkTest) SetUp(ti *TestInfo) {
bucket := gcsx.NewSyncerBucket(
1,
10, // ChunkTransferTimeoutSecs
1, /* appendThreshold */
120, /* chunkRetryDeadlineSecs */
10, /* chunkTransferTimeoutSecs */
".gcsfuse_tmp/",
fake.NewFakeBucket(timeutil.RealClock(), "some-bucket", gcs.BucketType{}),
)
Expand Down
2 changes: 2 additions & 0 deletions internal/gcsx/bucket_manager.go
Original file line number Diff line number Diff line change
Expand Up @@ -66,6 +66,7 @@ type BucketConfig struct {
// not be cleaned up, so the user must ensure that TmpObjectPrefix is
// periodically garbage collected.
AppendThreshold int64
ChunkRetryDeadlineSecs int64
ChunkTransferTimeoutSecs int64
TmpObjectPrefix string
// Used in Zonal buckets to determine if objects should be finalized or not.
Expand Down Expand Up @@ -255,6 +256,7 @@ func (bm *bucketManager) SetUpBucket(
}
sb = NewSyncerBucket(
bm.config.AppendThreshold,
bm.config.ChunkRetryDeadlineSecs,
bm.config.ChunkTransferTimeoutSecs,
bm.config.TmpObjectPrefix,
b)
Expand Down
3 changes: 2 additions & 1 deletion internal/gcsx/compose_object_creator.go
Original file line number Diff line number Diff line change
Expand Up @@ -87,6 +87,7 @@ func (oc *composeObjectCreator) Create(
objectName string,
srcObject *gcs.Object,
mtime *time.Time,
chunkRetryDeadlineSecs int64,
chunkTransferTimeoutSecs int64,
r io.Reader) (o *gcs.Object, err error) {
// Choose a name for a temporary object.
Expand All @@ -97,7 +98,7 @@ func (oc *composeObjectCreator) Create(
}

// Create a temporary object containing the additional contents.
req := gcs.NewCreateObjectRequest(nil, tmpName, nil, chunkTransferTimeoutSecs)
req := gcs.NewCreateObjectRequest(nil, tmpName, nil, chunkRetryDeadlineSecs, chunkTransferTimeoutSecs)
req.Contents = r
tmp, err := oc.bucket.CreateObject(ctx, req)
if err != nil {
Expand Down
1 change: 1 addition & 0 deletions internal/gcsx/compose_object_creator_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -93,6 +93,7 @@ func (t *ComposeObjectCreatorTest) call() (o *gcs.Object, err error) {
t.srcObject.Name,
&t.srcObject,
&t.mtime,
chunkRetryDeadlineSecs,
chunkTransferTimeoutSecs,
strings.NewReader(t.srcContents))

Expand Down
2 changes: 2 additions & 0 deletions internal/gcsx/integration_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -86,11 +86,13 @@ func (t *IntegrationTest) SetUp(ti *TestInfo) {

// Set up the syncer.
const appendThreshold = 0
const chunkRetryDeadlineSecs = 120
const chunkTransferTimeoutSecs = 10
const tmpObjectPrefix = ".gcsfuse_tmp/"

t.syncer = gcsx.NewSyncer(
appendThreshold,
chunkRetryDeadlineSecs,
chunkTransferTimeoutSecs,
tmpObjectPrefix,
t.bucket)
Expand Down
Loading
Loading