Skip to content
Open
Show file tree
Hide file tree
Changes from 3 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
53 changes: 53 additions & 0 deletions packages/js-sdk/src/envd/schema.gen.ts

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

1 change: 1 addition & 0 deletions packages/js-sdk/src/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,7 @@ export { getSignature } from './sandbox/signature'
export { FileType } from './sandbox/filesystem'
export type {
WriteInfo,
WriteOpts,
EntryInfo,
Filesystem,
FilesystemWriteOpts,
Expand Down
136 changes: 126 additions & 10 deletions packages/js-sdk/src/sandbox/filesystem/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -184,6 +184,20 @@ export interface FilesystemReadOpts extends FilesystemRequestOpts {
gzip?: boolean
}

/**
 * Options for the write operation.
 *
 * Extends {@link FilesystemWriteOpts} with composite-upload support.
 */
export interface WriteOpts extends FilesystemWriteOpts {
/**
 * When `true`, the file data is split into chunks and uploaded in parallel,
 * then composed into the final file on the server using zero-copy concatenation.
 * This is useful for uploading large files.
 *
 * NOTE(review): only takes effect for the single-path `write` overload and
 * when the envd version supports octet-stream uploads — otherwise the
 * regular upload path is used.
 */
composite?: boolean
}

// Size of each uploaded part in a composite write (64 MiB).
// Payloads at or below this size are sent in a single request.
const DEFAULT_CHUNK_SIZE = 64 * 1024 * 1024 // 64 MB

export interface FilesystemListOpts extends FilesystemRequestOpts {
/**
* Depth of the directory to list.
Expand Down Expand Up @@ -358,21 +372,16 @@ export class Filesystem {
async write(
path: string,
data: string | ArrayBuffer | Blob | ReadableStream,
opts?: FilesystemWriteOpts
opts?: WriteOpts
): Promise<WriteInfo>
async write(
files: WriteEntry[],
opts?: FilesystemWriteOpts
): Promise<WriteInfo[]>
async write(
pathOrFiles: string | WriteEntry[],
dataOrOpts?:
| string
| ArrayBuffer
| Blob
| ReadableStream
| FilesystemWriteOpts,
opts?: FilesystemWriteOpts
dataOrOpts?: string | ArrayBuffer | Blob | ReadableStream | WriteOpts,
opts?: WriteOpts
): Promise<WriteInfo | WriteInfo[]> {
if (typeof pathOrFiles !== 'string' && !Array.isArray(pathOrFiles)) {
throw new Error('Path or files are required')
Expand All @@ -388,7 +397,7 @@ export class Filesystem {
typeof pathOrFiles === 'string'
? {
path: pathOrFiles,
writeOpts: opts as FilesystemWriteOpts,
writeOpts: opts as WriteOpts | undefined,
writeFiles: [
{
data: dataOrOpts as
Expand All @@ -401,7 +410,7 @@ export class Filesystem {
}
: {
path: undefined,
writeOpts: dataOrOpts as FilesystemWriteOpts,
writeOpts: dataOrOpts as WriteOpts | undefined,
writeFiles: pathOrFiles as WriteEntry[],
}

Expand All @@ -418,6 +427,11 @@ export class Filesystem {
const useOctetStream =
compareVersions(this.envdApi.version, ENVD_OCTET_STREAM_UPLOAD) >= 0

// Composite upload: chunk the data, upload parts in parallel, then compose
if (writeOpts?.composite && path && useOctetStream) {
return this.compositeWrite(path, writeFiles[0].data, user, writeOpts)
}

const results: WriteInfo[] = []

const useGzip = writeOpts?.gzip === true
Expand Down Expand Up @@ -821,4 +835,106 @@ export class Filesystem {
throw handleFilesystemRpcError(err)
}
}

/**
 * Upload one part (or the whole payload) to `path` using the
 * octet-stream `POST /files` endpoint.
 *
 * Throws the translated filesystem error on a non-OK response and
 * returns the raw API response otherwise (response parsing is left to
 * the caller).
 */
private async uploadPart(
  path: string,
  chunk: string | ArrayBuffer | Blob | ReadableStream,
  headers: Record<string, string>,
  useGzip: boolean,
  user: Username | undefined,
  requestTimeoutMs?: number
) {
  const body = await toUploadBody(chunk, useGzip)

  const res = await this.envdApi.api.POST('/files', {
    params: {
      query: {
        path,
        username: user,
      },
    },
    bodySerializer: () => body,
    headers,
    signal: this.connectionConfig.getSignal(requestTimeoutMs),
    body: {},
  })

  const err = await handleFilesystemEnvdApiError(res)
  if (err) {
    throw err
  }

  return res
}

/**
 * Write `data` to `destination` by splitting it into fixed-size parts,
 * uploading the parts in parallel under unique temp paths, and asking the
 * server to concatenate them via `POST /files/compose`. Payloads that fit
 * into a single part are sent with one plain upload instead.
 *
 * @param destination absolute path of the final file in the sandbox.
 * @param data payload; a `ReadableStream` is fully buffered into a `Blob`
 *   before uploading.
 * @param user sandbox user the write is performed as.
 * @param opts write options (`gzip`, `requestTimeoutMs`, ...).
 * @returns information about the written file.
 */
private async compositeWrite(
  destination: string,
  data: string | ArrayBuffer | Blob | ReadableStream,
  user: Username | undefined,
  opts?: WriteOpts
): Promise<WriteInfo> {
  // Materialize the payload once. This CONSUMES `data` when it is a
  // ReadableStream, so every upload below must read from `blob` —
  // re-reading the original `data` would upload an already-drained stream.
  const blob = await toBlob(data)
  const totalSize = blob.size
  const chunkSize = DEFAULT_CHUNK_SIZE
  const useGzip = opts?.gzip === true

  const headers: Record<string, string> = {
    'Content-Type': 'application/octet-stream',
  }
  if (useGzip) {
    headers['Content-Encoding'] = 'gzip'
  }

  // Small payloads don't benefit from chunking — do a single upload.
  if (totalSize <= chunkSize) {
    const res = await this.uploadPart(
      destination,
      blob, // not `data`: see the consumed-stream note above
      headers,
      useGzip,
      user,
      opts?.requestTimeoutMs
    )

    const files = res.data as WriteInfo[]
    if (!files || files.length === 0) {
      throw new Error('Expected to receive information about written file')
    }

    return files[0]
  }

  // Unique per-upload temp paths so concurrent composite writes (even of
  // the same destination) cannot collide.
  const chunkCount = Math.ceil(totalSize / chunkSize)
  const uploadId = crypto.randomUUID()
  const chunkPaths = Array.from(
    { length: chunkCount },
    (_, i) => `/tmp/.e2b-upload-${uploadId}-${i}`
  )

  // Upload all parts in parallel.
  // NOTE(review): if any part fails, parts already uploaded are left
  // behind in /tmp inside the sandbox — consider best-effort cleanup of
  // `chunkPaths` on failure once a delete call is available here.
  await Promise.all(
    chunkPaths.map((chunkPath, i) => {
      const start = i * chunkSize
      const end = Math.min(start + chunkSize, totalSize)
      return this.uploadPart(
        chunkPath,
        blob.slice(start, end),
        headers,
        useGzip,
        user,
        opts?.requestTimeoutMs
      )
    })
  )

  // Compose the uploaded parts into the final file on the server.
  const composeRes = await this.envdApi.api.POST('/files/compose', {
    body: {
      source_paths: chunkPaths,
      destination,
      username: user,
    },
    signal: this.connectionConfig.getSignal(opts?.requestTimeoutMs),
  })

  const composeErr = await handleFilesystemEnvdApiError(composeRes)
  if (composeErr) {
    throw composeErr
  }

  return composeRes.data as WriteInfo
}
}
1 change: 1 addition & 0 deletions packages/python-sdk/e2b/envd/api.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@


ENVD_API_FILES_ROUTE = "/files"
ENVD_API_FILES_COMPOSE_ROUTE = "/files/compose"
ENVD_API_HEALTH_ROUTE = "/health"

_DEFAULT_API_ERROR_MAP: dict[int, Callable[[str], Exception]] = {
Expand Down
Loading
Loading