feat: add host-based upload support to rattler_upload
#1580
base: main
Changes from 40 commits
@@ -50,6 +50,7 @@ tokio = { workspace = true, features = [
    "rt-multi-thread",
    "process",
] }
regex = "1.10"

[target.'cfg(not(target_os = "windows"))'.dependencies]
sha2 = { workspace = true, features = ["asm"] }

@@ -1,13 +1,26 @@
pub mod upload;
pub(crate) mod utils;

use crate::upload::opt::{AnacondaOpts, ArtifactoryOpts, CondaForgeOpts, PrefixOpts};
use crate::utils::server_util::{
    check_server_type, extract_anaconda_info, extract_artifactory_info, extract_conda_forge_info,
    extract_prefix_info, extract_quetz_info, SimpleServerType,
};
use crate::utils::tool_configuration;
use miette::IntoDiagnostic;
use rattler_conda_types::package::ArchiveType;
use upload::opt::{
    AnacondaData, ArtifactoryData, CondaForgeData, PrefixData, QuetzData, ServerType, UploadOpts,
    AnacondaData, ArtifactoryData, CondaForgeData, PrefixData, QuetzData, QuetzOpts, ServerType,
    UploadOpts,
};

use crate::utils::tool_configuration;
#[cfg(feature = "s3")]
use crate::upload::opt::{S3Data, S3Opts};
#[cfg(feature = "s3")]
use crate::utils::server_util::extract_s3_info;
#[cfg(feature = "s3")]
use rattler_s3::clap::S3AddressingStyleOpts::VirtualHost;

/// Upload package to different channels
pub async fn upload_from_args(args: UploadOpts) -> miette::Result<()> {
    // Validate package files are provided

@@ -29,8 +42,109 @@ pub async fn upload_from_args(args: UploadOpts) -> miette::Result<()> {
    let store = tool_configuration::get_auth_store(args.common.auth_file, args.auth_store)
        .into_diagnostic()?;

    // Check server type from host (if provided)
    let detected_type: SimpleServerType = match &args.host {
        Some(host_url) => check_server_type(host_url),
        None => SimpleServerType::Unknown,
    };

    // Use detected type if available, otherwise fall back to provided server_type
    let server_type = match detected_type {
        SimpleServerType::Unknown => {
            // If detection failed, use provided subcommand server_type or return error
            match args.server_type {
                Some(server_type) => server_type,
                None => {
                    return Err(std::io::Error::new(
                        std::io::ErrorKind::InvalidInput,
                        "Cannot determine server type from host and no server type provided",
                    ))
                    .into_diagnostic()
                }
            }
        }
        SimpleServerType::Quetz => {
            let host_url = args.host.as_ref().unwrap();
            let (base_url, channel) =
                extract_quetz_info(host_url).expect("Failed to parse Quetz URL");
            ServerType::Quetz(QuetzOpts {
                url: base_url,
                channels: channel,
                api_key: None,
            })
        }
        SimpleServerType::Artifactory => {
            let host_url = args.host.as_ref().unwrap();
            let (base_url, channel) =
                extract_artifactory_info(host_url).expect("Failed to parse Artifactory URL");
            ServerType::Artifactory(ArtifactoryOpts {
                url: base_url,
                channels: channel,
                username: None,
                password: None,
                token: None,
            })
        }
        SimpleServerType::Prefix => {
            let host_url = args.host.as_ref().unwrap();
            let (base_url, channel) =
                extract_prefix_info(host_url).expect("Failed to parse Prefix URL");
            ServerType::Prefix(PrefixOpts {

Shouldn't we fill in e.g. the API key from the args here?

Hi, Wolf. There are three things I need to explain. Only when you specify the subcommand as "prefix" can you pass the arg.

                url: base_url,
                channel,
                api_key: None,
                attestation: None,
                skip_existing: false,
            })
        }
        SimpleServerType::Anaconda => {
            let host_url = args.host.as_ref().unwrap();
            let (base_url, channel) =
                extract_anaconda_info(host_url).expect("Failed to parse Anaconda URL");
            ServerType::Anaconda(AnacondaOpts {
                url: Some(base_url),
                channels: Some(channel),
                api_key: None,
                owner: "".to_string(),
                force: false,
            })
        }
        #[cfg(feature = "s3")]
        SimpleServerType::S3 => {
            let host_url = args.host.as_ref().unwrap();
            let (endpoint_url, channel, region) =
                extract_s3_info(host_url).expect("Failed to parse S3 URL");
            ServerType::S3(S3Opts {
                channel,
                endpoint_url,
                region,
                force_path_style: false,
                access_key_id: None,
                secret_access_key: None,
                session_token: None,
                credentials: None,
                force: false,
                addressing_style: VirtualHost,
            })
        }
        SimpleServerType::CondaForge => {
            let host_url = args.host.as_ref().unwrap();
            let (base_url, channel) =
                extract_conda_forge_info(host_url).expect("Failed to parse Conda Forge URL");
            ServerType::CondaForge(CondaForgeOpts {
                anaconda_url: Some(base_url),
                staging_channel: Some(channel),
                staging_token: "".to_string(),
                feedstock: "".to_string(),
                feedstock_token: "".to_string(),
                validation_endpoint: None,
                provider: None,
                dry_run: false,
            })
        }
    };
    // Upload handler based on server type
    match args.server_type {
    match server_type {
        ServerType::Quetz(quetz_opts) => {
            let quetz_data = QuetzData::from(quetz_opts);
            upload::upload_package_to_quetz(&store, &args.package_files, quetz_data).await

@@ -51,12 +165,14 @@ pub async fn upload_from_args(args: UploadOpts) -> miette::Result<()> {
        }
        #[cfg(feature = "s3")]
        ServerType::S3(s3_opts) => {
            let s3_data = S3Data::from(s3_opts);
            upload::upload_package_to_s3(
                &store,
                s3_opts.channel,
                s3_opts.credentials.into(),
                s3_data.channel,
                s3_data.credentials,
                &args.package_files,
                s3_opts.force,
                s3_data.region,
                s3_data.force, // force parameter - using false as default
            )
            .await
        }

Why is this now optional?

Because it supports upload by URL now, and the region can be written in the URL. For example, the user can upload with a URL like "https://<bucket>.s3.<region>.amazonaws.com/my-channel".
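
To make that concrete, here is a minimal standalone sketch of pulling the bucket, region, and channel out of a virtual-hosted-style S3 URL with the regex crate this PR adds as a dependency. It is not the PR's extract_s3_info helper; the function name and the example bucket/region/channel values are made up for illustration.

// Sketch only: parse bucket, region, and channel from a virtual-hosted-style
// S3 URL such as https://my-bucket.s3.eu-west-1.amazonaws.com/my-channel.
// Hypothetical helper; the real logic lives in utils::server_util::extract_s3_info.
use regex::Regex;

fn parse_virtual_hosted_s3_url(url: &str) -> Option<(String, String, String)> {
    // Capture groups: 1 = bucket, 2 = region, 3 = channel (first path segment).
    let re = Regex::new(r"^https://([^./]+)\.s3\.([^./]+)\.amazonaws\.com/([^/]+)").ok()?;
    let caps = re.captures(url)?;
    Some((caps[1].to_string(), caps[2].to_string(), caps[3].to_string()))
}

fn main() {
    let url = "https://my-bucket.s3.eu-west-1.amazonaws.com/my-channel";
    let (bucket, region, channel) =
        parse_virtual_hosted_s3_url(url).expect("unrecognized S3 URL");
    println!("bucket={bucket} region={region} channel={channel}");
}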