-
Notifications
You must be signed in to change notification settings - Fork 2k
Expand file tree
/
Copy pathdaemon_cli.rs
More file actions
138 lines (107 loc) · 4.5 KB
/
daemon_cli.rs
File metadata and controls
138 lines (107 loc) · 4.5 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
use std::time::Duration;
use clap::{command, Parser};
use fhevm_engine_common::telemetry::MetricsConfig;
use fhevm_engine_common::utils::DatabaseURL;
use humantime::parse_duration;
use sns_worker::{SchedulePolicy, SNS_LATENCY_OP_HISTOGRAM_CONF};
use tracing::Level;
/// Command-line configuration for the SnS worker daemon.
///
/// Parsed with clap's derive API; every `///` doc comment below doubles as
/// the `--help` text for its flag, so those strings are part of the
/// program's observable behavior.
#[derive(Parser, Debug, Clone)]
#[command(version, about, long_about = None)]
pub struct Args {
    /// Tenant API key
    #[arg(long)]
    pub tenant_api_key: String,

    /// Work items batch size
    #[arg(long, default_value_t = 4)]
    pub work_items_batch_size: u32,

    /// Garbage collection batch size
    #[arg(long, default_value_t = 80)]
    pub gc_batch_size: u32,

    /// NOTIFY/LISTEN channels for database that the worker listen to
    // num_args(1..): at least one channel must follow the flag;
    // repeated values accumulate into the Vec.
    #[arg(long, num_args(1..))]
    pub pg_listen_channels: Vec<String>,

    /// NOTIFY/LISTEN channel for database that the worker notify to
    #[arg(long)]
    pub pg_notify_channel: String,

    /// Polling interval in seconds
    #[arg(long, default_value_t = 60)]
    pub pg_polling_interval: u32,

    /// Postgres pool connections
    #[arg(long, default_value_t = 10)]
    pub pg_pool_connections: u32,

    /// Postgres acquire timeout
    // humantime syntax, e.g. "15s", "2m 30s".
    #[arg(long, default_value = "15s", value_parser = parse_duration)]
    pub pg_timeout: Duration,

    /// Postgres diagnostics: enable auto_explain extension
    // None (flag absent) disables auto_explain entirely.
    #[arg(long, value_parser = parse_duration)]
    pub pg_auto_explain_with_min_duration: Option<Duration>,

    /// Postgres database url. If unspecified DATABASE_URL environment variable
    /// is used
    #[arg(long)]
    pub database_url: Option<DatabaseURL>,

    /// KeySet file. If unspecified the keys are read from the database
    #[arg(long)]
    pub keys_file_path: Option<String>,

    /// sns-executor service name in OTLP traces
    #[arg(long, default_value = "sns-executor")]
    pub service_name: String,

    /// S3 bucket name for ct128 ciphertexts
    /// See also: general purpose buckets naming rules
    #[arg(long, default_value = "ct128")]
    pub bucket_name_ct128: String,

    /// S3 bucket name for ct64 ciphertexts
    /// See also: general purpose buckets naming rules
    #[arg(long, default_value = "ct64")]
    pub bucket_name_ct64: String,

    /// Maximum number of concurrent uploads to S3
    #[arg(long, default_value_t = 100)]
    pub s3_max_concurrent_uploads: u32,

    // Retry budget per individual S3 upload.
    #[arg(long, default_value_t = 100)]
    pub s3_max_retries_per_upload: u32,

    // Upper bound on the backoff delay between S3 retries.
    #[arg(long, default_value = "10s", value_parser = parse_duration)]
    pub s3_max_backoff: Duration,

    // Total wall-clock time allowed for retrying a single upload.
    #[arg(long, default_value = "120s", value_parser = parse_duration)]
    pub s3_max_retries_timeout: Duration,

    // How often to re-check S3 availability while it is known to be down.
    #[arg(long, default_value = "2s", value_parser = parse_duration)]
    pub s3_recheck_duration: Duration,

    // How often to re-check S3 availability during normal operation.
    #[arg(long, default_value = "120s", value_parser = parse_duration)]
    pub s3_regular_recheck_duration: Duration,

    // Period of the background garbage-collection/cleanup task.
    #[arg(long, default_value = "120s", value_parser = parse_duration)]
    pub cleanup_interval: Duration,

    // Minimum tracing level emitted by the worker.
    #[arg(
        long,
        value_parser = clap::value_parser!(Level),
        default_value_t = Level::INFO)]
    pub log_level: Level,

    /// HTTP server port for health checks
    #[arg(long, default_value_t = 8080)]
    pub health_check_port: u16,

    /// Prometheus metrics server address
    // Option with a default: present unless the orchestrator clears it;
    // NOTE(review): as written this can only be overridden, not unset, from
    // the CLI — confirm whether "no metrics server" is a supported config.
    #[arg(long, default_value = "0.0.0.0:9100")]
    pub metrics_addr: Option<String>,

    /// Liveness threshold for health checks
    /// Exceeding this threshold means that the worker is stuck
    /// and will be restarted by the orchestrator
    #[arg(long, default_value = "70s", value_parser = parse_duration)]
    pub liveness_threshold: Duration,

    /// LIFO (Last In, First Out) processing
    /// If true, the worker will process the most recent tasks
    /// if false, default FIFO (First In, First Out) processing is used
    #[arg(long, default_value_t = false)]
    pub lifo: bool,

    /// Enable compression of big ciphertexts before uploading to S3
    // BUG FIX: a plain bool flag gets clap's implicit ArgAction::SetTrue, so
    // with default_value_t = true the option could never be turned off —
    // `--enable-compression` was a no-op and no "false" spelling existed.
    // ArgAction::Set with num_args 0..=1 and default_missing_value keeps the
    // bare `--enable-compression` form working (-> true) while also accepting
    // an explicit `--enable-compression false` to disable it.
    #[arg(
        long,
        default_value_t = true,
        action = clap::ArgAction::Set,
        num_args = 0..=1,
        default_missing_value = "true"
    )]
    pub enable_compression: bool,

    /// Schedule policy for processing tasks
    #[arg(long, default_value = "rayon_parallel", value_parser = clap::value_parser!(SchedulePolicy))]
    pub schedule_policy: SchedulePolicy,

    /// Prometheus metrics: coprocessor_sns_op_latency_seconds
    // Histogram bucket spec "start:end:step" parsed into MetricsConfig.
    #[arg(long, default_value = "0.1:10.0:0.1", value_parser = clap::value_parser!(MetricsConfig))]
    pub metric_sns_op_latency: MetricsConfig,
}
/// Parse the process's command-line arguments into [`Args`] and publish
/// argument-derived global configuration.
///
/// Exits the process (via clap) on invalid arguments or `--help`/`--version`.
pub fn parse_args() -> Args {
    let parsed = Args::parse();
    // Seed the global latency-histogram configuration from the CLI value.
    // `set` fails only if it was already initialized; that is harmless here,
    // so the result is deliberately ignored.
    let _ = SNS_LATENCY_OP_HISTOGRAM_CONF.set(parsed.metric_sns_op_latency);
    parsed
}