
Commit 1f0afff

refactor: error handling
1 parent 1f70756 commit 1f0afff

5 files changed (+48, -23 lines)


Cargo.lock

Lines changed: 1 addition & 0 deletions
Some generated files are not rendered by default.

crates/lsp-ai/Cargo.toml

Lines changed: 5 additions & 2 deletions
@@ -33,13 +33,16 @@ tokio = { version = "1.36.0", features = ["rt-multi-thread", "time"] }
 indexmap = "2.2.5"
 async-trait = "0.1.78"
 tree-sitter = "0.22"
-utils-tree-sitter = { path = "../utils-tree-sitter", features = ["all"], version = "0.1.0" }
+utils-tree-sitter = { path = "../utils-tree-sitter", features = [
+    "all",
+], version = "0.1.0" }
 splitter-tree-sitter = { path = "../splitter-tree-sitter", version = "0.1.0" }
 text-splitter = { version = "0.13.3" }
 md5 = "0.7.0"
+thiserror = "1"

 [build-dependencies]
-cc="*"
+cc = "*"

 [features]
 default = []
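
The only semantic dependency change here is the new thiserror = "1" entry; the utils-tree-sitter and cc edits are formatting only. A minimal sketch of what the derive macro provides, assuming thiserror 1.x, in the same shape config.rs uses it below:

// Minimal sketch, assuming thiserror 1.x: the derive generates a
// std::fmt::Display impl from each #[error("...")] attribute and a
// From impl for every field marked #[from].
use thiserror::Error;

#[derive(Error, Debug)]
enum SketchError {
    #[error("bad input: {0}")]
    BadInput(String),
    #[error("serde json error: {0}")]
    SerdeJson(#[from] serde_json::Error),
}

fn main() {
    let e = SketchError::BadInput("oops".into());
    assert_eq!(e.to_string(), "bad input: oops");
}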

crates/lsp-ai/src/config.rs

Lines changed: 27 additions & 19 deletions
@@ -1,8 +1,21 @@
-use anyhow::{Context, Result};
 use serde::{Deserialize, Serialize};
 use serde_json::Value;
 use std::collections::HashMap;

+#[derive(thiserror::Error, Debug)]
+pub(crate) enum ConfigError {
+    #[error("completion is disabled")]
+    CompletionDisabled,
+    #[error("`{0}` model not found in `models` config")]
+    ModelNotFound(String),
+    #[error("lsp-ai does not currently provide a default configuration. Please pass a configuration. See https://github.com/SilasMarvin/lsp-ai for configuration options and examples")]
+    NoDefaultConfig,
+    #[error("server configuration must be a valid JSON object")]
+    NotJson,
+    #[error("serde json error: {0}")]
+    SerdeJson(#[from] serde_json::Error),
+}
+
 pub(crate) type Kwargs = HashMap<String, Value>;

 const fn max_requests_per_second_default() -> f32 {
@@ -315,15 +328,15 @@ pub struct Config {
 }

 impl Config {
-    pub fn new(mut args: Value) -> Result<Self> {
+    pub fn new(mut args: Value) -> Result<Self, ConfigError> {
         // Validate that the models specified are there so we can unwrap
         let configuration_args = args
             .as_object_mut()
-            .context("Server configuration must be a JSON object")?
+            .ok_or(ConfigError::NotJson)?
             .remove("initializationOptions");
         let valid_args = match configuration_args {
             Some(configuration_args) => serde_json::from_value(configuration_args)?,
-            None => anyhow::bail!("lsp-ai does not currently provide a default configuration. Please pass a configuration. See https://github.com/SilasMarvin/lsp-ai for configuration options and examples"),
+            None => return Err(ConfigError::NoDefaultConfig),
         };
         let client_params: ValidClientParams = serde_json::from_value(args)?;
         Ok(Self {
@@ -344,24 +357,19 @@ impl Config {
         self.config.completion.as_ref().map(|x| &x.post_process)
     }

-    pub fn get_completion_transformer_max_requests_per_second(&self) -> anyhow::Result<f32> {
+    pub fn get_completion_transformer_max_requests_per_second(&self) -> Result<f32, ConfigError> {
+        let completion_model = &self
+            .config
+            .completion
+            .as_ref()
+            .ok_or(ConfigError::CompletionDisabled)?
+            .model;
         match &self
             .config
             .models
-            .get(
-                &self
-                    .config
-                    .completion
-                    .as_ref()
-                    .context("Completions is not enabled")?
-                    .model,
-            )
-            .with_context(|| {
-                format!(
-                    "`{}` model not found in `models` config",
-                    &self.config.completion.as_ref().unwrap().model
-                )
-            })? {
+            .get(completion_model)
+            .ok_or_else(|| ConfigError::ModelNotFound(completion_model.to_owned()))?
+        {
             #[cfg(feature = "llama_cpp")]
             ValidModel::LLaMACPP(llama_cpp) => Ok(llama_cpp.max_requests_per_second),
             ValidModel::OpenAI(open_ai) => Ok(open_ai.max_requests_per_second),
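
With these changes, Config::new returns the typed ConfigError instead of anyhow::Error, so callers can match on individual failure modes rather than inspecting error strings. A hypothetical caller sketch, assuming only the types in the diff above:

// Hypothetical caller sketch: Config::new now surfaces typed variants
// instead of stringly-typed anyhow contexts.
match Config::new(serde_json::json!(null)) {
    Err(ConfigError::NotJson) => eprintln!("pass a JSON object"),
    Err(ConfigError::NoDefaultConfig) => eprintln!("pass initializationOptions"),
    Err(e) => eprintln!("config error: {e}"),
    Ok(config) => { /* use config */ }
}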

crates/lsp-ai/src/error.rs

Lines changed: 13 additions & 0 deletions
@@ -0,0 +1,13 @@
+use crate::config::ConfigError;
+
+#[derive(thiserror::Error, Debug)]
+pub(crate) enum Error {
+    #[error("config error: {0}")]
+    Config(#[from] ConfigError),
+    #[error("io error: {0}")]
+    Io(#[from] std::io::Error),
+    #[error("serde json error: {0}")]
+    SerdeJson(#[from] serde_json::Error),
+}
+
+pub(crate) type Result<T> = std::result::Result<T, Error>;
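
The #[from] attributes let the ? operator convert each wrapped error into the crate-wide Error automatically, and the Result<T> alias keeps signatures short. A hypothetical helper sketch, assuming only the types above (load_config is not part of this commit):

// Hypothetical sketch: `?` converts io::Error, serde_json::Error, and
// ConfigError into crate::error::Error via the generated From impls.
use crate::error::Result;

fn load_config(path: &str) -> Result<crate::config::Config> {
    let raw = std::fs::read_to_string(path)?; // io::Error -> Error::Io
    let value: serde_json::Value = serde_json::from_str(&raw)?; // -> Error::SerdeJson
    Ok(crate::config::Config::new(value)?) // ConfigError -> Error::Config
}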

crates/lsp-ai/src/main.rs

Lines changed: 2 additions & 2 deletions
@@ -1,5 +1,3 @@
-use anyhow::Result;
-
 use lsp_server::{Connection, ExtractError, Message, Notification, Request, RequestId};
 use lsp_types::{
     request::Completion, CompletionOptions, DidChangeTextDocumentParams, DidOpenTextDocumentParams,
@@ -16,6 +14,7 @@ use tracing_subscriber::{EnvFilter, FmtSubscriber};
 mod config;
 mod crawl;
 mod custom_requests;
+mod error;
 mod memory_backends;
 mod memory_worker;
 mod splitters;
@@ -27,6 +26,7 @@ mod utils;

 use config::Config;
 use custom_requests::generation::Generation;
+use error::Result;
 use memory_backends::MemoryBackend;
 use transformer_backends::TransformerBackend;
 use transformer_worker::{CompletionRequest, GenerationRequest, WorkerRequest};
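
Because error::Result<T> fixes the error type the same way anyhow::Result<T> did, the swap is drop-in for existing signatures in main.rs. A hedged illustration with a hypothetical handler (not part of this commit):

// Hypothetical handler: only the Result alias behind the signature changed.
fn handle_request(req: Request) -> Result<Option<Message>> {
    let _ = req; // dispatch would go here
    Ok(None)
}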
