Commit adccfd1: add --file flag

aantn committed Jun 20, 2024 (1 parent: 0a30aea)
Showing 3 changed files with 24 additions and 2 deletions.
README.md: 11 additions & 0 deletions

```diff
@@ -41,6 +41,17 @@ holmes investigate alertmanager --alertmanager-url http://localhost:9093
 Note - if on Mac OS and using the Docker image, you will need to use `http://docker.for.mac.localhost:9093` instead of `http://localhost:9093`
 </details>
 
+<details>
+<summary>Investigate a Local Log File</summary>
+
+Attach files to the HolmesGPT session with `-f`:
+
+```console
+sudo dmesg > dmesg.log
+poetry run python3 holmes.py ask "investigate errors in this dmesg log" -f dmesg.log
+```
+</details>
+
 <details>
 
 <summary>Investigate and update Jira tickets with findings</summary>
```
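As the new help text notes, `-f` can be given multiple times. A hypothetical invocation attaching two files (the file names are illustrative, not from the commit) would look like:

```console
sudo dmesg > dmesg.log
journalctl -k > kernel.log
poetry run python3 holmes.py ask "correlate errors across these logs" -f dmesg.log -f kernel.log
```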
holmes.py: 12 additions & 2 deletions

```diff
@@ -5,8 +5,7 @@
 import re
 import warnings
 from pathlib import Path
-from typing import List, Optional, Pattern
-import json
+from typing import List, Optional
 import typer
 from rich.console import Console
 from rich.logging import RichHandler
```
```diff
@@ -135,6 +134,12 @@ def ask(
         "--show-tool-output",
         help="Advanced. Show the output of each tool that was called",
     ),
+    include_file: Optional[List[Path]] = typer.Option(
+        [],
+        "--file",
+        "-f",
+        help="File to append to prompt (can specify -f multiple times to add multiple files)",
+    ),
     json_output_file: Optional[str] = opt_json_output_file
 ):
     """
```
```diff
@@ -153,6 +158,11 @@
     system_prompt = load_prompt(system_prompt)
     ai = config.create_toolcalling_llm(console, allowed_toolsets)
     console.print("[bold yellow]User:[/bold yellow] " + prompt)
+    for path in include_file:
+        f = path.open("r")
+        prompt += f"\n\nAttached file '{path.absolute()}':\n{f.read()}"
+        console.print(f"[bold yellow]Loading file {path}[/bold yellow]")
+
     response = ai.call(system_prompt, prompt)
     text_result = Markdown(response.result)
     if json_output_file:
```
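One side note on the loop above: `path.open("r")` acquires a file handle that is never closed. A minimal sketch of the same logic using `Path.read_text()`, which opens and closes the file in one call (the helper name is hypothetical, not from the commit):

```python
from pathlib import Path
from typing import List

def append_files_to_prompt(prompt: str, include_file: List[Path]) -> str:
    """Append each attached file's contents to the prompt, as --file does."""
    for path in include_file:
        # read_text() opens, reads, and closes the file, so no handle is leaked
        prompt += f"\n\nAttached file '{path.absolute()}':\n{path.read_text()}"
    return prompt
```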
holmes/core/tool_calling_llm.py: 1 addition & 0 deletions

```diff
@@ -65,6 +65,7 @@ def call(self, system_prompt, user_prompt) -> LLMResult:
             # on the last step we don't allow tools - we want to force a reply, not a request to run another tool
             tools = NOT_GIVEN if i == self.max_steps - 1 else tools
             tool_choice = NOT_GIVEN if tools == NOT_GIVEN else "auto"
+            logging.debug(f"sending messages {messages}")
             try:
                 full_response = self.client.chat.completions.create(
                     model=self.model,
```
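The new `logging.debug` call is only visible when debug-level logging is enabled. A generic way to surface it when experimenting (standard-library setup, not code from this repo):

```python
import logging

# emit DEBUG records, including the new "sending messages ..." line
logging.basicConfig(level=logging.DEBUG)
```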
