diff --git a/CHANGELOG.md b/CHANGELOG.md
index c2b014e..335f35d 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -4,6 +4,12 @@
 * Replace sqlite3 library with [sqlean](https://antonz.org/sqlean/). It's a drop-in replacement for sqlite3.
 * Add support for `.output` to write the results to a file.
+* The `llm` library is now a default dependency instead of being installed on demand.
+* The `\llm` command now has three modes: Succinct, Regular, and Verbose.
+
+  Succinct = `\llm-` - Returns only the SQL query, with no explanation.
+  Regular = `\llm` - Returns the SQL query along with an explanation.
+  Verbose = `\llm+` - Prints the prompt sent to the LLM, then returns the SQL query and the explanation.
 
 ### Bug Fixes
 
 
diff --git a/litecli/packages/special/llm.py b/litecli/packages/special/llm.py
index 956d659..bc99c45 100644
--- a/litecli/packages/special/llm.py
+++ b/litecli/packages/special/llm.py
@@ -2,34 +2,29 @@
 import io
 import logging
 import os
+import pprint
 import re
 import shlex
 import sys
 from runpy import run_module
-from typing import Optional, Tuple
 from time import time
+from typing import Optional, Tuple
 
 import click
-
-try:
-    import llm
-    from llm.cli import cli
-
-    LLM_CLI_COMMANDS = list(cli.commands.keys())
-    MODELS = {x.model_id: None for x in llm.get_models()}
-except ImportError:
-    llm = None
-    cli = None
-    LLM_CLI_COMMANDS = []
-    MODELS = {}
+import llm
+from llm.cli import cli
 
 from . import export
-from .main import parse_special_command
+from .main import Verbosity, parse_special_command
 
 log = logging.getLogger(__name__)
 
+LLM_TEMPLATE_NAME = "litecli-llm-template"
+LLM_CLI_COMMANDS = list(cli.commands.keys())
+MODELS = {x.model_id: None for x in llm.get_models()}
+
 
-def run_external_cmd(cmd, *args, capture_output=False, restart_cli=False, raise_exception=True):
+def run_external_cmd(cmd, *args, capture_output=False, restart_cli=False, raise_exception=True) -> Tuple[int, str]:
     original_exe = sys.executable
     original_args = sys.argv
 
@@ -55,6 +50,13 @@ def run_external_cmd(cmd, *args, capture_output=False, restart_cli=False, raise_
                 raise RuntimeError(buffer.getvalue())
             else:
                 raise RuntimeError(f"Command {cmd} failed with exit code {code}.")
+    except Exception as e:
+        code = 1
+        if raise_exception:
+            if capture_output:
+                raise RuntimeError(buffer.getvalue())
+            else:
+                raise RuntimeError(f"Command {cmd} failed: {e}")
 
     if restart_cli and code == 0:
         os.execv(original_exe, [original_exe] + original_args)
@@ -153,45 +155,46 @@ def __init__(self, results=None):
 """
 
 _SQL_CODE_FENCE = r"```sql\n(.*?)\n```"
-PROMPT = """A SQLite database has the following schema:
+PROMPT = """
+You are a helpful assistant who is a SQLite expert. You are embedded in a SQLite
+cli tool called litecli.
 
-$db_schema
+Answer this question:
+
+$question
 
-Here is a sample row of data from each table: $sample_data
+Use the following context if it is relevant to answering the question. If the
+question is not about the current database then ignore the context.
 
-Use the provided schema and the sample data to construct a SQL query that
-can be run in SQLite3 to answer
+You are connected to a SQLite database with the following schema:
 
-$question
+$db_schema
+
+Here is a sample row of data from each table:
+
+$sample_data
 
-Explain the reason for choosing each table in the SQL query you have
-written. Keep the explanation concise.
-Finally include a sql query in a code fence such as this one:
+If the answer can be found using a SQL query, include a sql query in a code
+fence such as this one:
 
 ```sql
 SELECT count(*) FROM table_name;
 ```
+Keep your explanation concise and focused on the question asked.
 """
 
 
-def initialize_llm():
-    # Initialize the LLM library.
-    if click.confirm("This feature requires additional libraries. Install LLM library?", default=True):
-        click.echo("Installing LLM library. Please wait...")
-        run_external_cmd("pip", "install", "--quiet", "llm", restart_cli=True)
-
-
 def ensure_litecli_template(replace=False):
     """
     Create a template called litecli with the default prompt.
     """
     if not replace:
         # Check if it already exists.
-        code, _ = run_external_cmd("llm", "templates", "show", "litecli", capture_output=True, raise_exception=False)
+        code, _ = run_external_cmd("llm", "templates", "show", LLM_TEMPLATE_NAME, capture_output=True, raise_exception=False)
         if code == 0:
             # Template already exists. No need to create it.
             return
 
-    run_external_cmd("llm", PROMPT, "--save", "litecli")
+    run_external_cmd("llm", PROMPT, "--save", LLM_TEMPLATE_NAME)
     return
 
@@ -205,12 +208,10 @@ def handle_llm(text, cur) -> Tuple[str, Optional[str], float]:
     FinishIteration() which will be caught by the main loop AND print any
     output that was supplied (or None).
     """
-    _, verbose, arg = parse_special_command(text)
-
-    # LLM is not installed.
-    if llm is None:
-        initialize_llm()
-        raise FinishIteration(None)
+    # Determine invocation mode: regular, verbose (+), or succinct (-)
+    _, mode, arg = parse_special_command(text)
+    is_verbose = mode is Verbosity.VERBOSE
+    is_succinct = mode is Verbosity.SUCCINCT
     if not arg.strip():
         # No question provided. Print usage and bail.
         output = [(None, None, None, USAGE)]
@@ -268,20 +269,23 @@ def handle_llm(text, cur) -> Tuple[str, Optional[str], float]:
             output = [(None, None, None, result)]
             raise FinishIteration(output)
 
-        return result if verbose else "", sql, end - start
+        context = "" if is_succinct else result
+        return context, sql, end - start
     else:
        run_external_cmd("llm", *args, restart_cli=restart)
         raise FinishIteration(None)
 
     try:
         ensure_litecli_template()
-        # Measure end to end llm command invocation.
-        # This measures the internal DB command to pull the schema and llm command
+        # Measure end-to-end LLM command invocation (schema gathering and LLM call)
         start = time()
-        context, sql = sql_using_llm(cur=cur, question=arg, verbose=verbose)
+        result, sql, prompt_text = sql_using_llm(cur=cur, question=arg, verbose=is_verbose)
         end = time()
-        if not verbose:
-            context = ""
+        context = "" if is_succinct else result
+        if is_verbose and prompt_text is not None:
+            click.echo("LLM Prompt:")
+            click.echo(prompt_text)
+            click.echo("---")
         return context, sql, end - start
     except Exception as e:
         # Something went wrong. Raise an exception and bail.
@@ -298,7 +302,7 @@ def is_llm_command(command) -> bool:
 
 
 @export
-def sql_using_llm(cur, question=None, verbose=False) -> Tuple[str, Optional[str]]:
+def sql_using_llm(cur, question=None, verbose=False) -> Tuple[str, Optional[str], Optional[str]]:
     if cur is None:
         raise RuntimeError("Connect to a datbase and try again.")
     schema_query = """
@@ -331,7 +335,7 @@ def sql_using_llm(cur, question=None, verbose=False) -> Tuple[str, Optional[str]
 
     args = [
         "--template",
-        "litecli",
+        LLM_TEMPLATE_NAME,
         "--param",
         "db_schema",
         db_schema,
@@ -347,9 +351,16 @@ def sql_using_llm(cur, question=None, verbose=False) -> Tuple[str, Optional[str]
     _, result = run_external_cmd("llm", *args, capture_output=True)
     click.echo("Received response from the llm command")
     match = re.search(_SQL_CODE_FENCE, result, re.DOTALL)
-    if match:
-        sql = match.group(1).strip()
-    else:
-        sql = ""
-
-    return result, sql
+    sql = match.group(1).strip() if match else ""
+
+    # When verbose, build and return the rendered prompt text
+    prompt_text = None
+    if verbose:
+        # Render the prompt by substituting schema, sample_data, and question
+        prompt_text = PROMPT
+        prompt_text = prompt_text.replace("$db_schema", db_schema)
+        prompt_text = prompt_text.replace("$sample_data", pprint.pformat(sample_data))
+        prompt_text = prompt_text.replace("$question", question or "")
+
+    # prompt_text is None unless verbose, so a single return covers both modes.
+    return result, sql, prompt_text
diff --git a/litecli/packages/special/main.py b/litecli/packages/special/main.py
index 9544811..826b9d6 100644
--- a/litecli/packages/special/main.py
+++ b/litecli/packages/special/main.py
@@ -3,6 +3,7 @@
 from collections import namedtuple
 
 from . import export
+from enum import Enum
 
 log = logging.getLogger(__name__)
 
@@ -36,12 +37,29 @@ class CommandNotFound(Exception):
     pass
 
 
+class Verbosity(Enum):
+    """Invocation verbosity: succinct (-), normal, or verbose (+)."""
+
+    SUCCINCT = "succinct"
+    NORMAL = "normal"
+    VERBOSE = "verbose"
+
+
 @export
 def parse_special_command(sql):
+    """
+    Parse a special command, extracting the base command name, verbosity
+    (normal, verbose (+), or succinct (-)), and the remaining argument.
+    Mirrors mycli's behavior.
+    """
     command, _, arg = sql.partition(" ")
-    verbose = "+" in command
-    command = command.strip().replace("+", "")
-    return (command, verbose, arg.strip())
+    verbosity = Verbosity.NORMAL
+    if "+" in command:
+        verbosity = Verbosity.VERBOSE
+    elif "-" in command:
+        verbosity = Verbosity.SUCCINCT
+    command = command.strip().strip("+-")
+    return (command, verbosity, arg.strip())
 
 
 @export
@@ -101,7 +119,7 @@ def execute(cur, sql):
     """Execute a special command and return the results. If the special command
     is not supported a KeyError will be raised.
""" - command, verbose, arg = parse_special_command(sql) + command, verbosity, arg = parse_special_command(sql) if (command not in COMMANDS) and (command.lower() not in COMMANDS): raise CommandNotFound @@ -116,7 +134,7 @@ def execute(cur, sql): if special_cmd.arg_type == NO_QUERY: return special_cmd.handler() elif special_cmd.arg_type == PARSED_QUERY: - return special_cmd.handler(cur=cur, arg=arg, verbose=verbose) + return special_cmd.handler(cur=cur, arg=arg, verbose=(verbosity == Verbosity.VERBOSE)) elif special_cmd.arg_type == RAW_QUERY: return special_cmd.handler(cur=cur, query=sql) diff --git a/pyproject.toml b/pyproject.toml index 8046986..e2f8eef 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -16,6 +16,7 @@ dependencies = [ "sqlparse>=0.4.4", "setuptools", # Required by llm commands to install models "pip", + "llm>=0.25.0", ] [build-system] diff --git a/tests/conftest.py b/tests/conftest.py index c6524ca..35d79b5 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1,8 +1,10 @@ from __future__ import print_function import os + import pytest from utils import create_db, db_connection, drop_tables + import litecli.sqlexecute diff --git a/tests/test_llm_special.py b/tests/test_llm_special.py index 9de29df..2e48b95 100644 --- a/tests/test_llm_special.py +++ b/tests/test_llm_special.py @@ -3,22 +3,6 @@ from litecli.packages.special.llm import handle_llm, FinishIteration, USAGE -@patch("litecli.packages.special.llm.initialize_llm") -@patch("litecli.packages.special.llm.llm", new=None) -def test_llm_command_without_install(mock_initialize_llm, executor): - """ - Test that handle_llm initializes llm when it is None and raises FinishIteration. - """ - test_text = r"\llm" - cur_mock = executor - - with pytest.raises(FinishIteration) as exc_info: - handle_llm(test_text, cur_mock) - - mock_initialize_llm.assert_called_once() - assert exc_info.value.args[0] is None - - @patch("litecli.packages.special.llm.llm") def test_llm_command_without_args(mock_llm, executor): r""" @@ -61,11 +45,8 @@ def test_llm_command_with_c_flag_and_fenced_sql(mock_run_cmd, mock_llm, executor result, sql, duration = handle_llm(test_text, executor) - # We expect the function to return (result, sql), but result might be "" if verbose is not set - # By default, `verbose` is false unless text has something like \llm --verbose? - # The function code: return result if verbose else "", sql - # Our test_text doesn't set verbose => we expect "" for the returned context. - assert result == "" + # In regular mode, context is returned + assert result == return_text assert sql == "SELECT * FROM table;" assert isinstance(duration, float) @@ -133,7 +114,7 @@ def test_llm_command_with_prompt(mock_sql_using_llm, mock_ensure_template, mock_ Should use context, capture output, and call sql_using_llm. """ # Mock out the return from sql_using_llm - mock_sql_using_llm.return_value = ("context from LLM", "SELECT 1;") + mock_sql_using_llm.return_value = ("context from LLM", "SELECT 1;", None) test_text = r"\llm prompt 'Magic happening here?'" context, sql, duration = handle_llm(test_text, executor) @@ -144,7 +125,7 @@ def test_llm_command_with_prompt(mock_sql_using_llm, mock_ensure_template, mock_ # Actually, the question is the entire "prompt 'Magic happening here?'" minus the \llm # But in the function we do parse shlex.split. 
     mock_sql_using_llm.assert_called()
-    assert context == ""
+    assert context == "context from LLM"
     assert sql == "SELECT 1;"
     assert isinstance(duration, float)
 
@@ -156,14 +137,14 @@ def test_llm_command_question_with_context(mock_sql_using_llm, mock_ensure_templ
     """
     If arg doesn't contain any known command, it's treated as a question => capture output + context.
     """
-    mock_sql_using_llm.return_value = ("You have context!", "SELECT 2;")
+    mock_sql_using_llm.return_value = ("You have context!", "SELECT 2;", None)
 
     test_text = r"\llm 'Top 10 downloads by size.'"
     context, sql, duration = handle_llm(test_text, executor)
 
     mock_ensure_template.assert_called_once()
     mock_sql_using_llm.assert_called()
-    assert context == ""
+    assert context == "You have context!"
     assert sql == "SELECT 2;"
     assert isinstance(duration, float)
 
@@ -175,7 +156,7 @@ def test_llm_command_question_verbose(mock_sql_using_llm, mock_ensure_template,
     r"""
     Invoking \llm+ returns the context and the SQL query.
     """
-    mock_sql_using_llm.return_value = ("Verbose context, oh yeah!", "SELECT 42;")
+    mock_sql_using_llm.return_value = ("Verbose context, oh yeah!", "SELECT 42;", None)
 
     test_text = r"\llm+ 'Top 10 downloads by size.'"
     context, sql, duration = handle_llm(test_text, executor)
diff --git a/tests/test_special_iocommands.py b/tests/test_special_iocommands.py
index ec60163..b017666 100644
--- a/tests/test_special_iocommands.py
+++ b/tests/test_special_iocommands.py
@@ -4,6 +4,7 @@
 import pytest
 
 import litecli.packages.special
+from litecli.packages.special.main import parse_special_command, Verbosity
 
 
 def test_once_command():
@@ -57,3 +58,32 @@ def test_pipe_once_command():
         litecli.packages.special.unset_pipe_once_if_written()
         f.seek(0)
         assert f.read() == b"hello world\n"
+
+
+@pytest.mark.parametrize(
+    "sql,expected",
+    [
+        (r"\d table_name", ("\\d", Verbosity.NORMAL, "table_name")),
+        (r"\d+ table_name", ("\\d", Verbosity.VERBOSE, "table_name")),
+        (r"\?", ("\\?", Verbosity.NORMAL, "")),
+        (r"\llm Question", ("\\llm", Verbosity.NORMAL, "Question")),
+        (r"\llm-", ("\\llm", Verbosity.SUCCINCT, "")),
+        (r"\llm+", ("\\llm", Verbosity.VERBOSE, "")),
+    ],
+)
+def test_parse_special_command(sql, expected):
+    """
+    Ensure parse_special_command correctly splits the command and mode.
+    """
+    result = parse_special_command(sql)
+    assert result == expected
+
+
+def test_parse_special_command_edge_cases():
+    # mycli-compatible behavior: no ValueError on special characters; it parses leniently.
+    sql = r"\llm* Question"
+    assert parse_special_command(sql) == ("\\llm*", Verbosity.NORMAL, "Question")
+
+    sql = r"\llm+- Question"
+    # '+' in command sets verbosity; strip('+-') removes both suffixes
+    assert parse_special_command(sql) == ("\\llm", Verbosity.VERBOSE, "Question")
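For reviewers, here is a minimal sketch (not part of the patch) of how the new modes surface through `parse_special_command`, assuming litecli from this branch is importable. Only `Verbosity` and `parse_special_command` come from the diff above; the question text is taken from the tests and is purely illustrative.

```python
# Sketch: exercise the three \llm invocation modes introduced by this patch.
# Assumes the patched litecli.packages.special.main is on the import path.
from litecli.packages.special.main import Verbosity, parse_special_command

for text in (
    r"\llm- Top 10 downloads by size.",
    r"\llm Top 10 downloads by size.",
    r"\llm+ Top 10 downloads by size.",
):
    command, verbosity, arg = parse_special_command(text)
    # handle_llm() then maps the mode to output:
    #   SUCCINCT -> SQL only, no explanation
    #   NORMAL   -> SQL plus the LLM's explanation
    #   VERBOSE  -> also echoes the rendered prompt before the result
    print(command, verbosity, repr(arg))

# Expected output:
#   \llm Verbosity.SUCCINCT 'Top 10 downloads by size.'
#   \llm Verbosity.NORMAL 'Top 10 downloads by size.'
#   \llm Verbosity.VERBOSE 'Top 10 downloads by size.'
```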