Skip to content

Commit 659d211

Browse files
committed
feat(prompts): attach prompts to storage stores in run configs
1 parent 658fb2c commit 659d211

File tree

23 files changed

+66
-8
lines changed

23 files changed

+66
-8
lines changed

benchmarking/k8s-benchmark/stack_run_config.yaml

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -115,6 +115,9 @@ storage:
115115
conversations:
116116
table_name: openai_conversations
117117
backend: sql_default
118+
prompts:
119+
namespace: prompts
120+
backend: kv_default
118121
registered_resources:
119122
models:
120123
- metadata:

docs/docs/distributions/k8s/stack_run_config.yaml

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -113,6 +113,9 @@ storage:
113113
conversations:
114114
table_name: openai_conversations
115115
backend: sql_default
116+
prompts:
117+
namespace: prompts
118+
backend: kv_default
116119
registered_resources:
117120
models:
118121
- metadata:

llama_stack/core/datatypes.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -582,6 +582,7 @@ def _ensure_backend(reference, expected_set, store_name: str) -> None:
582582
_ensure_backend(stores.inference, sql_backends, "storage.stores.inference")
583583
_ensure_backend(stores.conversations, sql_backends, "storage.stores.conversations")
584584
_ensure_backend(stores.responses, sql_backends, "storage.stores.responses")
585+
_ensure_backend(stores.prompts, kv_backends, "storage.stores.prompts")
585586
return self
586587

587588

llama_stack/core/prompts/prompts.py

Lines changed: 4 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -11,7 +11,6 @@
1111

1212
from llama_stack.apis.prompts import ListPromptsResponse, Prompt, Prompts
1313
from llama_stack.core.datatypes import StackRunConfig
14-
from llama_stack.core.storage.datatypes import KVStoreReference
1514
from llama_stack.providers.utils.kvstore import KVStore, kvstore_impl
1615

1716

@@ -40,11 +39,10 @@ def __init__(self, config: PromptServiceConfig, deps: dict[Any, Any]):
4039
self.kvstore: KVStore
4140

4241
async def initialize(self) -> None:
43-
# Use metadata store backend with prompts-specific namespace
44-
metadata_ref = self.config.run_config.storage.stores.metadata
45-
if not metadata_ref:
46-
raise ValueError("storage.stores.metadata must be configured in run config")
47-
prompts_ref = KVStoreReference(namespace="prompts", backend=metadata_ref.backend)
42+
# Use prompts store reference from run config
43+
prompts_ref = self.config.run_config.storage.stores.prompts
44+
if not prompts_ref:
45+
raise ValueError("storage.stores.prompts must be configured in run config")
4846
self.kvstore = await kvstore_impl(prompts_ref)
4947

5048
def _get_default_key(self, prompt_id: str) -> str:

llama_stack/core/stack.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -540,6 +540,7 @@ def run_config_from_adhoc_config_spec(
540540
metadata=KVStoreReference(backend="kv_default", namespace="registry"),
541541
inference=InferenceStoreReference(backend="sql_default", table_name="inference_store"),
542542
conversations=SqlStoreReference(backend="sql_default", table_name="openai_conversations"),
543+
prompts=KVStoreReference(backend="kv_default", namespace="prompts"),
543544
),
544545
),
545546
)

llama_stack/core/storage/datatypes.py

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -271,6 +271,10 @@ class ServerStoresConfig(BaseModel):
271271
default=None,
272272
description="Responses store configuration (uses SQL backend)",
273273
)
274+
prompts: KVStoreReference | None = Field(
275+
default=None,
276+
description="Prompts store configuration (uses KV backend)",
277+
)
274278

275279

276280
class StorageConfig(BaseModel):

llama_stack/distributions/ci-tests/run.yaml

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -247,6 +247,9 @@ storage:
247247
conversations:
248248
table_name: openai_conversations
249249
backend: sql_default
250+
prompts:
251+
namespace: prompts
252+
backend: kv_default
250253
registered_resources:
251254
models: []
252255
shields:

llama_stack/distributions/dell/run-with-safety.yaml

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -109,6 +109,9 @@ storage:
109109
conversations:
110110
table_name: openai_conversations
111111
backend: sql_default
112+
prompts:
113+
namespace: prompts
114+
backend: kv_default
112115
registered_resources:
113116
models:
114117
- metadata: {}

llama_stack/distributions/dell/run.yaml

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -105,6 +105,9 @@ storage:
105105
conversations:
106106
table_name: openai_conversations
107107
backend: sql_default
108+
prompts:
109+
namespace: prompts
110+
backend: kv_default
108111
registered_resources:
109112
models:
110113
- metadata: {}

llama_stack/distributions/meta-reference-gpu/run-with-safety.yaml

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -122,6 +122,9 @@ storage:
122122
conversations:
123123
table_name: openai_conversations
124124
backend: sql_default
125+
prompts:
126+
namespace: prompts
127+
backend: kv_default
125128
registered_resources:
126129
models:
127130
- metadata: {}

0 commit comments

Comments (0)