Skip to content

Commit b140715

Browse files
committed
Add Camel examples
1 parent 3fd2309 commit b140715

File tree

5 files changed

+277
-36
lines changed

5 files changed

+277
-36
lines changed

.gitignore

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -170,4 +170,8 @@ _build/
170170

171171
logs/
172172

173-
.DS_Store
173+
.DS_Store
174+
175+
# RAG data
176+
local_data/
177+
vim_docs/

crab/agents/backend_models/__init__.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -13,6 +13,7 @@
1313
# =========== Copyright 2024 @ CAMEL-AI.org. All Rights Reserved. ===========
1414
# ruff: noqa: F401
1515
from .camel_model import CamelModel
16+
from .camel_rag_model import CamelRAGModel
1617
from .claude_model import ClaudeModel
1718
from .gemini_model import GeminiModel
1819
from .openai_model import OpenAIModel
Lines changed: 108 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,108 @@
1+
# =========== Copyright 2024 @ CAMEL-AI.org. All Rights Reserved. ===========
2+
# Licensed under the Apache License, Version 2.0 (the "License");
3+
# you may not use this file except in compliance with the License.
4+
# You may obtain a copy of the License at
5+
#
6+
# http://www.apache.org/licenses/LICENSE-2.0
7+
#
8+
# Unless required by applicable law or agreed to in writing, software
9+
# distributed under the License is distributed on an "AS IS" BASIS,
10+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11+
# See the License for the specific language governing permissions and
12+
# limitations under the License.
13+
# =========== Copyright 2024 @ CAMEL-AI.org. All Rights Reserved. ===========
14+
from typing import Any, List, Optional, Tuple
15+
16+
from crab import BackendOutput, MessageType
17+
from crab.agents.backend_models.camel_model import CamelModel
18+
from camel.messages import BaseMessage
19+
20+
try:
21+
from camel.embeddings import OpenAIEmbedding
22+
from camel.retrievers import VectorRetriever
23+
from camel.storages import QdrantStorage
24+
RAG_ENABLED = True
25+
except ImportError:
26+
RAG_ENABLED = False
27+
28+
29+
class CamelRAGModel(CamelModel):
    """CamelModel variant that augments chat requests with retrieved context.

    On construction it wires up an OpenAI embedder, a Qdrant vector store,
    and a camel ``VectorRetriever``; ``chat`` then prepends the top-k
    retrieved snippets (above a similarity threshold) to the message list.
    """

    def __init__(
        self,
        model: str,
        model_platform: str,
        parameters: dict[str, Any] | None = None,
        history_messages_len: int = 0,
        embedding_model: Optional[str] = "text-embedding-3-small",
        collection_name: str = "knowledge_base",
        vector_storage_path: str = "local_data",
        top_k: int = 3,
        similarity_threshold: float = 0.75,
    ) -> None:
        # Fail fast if the optional camel RAG extras are not installed.
        if not RAG_ENABLED:
            raise ImportError(
                "Please install RAG dependencies: "
                "pip install camel-ai[embeddings,retrievers,storages]"
            )

        super().__init__(model, model_platform, parameters, history_messages_len)

        # NOTE(review): embedding_model acts only as an on/off switch here;
        # when truthy, OpenAIEmbedding() is always the concrete embedder.
        self.embedding_model = OpenAIEmbedding() if embedding_model else None

        if self.embedding_model is None:
            # RAG disabled: chat() will pass messages through unchanged.
            self.vector_storage = None
            self.retriever = None
        else:
            self.vector_storage = QdrantStorage(
                vector_dim=self.embedding_model.get_output_dim(),
                path=vector_storage_path,
                collection_name=collection_name,
            )
            self.retriever = VectorRetriever(embedding_model=self.embedding_model)

        self.top_k = top_k
        self.similarity_threshold = similarity_threshold

    def process_documents(self, content_path: str) -> None:
        """Embed and index the document at *content_path* into the vector store.

        Raises:
            ValueError: if the retriever/storage were never initialized.
        """
        if not self.retriever or not self.vector_storage:
            raise ValueError("RAG components not initialized")

        self.retriever.process(
            content=content_path,
            storage=self.vector_storage,
        )

    def _enhance_with_context(self, messages: List[Tuple[str, MessageType]]) -> List[Tuple[str, MessageType]]:
        """Return *messages* with a retrieved-context message prepended.

        Returns the input unchanged when RAG is disabled or nothing
        relevant is retrieved.
        """
        if not self.retriever or not self.vector_storage:
            return messages

        # Use the first non-image message as the retrieval query.
        query = ""
        for text, msg_type in messages:
            if msg_type != MessageType.IMAGE_JPG_BASE64:
                query = text
                break

        retrieved_info = self.retriever.query(
            query=query,
            top_k=self.top_k,
            similarity_threshold=self.similarity_threshold,
        )

        # The retriever signals "no match" via a sentinel text payload.
        if not retrieved_info:
            return messages
        if retrieved_info[0].get('text', '').startswith('No suitable information'):
            return messages

        parts = ["Relevant context:\n\n"]
        for info in retrieved_info:
            parts.append(f"From {info.get('content path', 'unknown')}:\n")
            parts.append(f"{info.get('text', '')}\n\n")
        context = "".join(parts)

        return [(context, MessageType.TEXT)] + list(messages)

    def chat(self, messages: List[Tuple[str, MessageType]]) -> BackendOutput:
        """Chat with the backend after injecting retrieved context."""
        return super().chat(self._enhance_with_context(messages))

examples/camel_basic.py renamed to examples/camel_example.py

Lines changed: 17 additions & 35 deletions
Original file line numberDiff line numberDiff line change
@@ -12,35 +12,15 @@
1212
# limitations under the License.
1313
# =========== Copyright 2024 @ CAMEL-AI.org. All Rights Reserved. ===========
1414
from termcolor import colored
15-
16-
from camel.societies import RolePlaying
17-
from camel.utils import print_text_animated
15+
import os
1816

1917
from crab import Benchmark, create_benchmark
20-
from crab.agents.backend_models import OpenAIModel
18+
from crab.agents.backend_models.camel_model import CamelModel
2119
from crab.agents.policies import SingleAgentPolicy
2220
from crab.benchmarks.template import template_benchmark_config
21+
from camel.types import ModelType, ModelPlatformType
22+
from camel.models import ModelFactory
2323

24-
def camel_task_generator():
25-
task_prompt = "Design a custom game using pygame"
26-
print(colored(f"Original task prompt:\n{task_prompt}\n", "yellow"))
27-
role_play_session = RolePlaying("Computer Programmer", "Gamer", task_prompt=task_prompt)
28-
print(colored(f"Specified task prompt:\n{role_play_session.task_prompt}\n", "cyan"))
29-
30-
chat_turn_limit, n = 50, 0
31-
input_msg = role_play_session.init_chat()
32-
while n < chat_turn_limit:
33-
n += 1
34-
assistant_response, user_response = role_play_session.step(input_msg)
35-
print_text_animated(colored(f"AI User:\n\n{user_response.msg.content}\n", "blue"))
36-
print_text_animated(colored(f"AI Assistant:\n\n{assistant_response.msg.content}\n", "green"))
37-
38-
if "CAMEL_TASK_DONE" in user_response.msg.content:
39-
break
40-
41-
input_msg = assistant_response.msg
42-
43-
return role_play_session.task_prompt
4424

4525
def start_benchmark(benchmark: Benchmark, agent: SingleAgentPolicy):
4626
for step in range(20):
@@ -74,23 +54,25 @@ def start_benchmark(benchmark: Benchmark, agent: SingleAgentPolicy):
7454
print("=" * 40)
7555
print(
7656
colored(
77-
f"Task finished, result: {response.evaluation_results}",
78-
"green"
57+
f"Task finished, result: {response.evaluation_results}", "green"
7958
)
8059
)
8160
return
8261

83-
if __name__ == "__main__":
84-
task_description = camel_task_generator()
8562

63+
if __name__ == "__main__":
8664
benchmark = create_benchmark(template_benchmark_config)
87-
task, action_space = benchmark.start_task("0", task_description)
65+
task, action_space = benchmark.start_task("0")
8866
env_descriptions = benchmark.get_env_descriptions()
8967

90-
model = OpenAIModel(model="gpt-4o", history_messages_len=5)
91-
agent = SingleAgentPolicy(model_backend=model)
92-
agent.reset(task_description, action_space, env_descriptions)
93-
94-
print("Start performing task: " + colored(f'"{task_description}"', "green"))
68+
# TODO: Use local model
69+
camel_model = CamelModel(
70+
model="gpt-4o",
71+
model_platform=ModelPlatformType.OPENAI,
72+
parameters={"temperature": 0.7},
73+
)
74+
agent = SingleAgentPolicy(model_backend=camel_model)
75+
agent.reset(task.description, action_space, env_descriptions)
76+
print("Start performing task: " + colored(f'"{task.description}"', "green"))
9577
start_benchmark(benchmark, agent)
96-
benchmark.reset()
78+
benchmark.reset()

examples/camel_rag_example.py

Lines changed: 146 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,146 @@
1+
# =========== Copyright 2024 @ CAMEL-AI.org. All Rights Reserved. ===========
2+
# Licensed under the Apache License, Version 2.0 (the "License");
3+
# you may not use this file except in compliance with the License.
4+
# You may obtain a copy of the License at
5+
#
6+
# http://www.apache.org/licenses/LICENSE-2.0
7+
#
8+
# Unless required by applicable law or agreed to in writing, software
9+
# distributed under the License is distributed on an "AS IS" BASIS,
10+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11+
# See the License for the specific language governing permissions and
12+
# limitations under the License.
13+
# =========== Copyright 2024 @ CAMEL-AI.org. All Rights Reserved. ===========
14+
from termcolor import colored
15+
import os
16+
import requests
17+
from bs4 import BeautifulSoup
18+
from urllib.parse import urljoin
19+
20+
from crab import Benchmark, create_benchmark
21+
from crab.agents.backend_models.camel_rag_model import CamelRAGModel
22+
from crab.agents.policies import SingleAgentPolicy
23+
from crab.benchmarks.template import template_benchmark_config
24+
from camel.types import ModelType, ModelPlatformType
25+
26+
27+
def start_benchmark(benchmark: Benchmark, agent: SingleAgentPolicy):
    """Drive *agent* through *benchmark* for at most 20 observe/act steps.

    Each step prints the current observation, asks the agent for actions,
    executes them against the benchmark, and returns early as soon as a
    step reports ``terminated``.
    """
    for step in range(20):
        print("=" * 40)
        print(f"Start agent step {step}:")
        observation = benchmark.observe()["template_env"]
        print(f"Current environment observation: {observation}")
        response = agent.chat(
            {
                "template_env": [
                    (f"Current environment observation: {observation}", 0),
                ]
            }
        )
        print(colored(f"Agent take action: {response}", "blue"))

        # The agent may return several actions; execute them in order.
        for action in response:
            response = benchmark.step(
                action=action.name,
                parameters=action.arguments,
                env_name=action.env,
            )
            print(
                colored(
                    f'Action "{action.name}" success, stat: '
                    f"{response.evaluation_results}",
                    "green",
                )
            )
            if response.terminated:
                print("=" * 40)
                print(
                    colored(
                        f"Task finished, result: {response.evaluation_results}",
                        "green"
                    )
                )
                return
65+
66+
def prepare_vim_docs():
    """Prepare Vim documentation for RAG.

    Fetches the Vim user-manual table-of-contents page, extracts every
    relative link, downloads each linked page, strips scripts/styles, and
    writes the plain text to ``vim_docs/<name>.txt`` for later indexing.

    Returns:
        list[str]: paths of the per-page text files that were written
        (the main page's ``main.txt`` is not included, matching the
        original behavior).
    """
    print(colored("Starting Vim documentation preparation...", "yellow"))
    base_url = "https://vimdoc.sourceforge.net/htmldoc/usr_07.html"
    content_dir = "vim_docs"
    os.makedirs(content_dir, exist_ok=True)

    print(colored("Fetching main page...", "yellow"))
    # Timeout keeps the example from hanging forever on a stalled server.
    response = requests.get(base_url, timeout=30)
    soup = BeautifulSoup(response.text, 'html.parser')

    # Process the main page first
    main_content = soup.get_text(separator='\n', strip=True)
    with open(os.path.join(content_dir, "main.txt"), 'w', encoding='utf-8') as f:
        f.write(f"Source: {base_url}\n\n{main_content}")

    # Keep only relative links (skip in-page '#' anchors and absolute URLs),
    # de-duplicated while preserving order so each page is fetched once.
    seen_hrefs = set()
    links = []
    for link in soup.find_all('a'):
        href = link.get('href')
        if href and not href.startswith(('#', 'http')) and href not in seen_hrefs:
            seen_hrefs.add(href)
            links.append(link)
    total_links = len(links)
    print(colored(f"Found {total_links} documentation pages to process", "yellow"))

    processed_files = []
    for idx, link in enumerate(links, 1):
        href = link.get('href')
        full_url = urljoin(base_url, href)
        try:
            print(colored(f"Processing page {idx}/{total_links}: {href}", "yellow"))

            # Fetch and process page
            page_response = requests.get(full_url, timeout=30)
            page_soup = BeautifulSoup(page_response.text, 'html.parser')
            for tag in page_soup(['script', 'style']):
                tag.decompose()
            content = page_soup.get_text(separator='\n', strip=True)

            # Save content; '/' is replaced so each href maps to a flat filename.
            filename = os.path.join(content_dir, f"{href.replace('/', '_')}.txt")
            with open(filename, 'w', encoding='utf-8') as f:
                f.write(f"Source: {full_url}\n\n{content}")
            processed_files.append(filename)
            print(colored(f"✓ Saved {href}", "green"))

        except Exception as e:
            # Best-effort crawl: one failed page must not abort the whole run.
            print(colored(f"✗ Error processing {full_url}: {e}", "red"))

    print(colored("Documentation preparation completed!", "green"))
    return processed_files
113+
114+
115+
if __name__ == "__main__":
    print(colored("=== Starting RAG-enhanced benchmark ===", "cyan"))

    # Initialize benchmark and environment
    print(colored("\nInitializing benchmark environment...", "yellow"))
    benchmark = create_benchmark(template_benchmark_config)
    task, action_space = benchmark.start_task("0")
    env_descriptions = benchmark.get_env_descriptions()

    # Download the Vim docs into ./vim_docs as plain-text files.
    doc_files = prepare_vim_docs()

    print(colored("\nInitializing RAG model...", "yellow"))
    rag_model = CamelRAGModel(
        model="gpt-4o",
        model_platform=ModelPlatformType.OPENAI,
        parameters={"temperature": 0.7}
    )

    print(colored("Processing documents for RAG...", "yellow"))
    # Index every downloaded document into the model's vector store.
    for doc_file in doc_files:
        print(colored(f"Processing {doc_file}...", "yellow"))
        rag_model.process_documents(doc_file)
    print(colored("RAG model initialization complete!", "green"))

    print(colored("\nSetting up agent...", "yellow"))
    agent = SingleAgentPolicy(model_backend=rag_model)
    agent.reset(task.description, action_space, env_descriptions)

    print(colored("\nStarting benchmark execution:", "cyan"))
    print("Start performing task: " + colored(f'"{task.description}"', "green"))
    start_benchmark(benchmark, agent)
    # Release benchmark resources regardless of task outcome.
    benchmark.reset()

0 commit comments

Comments
 (0)